xref: /qemu/hw/virtio/vhost-vdpa.c (revision 3312e6c8c9aa8f32019f14c74d209db17b9306eb)
1 /*
2  * vhost-vdpa
3  *
4  *  Copyright(c) 2017-2018 Intel Corporation.
5  *  Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include <linux/vhost.h>
14 #include <linux/vfio.h>
15 #include <sys/eventfd.h>
16 #include <sys/ioctl.h>
17 #include "exec/target_page.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "system/address-spaces.h"
24 #include "migration/blocker.h"
25 #include "qemu/cutils.h"
26 #include "qemu/main-loop.h"
27 #include "trace.h"
28 #include "qapi/error.h"
29 
30 /*
31  * Return one past the end of the section. Be careful with uint64_t
32  * conversions!
33  */
34 static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section,
35                                      int page_mask)
36 {
37     Int128 llend = int128_make64(section->offset_within_address_space);
38     llend = int128_add(llend, section->size);
39     llend = int128_and(llend, int128_exts64(page_mask));
40 
41     return llend;
42 }
43 
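/*
 * Return true if the section must not be mapped by vhost-vdpa: non-RAM,
 * non-IOMMU regions, protected memory and RAM devices (MMIO) are skipped, as
 * are sections that start below the device's minimum IOVA or, for non-IOMMU
 * sections, end beyond its maximum.
 */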
44 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
45                                                 uint64_t iova_min,
46                                                 uint64_t iova_max,
47                                                 int page_mask)
48 {
49     Int128 llend;
50     bool is_ram = memory_region_is_ram(section->mr);
51     bool is_iommu = memory_region_is_iommu(section->mr);
52     bool is_protected = memory_region_is_protected(section->mr);
53 
54     /* vhost-vDPA doesn't allow MMIO to be mapped */
55     bool is_ram_device = memory_region_is_ram_device(section->mr);
56 
57     if ((!is_ram && !is_iommu) || is_protected || is_ram_device) {
58         trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected,
59                                                 is_ram_device, iova_min,
60                                                 iova_max, page_mask);
61         return true;
62     }
63 
64     if (section->offset_within_address_space < iova_min) {
65         error_report("RAM section out of device range (min=0x%" PRIx64
66                      ", addr=0x%" HWADDR_PRIx ")",
67                      iova_min, section->offset_within_address_space);
68         return true;
69     }
70     /*
71      * While using a vIOMMU, the section can be larger than iova_max while the
72      * memory that is actually mapped is smaller, so the check is deferred to
73      * vhost_vdpa_iommu_map_notify(). That function uses the actual size that
74      * is mapped to the kernel.
75      */
76 
77     if (!is_iommu) {
78         llend = vhost_vdpa_section_end(section, page_mask);
79         if (int128_gt(llend, int128_make64(iova_max))) {
80             error_report("RAM section out of device range (max=0x%" PRIx64
81                          ", end addr=0x%" PRIx64 ")",
82                          iova_max, int128_get64(llend));
83             return true;
84         }
85     }
86 
87     return false;
88 }
89 
90 /*
91  * The caller must set asid = 0 if the device does not support asid.
92  * This is not an ABI break since it is set to 0 by the initializer anyway.
93  */
94 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
95                        hwaddr size, void *vaddr, bool readonly)
96 {
97     struct vhost_msg_v2 msg = {};
98     int fd = s->device_fd;
99     int ret = 0;
100 
101     msg.type = VHOST_IOTLB_MSG_V2;
102     msg.asid = asid;
103     msg.iotlb.iova = iova;
104     msg.iotlb.size = size;
105     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
106     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
107     msg.iotlb.type = VHOST_IOTLB_UPDATE;
108 
109     trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
110                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
111                              msg.iotlb.type);
112 
113     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
114         error_report("failed to write, fd=%d, errno=%d (%s)",
115             fd, errno, strerror(errno));
116         return -EIO;
117     }
118 
119     return ret;
120 }
121 
122 /*
123  * The caller must set asid = 0 if the device does not support asid.
124  * This is not an ABI break since it is set to 0 by the initializer anyway.
125  */
126 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
127                          hwaddr size)
128 {
129     struct vhost_msg_v2 msg = {};
130     int fd = s->device_fd;
131     int ret = 0;
132 
133     msg.type = VHOST_IOTLB_MSG_V2;
134     msg.asid = asid;
135     msg.iotlb.iova = iova;
136     msg.iotlb.size = size;
137     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
138 
139     trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
140                                msg.iotlb.size, msg.iotlb.type);
141 
142     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
143         error_report("failed to write, fd=%d, errno=%d (%s)",
144             fd, errno, strerror(errno));
145         return -EIO;
146     }
147 
148     return ret;
149 }
150 
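/*
 * Send VHOST_IOTLB_BATCH_BEGIN so that the following map/unmap messages are
 * applied by the kernel as a single batch.
 */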
151 static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
152 {
153     int fd = s->device_fd;
154     struct vhost_msg_v2 msg = {
155         .type = VHOST_IOTLB_MSG_V2,
156         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
157     };
158 
159     trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
160     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
161         error_report("failed to write, fd=%d, errno=%d (%s)",
162                      fd, errno, strerror(errno));
163     }
164 }
165 
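/*
 * Start an IOTLB batch, but only once per batch and only if the backend
 * advertises VHOST_BACKEND_F_IOTLB_BATCH.
 */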
166 static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
167 {
168     if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
169         !s->iotlb_batch_begin_sent) {
170         vhost_vdpa_listener_begin_batch(s);
171     }
172 
173     s->iotlb_batch_begin_sent = true;
174 }
175 
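/*
 * Memory listener commit callback: close the current IOTLB batch with
 * VHOST_IOTLB_BATCH_END if one was opened.
 */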
176 static void vhost_vdpa_listener_commit(MemoryListener *listener)
177 {
178     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
179     struct vhost_msg_v2 msg = {};
180     int fd = s->device_fd;
181 
182     if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
183         return;
184     }
185 
186     if (!s->iotlb_batch_begin_sent) {
187         return;
188     }
189 
190     msg.type = VHOST_IOTLB_MSG_V2;
191     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
192 
193     trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type);
194     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
195         error_report("failed to write, fd=%d, errno=%d (%s)",
196                      fd, errno, strerror(errno));
197     }
198 
199     s->iotlb_batch_begin_sent = false;
200 }
201 
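/*
 * vIOMMU notifier: propagate guest IOTLB changes to the vhost-vdpa device,
 * mapping the entry when it grants access and unmapping it otherwise.
 */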
202 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
203 {
204     struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
205 
206     hwaddr iova = iotlb->iova + iommu->iommu_offset;
207     VhostVDPAShared *s = iommu->dev_shared;
208     void *vaddr;
209     int ret;
210     Int128 llend;
211     Error *local_err = NULL;
212 
213     if (iotlb->target_as != &address_space_memory) {
214         error_report("Wrong target AS \"%s\", only system memory is allowed",
215                      iotlb->target_as->name ? iotlb->target_as->name : "none");
216         return;
217     }
218     RCU_READ_LOCK_GUARD();
219     /* Check if the RAM section is out of the device IOVA range */
220     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
221     if (int128_gt(llend, int128_make64(s->iova_range.last))) {
222         error_report("RAM section out of device range (max=0x%" PRIx64
223                      ", end addr=0x%" PRIx64 ")",
224                      s->iova_range.last, int128_get64(llend));
225         return;
226     }
227 
228     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
229         bool read_only;
230 
231         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
232                                   &local_err)) {
233             error_report_err(local_err);
234             return;
235         }
236         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
237                                  iotlb->addr_mask + 1, vaddr, read_only);
238         if (ret) {
239             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
240                          "0x%" HWADDR_PRIx ", %p) = %d (%m)",
241                          s, iova, iotlb->addr_mask + 1, vaddr, ret);
242         }
243     } else {
244         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
245                                    iotlb->addr_mask + 1);
246         if (ret) {
247             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
248                          "0x%" HWADDR_PRIx ") = %d (%m)",
249                          s, iova, iotlb->addr_mask + 1, ret);
250         }
251     }
252 }
253 
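/*
 * Register an IOMMU notifier for the section and replay the existing
 * mappings so the device sees the current vIOMMU state.
 */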
254 static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
255                                         MemoryRegionSection *section)
256 {
257     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
258 
259     struct vdpa_iommu *iommu;
260     Int128 end;
261     int iommu_idx;
262     IOMMUMemoryRegion *iommu_mr;
263     int ret;
264 
265     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
266 
267     iommu = g_malloc0(sizeof(*iommu));
268     end = int128_add(int128_make64(section->offset_within_region),
269                      section->size);
270     end = int128_sub(end, int128_one());
271     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
272                                                    MEMTXATTRS_UNSPECIFIED);
273     iommu->iommu_mr = iommu_mr;
274     iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
275                         IOMMU_NOTIFIER_IOTLB_EVENTS,
276                         section->offset_within_region,
277                         int128_get64(end),
278                         iommu_idx);
279     iommu->iommu_offset = section->offset_within_address_space -
280                           section->offset_within_region;
281     iommu->dev_shared = s;
282 
283     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
284     if (ret) {
285         g_free(iommu);
286         return;
287     }
288 
289     QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
290     memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
291 }
292 
293 static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
294                                         MemoryRegionSection *section)
295 {
296     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
297 
298     struct vdpa_iommu *iommu;
299 
300     QLIST_FOREACH(iommu, &s->iommu_list, iommu_next)
301     {
302         if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
303             iommu->n.start == section->offset_within_region) {
304             memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
305             QLIST_REMOVE(iommu, iommu_next);
306             g_free(iommu);
307             break;
308         }
309     }
310 }
311 
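/*
 * Map a RAM section into the device's IOVA space. When shadow virtqueues
 * translate guest addresses (shadow_data), an IOVA is first allocated from
 * the IOVA tree and used instead of the guest physical address.
 */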
312 static void vhost_vdpa_listener_region_add(MemoryListener *listener,
313                                            MemoryRegionSection *section)
314 {
315     DMAMap mem_region = {};
316     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
317     hwaddr iova;
318     Int128 llend, llsize;
319     void *vaddr;
320     int ret;
321     int page_size = qemu_target_page_size();
322     int page_mask = -page_size;
323 
324     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
325                                             s->iova_range.last, page_mask)) {
326         return;
327     }
328     if (memory_region_is_iommu(section->mr)) {
329         vhost_vdpa_iommu_region_add(listener, section);
330         return;
331     }
332 
333     if (unlikely((section->offset_within_address_space & ~page_mask) !=
334                  (section->offset_within_region & ~page_mask))) {
335         trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name,
336                        section->offset_within_address_space & ~page_mask,
337                        section->offset_within_region & ~page_mask);
338         return;
339     }
340 
341     iova = ROUND_UP(section->offset_within_address_space, page_size);
342     llend = vhost_vdpa_section_end(section, page_mask);
343     if (int128_ge(int128_make64(iova), llend)) {
344         return;
345     }
346 
347     memory_region_ref(section->mr);
348 
349     /* Here we assume that memory_region_is_ram(section->mr)==true */
350 
351     vaddr = memory_region_get_ram_ptr(section->mr) +
352             section->offset_within_region +
353             (iova - section->offset_within_address_space);
354 
355     trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend),
356                                          vaddr, section->readonly);
357 
358     llsize = int128_sub(llend, int128_make64(iova));
359     if (s->shadow_data) {
360         int r;
361         hwaddr gpa = section->offset_within_address_space;
362 
363         mem_region.size = int128_get64(llsize) - 1;
364         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
365 
366         r = vhost_iova_tree_map_alloc_gpa(s->iova_tree, &mem_region, gpa);
367         if (unlikely(r != IOVA_OK)) {
368             error_report("Can't allocate a mapping (%d)", r);
369 
370             if (mem_region.translated_addr == gpa) {
371                 error_report("Insertion to GPA->IOVA tree failed");
372                 /* Remove the mapping from the IOVA-only tree */
373                 goto fail_map;
374             }
375             goto fail;
376         }
377 
378         iova = mem_region.iova;
379     }
380 
381     vhost_vdpa_iotlb_batch_begin_once(s);
382     ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
383                              int128_get64(llsize), vaddr, section->readonly);
384     if (ret) {
385         error_report("vhost vdpa map fail!");
386         goto fail_map;
387     }
388 
389     return;
390 
391 fail_map:
392     if (s->shadow_data) {
393         vhost_iova_tree_remove_gpa(s->iova_tree, mem_region);
394     }
395 
396 fail:
397     /*
398      * On the initfn path, store the first error in the container so we
399      * can gracefully fail.  At runtime, there's not much we can do other
400      * than report a hardware error.
401      */
402     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
403     return;
404 
405 }
406 
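/*
 * Counterpart of vhost_vdpa_listener_region_add: unmap the section from the
 * device, splitting the request in two when it would cover a full 64-bit
 * range, which the unmap ioctl does not accept.
 */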
407 static void vhost_vdpa_listener_region_del(MemoryListener *listener,
408                                            MemoryRegionSection *section)
409 {
410     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
411     hwaddr iova;
412     Int128 llend, llsize;
413     int ret;
414     int page_size = qemu_target_page_size();
415     int page_mask = -page_size;
416 
417     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
418                                             s->iova_range.last, page_mask)) {
419         return;
420     }
421     if (memory_region_is_iommu(section->mr)) {
422         vhost_vdpa_iommu_region_del(listener, section);
423     }
424 
425     if (unlikely((section->offset_within_address_space & ~page_mask) !=
426                  (section->offset_within_region & ~page_mask))) {
427         trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name,
428                        section->offset_within_address_space & ~page_mask,
429                        section->offset_within_region & ~page_mask);
430         return;
431     }
432 
433     iova = ROUND_UP(section->offset_within_address_space, page_size);
434     llend = vhost_vdpa_section_end(section, page_mask);
435 
436     trace_vhost_vdpa_listener_region_del(s, iova,
437         int128_get64(int128_sub(llend, int128_one())));
438 
439     if (int128_ge(int128_make64(iova), llend)) {
440         return;
441     }
442 
443     llsize = int128_sub(llend, int128_make64(iova));
444 
445     if (s->shadow_data) {
446         const DMAMap *result;
447         DMAMap mem_region = {
448             .translated_addr = section->offset_within_address_space,
449             .size = int128_get64(llsize) - 1,
450         };
451 
452         result = vhost_iova_tree_find_gpa(s->iova_tree, &mem_region);
453         if (!result) {
454             /* The section wasn't mapped by the memory listener; nothing to do */
455             return;
456         }
457         iova = result->iova;
458         vhost_iova_tree_remove_gpa(s->iova_tree, *result);
459     }
460     vhost_vdpa_iotlb_batch_begin_once(s);
461     /*
462      * The unmap ioctl doesn't accept a full 64-bit span; split it when needed
463      */
464     if (int128_eq(llsize, int128_2_64())) {
465         llsize = int128_rshift(llsize, 1);
466         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
467                                    int128_get64(llsize));
468 
469         if (ret) {
470             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
471                          "0x%" HWADDR_PRIx ") = %d (%m)",
472                          s, iova, int128_get64(llsize), ret);
473         }
474         iova += int128_get64(llsize);
475     }
476     ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
477                                int128_get64(llsize));
478 
479     if (ret) {
480         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
481                      "0x%" HWADDR_PRIx ") = %d (%m)",
482                      s, iova, int128_get64(llsize), ret);
483     }
484 
485     memory_region_unref(section->mr);
486 }
487 /*
488  * The IOTLB API is used by vhost-vdpa, which requires incremental updating
489  * of the mapping. So we cannot use the generic vhost memory listener, which
490  * depends on addnop().
491  */
492 static const MemoryListener vhost_vdpa_memory_listener = {
493     .name = "vhost-vdpa",
494     .commit = vhost_vdpa_listener_commit,
495     .region_add = vhost_vdpa_listener_region_add,
496     .region_del = vhost_vdpa_listener_region_del,
497 };
498 
499 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
500                              void *arg)
501 {
502     struct vhost_vdpa *v = dev->opaque;
503     int fd = v->shared->device_fd;
504     int ret;
505 
506     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
507 
508     ret = ioctl(fd, request, arg);
509     return ret < 0 ? -errno : ret;
510 }
511 
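/*
 * Read-modify-write the device status: OR in the requested bits and read the
 * status back to verify that the device accepted them.
 */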
512 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
513 {
514     uint8_t s;
515     int ret;
516 
517     trace_vhost_vdpa_add_status(dev, status);
518     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
519     if (ret < 0) {
520         return ret;
521     }
522     if ((s & status) == status) {
523         /* Don't set bits already set */
524         return 0;
525     }
526 
527     s |= status;
528 
529     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
530     if (ret < 0) {
531         return ret;
532     }
533 
534     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
535     if (ret < 0) {
536         return ret;
537     }
538 
539     if (!(s & status)) {
540         return -EIO;
541     }
542 
543     return 0;
544 }
545 
546 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
547 {
548     int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
549 
550     return ret < 0 ? -errno : 0;
551 }
552 
553 /*
554  * This function is for requests that only need to be
555  * applied once. Typically such a request occurs at the beginning
556  * of operation, before setting up the queues. It should not be
557  * used for requests that operate on all the queues, which would
558  * need to check dev->vq_index_end instead.
559  */
560 static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
561 {
562     struct vhost_vdpa *v = dev->opaque;
563 
564     return v->index == 0;
565 }
566 
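/* Return true for the vhost_dev that contains the last virtqueues of the device */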
567 static bool vhost_vdpa_last_dev(struct vhost_dev *dev)
568 {
569     return dev->vq_index + dev->nvqs == dev->vq_index_end;
570 }
571 
572 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
573                                        uint64_t *features)
574 {
575     int ret;
576 
577     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
578     trace_vhost_vdpa_get_features(dev, *features);
579     return ret;
580 }
581 
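/* Allocate one shadow virtqueue per vhost virtqueue */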
582 static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
583 {
584     g_autoptr(GPtrArray) shadow_vqs = NULL;
585 
586     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
587     for (unsigned n = 0; n < hdev->nvqs; ++n) {
588         VhostShadowVirtqueue *svq;
589 
590         svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
591         g_ptr_array_add(shadow_vqs, svq);
592     }
593 
594     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
595 }
596 
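/*
 * Negotiate the backend features QEMU cares about (IOTLB message v2, IOTLB
 * batching, ASID and suspend); only the first vhost_dev acks them with the
 * kernel, but the result is cached for every device.
 */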
597 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
598 {
599     struct vhost_vdpa *v = dev->opaque;
600 
601     uint64_t features;
602     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
603         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
604         0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
605         0x1ULL << VHOST_BACKEND_F_SUSPEND;
606     int r;
607 
608     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
609         return -EFAULT;
610     }
611 
612     features &= f;
613 
614     if (vhost_vdpa_first_dev(dev)) {
615         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
616         if (r) {
617             return -EFAULT;
618         }
619     }
620 
621     dev->backend_cap = features;
622     v->shared->backend_cap = features;
623 
624     return 0;
625 }
626 
627 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
628 {
629     struct vhost_vdpa *v = opaque;
630     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
631     trace_vhost_vdpa_init(dev, v->shared, opaque);
632     int ret;
633 
634     v->dev = dev;
635     dev->opaque = opaque;
636     v->shared->listener = vhost_vdpa_memory_listener;
637     vhost_vdpa_init_svq(dev, v);
638 
639     error_propagate(&dev->migration_blocker, v->migration_blocker);
640     if (!vhost_vdpa_first_dev(dev)) {
641         return 0;
642     }
643 
644     /*
645      * If shadow_vqs_enabled is set at initialization, the device has been
646      * started with x-svq=on, so don't block migration.
647      */
648     if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
649         /* We don't have dev->features yet */
650         uint64_t features;
651         ret = vhost_vdpa_get_dev_features(dev, &features);
652         if (unlikely(ret)) {
653             error_setg_errno(errp, -ret, "Could not get device features");
654             return ret;
655         }
656         vhost_svq_valid_features(features, &dev->migration_blocker);
657     }
658 
659     /*
660      * Similar to VFIO, we end up pinning all guest memory and have to
661      * disable discarding of RAM.
662      */
663     ret = ram_block_discard_disable(true);
664     if (ret) {
665         error_report("Cannot set discarding of RAM broken");
666         return ret;
667     }
668 
669     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
670                                VIRTIO_CONFIG_S_DRIVER);
671 
672     return 0;
673 }
674 
675 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
676                                             int queue_index)
677 {
678     size_t page_size = qemu_real_host_page_size();
679     struct vhost_vdpa *v = dev->opaque;
680     VirtIODevice *vdev = dev->vdev;
681     VhostVDPAHostNotifier *n;
682 
683     n = &v->notifier[queue_index];
684 
685     if (n->addr) {
686         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
687         object_unparent(OBJECT(&n->mr));
688         munmap(n->addr, page_size);
689         n->addr = NULL;
690     }
691 }
692 
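/*
 * mmap the per-queue notification (doorbell) area of the vhost-vdpa device
 * and expose it to the guest as a host notifier memory region, so guest
 * kicks reach the device directly.
 */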
693 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
694 {
695     size_t page_size = qemu_real_host_page_size();
696     struct vhost_vdpa *v = dev->opaque;
697     VirtIODevice *vdev = dev->vdev;
698     VhostVDPAHostNotifier *n;
699     int fd = v->shared->device_fd;
700     void *addr;
701     char *name;
702 
703     vhost_vdpa_host_notifier_uninit(dev, queue_index);
704 
705     n = &v->notifier[queue_index];
706 
707     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
708                 queue_index * page_size);
709     if (addr == MAP_FAILED) {
710         goto err;
711     }
712 
713     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
714                            v, queue_index);
715     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
716                                       page_size, addr);
717     g_free(name);
718 
719     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
720         object_unparent(OBJECT(&n->mr));
721         munmap(addr, page_size);
722         goto err;
723     }
724     n->addr = addr;
725 
726     return 0;
727 
728 err:
729     return -1;
730 }
731 
732 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
733 {
734     int i;
735 
736     /*
737      * Pack all the changes to the memory regions in a single
738      * transaction to avoid several updates of the address space
739      * topology.
740      */
741     memory_region_transaction_begin();
742 
743     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
744         vhost_vdpa_host_notifier_uninit(dev, i);
745     }
746 
747     memory_region_transaction_commit();
748 }
749 
750 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
751 {
752     struct vhost_vdpa *v = dev->opaque;
753     int i;
754 
755     if (v->shadow_vqs_enabled) {
756         /* FIXME SVQ is not compatible with host notifiers mr */
757         return;
758     }
759 
760     /*
761      * Pack all the changes to the memory regions in a single
762      * transaction to avoid several updates of the address space
763      * topology.
764      */
765     memory_region_transaction_begin();
766 
767     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
768         if (vhost_vdpa_host_notifier_init(dev, i)) {
769             vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
770             break;
771         }
772     }
773 
774     memory_region_transaction_commit();
775 }
776 
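/* Stop all shadow virtqueues and free the array that holds them */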
777 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
778 {
779     struct vhost_vdpa *v = dev->opaque;
780     size_t idx;
781 
782     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
783         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
784     }
785     g_ptr_array_free(v->shadow_vqs, true);
786 }
787 
788 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
789 {
790     struct vhost_vdpa *v;
791     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
792     v = dev->opaque;
793     trace_vhost_vdpa_cleanup(dev, v);
794     if (vhost_vdpa_first_dev(dev)) {
795         ram_block_discard_disable(false);
796         memory_listener_unregister(&v->shared->listener);
797     }
798 
799     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
800     vhost_vdpa_svq_cleanup(dev);
801 
802     dev->opaque = NULL;
803 
804     return 0;
805 }
806 
807 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
808 {
809     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
810     return INT_MAX;
811 }
812 
813 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
814                                     struct vhost_memory *mem)
815 {
816     if (!vhost_vdpa_first_dev(dev)) {
817         return 0;
818     }
819 
820     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
821     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
822         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
823         int i;
824         for (i = 0; i < mem->nregions; i++) {
825             trace_vhost_vdpa_dump_regions(dev, i,
826                                           mem->regions[i].guest_phys_addr,
827                                           mem->regions[i].memory_size,
828                                           mem->regions[i].userspace_addr,
829                                           mem->regions[i].flags_padding);
830         }
831     }
832     if (mem->padding) {
833         return -EINVAL;
834     }
835 
836     return 0;
837 }
838 
839 static int vhost_vdpa_set_features(struct vhost_dev *dev,
840                                    uint64_t features)
841 {
842     struct vhost_vdpa *v = dev->opaque;
843     int ret;
844 
845     if (!vhost_vdpa_first_dev(dev)) {
846         return 0;
847     }
848 
849     if (v->shadow_vqs_enabled) {
850         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
851             /*
852              * QEMU is just trying to enable or disable logging. SVQ handles
853              * this separately, so no need to forward this.
854              */
855             v->acked_features = features;
856             return 0;
857         }
858 
859         v->acked_features = features;
860 
861         /* We must not ack _F_LOG if SVQ is enabled */
862         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
863     }
864 
865     trace_vhost_vdpa_set_features(dev, features);
866     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
867     if (ret) {
868         return ret;
869     }
870 
871     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
872 }
873 
874 static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
875                                     uint32_t *device_id)
876 {
877     int ret;
878     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
879     trace_vhost_vdpa_get_device_id(dev, *device_id);
880     return ret;
881 }
882 
883 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
884 {
885     struct vhost_vdpa *v = dev->opaque;
886     int ret;
887     uint8_t status = 0;
888 
889     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
890     trace_vhost_vdpa_reset_device(dev);
891     v->suspended = false;
892     return ret;
893 }
894 
895 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
896 {
897     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
898 
899     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
900     return idx;
901 }
902 
903 static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx,
904                                            int enable)
905 {
906     struct vhost_dev *dev = v->dev;
907     struct vhost_vring_state state = {
908         .index = idx,
909         .num = enable,
910     };
911     int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
912 
913     trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r);
914     return r;
915 }
916 
917 static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable)
918 {
919     struct vhost_vdpa *v = dev->opaque;
920     unsigned int i;
921     int ret;
922 
923     for (i = 0; i < dev->nvqs; ++i) {
924         ret = vhost_vdpa_set_vring_enable_one(v, i, enable);
925         if (ret < 0) {
926             return ret;
927         }
928     }
929 
930     return 0;
931 }
932 
933 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
934 {
935     return vhost_vdpa_set_vring_enable_one(v, idx, 1);
936 }
937 
938 static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
939                                        int fd)
940 {
941     trace_vhost_vdpa_set_config_call(dev, fd);
942     return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
943 }
944 
945 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
946                                    uint32_t config_len)
947 {
948     g_autoptr(GString) str = g_string_sized_new(4 * 16);
949     size_t b, len;
950 
951     for (b = 0; b < config_len; b += len) {
952         len = MIN(config_len - b, 16);
953 
954         g_string_truncate(str, 0);
955         qemu_hexdump_line(str, config + b, len, 1, 4);
956         trace_vhost_vdpa_dump_config(dev, b, str->str);
957     }
958 }
959 
960 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
961                                    uint32_t offset, uint32_t size,
962                                    uint32_t flags)
963 {
964     struct vhost_vdpa_config *config;
965     int ret;
966     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
967 
968     trace_vhost_vdpa_set_config(dev, offset, size, flags);
969     config = g_malloc(size + config_size);
970     config->off = offset;
971     config->len = size;
972     memcpy(config->buf, data, size);
973     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
974         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
975         vhost_vdpa_dump_config(dev, data, size);
976     }
977     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
978     g_free(config);
979     return ret;
980 }
981 
982 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
983                                    uint32_t config_len, Error **errp)
984 {
985     struct vhost_vdpa_config *v_config;
986     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
987     int ret;
988 
989     trace_vhost_vdpa_get_config(dev, config, config_len);
990     v_config = g_malloc(config_len + config_size);
991     v_config->len = config_len;
992     v_config->off = 0;
993     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
994     memcpy(config, v_config->buf, config_len);
995     g_free(v_config);
996     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
997         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
998         vhost_vdpa_dump_config(dev, config, config_len);
999     }
1000     return ret;
1001 }
1002 
1003 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
1004                                          struct vhost_vring_state *ring)
1005 {
1006     struct vhost_vdpa *v = dev->opaque;
1007 
1008     trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num,
1009                                         v->shadow_vqs_enabled);
1010     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
1011 }
1012 
1013 static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
1014                                          struct vhost_vring_file *file)
1015 {
1016     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
1017     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
1018 }
1019 
1020 static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
1021                                          struct vhost_vring_file *file)
1022 {
1023     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
1024     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
1025 }
1026 
1027 static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
1028                                          struct vhost_vring_addr *addr)
1029 {
1030     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
1031                                 addr->desc_user_addr, addr->used_user_addr,
1032                                 addr->avail_user_addr,
1033                                 addr->log_guest_addr);
1034 
1035     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
1036 
1037 }
1038 
1039 /**
1040  * Set the shadow virtqueue descriptors to the device
1041  *
1042  * @dev: The vhost device model
1043  * @svq: The shadow virtqueue
1044  * @idx: The index of the virtqueue in the vhost device
1045  * @errp: Error
1046  *
1047  * Note that this function does not rewind the kick file descriptor if it
1048  * cannot set the call one.
1049  */
1050 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
1051                                   VhostShadowVirtqueue *svq, unsigned idx,
1052                                   Error **errp)
1053 {
1054     struct vhost_vring_file file = {
1055         .index = dev->vq_index + idx,
1056     };
1057     const EventNotifier *event_notifier = &svq->hdev_kick;
1058     int r;
1059 
1060     r = event_notifier_init(&svq->hdev_kick, 0);
1061     if (r != 0) {
1062         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
1063         goto err_init_hdev_kick;
1064     }
1065 
1066     r = event_notifier_init(&svq->hdev_call, 0);
1067     if (r != 0) {
1068         error_setg_errno(errp, -r, "Couldn't create call event notifier");
1069         goto err_init_hdev_call;
1070     }
1071 
1072     file.fd = event_notifier_get_fd(event_notifier);
1073     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
1074     if (unlikely(r != 0)) {
1075         error_setg_errno(errp, -r, "Can't set device kick fd");
1076         goto err_init_set_dev_fd;
1077     }
1078 
1079     event_notifier = &svq->hdev_call;
1080     file.fd = event_notifier_get_fd(event_notifier);
1081     r = vhost_vdpa_set_vring_dev_call(dev, &file);
1082     if (unlikely(r != 0)) {
1083         error_setg_errno(errp, -r, "Can't set device call fd");
1084         goto err_init_set_dev_fd;
1085     }
1086 
1087     return 0;
1088 
1089 err_init_set_dev_fd:
1090     event_notifier_set_handler(&svq->hdev_call, NULL);
1091 
1092 err_init_hdev_call:
1093     event_notifier_cleanup(&svq->hdev_kick);
1094 
1095 err_init_hdev_kick:
1096     return r;
1097 }
1098 
1099 /**
1100  * Unmap a SVQ area in the device
1101  */
1102 static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
1103 {
1104     const DMAMap needle = {
1105         .translated_addr = addr,
1106     };
1107     const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
1108                                                      &needle);
1109     hwaddr size;
1110     int r;
1111 
1112     if (unlikely(!result)) {
1113         error_report("Unable to find SVQ address to unmap");
1114         return;
1115     }
1116 
1117     size = ROUND_UP(result->size, qemu_real_host_page_size());
1118     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
1119                              size);
1120     if (unlikely(r < 0)) {
1121         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
1122         return;
1123     }
1124 
1125     vhost_iova_tree_remove(v->shared->iova_tree, *result);
1126 }
1127 
1128 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
1129                                        const VhostShadowVirtqueue *svq)
1130 {
1131     struct vhost_vdpa *v = dev->opaque;
1132     struct vhost_vring_addr svq_addr;
1133 
1134     vhost_svq_get_vring_addr(svq, &svq_addr);
1135 
1136     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
1137 
1138     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
1139 }
1140 
1141 /**
1142  * Map the SVQ area in the device
1143  *
1144  * @v: Vhost-vdpa device
1145  * @needle: The area for which an IOVA is allocated; its iova field is set
1146  * @taddr: The translated address (HVA)
1147  * @errp: Error pointer
1148  */
1149 static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
1150                                     hwaddr taddr, Error **errp)
1151 {
1152     int r;
1153 
1154     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr);
1155     if (unlikely(r != IOVA_OK)) {
1156         error_setg(errp, "Cannot allocate iova (%d)", r);
1157 
1158         if (needle->translated_addr == taddr) {
1159             error_append_hint(errp, "Insertion to IOVA->HVA tree failed");
1160             /* Remove the mapping from the IOVA-only tree */
1161             vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1162         }
1163         return false;
1164     }
1165 
1166     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
1167                            needle->size + 1,
1168                            (void *)(uintptr_t)needle->translated_addr,
1169                            needle->perm == IOMMU_RO);
1170     if (unlikely(r != 0)) {
1171         error_setg_errno(errp, -r, "Cannot map region to device");
1172         vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1173     }
1174 
1175     return r == 0;
1176 }
1177 
1178 /**
1179  * Map the shadow virtqueue rings in the device
1180  *
1181  * @dev: The vhost device
1182  * @svq: The shadow virtqueue
1183  * @addr: Assigned IOVA addresses
1184  * @errp: Error pointer
1185  */
1186 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
1187                                      const VhostShadowVirtqueue *svq,
1188                                      struct vhost_vring_addr *addr,
1189                                      Error **errp)
1190 {
1191     ERRP_GUARD();
1192     DMAMap device_region, driver_region;
1193     struct vhost_vring_addr svq_addr;
1194     struct vhost_vdpa *v = dev->opaque;
1195     size_t device_size = vhost_svq_device_area_size(svq);
1196     size_t driver_size = vhost_svq_driver_area_size(svq);
1197     size_t avail_offset;
1198     bool ok;
1199 
1200     vhost_svq_get_vring_addr(svq, &svq_addr);
1201 
1202     driver_region = (DMAMap) {
1203         .size = driver_size - 1,
1204         .perm = IOMMU_RO,
1205     };
1206     ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.desc_user_addr,
1207                                  errp);
1208     if (unlikely(!ok)) {
1209         error_prepend(errp, "Cannot create vq driver region: ");
1210         return false;
1211     }
1212     addr->desc_user_addr = driver_region.iova;
1213     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
1214     addr->avail_user_addr = driver_region.iova + avail_offset;
1215 
1216     device_region = (DMAMap) {
1217         .size = device_size - 1,
1218         .perm = IOMMU_RW,
1219     };
1220     ok = vhost_vdpa_svq_map_ring(v, &device_region, svq_addr.used_user_addr,
1221                                  errp);
1222     if (unlikely(!ok)) {
1223         error_prepend(errp, "Cannot create vq device region: ");
1224         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1225     }
1226     addr->used_user_addr = device_region.iova;
1227 
1228     return ok;
1229 }
1230 
1231 static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1232                                  VhostShadowVirtqueue *svq, unsigned idx,
1233                                  Error **errp)
1234 {
1235     uint16_t vq_index = dev->vq_index + idx;
1236     struct vhost_vring_state s = {
1237         .index = vq_index,
1238     };
1239     int r;
1240 
1241     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1242     if (unlikely(r)) {
1243         error_setg_errno(errp, -r, "Cannot set vring base");
1244         return false;
1245     }
1246 
1247     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1248     return r == 0;
1249 }
1250 
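/*
 * Start all shadow virtqueues: set the device vring bases and fds, map the
 * SVQ rings into the device, and point the device vring addresses at them.
 */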
1251 static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1252 {
1253     struct vhost_vdpa *v = dev->opaque;
1254     Error *err = NULL;
1255     unsigned i;
1256 
1257     if (!v->shadow_vqs_enabled) {
1258         return true;
1259     }
1260 
1261     for (i = 0; i < v->shadow_vqs->len; ++i) {
1262         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1263         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1264         struct vhost_vring_addr addr = {
1265             .index = dev->vq_index + i,
1266         };
1267         int r;
1268         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1269         if (unlikely(!ok)) {
1270             goto err;
1271         }
1272 
1273         vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
1274         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1275         if (unlikely(!ok)) {
1276             goto err_map;
1277         }
1278 
1279         /* Override vring GPA set by vhost subsystem */
1280         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1281         if (unlikely(r != 0)) {
1282             error_setg_errno(&err, -r, "Cannot set device address");
1283             goto err_set_addr;
1284         }
1285     }
1286 
1287     return true;
1288 
1289 err_set_addr:
1290     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1291 
1292 err_map:
1293     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1294 
1295 err:
1296     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1297     for (unsigned j = 0; j < i; ++j) {
1298         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1299         vhost_vdpa_svq_unmap_rings(dev, svq);
1300         vhost_svq_stop(svq);
1301     }
1302 
1303     return false;
1304 }
1305 
1306 static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1307 {
1308     struct vhost_vdpa *v = dev->opaque;
1309 
1310     if (!v->shadow_vqs_enabled) {
1311         return;
1312     }
1313 
1314     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1315         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1316 
1317         vhost_svq_stop(svq);
1318         vhost_vdpa_svq_unmap_rings(dev, svq);
1319 
1320         event_notifier_cleanup(&svq->hdev_kick);
1321         event_notifier_cleanup(&svq->hdev_call);
1322     }
1323 }
1324 
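/*
 * Stop device operation: suspend the device if the backend supports
 * VHOST_BACKEND_F_SUSPEND, otherwise fall back to resetting it.
 */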
1325 static void vhost_vdpa_suspend(struct vhost_dev *dev)
1326 {
1327     struct vhost_vdpa *v = dev->opaque;
1328     int r;
1329 
1330     if (!vhost_vdpa_first_dev(dev)) {
1331         return;
1332     }
1333 
1334     if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
1335         trace_vhost_vdpa_suspend(dev);
1336         r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
1337         if (unlikely(r)) {
1338             error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
1339         } else {
1340             v->suspended = true;
1341             return;
1342         }
1343     }
1344 
1345     vhost_vdpa_reset_device(dev);
1346 }
1347 
1348 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1349 {
1350     struct vhost_vdpa *v = dev->opaque;
1351     bool ok;
1352     trace_vhost_vdpa_dev_start(dev, started);
1353 
1354     if (started) {
1355         vhost_vdpa_host_notifiers_init(dev);
1356         ok = vhost_vdpa_svqs_start(dev);
1357         if (unlikely(!ok)) {
1358             return -1;
1359         }
1360     } else {
1361         vhost_vdpa_suspend(dev);
1362         vhost_vdpa_svqs_stop(dev);
1363         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
1364     }
1365 
1366     if (!vhost_vdpa_last_dev(dev)) {
1367         return 0;
1368     }
1369 
1370     if (started) {
1371         if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
1372             error_report("SVQ can not work while IOMMU enable, please disable"
1373                          "IOMMU and try again");
1374             return -1;
1375         }
1376         memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
1377 
1378         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1379     }
1380 
1381     return 0;
1382 }
1383 
1384 static void vhost_vdpa_reset_status(struct vhost_dev *dev)
1385 {
1386     struct vhost_vdpa *v = dev->opaque;
1387 
1388     if (!vhost_vdpa_last_dev(dev)) {
1389         return;
1390     }
1391 
1392     vhost_vdpa_reset_device(dev);
1393     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1394                                VIRTIO_CONFIG_S_DRIVER);
1395     memory_listener_unregister(&v->shared->listener);
1396 }
1397 
1398 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1399                                      struct vhost_log *log)
1400 {
1401     struct vhost_vdpa *v = dev->opaque;
1402     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
1403         return 0;
1404     }
1405 
1406     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1407                                   log->log);
1408     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1409 }
1410 
1411 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1412                                        struct vhost_vring_addr *addr)
1413 {
1414     struct vhost_vdpa *v = dev->opaque;
1415 
1416     if (v->shadow_vqs_enabled) {
1417         /*
1418          * Device vring addr was set at device start. SVQ base is handled by
1419          * VirtQueue code.
1420          */
1421         return 0;
1422     }
1423 
1424     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1425 }
1426 
1427 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1428                                       struct vhost_vring_state *ring)
1429 {
1430     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1431     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1432 }
1433 
1434 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1435                                        struct vhost_vring_state *ring)
1436 {
1437     struct vhost_vdpa *v = dev->opaque;
1438 
1439     if (v->shadow_vqs_enabled) {
1440         /*
1441          * Device vring base was set at device start. SVQ base is handled by
1442          * VirtQueue code.
1443          */
1444         return 0;
1445     }
1446 
1447     return vhost_vdpa_set_dev_vring_base(dev, ring);
1448 }
1449 
1450 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1451                                        struct vhost_vring_state *ring)
1452 {
1453     struct vhost_vdpa *v = dev->opaque;
1454     int ret;
1455 
1456     if (v->shadow_vqs_enabled) {
1457         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
1458         trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true);
1459         return 0;
1460     }
1461 
1462     if (!v->suspended) {
1463         /*
1464          * Cannot trust the value returned by the device; let vhost recover
1465          * the used idx from the guest.
1466          */
1467         return -1;
1468     }
1469 
1470     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1471     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false);
1472     return ret;
1473 }
1474 
1475 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1476                                        struct vhost_vring_file *file)
1477 {
1478     struct vhost_vdpa *v = dev->opaque;
1479     int vdpa_idx = file->index - dev->vq_index;
1480 
1481     if (v->shadow_vqs_enabled) {
1482         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1483         vhost_svq_set_svq_kick_fd(svq, file->fd);
1484         return 0;
1485     } else {
1486         return vhost_vdpa_set_vring_dev_kick(dev, file);
1487     }
1488 }
1489 
1490 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1491                                        struct vhost_vring_file *file)
1492 {
1493     struct vhost_vdpa *v = dev->opaque;
1494     int vdpa_idx = file->index - dev->vq_index;
1495     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1496 
1497     /* Remember last call fd because we can switch to SVQ anytime. */
1498     vhost_svq_set_svq_call_fd(svq, file->fd);
1499     /*
1500      * When SVQ is transitioning to off, shadow_vqs_enabled has
1501      * not been set back to false yet, but the underlying call fd
1502      * will have to switch back to the guest notifier to signal the
1503      * passthrough virtqueues. In other situations, SVQ's own call
1504      * fd shall be used to signal the device model.
1505      */
1506     if (v->shadow_vqs_enabled &&
1507         v->shared->svq_switching != SVQ_TSTATE_DISABLING) {
1508         return 0;
1509     }
1510 
1511     return vhost_vdpa_set_vring_dev_call(dev, file);
1512 }
1513 
1514 static int vhost_vdpa_get_features(struct vhost_dev *dev,
1515                                      uint64_t *features)
1516 {
1517     int ret = vhost_vdpa_get_dev_features(dev, features);
1518 
1519     if (ret == 0) {
1520         /* Add SVQ logging capabilities */
1521         *features |= BIT_ULL(VHOST_F_LOG_ALL);
1522     }
1523 
1524     return ret;
1525 }
1526 
1527 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1528 {
1529     if (!vhost_vdpa_first_dev(dev)) {
1530         return 0;
1531     }
1532 
1533     trace_vhost_vdpa_set_owner(dev);
1534     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1535 }
1536 
1537 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1538                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1539 {
1540     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1541     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1542     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1543     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1544     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1545                                  addr->avail_user_addr, addr->used_user_addr);
1546     return 0;
1547 }
1548 
1549 static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1550 {
1551     return true;
1552 }
1553 
1554 const VhostOps vdpa_ops = {
1555         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1556         .vhost_backend_init = vhost_vdpa_init,
1557         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1558         .vhost_set_log_base = vhost_vdpa_set_log_base,
1559         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1560         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1561         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1562         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1563         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1564         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1565         .vhost_get_features = vhost_vdpa_get_features,
1566         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1567         .vhost_set_owner = vhost_vdpa_set_owner,
1568         .vhost_set_vring_endian = NULL,
1569         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1570         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1571         .vhost_set_features = vhost_vdpa_set_features,
1572         .vhost_reset_device = vhost_vdpa_reset_device,
1573         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1574         .vhost_set_vring_enable = vhost_vdpa_set_vring_enable,
1575         .vhost_get_config  = vhost_vdpa_get_config,
1576         .vhost_set_config = vhost_vdpa_set_config,
1577         .vhost_requires_shm_log = NULL,
1578         .vhost_migration_done = NULL,
1579         .vhost_net_set_mtu = NULL,
1580         .vhost_set_iotlb_callback = NULL,
1581         .vhost_send_device_iotlb_msg = NULL,
1582         .vhost_dev_start = vhost_vdpa_dev_start,
1583         .vhost_get_device_id = vhost_vdpa_get_device_id,
1584         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1585         .vhost_force_iommu = vhost_vdpa_force_iommu,
1586         .vhost_set_config_call = vhost_vdpa_set_config_call,
1587         .vhost_reset_status = vhost_vdpa_reset_status,
1588 };
1589