xref: /qemu/hw/virtio/vhost-vdpa.c (revision 9c8ff2a1ed51b52ac64b80d35bdbd239b7b5d8e5)
1 /*
2  * vhost-vdpa
3  *
4  *  Copyright(c) 2017-2018 Intel Corporation.
5  *  Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include <linux/vhost.h>
14 #include <linux/vfio.h>
15 #include <sys/eventfd.h>
16 #include <sys/ioctl.h>
17 #include "exec/target_page.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "system/address-spaces.h"
24 #include "migration/blocker.h"
25 #include "qemu/cutils.h"
26 #include "qemu/main-loop.h"
27 #include "trace.h"
28 #include "qapi/error.h"
29 
30 /*
31  * Return one past the end of the section. Be careful with uint64_t
32  * conversions!
33  */
34 static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section,
35                                      int page_mask)
36 {
37     Int128 llend = int128_make64(section->offset_within_address_space);
38     llend = int128_add(llend, section->size);
39     llend = int128_and(llend, int128_exts64(page_mask));
40 
41     return llend;
42 }
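
/*
 * Illustrative sketch (never compiled): a worked example of the end
 * computation above, using made-up values. With a section starting at
 * 0x101000, a size of 0x3400 and 4 KiB pages (page_mask == -0x1000), the
 * unaligned end 0x104400 is truncated down to 0x104000; callers pair this
 * with ROUND_UP() of the start address.
 */
#if 0
static void example_section_end(void)
{
    const int page_size = 0x1000;
    const int page_mask = -page_size;                     /* ...fffff000 */
    Int128 llend = int128_make64(0x101000);               /* section start */

    llend = int128_add(llend, int128_make64(0x3400));     /* unaligned end */
    llend = int128_and(llend, int128_exts64(page_mask));  /* truncate down */
    assert(int128_get64(llend) == 0x104000);
}
#endif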
43 
44 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
45                                                 uint64_t iova_min,
46                                                 uint64_t iova_max,
47                                                 int page_mask)
48 {
49     Int128 llend;
50     bool is_ram = memory_region_is_ram(section->mr);
51     bool is_iommu = memory_region_is_iommu(section->mr);
52     bool is_protected = memory_region_is_protected(section->mr);
53 
54     /* vhost-vDPA doesn't allow MMIO to be mapped */
55     bool is_ram_device = memory_region_is_ram_device(section->mr);
56 
57     if ((!is_ram && !is_iommu) || is_protected || is_ram_device) {
58         trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected,
59                                                 is_ram_device, iova_min,
60                                                 iova_max, page_mask);
61         return true;
62     }
63 
64     if (section->offset_within_address_space < iova_min) {
65         error_report("RAM section out of device range (min=0x%" PRIx64
66                      ", addr=0x%" HWADDR_PRIx ")",
67                      iova_min, section->offset_within_address_space);
68         return true;
69     }
70     /*
71      * When a vIOMMU is in use, the section can be larger than iova_max while
72      * the memory that actually gets mapped is smaller, so the check is moved
73      * to vhost_vdpa_iommu_map_notify(). That function uses the actual size
74      * that is mapped to the kernel.
75      */
76 
77     if (!is_iommu) {
78         llend = vhost_vdpa_section_end(section, page_mask);
79         if (int128_gt(llend, int128_make64(iova_max))) {
80             error_report("RAM section out of device range (max=0x%" PRIx64
81                          ", end addr=0x%" PRIx64 ")",
82                          iova_max, int128_get64(llend));
83             return true;
84         }
85     }
86 
87     return false;
88 }
89 
90 /*
91  * The caller must set asid = 0 if the device does not support asid.
92  * This is not an ABI break since it is set to 0 by the initializer anyway.
93  */
94 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
95                        hwaddr size, void *vaddr, bool readonly)
96 {
97     struct vhost_msg_v2 msg = {};
98     int fd = s->device_fd;
99     int ret = 0;
100 
101     msg.type = VHOST_IOTLB_MSG_V2;
102     msg.asid = asid;
103     msg.iotlb.iova = iova;
104     msg.iotlb.size = size;
105     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
106     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
107     msg.iotlb.type = VHOST_IOTLB_UPDATE;
108 
109     trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
110                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
111                              msg.iotlb.type);
112 
113     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
114         error_report("failed to write, fd=%d, errno=%d (%s)",
115             fd, errno, strerror(errno));
116         return -EIO;
117     }
118 
119     return ret;
120 }
121 
122 /*
123  * The caller must set asid = 0 if the device does not support asid.
124  * This is not an ABI break since it is set to 0 by the initializer anyway.
125  */
126 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
127                          hwaddr size)
128 {
129     struct vhost_msg_v2 msg = {};
130     int fd = s->device_fd;
131     int ret = 0;
132 
133     msg.type = VHOST_IOTLB_MSG_V2;
134     msg.asid = asid;
135     msg.iotlb.iova = iova;
136     msg.iotlb.size = size;
137     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
138 
139     trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
140                                msg.iotlb.size, msg.iotlb.type);
141 
142     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
143         error_report("failed to write, fd=%d, errno=%d (%s)",
144             fd, errno, strerror(errno));
145         return -EIO;
146     }
147 
148     return ret;
149 }
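
/*
 * Illustrative sketch (never compiled): following the comments above, asid 0
 * is passed when the device does not support VHOST_BACKEND_F_IOTLB_ASID. The
 * IOVA and size below are hypothetical values for illustration only.
 */
#if 0
static int example_map_then_unmap(VhostVDPAShared *s, void *buf)
{
    const hwaddr iova = 0x100000;   /* hypothetical IOVA */
    const hwaddr size = 0x10000;    /* 64 KiB, assumed page aligned */
    int r;

    /* Map read/write with asid 0, then tear the mapping down again */
    r = vhost_vdpa_dma_map(s, 0, iova, size, buf, false);
    if (r) {
        return r;
    }
    return vhost_vdpa_dma_unmap(s, 0, iova, size);
}
#endif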
150 
151 static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
152 {
153     int fd = s->device_fd;
154     struct vhost_msg_v2 msg = {
155         .type = VHOST_IOTLB_MSG_V2,
156         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
157     };
158 
159     trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
160     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
161         error_report("failed to write, fd=%d, errno=%d (%s)",
162                      fd, errno, strerror(errno));
163     }
164 }
165 
166 static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
167 {
168     if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
169         !s->iotlb_batch_begin_sent) {
170         vhost_vdpa_listener_begin_batch(s);
171     }
172 
173     s->iotlb_batch_begin_sent = true;
174 }
175 
176 static void vhost_vdpa_listener_commit(MemoryListener *listener)
177 {
178     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
179     struct vhost_msg_v2 msg = {};
180     int fd = s->device_fd;
181 
182     if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
183         return;
184     }
185 
186     if (!s->iotlb_batch_begin_sent) {
187         return;
188     }
189 
190     msg.type = VHOST_IOTLB_MSG_V2;
191     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
192 
193     trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type);
194     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
195         error_report("failed to write, fd=%d, errno=%d (%s)",
196                      fd, errno, strerror(errno));
197     }
198 
199     s->iotlb_batch_begin_sent = false;
200 }
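
/*
 * Illustrative sketch (never compiled): the message sequence of a batched
 * update, assuming the device negotiated VHOST_BACKEND_F_IOTLB_BATCH. In the
 * real flow the memory listener core drives the commit callback; this only
 * spells out the resulting messages on the device fd.
 */
#if 0
static void example_batched_update(VhostVDPAShared *s, hwaddr iova,
                                   hwaddr size, void *vaddr)
{
    vhost_vdpa_iotlb_batch_begin_once(s);                /* BATCH_BEGIN (once) */
    vhost_vdpa_dma_map(s, 0, iova, size, vaddr, false);  /* IOTLB_UPDATE */
    vhost_vdpa_listener_commit(&s->listener);            /* BATCH_END */
}
#endif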
201 
202 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
203 {
204     struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
205 
206     hwaddr iova = iotlb->iova + iommu->iommu_offset;
207     VhostVDPAShared *s = iommu->dev_shared;
208     void *vaddr;
209     int ret;
210     Int128 llend;
211     Error *local_err = NULL;
212 
213     if (iotlb->target_as != &address_space_memory) {
214         error_report("Wrong target AS \"%s\", only system memory is allowed",
215                      iotlb->target_as->name ? iotlb->target_as->name : "none");
216         return;
217     }
218     RCU_READ_LOCK_GUARD();
219     /* check if RAM section out of device range */
220     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
221     if (int128_gt(llend, int128_make64(s->iova_range.last))) {
222         error_report("RAM section out of device range (max=0x%" PRIx64
223                      ", end addr=0x%" PRIx64 ")",
224                      s->iova_range.last, int128_get64(llend));
225         return;
226     }
227 
228     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
229         bool read_only;
230 
231         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
232                                   &local_err)) {
233             error_report_err(local_err);
234             return;
235         }
236         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
237                                  iotlb->addr_mask + 1, vaddr, read_only);
238         if (ret) {
239             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
240                          "0x%" HWADDR_PRIx ", %p) = %d (%m)",
241                          s, iova, iotlb->addr_mask + 1, vaddr, ret);
242         }
243     } else {
244         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
245                                    iotlb->addr_mask + 1);
246         if (ret) {
247             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
248                          "0x%" HWADDR_PRIx ") = %d (%m)",
249                          s, iova, iotlb->addr_mask + 1, ret);
250         }
251     }
252 }
253 
254 static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
255                                         MemoryRegionSection *section)
256 {
257     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
258 
259     struct vdpa_iommu *iommu;
260     Int128 end;
261     int iommu_idx;
262     IOMMUMemoryRegion *iommu_mr;
263     int ret;
264 
265     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
266 
267     iommu = g_malloc0(sizeof(*iommu));
268     end = int128_add(int128_make64(section->offset_within_region),
269                      section->size);
270     end = int128_sub(end, int128_one());
271     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
272                                                    MEMTXATTRS_UNSPECIFIED);
273     iommu->iommu_mr = iommu_mr;
274     iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
275                         IOMMU_NOTIFIER_IOTLB_EVENTS,
276                         section->offset_within_region,
277                         int128_get64(end),
278                         iommu_idx);
279     iommu->iommu_offset = section->offset_within_address_space -
280                           section->offset_within_region;
281     iommu->dev_shared = s;
282 
283     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
284     if (ret) {
285         g_free(iommu);
286         return;
287     }
288 
289     QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
290     memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
291 }
292 
293 static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
294                                         MemoryRegionSection *section)
295 {
296     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
297 
298     struct vdpa_iommu *iommu;
299 
300     QLIST_FOREACH(iommu, &s->iommu_list, iommu_next)
301     {
302         if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
303             iommu->n.start == section->offset_within_region) {
304             memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
305             QLIST_REMOVE(iommu, iommu_next);
306             g_free(iommu);
307             break;
308         }
309     }
310 }
311 
312 static void vhost_vdpa_listener_region_add(MemoryListener *listener,
313                                            MemoryRegionSection *section)
314 {
315     DMAMap mem_region = {};
316     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
317     hwaddr iova;
318     Int128 llend, llsize;
319     void *vaddr;
320     int ret;
321     int page_size = qemu_target_page_size();
322     int page_mask = -page_size;
323 
324     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
325                                             s->iova_range.last, page_mask)) {
326         return;
327     }
328     if (memory_region_is_iommu(section->mr)) {
329         vhost_vdpa_iommu_region_add(listener, section);
330         return;
331     }
332 
333     if (unlikely((section->offset_within_address_space & ~page_mask) !=
334                  (section->offset_within_region & ~page_mask))) {
335         trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name,
336                        section->offset_within_address_space & ~page_mask,
337                        section->offset_within_region & ~page_mask);
338         return;
339     }
340 
341     iova = ROUND_UP(section->offset_within_address_space, page_size);
342     llend = vhost_vdpa_section_end(section, page_mask);
343     if (int128_ge(int128_make64(iova), llend)) {
344         return;
345     }
346 
347     memory_region_ref(section->mr);
348 
349     /* Here we assume that memory_region_is_ram(section->mr)==true */
350 
351     vaddr = memory_region_get_ram_ptr(section->mr) +
352             section->offset_within_region +
353             (iova - section->offset_within_address_space);
354 
355     trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend),
356                                          vaddr, section->readonly);
357 
358     llsize = int128_sub(llend, int128_make64(iova));
359     if (s->shadow_data) {
360         int r;
361         hwaddr gpa = section->offset_within_address_space;
362 
363         mem_region.size = int128_get64(llsize) - 1;
364         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
365 
366         r = vhost_iova_tree_map_alloc_gpa(s->iova_tree, &mem_region, gpa);
367         if (unlikely(r != IOVA_OK)) {
368             error_report("Can't allocate a mapping (%d)", r);
369 
370             if (mem_region.translated_addr == gpa) {
371                 error_report("Insertion to GPA->IOVA tree failed");
372                 /* Remove the mapping from the IOVA-only tree */
373                 goto fail_map;
374             }
375             goto fail;
376         }
377 
378         iova = mem_region.iova;
379     }
380 
381     vhost_vdpa_iotlb_batch_begin_once(s);
382     ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
383                              int128_get64(llsize), vaddr, section->readonly);
384     if (ret) {
385         error_report("vhost vdpa map fail!");
386         goto fail_map;
387     }
388 
389     return;
390 
391 fail_map:
392     if (s->shadow_data) {
393         vhost_iova_tree_remove_gpa(s->iova_tree, mem_region);
394     }
395 
396 fail:
397     /*
398      * On the initfn path, store the first error in the container so we
399      * can gracefully fail.  At runtime, there's not much we can do other
400      * than throw a hardware error.
401      */
402     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
403     return;
404 
405 }
406 
407 static void vhost_vdpa_listener_region_del(MemoryListener *listener,
408                                            MemoryRegionSection *section)
409 {
410     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
411     hwaddr iova;
412     Int128 llend, llsize;
413     int ret;
414     int page_size = qemu_target_page_size();
415     int page_mask = -page_size;
416 
417     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
418                                             s->iova_range.last, page_mask)) {
419         return;
420     }
421     if (memory_region_is_iommu(section->mr)) {
422         vhost_vdpa_iommu_region_del(listener, section);
423     }
424 
425     if (unlikely((section->offset_within_address_space & ~page_mask) !=
426                  (section->offset_within_region & ~page_mask))) {
427         trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name,
428                        section->offset_within_address_space & ~page_mask,
429                        section->offset_within_region & ~page_mask);
430         return;
431     }
432 
433     iova = ROUND_UP(section->offset_within_address_space, page_size);
434     llend = vhost_vdpa_section_end(section, page_mask);
435 
436     trace_vhost_vdpa_listener_region_del(s, iova,
437         int128_get64(int128_sub(llend, int128_one())));
438 
439     if (int128_ge(int128_make64(iova), llend)) {
440         return;
441     }
442 
443     llsize = int128_sub(llend, int128_make64(iova));
444 
445     if (s->shadow_data) {
446         const DMAMap *result;
447         DMAMap mem_region = {
448             .translated_addr = section->offset_within_address_space,
449             .size = int128_get64(llsize) - 1,
450         };
451 
452         result = vhost_iova_tree_find_gpa(s->iova_tree, &mem_region);
453         if (!result) {
454             /* The region was never mapped by the memory listener */
455             return;
456         }
457         iova = result->iova;
458         vhost_iova_tree_remove_gpa(s->iova_tree, *result);
459     }
460     vhost_vdpa_iotlb_batch_begin_once(s);
461     /*
462      * The unmap ioctl doesn't accept a full 64-bit span, so split it in two.
463      */
464     if (int128_eq(llsize, int128_2_64())) {
465         llsize = int128_rshift(llsize, 1);
466         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
467                                    int128_get64(llsize));
468 
469         if (ret) {
470             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
471                          "0x%" HWADDR_PRIx ") = %d (%m)",
472                          s, iova, int128_get64(llsize), ret);
473         }
474         iova += int128_get64(llsize);
475     }
476     ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
477                                int128_get64(llsize));
478 
479     if (ret) {
480         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
481                      "0x%" HWADDR_PRIx ") = %d (%m)",
482                      s, iova, int128_get64(llsize), ret);
483     }
484 
485     memory_region_unref(section->mr);
486 }
487 /*
488  * vhost-vdpa uses the IOTLB API, which requires incremental updates of
489  * the mappings, so we cannot use the generic vhost memory listener,
490  * which depends on addnop().
491  */
492 static const MemoryListener vhost_vdpa_memory_listener = {
493     .name = "vhost-vdpa",
494     .commit = vhost_vdpa_listener_commit,
495     .region_add = vhost_vdpa_listener_region_add,
496     .region_del = vhost_vdpa_listener_region_del,
497 };
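
/*
 * Illustrative sketch (never compiled): how this listener is put to use
 * later in this file. The const template is copied into the per-device
 * shared state and registered on the address space the device DMAs into;
 * from then on every region_add/region_del is turned into the IOTLB
 * updates above.
 */
#if 0
static void example_register_listener(VhostVDPAShared *s, AddressSpace *as)
{
    s->listener = vhost_vdpa_memory_listener;
    memory_listener_register(&s->listener, as);
}
#endif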
498 
499 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
500                              void *arg)
501 {
502     struct vhost_vdpa *v = dev->opaque;
503     int fd = v->shared->device_fd;
504     int ret;
505 
506     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
507 
508     ret = ioctl(fd, request, arg);
509     return ret < 0 ? -errno : ret;
510 }
511 
512 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
513 {
514     uint8_t s;
515     int ret;
516 
517     trace_vhost_vdpa_add_status(dev, status);
518     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
519     if (ret < 0) {
520         return ret;
521     }
522     if ((s & status) == status) {
523         /* Don't set bits already set */
524         return 0;
525     }
526 
527     s |= status;
528 
529     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
530     if (ret < 0) {
531         return ret;
532     }
533 
534     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
535     if (ret < 0) {
536         return ret;
537     }
538 
539     if (!(s & status)) {
540         return -EIO;
541     }
542 
543     return 0;
544 }
545 
546 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
547 {
548     int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
549 
550     return ret < 0 ? -errno : 0;
551 }
552 
553 /*
554  * This function is for requests that only need to be applied once.
555  * Typically such a request occurs at the beginning of operation,
556  * before setting up the queues. It should not be used for requests
557  * that must wait until all queues are set up, which would need to
558  * check dev->vq_index_end instead.
559  */
560 static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
561 {
562     struct vhost_vdpa *v = dev->opaque;
563 
564     return v->index == 0;
565 }
566 
567 static bool vhost_vdpa_last_dev(struct vhost_dev *dev)
568 {
569     return dev->vq_index + dev->nvqs == dev->vq_index_end;
570 }
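
/*
 * Illustrative sketch (never compiled): how the two helpers above gate
 * once-per-device work when a multiqueue device is split across several
 * vhost_dev instances.
 */
#if 0
static void example_once_per_device(struct vhost_dev *dev)
{
    if (vhost_vdpa_first_dev(dev)) {
        /* one-shot setup, e.g. VHOST_SET_OWNER or backend features */
    }
    if (vhost_vdpa_last_dev(dev)) {
        /* device-wide action, e.g. adding VIRTIO_CONFIG_S_DRIVER_OK */
    }
}
#endif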
571 
572 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
573                                        uint64_t *features)
574 {
575     int ret;
576 
577     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
578     trace_vhost_vdpa_get_features(dev, *features);
579     return ret;
580 }
581 
582 static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
583 {
584     g_autoptr(GPtrArray) shadow_vqs = NULL;
585 
586     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
587     for (unsigned n = 0; n < hdev->nvqs; ++n) {
588         VhostShadowVirtqueue *svq;
589 
590         svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
591         g_ptr_array_add(shadow_vqs, svq);
592     }
593 
594     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
595 }
596 
597 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
598 {
599     struct vhost_vdpa *v = dev->opaque;
600 
601     uint64_t features;
602     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
603         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
604         0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
605         0x1ULL << VHOST_BACKEND_F_SUSPEND;
606     int r;
607 
608     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
609         return -EFAULT;
610     }
611 
612     features &= f;
613 
614     if (vhost_vdpa_first_dev(dev)) {
615         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
616         if (r) {
617             return -EFAULT;
618         }
619     }
620 
621     dev->backend_cap = features;
622     v->shared->backend_cap = features;
623 
624     return 0;
625 }
626 
627 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
628 {
629     struct vhost_vdpa *v = opaque;
630     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
631     trace_vhost_vdpa_init(dev, v->shared, opaque);
632     int ret;
633 
634     v->dev = dev;
635     dev->opaque = opaque;
636 
637     ret = vhost_vdpa_set_backend_cap(dev);
638     if (unlikely(ret != 0)) {
639         return ret;
640     }
641 
642     vhost_vdpa_init_svq(dev, v);
643 
644     error_propagate(&dev->migration_blocker, v->migration_blocker);
645     if (!vhost_vdpa_first_dev(dev)) {
646         return 0;
647     }
648 
649     /*
650      * If dev->shadow_vqs_enabled is set at initialization, the device has
651      * been started with x-svq=on, so don't block migration.
652      */
653     if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
654         /* We don't have dev->features yet */
655         uint64_t features;
656         ret = vhost_vdpa_get_dev_features(dev, &features);
657         if (unlikely(ret)) {
658             error_setg_errno(errp, -ret, "Could not get device features");
659             return ret;
660         }
661         vhost_svq_valid_features(features, &dev->migration_blocker);
662     }
663 
664     /*
665      * Similar to VFIO, we end up pinning all guest memory and have to
666      * disable discarding of RAM.
667      */
668     ret = ram_block_discard_disable(true);
669     if (ret) {
670         error_report("Cannot disable discarding of RAM");
671         return ret;
672     }
673 
674     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
675                                VIRTIO_CONFIG_S_DRIVER);
676 
677     v->shared->listener = vhost_vdpa_memory_listener;
678     return 0;
679 }
680 
681 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
682                                             int queue_index)
683 {
684     size_t page_size = qemu_real_host_page_size();
685     struct vhost_vdpa *v = dev->opaque;
686     VirtIODevice *vdev = dev->vdev;
687     VhostVDPAHostNotifier *n;
688 
689     n = &v->notifier[queue_index];
690 
691     if (n->addr) {
692         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
693         object_unparent(OBJECT(&n->mr));
694         munmap(n->addr, page_size);
695         n->addr = NULL;
696     }
697 }
698 
699 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
700 {
701     size_t page_size = qemu_real_host_page_size();
702     struct vhost_vdpa *v = dev->opaque;
703     VirtIODevice *vdev = dev->vdev;
704     VhostVDPAHostNotifier *n;
705     int fd = v->shared->device_fd;
706     void *addr;
707     char *name;
708 
709     vhost_vdpa_host_notifier_uninit(dev, queue_index);
710 
711     n = &v->notifier[queue_index];
712 
713     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
714                 queue_index * page_size);
715     if (addr == MAP_FAILED) {
716         goto err;
717     }
718 
719     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
720                            v, queue_index);
721     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
722                                       page_size, addr);
723     g_free(name);
724 
725     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
726         object_unparent(OBJECT(&n->mr));
727         munmap(addr, page_size);
728         goto err;
729     }
730     n->addr = addr;
731 
732     return 0;
733 
734 err:
735     return -1;
736 }
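
/*
 * Illustrative sketch (never compiled): as the function above assumes, the
 * vhost-vdpa fd exposes one doorbell page per virtqueue at offset
 * queue_index * host_page_size, so a single queue's notify area can be
 * mapped like this (the caller must still check for MAP_FAILED).
 */
#if 0
static void *example_map_doorbell(int device_fd, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();

    return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, device_fd,
                queue_index * page_size);
}
#endif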
737 
738 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
739 {
740     int i;
741 
742     /*
743      * Pack all the changes to the memory regions in a single
744      * transaction to avoid repeated updates of the address space
745      * topology.
746      */
747     memory_region_transaction_begin();
748 
749     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
750         vhost_vdpa_host_notifier_uninit(dev, i);
751     }
752 
753     memory_region_transaction_commit();
754 }
755 
756 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
757 {
758     struct vhost_vdpa *v = dev->opaque;
759     int i;
760 
761     if (v->shadow_vqs_enabled) {
762         /* FIXME: SVQ is not compatible with host notifier memory regions */
763         return;
764     }
765 
766     /*
767      * Pack all the changes to the memory regions in a single
768      * transaction to avoid repeated updates of the address space
769      * topology.
770      */
771     memory_region_transaction_begin();
772 
773     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
774         if (vhost_vdpa_host_notifier_init(dev, i)) {
775             vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
776             break;
777         }
778     }
779 
780     memory_region_transaction_commit();
781 }
782 
783 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
784 {
785     struct vhost_vdpa *v = dev->opaque;
786     size_t idx;
787 
788     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
789         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
790     }
791     g_ptr_array_free(v->shadow_vqs, true);
792 }
793 
794 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
795 {
796     struct vhost_vdpa *v;
797     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
798     v = dev->opaque;
799     trace_vhost_vdpa_cleanup(dev, v);
800     if (vhost_vdpa_first_dev(dev)) {
801         ram_block_discard_disable(false);
802         memory_listener_unregister(&v->shared->listener);
803     }
804 
805     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
806     vhost_vdpa_svq_cleanup(dev);
807 
808     dev->opaque = NULL;
809 
810     return 0;
811 }
812 
813 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
814 {
815     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
816     return INT_MAX;
817 }
818 
819 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
820                                     struct vhost_memory *mem)
821 {
822     if (!vhost_vdpa_first_dev(dev)) {
823         return 0;
824     }
825 
826     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
827     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
828         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
829         int i;
830         for (i = 0; i < mem->nregions; i++) {
831             trace_vhost_vdpa_dump_regions(dev, i,
832                                           mem->regions[i].guest_phys_addr,
833                                           mem->regions[i].memory_size,
834                                           mem->regions[i].userspace_addr,
835                                           mem->regions[i].flags_padding);
836         }
837     }
838     if (mem->padding) {
839         return -EINVAL;
840     }
841 
842     return 0;
843 }
844 
845 static int vhost_vdpa_set_features(struct vhost_dev *dev,
846                                    uint64_t features)
847 {
848     struct vhost_vdpa *v = dev->opaque;
849     int ret;
850 
851     if (!vhost_vdpa_first_dev(dev)) {
852         return 0;
853     }
854 
855     if (v->shadow_vqs_enabled) {
856         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
857             /*
858              * QEMU is just trying to enable or disable logging. SVQ handles
859              * this separately, so there is no need to forward it.
860              */
861             v->acked_features = features;
862             return 0;
863         }
864 
865         v->acked_features = features;
866 
867         /* We must not ack _F_LOG if SVQ is enabled */
868         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
869     }
870 
871     trace_vhost_vdpa_set_features(dev, features);
872     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
873     if (ret) {
874         return ret;
875     }
876 
877     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
878 }
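
/*
 * Illustrative sketch (never compiled): the XOR test in
 * vhost_vdpa_set_features() is true exactly when the only bit that changed
 * is VHOST_F_LOG_ALL, i.e. QEMU is merely toggling dirty logging, which SVQ
 * handles on its own.
 */
#if 0
static bool example_only_log_toggled(uint64_t acked, uint64_t requested)
{
    return (acked ^ requested) == BIT_ULL(VHOST_F_LOG_ALL);
}
#endif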
879 
880 static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
881                                     uint32_t *device_id)
882 {
883     int ret;
884     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
885     trace_vhost_vdpa_get_device_id(dev, *device_id);
886     return ret;
887 }
888 
889 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
890 {
891     struct vhost_vdpa *v = dev->opaque;
892     int ret;
893     uint8_t status = 0;
894 
895     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
896     trace_vhost_vdpa_reset_device(dev);
897     if (ret) {
898         return ret;
899     }
900 
901     memory_listener_unregister(&v->shared->listener);
902     v->shared->listener_registered = false;
903     v->suspended = false;
904     return 0;
905 }
906 
907 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
908 {
909     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
910 
911     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
912     return idx;
913 }
914 
915 static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx,
916                                            int enable)
917 {
918     struct vhost_dev *dev = v->dev;
919     struct vhost_vring_state state = {
920         .index = idx,
921         .num = enable,
922     };
923     int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
924 
925     trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r);
926     return r;
927 }
928 
929 static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable)
930 {
931     struct vhost_vdpa *v = dev->opaque;
932     unsigned int i;
933     int ret;
934 
935     for (i = 0; i < dev->nvqs; ++i) {
936         ret = vhost_vdpa_set_vring_enable_one(v, i, enable);
937         if (ret < 0) {
938             return ret;
939         }
940     }
941 
942     return 0;
943 }
944 
945 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
946 {
947     return vhost_vdpa_set_vring_enable_one(v, idx, 1);
948 }
949 
950 static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
951                                        int fd)
952 {
953     trace_vhost_vdpa_set_config_call(dev, fd);
954     return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
955 }
956 
957 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
958                                    uint32_t config_len)
959 {
960     g_autoptr(GString) str = g_string_sized_new(4 * 16);
961     size_t b, len;
962 
963     for (b = 0; b < config_len; b += len) {
964         len = MIN(config_len - b, 16);
965 
966         g_string_truncate(str, 0);
967         qemu_hexdump_line(str, config + b, len, 1, 4);
968         trace_vhost_vdpa_dump_config(dev, b, str->str);
969     }
970 }
971 
972 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
973                                    uint32_t offset, uint32_t size,
974                                    uint32_t flags)
975 {
976     struct vhost_vdpa_config *config;
977     int ret;
978     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
979 
980     trace_vhost_vdpa_set_config(dev, offset, size, flags);
981     config = g_malloc(size + config_size);
982     config->off = offset;
983     config->len = size;
984     memcpy(config->buf, data, size);
985     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
986         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
987         vhost_vdpa_dump_config(dev, data, size);
988     }
989     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
990     g_free(config);
991     return ret;
992 }
993 
994 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
995                                    uint32_t config_len, Error **errp)
996 {
997     struct vhost_vdpa_config *v_config;
998     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
999     int ret;
1000 
1001     trace_vhost_vdpa_get_config(dev, config, config_len);
1002     v_config = g_malloc(config_len + config_size);
1003     v_config->len = config_len;
1004     v_config->off = 0;
1005     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
1006     memcpy(config, v_config->buf, config_len);
1007     g_free(v_config);
1008     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
1009         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
1010         vhost_vdpa_dump_config(dev, config, config_len);
1011     }
1012     return ret;
1013 }
1014 
1015 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
1016                                          struct vhost_vring_state *ring)
1017 {
1018     struct vhost_vdpa *v = dev->opaque;
1019 
1020     trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num,
1021                                         v->shadow_vqs_enabled);
1022     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
1023 }
1024 
1025 static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
1026                                          struct vhost_vring_file *file)
1027 {
1028     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
1029     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
1030 }
1031 
1032 static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
1033                                          struct vhost_vring_file *file)
1034 {
1035     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
1036     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
1037 }
1038 
1039 static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
1040                                          struct vhost_vring_addr *addr)
1041 {
1042     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
1043                                 addr->desc_user_addr, addr->used_user_addr,
1044                                 addr->avail_user_addr,
1045                                 addr->log_guest_addr);
1046 
1047     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
1048 
1049 }
1050 
1051 /**
1052  * Set the shadow virtqueue descriptors to the device
1053  *
1054  * @dev: The vhost device model
1055  * @svq: The shadow virtqueue
1056  * @idx: The index of the virtqueue in the vhost device
1057  * @errp: Error
1058  *
1059  * Note that this function does not rewind the kick file descriptor if it
1060  * cannot set the call one.
1061  */
1062 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
1063                                   VhostShadowVirtqueue *svq, unsigned idx,
1064                                   Error **errp)
1065 {
1066     struct vhost_vring_file file = {
1067         .index = dev->vq_index + idx,
1068     };
1069     const EventNotifier *event_notifier = &svq->hdev_kick;
1070     int r;
1071 
1072     r = event_notifier_init(&svq->hdev_kick, 0);
1073     if (r != 0) {
1074         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
1075         goto err_init_hdev_kick;
1076     }
1077 
1078     r = event_notifier_init(&svq->hdev_call, 0);
1079     if (r != 0) {
1080         error_setg_errno(errp, -r, "Couldn't create call event notifier");
1081         goto err_init_hdev_call;
1082     }
1083 
1084     file.fd = event_notifier_get_fd(event_notifier);
1085     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
1086     if (unlikely(r != 0)) {
1087         error_setg_errno(errp, -r, "Can't set device kick fd");
1088         goto err_init_set_dev_fd;
1089     }
1090 
1091     event_notifier = &svq->hdev_call;
1092     file.fd = event_notifier_get_fd(event_notifier);
1093     r = vhost_vdpa_set_vring_dev_call(dev, &file);
1094     if (unlikely(r != 0)) {
1095         error_setg_errno(errp, -r, "Can't set device call fd");
1096         goto err_init_set_dev_fd;
1097     }
1098 
1099     return 0;
1100 
1101 err_init_set_dev_fd:
1102     event_notifier_set_handler(&svq->hdev_call, NULL);
1103 
1104 err_init_hdev_call:
1105     event_notifier_cleanup(&svq->hdev_kick);
1106 
1107 err_init_hdev_kick:
1108     return r;
1109 }
1110 
1111 /**
1112  * Unmap a SVQ area in the device
1113  */
1114 static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
1115 {
1116     const DMAMap needle = {
1117         .translated_addr = addr,
1118     };
1119     const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
1120                                                      &needle);
1121     hwaddr size;
1122     int r;
1123 
1124     if (unlikely(!result)) {
1125         error_report("Unable to find SVQ address to unmap");
1126         return;
1127     }
1128 
1129     size = ROUND_UP(result->size, qemu_real_host_page_size());
1130     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
1131                              size);
1132     if (unlikely(r < 0)) {
1133         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
1134         return;
1135     }
1136 
1137     vhost_iova_tree_remove(v->shared->iova_tree, *result);
1138 }
1139 
1140 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
1141                                        const VhostShadowVirtqueue *svq)
1142 {
1143     struct vhost_vdpa *v = dev->opaque;
1144     struct vhost_vring_addr svq_addr;
1145 
1146     vhost_svq_get_vring_addr(svq, &svq_addr);
1147 
1148     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
1149 
1150     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
1151 }
1152 
1153 /**
1154  * Map the SVQ area in the device
1155  *
1156  * @v: Vhost-vdpa device
1157  * @needle: The area for which to allocate an IOVA
1158  * @taddr: The translated address (HVA)
1159  * @errp: Error pointer
1160  */
1161 static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
1162                                     hwaddr taddr, Error **errp)
1163 {
1164     int r;
1165 
1166     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr);
1167     if (unlikely(r != IOVA_OK)) {
1168         error_setg(errp, "Cannot allocate iova (%d)", r);
1169 
1170         if (needle->translated_addr == taddr) {
1171             error_append_hint(errp, "Insertion to IOVA->HVA tree failed");
1172             /* Remove the mapping from the IOVA-only tree */
1173             vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1174         }
1175         return false;
1176     }
1177 
1178     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
1179                            needle->size + 1,
1180                            (void *)(uintptr_t)needle->translated_addr,
1181                            needle->perm == IOMMU_RO);
1182     if (unlikely(r != 0)) {
1183         error_setg_errno(errp, -r, "Cannot map region to device");
1184         vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1185     }
1186 
1187     return r == 0;
1188 }
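
/*
 * Illustrative sketch (never compiled): mapping a page-aligned 4 KiB host
 * buffer through the helper above. Note the DMAMap convention used in this
 * file: .size holds the length minus one, and the helper adds the 1 back
 * when calling vhost_vdpa_dma_map().
 */
#if 0
static bool example_map_buffer(struct vhost_vdpa *v, void *buf, Error **errp)
{
    DMAMap region = {
        .size = 4096 - 1,
        .perm = IOMMU_RW,
    };

    return vhost_vdpa_svq_map_ring(v, &region, (hwaddr)(uintptr_t)buf, errp);
}
#endif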
1189 
1190 /**
1191  * Map the shadow virtqueue rings in the device
1192  *
1193  * @dev: The vhost device
1194  * @svq: The shadow virtqueue
1195  * @addr: Assigned IOVA addresses
1196  * @errp: Error pointer
1197  */
1198 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
1199                                      const VhostShadowVirtqueue *svq,
1200                                      struct vhost_vring_addr *addr,
1201                                      Error **errp)
1202 {
1203     ERRP_GUARD();
1204     DMAMap device_region, driver_region;
1205     struct vhost_vring_addr svq_addr;
1206     struct vhost_vdpa *v = dev->opaque;
1207     size_t device_size = vhost_svq_device_area_size(svq);
1208     size_t driver_size = vhost_svq_driver_area_size(svq);
1209     size_t avail_offset;
1210     bool ok;
1211 
1212     vhost_svq_get_vring_addr(svq, &svq_addr);
1213 
1214     driver_region = (DMAMap) {
1215         .size = driver_size - 1,
1216         .perm = IOMMU_RO,
1217     };
1218     ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.desc_user_addr,
1219                                  errp);
1220     if (unlikely(!ok)) {
1221         error_prepend(errp, "Cannot create vq driver region: ");
1222         return false;
1223     }
1224     addr->desc_user_addr = driver_region.iova;
1225     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
1226     addr->avail_user_addr = driver_region.iova + avail_offset;
1227 
1228     device_region = (DMAMap) {
1229         .size = device_size - 1,
1230         .perm = IOMMU_RW,
1231     };
1232     ok = vhost_vdpa_svq_map_ring(v, &device_region, svq_addr.used_user_addr,
1233                                  errp);
1234     if (unlikely(!ok)) {
1235         error_prepend(errp, "Cannot create vq device region: ");
1236         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1237     }
1238     addr->used_user_addr = device_region.iova;
1239 
1240     return ok;
1241 }
1242 
1243 static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1244                                  VhostShadowVirtqueue *svq, unsigned idx,
1245                                  Error **errp)
1246 {
1247     uint16_t vq_index = dev->vq_index + idx;
1248     struct vhost_vring_state s = {
1249         .index = vq_index,
1250     };
1251     int r;
1252 
1253     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1254     if (unlikely(r)) {
1255         error_setg_errno(errp, -r, "Cannot set vring base");
1256         return false;
1257     }
1258 
1259     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1260     return r == 0;
1261 }
1262 
1263 static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1264 {
1265     struct vhost_vdpa *v = dev->opaque;
1266     Error *err = NULL;
1267     unsigned i;
1268 
1269     if (!v->shadow_vqs_enabled) {
1270         return true;
1271     }
1272 
1273     for (i = 0; i < v->shadow_vqs->len; ++i) {
1274         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1275         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1276         struct vhost_vring_addr addr = {
1277             .index = dev->vq_index + i,
1278         };
1279         int r;
1280         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1281         if (unlikely(!ok)) {
1282             goto err;
1283         }
1284 
1285         vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
1286         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1287         if (unlikely(!ok)) {
1288             goto err_map;
1289         }
1290 
1291         /* Override vring GPA set by vhost subsystem */
1292         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1293         if (unlikely(r != 0)) {
1294             error_setg_errno(&err, -r, "Cannot set device address");
1295             goto err_set_addr;
1296         }
1297     }
1298 
1299     return true;
1300 
1301 err_set_addr:
1302     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1303 
1304 err_map:
1305     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1306 
1307 err:
1308     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1309     for (unsigned j = 0; j < i; ++j) {
1310         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1311         vhost_vdpa_svq_unmap_rings(dev, svq);
1312         vhost_svq_stop(svq);
1313     }
1314 
1315     return false;
1316 }
1317 
1318 static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1319 {
1320     struct vhost_vdpa *v = dev->opaque;
1321 
1322     if (!v->shadow_vqs_enabled) {
1323         return;
1324     }
1325 
1326     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1327         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1328 
1329         vhost_svq_stop(svq);
1330         vhost_vdpa_svq_unmap_rings(dev, svq);
1331 
1332         event_notifier_cleanup(&svq->hdev_kick);
1333         event_notifier_cleanup(&svq->hdev_call);
1334     }
1335 }
1336 
1337 static void vhost_vdpa_suspend(struct vhost_dev *dev)
1338 {
1339     struct vhost_vdpa *v = dev->opaque;
1340     int r;
1341 
1342     if (!vhost_vdpa_first_dev(dev)) {
1343         return;
1344     }
1345 
1346     if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
1347         trace_vhost_vdpa_suspend(dev);
1348         r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
1349         if (unlikely(r)) {
1350             error_report("Cannot suspend: %s (%d)", g_strerror(errno), errno);
1351         } else {
1352             v->suspended = true;
1353             return;
1354         }
1355     }
1356 
1357     vhost_vdpa_reset_device(dev);
1358 }
1359 
1360 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1361 {
1362     struct vhost_vdpa *v = dev->opaque;
1363     bool ok;
1364     trace_vhost_vdpa_dev_start(dev, started);
1365 
1366     if (started) {
1367         vhost_vdpa_host_notifiers_init(dev);
1368         ok = vhost_vdpa_svqs_start(dev);
1369         if (unlikely(!ok)) {
1370             return -1;
1371         }
1372     } else {
1373         vhost_vdpa_suspend(dev);
1374         vhost_vdpa_svqs_stop(dev);
1375         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
1376     }
1377 
1378     if (!vhost_vdpa_last_dev(dev)) {
1379         return 0;
1380     }
1381 
1382     if (started) {
1383         if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
1384             error_report("SVQ cannot work while the IOMMU is enabled, "
1385                          "please disable the IOMMU and try again");
1386             return -1;
1387         }
1388         if (v->shared->listener_registered &&
1389             dev->vdev->dma_as != v->shared->listener.address_space) {
1390             memory_listener_unregister(&v->shared->listener);
1391             v->shared->listener_registered = false;
1392         }
1393         if (!v->shared->listener_registered) {
1394             memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
1395             v->shared->listener_registered = true;
1396         }
1397 
1398         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1399     }
1400 
1401     return 0;
1402 }
1403 
1404 static void vhost_vdpa_reset_status(struct vhost_dev *dev)
1405 {
1406     if (!vhost_vdpa_last_dev(dev)) {
1407         return;
1408     }
1409 
1410     vhost_vdpa_reset_device(dev);
1411     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1412                                VIRTIO_CONFIG_S_DRIVER);
1413 }
1414 
1415 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1416                                      struct vhost_log *log)
1417 {
1418     struct vhost_vdpa *v = dev->opaque;
1419     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
1420         return 0;
1421     }
1422 
1423     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1424                                   log->log);
1425     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1426 }
1427 
1428 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1429                                        struct vhost_vring_addr *addr)
1430 {
1431     struct vhost_vdpa *v = dev->opaque;
1432 
1433     if (v->shadow_vqs_enabled) {
1434         /*
1435          * Device vring addr was set at device start. SVQ base is handled by
1436          * VirtQueue code.
1437          */
1438         return 0;
1439     }
1440 
1441     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1442 }
1443 
1444 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1445                                       struct vhost_vring_state *ring)
1446 {
1447     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1448     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1449 }
1450 
1451 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1452                                        struct vhost_vring_state *ring)
1453 {
1454     struct vhost_vdpa *v = dev->opaque;
1455 
1456     if (v->shadow_vqs_enabled) {
1457         /*
1458          * Device vring base was set at device start. SVQ base is handled by
1459          * VirtQueue code.
1460          */
1461         return 0;
1462     }
1463 
1464     return vhost_vdpa_set_dev_vring_base(dev, ring);
1465 }
1466 
1467 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1468                                        struct vhost_vring_state *ring)
1469 {
1470     struct vhost_vdpa *v = dev->opaque;
1471     int ret;
1472 
1473     if (v->shadow_vqs_enabled) {
1474         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
1475         trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true);
1476         return 0;
1477     }
1478 
1479     if (!v->suspended) {
1480         /*
1481          * Cannot trust in value returned by device, let vhost recover used
1482          * idx from guest.
1483          */
1484         return -1;
1485     }
1486 
1487     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1488     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false);
1489     return ret;
1490 }
1491 
1492 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1493                                        struct vhost_vring_file *file)
1494 {
1495     struct vhost_vdpa *v = dev->opaque;
1496     int vdpa_idx = file->index - dev->vq_index;
1497 
1498     if (v->shadow_vqs_enabled) {
1499         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1500         vhost_svq_set_svq_kick_fd(svq, file->fd);
1501         return 0;
1502     } else {
1503         return vhost_vdpa_set_vring_dev_kick(dev, file);
1504     }
1505 }
1506 
1507 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1508                                        struct vhost_vring_file *file)
1509 {
1510     struct vhost_vdpa *v = dev->opaque;
1511     int vdpa_idx = file->index - dev->vq_index;
1512     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1513 
1514     /* Remember last call fd because we can switch to SVQ anytime. */
1515     vhost_svq_set_svq_call_fd(svq, file->fd);
1516     /*
1517      * When SVQ is transitioning to off, shadow_vqs_enabled has
1518      * not been set back to false yet, but the underlying call fd
1519      * will have to switch back to the guest notifier to signal the
1520      * passthrough virtqueues. In other situations, SVQ's own call
1521      * fd shall be used to signal the device model.
1522      */
1523     if (v->shadow_vqs_enabled &&
1524         v->shared->svq_switching != SVQ_TSTATE_DISABLING) {
1525         return 0;
1526     }
1527 
1528     return vhost_vdpa_set_vring_dev_call(dev, file);
1529 }
1530 
1531 static int vhost_vdpa_get_features(struct vhost_dev *dev,
1532                                      uint64_t *features)
1533 {
1534     int ret = vhost_vdpa_get_dev_features(dev, features);
1535 
1536     if (ret == 0) {
1537         /* Add SVQ logging capabilities */
1538         *features |= BIT_ULL(VHOST_F_LOG_ALL);
1539     }
1540 
1541     return ret;
1542 }
1543 
1544 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1545 {
1546     int r;
1547     struct vhost_vdpa *v;
1548 
1549     if (!vhost_vdpa_first_dev(dev)) {
1550         return 0;
1551     }
1552 
1553     trace_vhost_vdpa_set_owner(dev);
1554     r = vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1555     if (unlikely(r < 0)) {
1556         return r;
1557     }
1558 
1559     /*
1560      * Be optimistic and listen on the whole memory address space. If the
1561      * device uses a vIOMMU, the listener is moved in vhost_vdpa_dev_start.
1562      */
1563     v = dev->opaque;
1564     memory_listener_register(&v->shared->listener, &address_space_memory);
1565     v->shared->listener_registered = true;
1566     return 0;
1567 }
1568 
1569 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1570                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1571 {
1572     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1573     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1574     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1575     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1576     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1577                                  addr->avail_user_addr, addr->used_user_addr);
1578     return 0;
1579 }
1580 
1581 static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1582 {
1583     return true;
1584 }
1585 
1586 const VhostOps vdpa_ops = {
1587         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1588         .vhost_backend_init = vhost_vdpa_init,
1589         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1590         .vhost_set_log_base = vhost_vdpa_set_log_base,
1591         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1592         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1593         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1594         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1595         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1596         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1597         .vhost_get_features = vhost_vdpa_get_features,
1598         .vhost_set_owner = vhost_vdpa_set_owner,
1599         .vhost_set_vring_endian = NULL,
1600         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1601         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1602         .vhost_set_features = vhost_vdpa_set_features,
1603         .vhost_reset_device = vhost_vdpa_reset_device,
1604         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1605         .vhost_set_vring_enable = vhost_vdpa_set_vring_enable,
1606         .vhost_get_config  = vhost_vdpa_get_config,
1607         .vhost_set_config = vhost_vdpa_set_config,
1608         .vhost_requires_shm_log = NULL,
1609         .vhost_migration_done = NULL,
1610         .vhost_net_set_mtu = NULL,
1611         .vhost_set_iotlb_callback = NULL,
1612         .vhost_send_device_iotlb_msg = NULL,
1613         .vhost_dev_start = vhost_vdpa_dev_start,
1614         .vhost_get_device_id = vhost_vdpa_get_device_id,
1615         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1616         .vhost_force_iommu = vhost_vdpa_force_iommu,
1617         .vhost_set_config_call = vhost_vdpa_set_config_call,
1618         .vhost_reset_status = vhost_vdpa_reset_status,
1619 };
1620