xref: /qemu/hw/virtio/vhost-vdpa.c (revision 92cf61e70838c20adc82daa3170fdbb9d174b508)
1 /*
2  * vhost-vdpa
3  *
4  *  Copyright(c) 2017-2018 Intel Corporation.
5  *  Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include <linux/vhost.h>
14 #include <linux/vfio.h>
15 #include <sys/eventfd.h>
16 #include <sys/ioctl.h>
17 #include "exec/target_page.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "exec/address-spaces.h"
24 #include "migration/blocker.h"
25 #include "qemu/cutils.h"
26 #include "qemu/main-loop.h"
27 #include "trace.h"
28 #include "qapi/error.h"
29 
30 /*
31  * Return one past the end of the section. Be careful with uint64_t
32  * conversions!
33  */
34 static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section,
35                                      int page_mask)
36 {
37     Int128 llend = int128_make64(section->offset_within_address_space);
38     llend = int128_add(llend, section->size);
39     llend = int128_and(llend, int128_exts64(page_mask));
40 
41     return llend;
42 }
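
/*
 * Illustrative example for vhost_vdpa_section_end() (values assumed): with a
 * 4 KiB target page size, page_mask is -4096. A section at
 * offset_within_address_space 0x1000 with size 0x2000 gives
 * 0x1000 + 0x2000 = 0x3000, and 0x3000 & -4096 = 0x3000, i.e. one byte past
 * the last byte (0x2fff) covered by the section.
 */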
43 
44 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
45                                                 uint64_t iova_min,
46                                                 uint64_t iova_max,
47                                                 int page_mask)
48 {
49     Int128 llend;
50     bool is_ram = memory_region_is_ram(section->mr);
51     bool is_iommu = memory_region_is_iommu(section->mr);
52     bool is_protected = memory_region_is_protected(section->mr);
53 
54     /* vhost-vDPA doesn't allow MMIO to be mapped */
55     bool is_ram_device = memory_region_is_ram_device(section->mr);
56 
57     if ((!is_ram && !is_iommu) || is_protected || is_ram_device) {
58         trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected,
59                                                 is_ram_device, iova_min,
60                                                 iova_max, page_mask);
61         return true;
62     }
63 
64     if (section->offset_within_address_space < iova_min) {
65         error_report("RAM section out of device range (min=0x%" PRIx64
66                      ", addr=0x%" HWADDR_PRIx ")",
67                      iova_min, section->offset_within_address_space);
68         return true;
69     }
70     /*
71      * When a vIOMMU is in use, the section can be larger than iova_max while
72      * the memory that is actually mapped is smaller, so the check is deferred
73      * to vhost_vdpa_iommu_map_notify(), which uses the actual size that is
74      * mapped to the kernel.
75      */
76 
77     if (!is_iommu) {
78         llend = vhost_vdpa_section_end(section, page_mask);
79         if (int128_gt(llend, int128_make64(iova_max))) {
80             error_report("RAM section out of device range (max=0x%" PRIx64
81                          ", end addr=0x%" PRIx64 ")",
82                          iova_max, int128_get64(llend));
83             return true;
84         }
85     }
86 
87     return false;
88 }
89 
90 /*
91  * The caller must set asid = 0 if the device does not support asid.
92  * This is not an ABI break since it is set to 0 by the initializer anyway.
93  */
94 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
95                        hwaddr size, void *vaddr, bool readonly)
96 {
97     struct vhost_msg_v2 msg = {};
98     int fd = s->device_fd;
99     int ret = 0;
100 
101     msg.type = VHOST_IOTLB_MSG_V2;
102     msg.asid = asid;
103     msg.iotlb.iova = iova;
104     msg.iotlb.size = size;
105     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
106     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
107     msg.iotlb.type = VHOST_IOTLB_UPDATE;
108 
109     trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
110                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
111                              msg.iotlb.type);
112 
113     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
114         error_report("failed to write, fd=%d, errno=%d (%s)",
115             fd, errno, strerror(errno));
116         return -EIO;
117     }
118 
119     return ret;
120 }
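
/*
 * Usage sketch for vhost_vdpa_dma_map() (illustrative, values assumed):
 * mapping one 4 KiB page of guest memory read/write into the guest physical
 * address space would look like
 *
 *     vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, 0x1000,
 *                        host_va, false);
 *
 * where iova and host_va are assumed to be suitably aligned. The call
 * translates into a single VHOST_IOTLB_UPDATE message on the device fd.
 */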
121 
122 /*
123  * The caller must set asid = 0 if the device does not support asid.
124  * This is not an ABI break since it is set to 0 by the initializer anyway.
125  */
126 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
127                          hwaddr size)
128 {
129     struct vhost_msg_v2 msg = {};
130     int fd = s->device_fd;
131     int ret = 0;
132 
133     msg.type = VHOST_IOTLB_MSG_V2;
134     msg.asid = asid;
135     msg.iotlb.iova = iova;
136     msg.iotlb.size = size;
137     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
138 
139     trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
140                                msg.iotlb.size, msg.iotlb.type);
141 
142     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
143         error_report("failed to write, fd=%d, errno=%d (%s)",
144             fd, errno, strerror(errno));
145         return -EIO;
146     }
147 
148     return ret;
149 }
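
/*
 * Note on the message above: for VHOST_IOTLB_INVALIDATE only the iova and
 * size fields are meaningful; uaddr and perm stay zero because msg is
 * zero-initialized.
 */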
150 
151 static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
152 {
153     int fd = s->device_fd;
154     struct vhost_msg_v2 msg = {
155         .type = VHOST_IOTLB_MSG_V2,
156         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
157     };
158 
159     trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
160     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
161         error_report("failed to write, fd=%d, errno=%d (%s)",
162                      fd, errno, strerror(errno));
163     }
164 }
165 
166 static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
167 {
168     if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
169         !s->iotlb_batch_begin_sent) {
170         vhost_vdpa_listener_begin_batch(s);
171     }
172 
173     s->iotlb_batch_begin_sent = true;
174 }
175 
176 static void vhost_vdpa_listener_commit(MemoryListener *listener)
177 {
178     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
179     struct vhost_msg_v2 msg = {};
180     int fd = s->device_fd;
181 
182     if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
183         return;
184     }
185 
186     if (!s->iotlb_batch_begin_sent) {
187         return;
188     }
189 
190     msg.type = VHOST_IOTLB_MSG_V2;
191     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
192 
193     trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type);
194     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
195         error_report("failed to write, fd=%d, errno=%d (%s)",
196                      fd, errno, strerror(errno));
197     }
198 
199     s->iotlb_batch_begin_sent = false;
200 }
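
/*
 * Batching sketch (for orientation): when VHOST_BACKEND_F_IOTLB_BATCH has
 * been negotiated, a memory transaction turns into a message sequence like
 *
 *     VHOST_IOTLB_BATCH_BEGIN      (lazily, from batch_begin_once)
 *     VHOST_IOTLB_UPDATE ...       (one per region added)
 *     VHOST_IOTLB_INVALIDATE ...   (one per region removed)
 *     VHOST_IOTLB_BATCH_END        (from the commit callback above)
 *
 * so the kernel can apply all the changes of the transaction at once.
 */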
201 
202 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
203 {
204     struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
205 
206     hwaddr iova = iotlb->iova + iommu->iommu_offset;
207     VhostVDPAShared *s = iommu->dev_shared;
208     void *vaddr;
209     int ret;
210     Int128 llend;
211     Error *local_err = NULL;
212 
213     if (iotlb->target_as != &address_space_memory) {
214         error_report("Wrong target AS \"%s\", only system memory is allowed",
215                      iotlb->target_as->name ? iotlb->target_as->name : "none");
216         return;
217     }
218     RCU_READ_LOCK_GUARD();
219     /* Check if the RAM section is out of the device range */
220     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
221     if (int128_gt(llend, int128_make64(s->iova_range.last))) {
222         error_report("RAM section out of device range (max=0x%" PRIx64
223                      ", end addr=0x%" PRIx64 ")",
224                      s->iova_range.last, int128_get64(llend));
225         return;
226     }
227 
228     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
229         bool read_only;
230 
231         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
232                                   &local_err)) {
233             error_report_err(local_err);
234             return;
235         }
236         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
237                                  iotlb->addr_mask + 1, vaddr, read_only);
238         if (ret) {
239             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
240                          "0x%" HWADDR_PRIx ", %p) = %d (%m)",
241                          s, iova, iotlb->addr_mask + 1, vaddr, ret);
242         }
243     } else {
244         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
245                                    iotlb->addr_mask + 1);
246         if (ret) {
247             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
248                          "0x%" HWADDR_PRIx ") = %d (%m)",
249                          s, iova, iotlb->addr_mask + 1, ret);
250         }
251     }
252 }
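
/*
 * In the notifier above the direction is selected by iotlb->perm: a readable
 * and/or writable entry is forwarded as a map of iotlb->addr_mask + 1 bytes,
 * while IOMMU_NONE is forwarded as an unmap of the same span, both in the
 * guest PA address space ID.
 */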
253 
254 static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
255                                         MemoryRegionSection *section)
256 {
257     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
258 
259     struct vdpa_iommu *iommu;
260     Int128 end;
261     int iommu_idx;
262     IOMMUMemoryRegion *iommu_mr;
263     int ret;
264 
265     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
266 
267     iommu = g_malloc0(sizeof(*iommu));
268     end = int128_add(int128_make64(section->offset_within_region),
269                      section->size);
270     end = int128_sub(end, int128_one());
271     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
272                                                    MEMTXATTRS_UNSPECIFIED);
273     iommu->iommu_mr = iommu_mr;
274     iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
275                         IOMMU_NOTIFIER_IOTLB_EVENTS,
276                         section->offset_within_region,
277                         int128_get64(end),
278                         iommu_idx);
279     iommu->iommu_offset = section->offset_within_address_space -
280                           section->offset_within_region;
281     iommu->dev_shared = s;
282 
283     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
284     if (ret) {
285         g_free(iommu);
286         return;
287     }
288 
289     QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
290     memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
291 
292     return;
293 }
294 
295 static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
296                                         MemoryRegionSection *section)
297 {
298     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
299 
300     struct vdpa_iommu *iommu;
301 
302     QLIST_FOREACH(iommu, &s->iommu_list, iommu_next)
303     {
304         if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
305             iommu->n.start == section->offset_within_region) {
306             memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
307             QLIST_REMOVE(iommu, iommu_next);
308             g_free(iommu);
309             break;
310         }
311     }
312 }
313 
314 static void vhost_vdpa_listener_region_add(MemoryListener *listener,
315                                            MemoryRegionSection *section)
316 {
317     DMAMap mem_region = {};
318     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
319     hwaddr iova;
320     Int128 llend, llsize;
321     void *vaddr;
322     int ret;
323     int page_size = qemu_target_page_size();
324     int page_mask = -page_size;
325 
326     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
327                                             s->iova_range.last, page_mask)) {
328         return;
329     }
330     if (memory_region_is_iommu(section->mr)) {
331         vhost_vdpa_iommu_region_add(listener, section);
332         return;
333     }
334 
335     if (unlikely((section->offset_within_address_space & ~page_mask) !=
336                  (section->offset_within_region & ~page_mask))) {
337         trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name,
338                        section->offset_within_address_space & ~page_mask,
339                        section->offset_within_region & ~page_mask);
340         return;
341     }
342 
343     iova = ROUND_UP(section->offset_within_address_space, page_size);
344     llend = vhost_vdpa_section_end(section, page_mask);
345     if (int128_ge(int128_make64(iova), llend)) {
346         return;
347     }
348 
349     memory_region_ref(section->mr);
350 
351     /* Here we assume that memory_region_is_ram(section->mr)==true */
352 
353     vaddr = memory_region_get_ram_ptr(section->mr) +
354             section->offset_within_region +
355             (iova - section->offset_within_address_space);
356 
357     trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend),
358                                          vaddr, section->readonly);
359 
360     llsize = int128_sub(llend, int128_make64(iova));
361     if (s->shadow_data) {
362         int r;
363         hwaddr hw_vaddr = (hwaddr)(uintptr_t)vaddr;
364 
365         mem_region.size = int128_get64(llsize) - 1;
366         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
367 
368         r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region, hw_vaddr);
369         if (unlikely(r != IOVA_OK)) {
370             error_report("Can't allocate a mapping (%d)", r);
371 
372             if (mem_region.translated_addr == hw_vaddr) {
373                 error_report("Insertion to IOVA->HVA tree failed");
374                 /* Remove the mapping from the IOVA-only tree */
375                 goto fail_map;
376             }
377             goto fail;
378         }
379 
380         iova = mem_region.iova;
381     }
382 
383     vhost_vdpa_iotlb_batch_begin_once(s);
384     ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
385                              int128_get64(llsize), vaddr, section->readonly);
386     if (ret) {
387         error_report("vhost vdpa map fail!");
388         goto fail_map;
389     }
390 
391     return;
392 
393 fail_map:
394     if (s->shadow_data) {
395         vhost_iova_tree_remove(s->iova_tree, mem_region);
396     }
397 
398 fail:
399     /*
400      * On the initfn path, store the first error in the container so we
401      * can gracefully fail.  At runtime, there's not much we can do other
402      * than throw a hardware error.
403      */
404     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
405     return;
406 
407 }
408 
409 static void vhost_vdpa_listener_region_del(MemoryListener *listener,
410                                            MemoryRegionSection *section)
411 {
412     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
413     hwaddr iova;
414     Int128 llend, llsize;
415     int ret;
416     int page_size = qemu_target_page_size();
417     int page_mask = -page_size;
418 
419     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
420                                             s->iova_range.last, page_mask)) {
421         return;
422     }
423     if (memory_region_is_iommu(section->mr)) {
424         vhost_vdpa_iommu_region_del(listener, section);
425     }
426 
427     if (unlikely((section->offset_within_address_space & ~page_mask) !=
428                  (section->offset_within_region & ~page_mask))) {
429         trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name,
430                        section->offset_within_address_space & ~page_mask,
431                        section->offset_within_region & ~page_mask);
432         return;
433     }
434 
435     iova = ROUND_UP(section->offset_within_address_space, page_size);
436     llend = vhost_vdpa_section_end(section, page_mask);
437 
438     trace_vhost_vdpa_listener_region_del(s, iova,
439         int128_get64(int128_sub(llend, int128_one())));
440 
441     if (int128_ge(int128_make64(iova), llend)) {
442         return;
443     }
444 
445     llsize = int128_sub(llend, int128_make64(iova));
446 
447     if (s->shadow_data) {
448         const DMAMap *result;
449         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
450             section->offset_within_region +
451             (iova - section->offset_within_address_space);
452         DMAMap mem_region = {
453             .translated_addr = (hwaddr)(uintptr_t)vaddr,
454             .size = int128_get64(llsize) - 1,
455         };
456 
457         result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region);
458         if (!result) {
459             /* The region wasn't mapped by the memory listener */
460             return;
461         }
462         iova = result->iova;
463         vhost_iova_tree_remove(s->iova_tree, *result);
464     }
465     vhost_vdpa_iotlb_batch_begin_once(s);
466     /*
467      * The unmap ioctl doesn't accept a full 64-bit span, so split it in two.
468      */
469     if (int128_eq(llsize, int128_2_64())) {
470         llsize = int128_rshift(llsize, 1);
471         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
472                                    int128_get64(llsize));
473 
474         if (ret) {
475             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
476                          "0x%" HWADDR_PRIx ") = %d (%m)",
477                          s, iova, int128_get64(llsize), ret);
478         }
479         iova += int128_get64(llsize);
480     }
481     ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
482                                int128_get64(llsize));
483 
484     if (ret) {
485         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
486                      "0x%" HWADDR_PRIx ") = %d (%m)",
487                      s, iova, int128_get64(llsize), ret);
488     }
489 
490     memory_region_unref(section->mr);
491 }
492 /*
493  * The IOTLB API is used by vhost-vdpa, which requires incremental updates
494  * of the mapping, so we cannot use the generic vhost memory listener,
495  * which depends on addnop().
496  */
497 static const MemoryListener vhost_vdpa_memory_listener = {
498     .name = "vhost-vdpa",
499     .commit = vhost_vdpa_listener_commit,
500     .region_add = vhost_vdpa_listener_region_add,
501     .region_del = vhost_vdpa_listener_region_del,
502 };
503 
504 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
505                              void *arg)
506 {
507     struct vhost_vdpa *v = dev->opaque;
508     int fd = v->shared->device_fd;
509     int ret;
510 
511     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
512 
513     ret = ioctl(fd, request, arg);
514     return ret < 0 ? -errno : ret;
515 }
516 
517 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
518 {
519     uint8_t s;
520     int ret;
521 
522     trace_vhost_vdpa_add_status(dev, status);
523     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
524     if (ret < 0) {
525         return ret;
526     }
527     if ((s & status) == status) {
528         /* Don't set bits already set */
529         return 0;
530     }
531 
532     s |= status;
533 
534     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
535     if (ret < 0) {
536         return ret;
537     }
538 
539     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
540     if (ret < 0) {
541         return ret;
542     }
543 
544     if (!(s & status)) {
545         return -EIO;
546     }
547 
548     return 0;
549 }
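
/*
 * For reference, the status bits added through this helper in this file
 * follow the usual virtio sequence: ACKNOWLEDGE | DRIVER at init time,
 * FEATURES_OK after a successful VHOST_SET_FEATURES, and DRIVER_OK when the
 * last vhost device is started.
 */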
550 
551 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
552 {
553     int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
554 
555     return ret < 0 ? -errno : 0;
556 }
557 
558 /*
559  * This function is for requests that only need to be applied once.
560  * Typically such a request occurs at the beginning of operation,
561  * before the queues are set up. It should not be used for requests
562  * that must only run once all queues are set, which would need to
563  * check dev->vq_index_end instead.
564  */
565 static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
566 {
567     struct vhost_vdpa *v = dev->opaque;
568 
569     return v->index == 0;
570 }
571 
572 static bool vhost_vdpa_last_dev(struct vhost_dev *dev)
573 {
574     return dev->vq_index + dev->nvqs == dev->vq_index_end;
575 }
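
/*
 * Illustrative example (assuming a multiqueue net backend that splits its
 * virtqueues across several vhost_dev instances): only the instance with
 * index 0 passes vhost_vdpa_first_dev(), and only the instance whose
 * vq_index + nvqs reaches vq_index_end passes vhost_vdpa_last_dev(), so
 * one-shot setup and the final start/stop actions run exactly once.
 */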
576 
577 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
578                                        uint64_t *features)
579 {
580     int ret;
581 
582     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
583     trace_vhost_vdpa_get_features(dev, *features);
584     return ret;
585 }
586 
587 static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
588 {
589     g_autoptr(GPtrArray) shadow_vqs = NULL;
590 
591     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
592     for (unsigned n = 0; n < hdev->nvqs; ++n) {
593         VhostShadowVirtqueue *svq;
594 
595         svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
596         g_ptr_array_add(shadow_vqs, svq);
597     }
598 
599     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
600 }
601 
602 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
603 {
604     struct vhost_vdpa *v = opaque;
605     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
606     trace_vhost_vdpa_init(dev, v->shared, opaque);
607     int ret;
608 
609     v->dev = dev;
610     dev->opaque = opaque;
611     v->shared->listener = vhost_vdpa_memory_listener;
612     vhost_vdpa_init_svq(dev, v);
613 
614     error_propagate(&dev->migration_blocker, v->migration_blocker);
615     if (!vhost_vdpa_first_dev(dev)) {
616         return 0;
617     }
618 
619     /*
620      * If shadow_vqs_enabled is already set at initialization, the device has
621      * been started with x-svq=on, so don't block migration.
622      */
623     if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
624         /* We don't have dev->features yet */
625         uint64_t features;
626         ret = vhost_vdpa_get_dev_features(dev, &features);
627         if (unlikely(ret)) {
628             error_setg_errno(errp, -ret, "Could not get device features");
629             return ret;
630         }
631         vhost_svq_valid_features(features, &dev->migration_blocker);
632     }
633 
634     /*
635      * Similar to VFIO, we end up pinning all guest memory and have to
636      * disable discarding of RAM.
637      */
638     ret = ram_block_discard_disable(true);
639     if (ret) {
640         error_report("Cannot disable discarding of RAM");
641         return ret;
642     }
643 
644     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
645                                VIRTIO_CONFIG_S_DRIVER);
646 
647     return 0;
648 }
649 
650 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
651                                             int queue_index)
652 {
653     size_t page_size = qemu_real_host_page_size();
654     struct vhost_vdpa *v = dev->opaque;
655     VirtIODevice *vdev = dev->vdev;
656     VhostVDPAHostNotifier *n;
657 
658     n = &v->notifier[queue_index];
659 
660     if (n->addr) {
661         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
662         object_unparent(OBJECT(&n->mr));
663         munmap(n->addr, page_size);
664         n->addr = NULL;
665     }
666 }
667 
668 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
669 {
670     size_t page_size = qemu_real_host_page_size();
671     struct vhost_vdpa *v = dev->opaque;
672     VirtIODevice *vdev = dev->vdev;
673     VhostVDPAHostNotifier *n;
674     int fd = v->shared->device_fd;
675     void *addr;
676     char *name;
677 
678     vhost_vdpa_host_notifier_uninit(dev, queue_index);
679 
680     n = &v->notifier[queue_index];
681 
682     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
683                 queue_index * page_size);
684     if (addr == MAP_FAILED) {
685         goto err;
686     }
687 
688     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
689                            v, queue_index);
690     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
691                                       page_size, addr);
692     g_free(name);
693 
694     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
695         object_unparent(OBJECT(&n->mr));
696         munmap(addr, page_size);
697         goto err;
698     }
699     n->addr = addr;
700 
701     return 0;
702 
703 err:
704     return -1;
705 }
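
/*
 * The mapping above relies on the vhost-vdpa character device exposing one
 * host-notifier page per virtqueue at file offset queue_index * page_size;
 * the page is wrapped in a ram-device memory region so that guest
 * notifications can be written straight to the device doorbell instead of
 * bouncing through the kick eventfd.
 */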
706 
707 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
708 {
709     int i;
710 
711     /*
712      * Pack all the changes to the memory regions in a single
713      * transaction to avoid several updates of the address space
714      * topology.
715      */
716     memory_region_transaction_begin();
717 
718     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
719         vhost_vdpa_host_notifier_uninit(dev, i);
720     }
721 
722     memory_region_transaction_commit();
723 }
724 
725 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
726 {
727     struct vhost_vdpa *v = dev->opaque;
728     int i;
729 
730     if (v->shadow_vqs_enabled) {
731         /* FIXME: SVQ is not compatible with the host notifier memory regions */
732         return;
733     }
734 
735     /*
736      * Pack all the changes to the memory regions in a single
737      * transaction to avoid several updates of the address space
738      * topology.
739      */
740     memory_region_transaction_begin();
741 
742     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
743         if (vhost_vdpa_host_notifier_init(dev, i)) {
744             vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
745             break;
746         }
747     }
748 
749     memory_region_transaction_commit();
750 }
751 
752 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
753 {
754     struct vhost_vdpa *v = dev->opaque;
755     size_t idx;
756 
757     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
758         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
759     }
760     g_ptr_array_free(v->shadow_vqs, true);
761 }
762 
763 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
764 {
765     struct vhost_vdpa *v;
766     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
767     v = dev->opaque;
768     trace_vhost_vdpa_cleanup(dev, v);
769     if (vhost_vdpa_first_dev(dev)) {
770         ram_block_discard_disable(false);
771         memory_listener_unregister(&v->shared->listener);
772     }
773 
774     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
775     vhost_vdpa_svq_cleanup(dev);
776 
777     dev->opaque = NULL;
778 
779     return 0;
780 }
781 
782 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
783 {
784     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
785     return INT_MAX;
786 }
787 
788 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
789                                     struct vhost_memory *mem)
790 {
791     if (!vhost_vdpa_first_dev(dev)) {
792         return 0;
793     }
794 
795     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
796     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
797         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
798         int i;
799         for (i = 0; i < mem->nregions; i++) {
800             trace_vhost_vdpa_dump_regions(dev, i,
801                                           mem->regions[i].guest_phys_addr,
802                                           mem->regions[i].memory_size,
803                                           mem->regions[i].userspace_addr,
804                                           mem->regions[i].flags_padding);
805         }
806     }
807     if (mem->padding) {
808         return -EINVAL;
809     }
810 
811     return 0;
812 }
813 
814 static int vhost_vdpa_set_features(struct vhost_dev *dev,
815                                    uint64_t features)
816 {
817     struct vhost_vdpa *v = dev->opaque;
818     int ret;
819 
820     if (!vhost_vdpa_first_dev(dev)) {
821         return 0;
822     }
823 
824     if (v->shadow_vqs_enabled) {
825         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
826             /*
827              * QEMU is just trying to enable or disable logging. SVQ handles
828              * this separately, so there is no need to forward it.
829              */
830             v->acked_features = features;
831             return 0;
832         }
833 
834         v->acked_features = features;
835 
836         /* We must not ack _F_LOG if SVQ is enabled */
837         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
838     }
839 
840     trace_vhost_vdpa_set_features(dev, features);
841     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
842     if (ret) {
843         return ret;
844     }
845 
846     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
847 }
848 
849 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
850 {
851     struct vhost_vdpa *v = dev->opaque;
852 
853     uint64_t features;
854     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
855         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
856         0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
857         0x1ULL << VHOST_BACKEND_F_SUSPEND;
858     int r;
859 
860     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
861         return -EFAULT;
862     }
863 
864     features &= f;
865 
866     if (vhost_vdpa_first_dev(dev)) {
867         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
868         if (r) {
869             return -EFAULT;
870         }
871     }
872 
873     dev->backend_cap = features;
874     v->shared->backend_cap = features;
875 
876     return 0;
877 }
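
/*
 * The backend features negotiated above gate behaviour elsewhere in this
 * file: IOTLB_MSG_V2 is the message format used by the DMA helpers,
 * IOTLB_BATCH enables the begin/commit batching in the memory listener, and
 * SUSPEND allows vhost_vdpa_suspend() to stop the device without a full
 * reset. IOTLB_ASID is mainly consumed by the vhost-vdpa net code rather
 * than here.
 */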
878 
879 static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
880                                     uint32_t *device_id)
881 {
882     int ret;
883     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
884     trace_vhost_vdpa_get_device_id(dev, *device_id);
885     return ret;
886 }
887 
888 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
889 {
890     struct vhost_vdpa *v = dev->opaque;
891     int ret;
892     uint8_t status = 0;
893 
894     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
895     trace_vhost_vdpa_reset_device(dev);
896     v->suspended = false;
897     return ret;
898 }
899 
900 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
901 {
902     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
903 
904     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
905     return idx;
906 }
907 
908 static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx,
909                                            int enable)
910 {
911     struct vhost_dev *dev = v->dev;
912     struct vhost_vring_state state = {
913         .index = idx,
914         .num = enable,
915     };
916     int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
917 
918     trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r);
919     return r;
920 }
921 
922 static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable)
923 {
924     struct vhost_vdpa *v = dev->opaque;
925     unsigned int i;
926     int ret;
927 
928     for (i = 0; i < dev->nvqs; ++i) {
929         ret = vhost_vdpa_set_vring_enable_one(v, i, enable);
930         if (ret < 0) {
931             return ret;
932         }
933     }
934 
935     return 0;
936 }
937 
938 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
939 {
940     return vhost_vdpa_set_vring_enable_one(v, idx, 1);
941 }
942 
943 static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
944                                        int fd)
945 {
946     trace_vhost_vdpa_set_config_call(dev, fd);
947     return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
948 }
949 
950 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
951                                    uint32_t config_len)
952 {
953     g_autoptr(GString) str = g_string_sized_new(4 * 16);
954     size_t b, len;
955 
956     for (b = 0; b < config_len; b += len) {
957         len = MIN(config_len - b, 16);
958 
959         g_string_truncate(str, 0);
960         qemu_hexdump_line(str, config + b, len, 1, 4);
961         trace_vhost_vdpa_dump_config(dev, b, str->str);
962     }
963 }
964 
965 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
966                                    uint32_t offset, uint32_t size,
967                                    uint32_t flags)
968 {
969     struct vhost_vdpa_config *config;
970     int ret;
971     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
972 
973     trace_vhost_vdpa_set_config(dev, offset, size, flags);
974     config = g_malloc(size + config_size);
975     config->off = offset;
976     config->len = size;
977     memcpy(config->buf, data, size);
978     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
979         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
980         vhost_vdpa_dump_config(dev, data, size);
981     }
982     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
983     g_free(config);
984     return ret;
985 }
986 
987 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
988                                    uint32_t config_len, Error **errp)
989 {
990     struct vhost_vdpa_config *v_config;
991     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
992     int ret;
993 
994     trace_vhost_vdpa_get_config(dev, config, config_len);
995     v_config = g_malloc(config_len + config_size);
996     v_config->len = config_len;
997     v_config->off = 0;
998     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
999     memcpy(config, v_config->buf, config_len);
1000     g_free(v_config);
1001     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
1002         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
1003         vhost_vdpa_dump_config(dev, config, config_len);
1004     }
1005     return ret;
1006 }
1007 
1008 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
1009                                          struct vhost_vring_state *ring)
1010 {
1011     struct vhost_vdpa *v = dev->opaque;
1012 
1013     trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num,
1014                                         v->shadow_vqs_enabled);
1015     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
1016 }
1017 
1018 static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
1019                                          struct vhost_vring_file *file)
1020 {
1021     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
1022     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
1023 }
1024 
1025 static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
1026                                          struct vhost_vring_file *file)
1027 {
1028     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
1029     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
1030 }
1031 
1032 static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
1033                                          struct vhost_vring_addr *addr)
1034 {
1035     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
1036                                 addr->desc_user_addr, addr->used_user_addr,
1037                                 addr->avail_user_addr,
1038                                 addr->log_guest_addr);
1039 
1040     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
1041 
1042 }
1043 
1044 /**
1045  * Set the shadow virtqueue descriptors to the device
1046  *
1047  * @dev: The vhost device model
1048  * @svq: The shadow virtqueue
1049  * @idx: The index of the virtqueue in the vhost device
1050  * @errp: Error
1051  *
1052  * Note that this function does not rewind the kick file descriptor if it
1053  * cannot set the call one.
1054  */
1055 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
1056                                   VhostShadowVirtqueue *svq, unsigned idx,
1057                                   Error **errp)
1058 {
1059     struct vhost_vring_file file = {
1060         .index = dev->vq_index + idx,
1061     };
1062     const EventNotifier *event_notifier = &svq->hdev_kick;
1063     int r;
1064 
1065     r = event_notifier_init(&svq->hdev_kick, 0);
1066     if (r != 0) {
1067         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
1068         goto err_init_hdev_kick;
1069     }
1070 
1071     r = event_notifier_init(&svq->hdev_call, 0);
1072     if (r != 0) {
1073         error_setg_errno(errp, -r, "Couldn't create call event notifier");
1074         goto err_init_hdev_call;
1075     }
1076 
1077     file.fd = event_notifier_get_fd(event_notifier);
1078     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
1079     if (unlikely(r != 0)) {
1080         error_setg_errno(errp, -r, "Can't set device kick fd");
1081         goto err_init_set_dev_fd;
1082     }
1083 
1084     event_notifier = &svq->hdev_call;
1085     file.fd = event_notifier_get_fd(event_notifier);
1086     r = vhost_vdpa_set_vring_dev_call(dev, &file);
1087     if (unlikely(r != 0)) {
1088         error_setg_errno(errp, -r, "Can't set device call fd");
1089         goto err_init_set_dev_fd;
1090     }
1091 
1092     return 0;
1093 
1094 err_init_set_dev_fd:
1095     event_notifier_set_handler(&svq->hdev_call, NULL);
1096 
1097 err_init_hdev_call:
1098     event_notifier_cleanup(&svq->hdev_kick);
1099 
1100 err_init_hdev_kick:
1101     return r;
1102 }
1103 
1104 /**
1105  * Unmap a SVQ area in the device
1106  */
1107 static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
1108 {
1109     const DMAMap needle = {
1110         .translated_addr = addr,
1111     };
1112     const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
1113                                                      &needle);
1114     hwaddr size;
1115     int r;
1116 
1117     if (unlikely(!result)) {
1118         error_report("Unable to find SVQ address to unmap");
1119         return;
1120     }
1121 
1122     size = ROUND_UP(result->size, qemu_real_host_page_size());
1123     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
1124                              size);
1125     if (unlikely(r < 0)) {
1126         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
1127         return;
1128     }
1129 
1130     vhost_iova_tree_remove(v->shared->iova_tree, *result);
1131 }
1132 
1133 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
1134                                        const VhostShadowVirtqueue *svq)
1135 {
1136     struct vhost_vdpa *v = dev->opaque;
1137     struct vhost_vring_addr svq_addr;
1138 
1139     vhost_svq_get_vring_addr(svq, &svq_addr);
1140 
1141     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
1142 
1143     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
1144 }
1145 
1146 /**
1147  * Map the SVQ area in the device
1148  *
1149  * @v: Vhost-vdpa device
1150  * @needle: The area to search an iova for
1151  * @taddr: The translated address (HVA)
1152  * @errp: Error pointer
1153  */
1154 static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
1155                                     hwaddr taddr, Error **errp)
1156 {
1157     int r;
1158 
1159     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr);
1160     if (unlikely(r != IOVA_OK)) {
1161         error_setg(errp, "Cannot allocate iova (%d)", r);
1162 
1163         if (needle->translated_addr == taddr) {
1164             error_append_hint(errp, "Insertion to IOVA->HVA tree failed");
1165             /* Remove the mapping from the IOVA-only tree */
1166             vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1167         }
1168         return false;
1169     }
1170 
1171     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
1172                            needle->size + 1,
1173                            (void *)(uintptr_t)needle->translated_addr,
1174                            needle->perm == IOMMU_RO);
1175     if (unlikely(r != 0)) {
1176         error_setg_errno(errp, -r, "Cannot map region to device");
1177         vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1178     }
1179 
1180     return r == 0;
1181 }
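
/*
 * DMAMap describes an inclusive range, so its size field is the length minus
 * one; that is why the map above covers needle->size + 1 bytes starting at
 * the allocated needle->iova.
 */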
1182 
1183 /**
1184  * Map the shadow virtqueue rings in the device
1185  *
1186  * @dev: The vhost device
1187  * @svq: The shadow virtqueue
1188  * @addr: Assigned IOVA addresses
1189  * @errp: Error pointer
1190  */
1191 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
1192                                      const VhostShadowVirtqueue *svq,
1193                                      struct vhost_vring_addr *addr,
1194                                      Error **errp)
1195 {
1196     ERRP_GUARD();
1197     DMAMap device_region, driver_region;
1198     struct vhost_vring_addr svq_addr;
1199     struct vhost_vdpa *v = dev->opaque;
1200     size_t device_size = vhost_svq_device_area_size(svq);
1201     size_t driver_size = vhost_svq_driver_area_size(svq);
1202     size_t avail_offset;
1203     bool ok;
1204 
1205     vhost_svq_get_vring_addr(svq, &svq_addr);
1206 
1207     driver_region = (DMAMap) {
1208         .size = driver_size - 1,
1209         .perm = IOMMU_RO,
1210     };
1211     ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.desc_user_addr,
1212                                  errp);
1213     if (unlikely(!ok)) {
1214         error_prepend(errp, "Cannot create vq driver region: ");
1215         return false;
1216     }
1217     addr->desc_user_addr = driver_region.iova;
1218     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
1219     addr->avail_user_addr = driver_region.iova + avail_offset;
1220 
1221     device_region = (DMAMap) {
1222         .size = device_size - 1,
1223         .perm = IOMMU_RW,
1224     };
1225     ok = vhost_vdpa_svq_map_ring(v, &device_region, svq_addr.used_user_addr,
1226                                  errp);
1227     if (unlikely(!ok)) {
1228         error_prepend(errp, "Cannot create vq device region: ");
1229         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1230     }
1231     addr->used_user_addr = device_region.iova;
1232 
1233     return ok;
1234 }
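
/*
 * Resulting layout (recap of the code above): the descriptor and avail rings
 * share the read-only driver region, so the device sees desc at
 * driver_region.iova and avail at driver_region.iova plus the original
 * avail-desc offset, while the used ring lives in the writable device region
 * at device_region.iova.
 */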
1235 
1236 static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1237                                  VhostShadowVirtqueue *svq, unsigned idx,
1238                                  Error **errp)
1239 {
1240     uint16_t vq_index = dev->vq_index + idx;
1241     struct vhost_vring_state s = {
1242         .index = vq_index,
1243     };
1244     int r;
1245 
1246     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1247     if (unlikely(r)) {
1248         error_setg_errno(errp, -r, "Cannot set vring base");
1249         return false;
1250     }
1251 
1252     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1253     return r == 0;
1254 }
1255 
1256 static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1257 {
1258     struct vhost_vdpa *v = dev->opaque;
1259     Error *err = NULL;
1260     unsigned i;
1261 
1262     if (!v->shadow_vqs_enabled) {
1263         return true;
1264     }
1265 
1266     for (i = 0; i < v->shadow_vqs->len; ++i) {
1267         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1268         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1269         struct vhost_vring_addr addr = {
1270             .index = dev->vq_index + i,
1271         };
1272         int r;
1273         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1274         if (unlikely(!ok)) {
1275             goto err;
1276         }
1277 
1278         vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
1279         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1280         if (unlikely(!ok)) {
1281             goto err_map;
1282         }
1283 
1284         /* Override vring GPA set by vhost subsystem */
1285         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1286         if (unlikely(r != 0)) {
1287             error_setg_errno(&err, -r, "Cannot set device address");
1288             goto err_set_addr;
1289         }
1290     }
1291 
1292     return true;
1293 
1294 err_set_addr:
1295     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1296 
1297 err_map:
1298     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1299 
1300 err:
1301     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1302     for (unsigned j = 0; j < i; ++j) {
1303         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1304         vhost_vdpa_svq_unmap_rings(dev, svq);
1305         vhost_svq_stop(svq);
1306     }
1307 
1308     return false;
1309 }
1310 
1311 static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1312 {
1313     struct vhost_vdpa *v = dev->opaque;
1314 
1315     if (!v->shadow_vqs_enabled) {
1316         return;
1317     }
1318 
1319     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1320         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1321 
1322         vhost_svq_stop(svq);
1323         vhost_vdpa_svq_unmap_rings(dev, svq);
1324 
1325         event_notifier_cleanup(&svq->hdev_kick);
1326         event_notifier_cleanup(&svq->hdev_call);
1327     }
1328 }
1329 
1330 static void vhost_vdpa_suspend(struct vhost_dev *dev)
1331 {
1332     struct vhost_vdpa *v = dev->opaque;
1333     int r;
1334 
1335     if (!vhost_vdpa_first_dev(dev)) {
1336         return;
1337     }
1338 
1339     if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
1340         trace_vhost_vdpa_suspend(dev);
1341         r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
1342         if (unlikely(r)) {
1343             error_report("Cannot suspend: %s (%d)", g_strerror(errno), errno);
1344         } else {
1345             v->suspended = true;
1346             return;
1347         }
1348     }
1349 
1350     vhost_vdpa_reset_device(dev);
1351 }
1352 
1353 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1354 {
1355     struct vhost_vdpa *v = dev->opaque;
1356     bool ok;
1357     trace_vhost_vdpa_dev_start(dev, started);
1358 
1359     if (started) {
1360         vhost_vdpa_host_notifiers_init(dev);
1361         ok = vhost_vdpa_svqs_start(dev);
1362         if (unlikely(!ok)) {
1363             return -1;
1364         }
1365     } else {
1366         vhost_vdpa_suspend(dev);
1367         vhost_vdpa_svqs_stop(dev);
1368         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
1369     }
1370 
1371     if (!vhost_vdpa_last_dev(dev)) {
1372         return 0;
1373     }
1374 
1375     if (started) {
1376         if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
1377             error_report("SVQ cannot work while the IOMMU is enabled, please "
1378                          "disable the IOMMU and try again");
1379             return -1;
1380         }
1381         memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
1382 
1383         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1384     }
1385 
1386     return 0;
1387 }
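
/*
 * Start/stop ordering (recap): on start every vhost device sets up its host
 * notifiers and shadow virtqueues, but only the last one registers the
 * memory listener and raises DRIVER_OK; on stop the device is suspended (or
 * reset if suspend is not available), the shadow virtqueues are torn down
 * and the host notifiers are unmapped.
 */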
1388 
1389 static void vhost_vdpa_reset_status(struct vhost_dev *dev)
1390 {
1391     struct vhost_vdpa *v = dev->opaque;
1392 
1393     if (!vhost_vdpa_last_dev(dev)) {
1394         return;
1395     }
1396 
1397     vhost_vdpa_reset_device(dev);
1398     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1399                                VIRTIO_CONFIG_S_DRIVER);
1400     memory_listener_unregister(&v->shared->listener);
1401 }
1402 
1403 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1404                                      struct vhost_log *log)
1405 {
1406     struct vhost_vdpa *v = dev->opaque;
1407     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
1408         return 0;
1409     }
1410 
1411     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1412                                   log->log);
1413     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1414 }
1415 
1416 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1417                                        struct vhost_vring_addr *addr)
1418 {
1419     struct vhost_vdpa *v = dev->opaque;
1420 
1421     if (v->shadow_vqs_enabled) {
1422         /*
1423          * Device vring addr was set at device start. SVQ base is handled by
1424          * VirtQueue code.
1425          */
1426         return 0;
1427     }
1428 
1429     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1430 }
1431 
1432 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1433                                       struct vhost_vring_state *ring)
1434 {
1435     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1436     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1437 }
1438 
1439 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1440                                        struct vhost_vring_state *ring)
1441 {
1442     struct vhost_vdpa *v = dev->opaque;
1443 
1444     if (v->shadow_vqs_enabled) {
1445         /*
1446          * Device vring base was set at device start. SVQ base is handled by
1447          * VirtQueue code.
1448          */
1449         return 0;
1450     }
1451 
1452     return vhost_vdpa_set_dev_vring_base(dev, ring);
1453 }
1454 
1455 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1456                                        struct vhost_vring_state *ring)
1457 {
1458     struct vhost_vdpa *v = dev->opaque;
1459     int ret;
1460 
1461     if (v->shadow_vqs_enabled) {
1462         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
1463         trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true);
1464         return 0;
1465     }
1466 
1467     if (!v->suspended) {
1468         /*
1469          * Cannot trust the value returned by the device; let vhost recover the
1470          * used idx from the guest.
1471          */
1472         return -1;
1473     }
1474 
1475     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1476     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false);
1477     return ret;
1478 }
1479 
1480 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1481                                        struct vhost_vring_file *file)
1482 {
1483     struct vhost_vdpa *v = dev->opaque;
1484     int vdpa_idx = file->index - dev->vq_index;
1485 
1486     if (v->shadow_vqs_enabled) {
1487         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1488         vhost_svq_set_svq_kick_fd(svq, file->fd);
1489         return 0;
1490     } else {
1491         return vhost_vdpa_set_vring_dev_kick(dev, file);
1492     }
1493 }
1494 
1495 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1496                                        struct vhost_vring_file *file)
1497 {
1498     struct vhost_vdpa *v = dev->opaque;
1499     int vdpa_idx = file->index - dev->vq_index;
1500     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1501 
1502     /* Remember last call fd because we can switch to SVQ anytime. */
1503     vhost_svq_set_svq_call_fd(svq, file->fd);
1504     /*
1505      * When SVQ is transitioning to off, shadow_vqs_enabled has
1506      * not been set back to false yet, but the underlying call fd
1507      * will have to switch back to the guest notifier to signal the
1508      * passthrough virtqueues. In other situations, SVQ's own call
1509      * fd shall be used to signal the device model.
1510      */
1511     if (v->shadow_vqs_enabled &&
1512         v->shared->svq_switching != SVQ_TSTATE_DISABLING) {
1513         return 0;
1514     }
1515 
1516     return vhost_vdpa_set_vring_dev_call(dev, file);
1517 }
1518 
1519 static int vhost_vdpa_get_features(struct vhost_dev *dev,
1520                                      uint64_t *features)
1521 {
1522     int ret = vhost_vdpa_get_dev_features(dev, features);
1523 
1524     if (ret == 0) {
1525         /* Add SVQ logging capabilities */
1526         *features |= BIT_ULL(VHOST_F_LOG_ALL);
1527     }
1528 
1529     return ret;
1530 }
1531 
1532 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1533 {
1534     if (!vhost_vdpa_first_dev(dev)) {
1535         return 0;
1536     }
1537 
1538     trace_vhost_vdpa_set_owner(dev);
1539     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1540 }
1541 
1542 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1543                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1544 {
1545     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1546     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1547     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1548     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1549     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1550                                  addr->avail_user_addr, addr->used_user_addr);
1551     return 0;
1552 }
1553 
1554 static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1555 {
1556     return true;
1557 }
1558 
1559 const VhostOps vdpa_ops = {
1560         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1561         .vhost_backend_init = vhost_vdpa_init,
1562         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1563         .vhost_set_log_base = vhost_vdpa_set_log_base,
1564         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1565         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1566         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1567         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1568         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1569         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1570         .vhost_get_features = vhost_vdpa_get_features,
1571         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1572         .vhost_set_owner = vhost_vdpa_set_owner,
1573         .vhost_set_vring_endian = NULL,
1574         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1575         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1576         .vhost_set_features = vhost_vdpa_set_features,
1577         .vhost_reset_device = vhost_vdpa_reset_device,
1578         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1579         .vhost_set_vring_enable = vhost_vdpa_set_vring_enable,
1580         .vhost_get_config  = vhost_vdpa_get_config,
1581         .vhost_set_config = vhost_vdpa_set_config,
1582         .vhost_requires_shm_log = NULL,
1583         .vhost_migration_done = NULL,
1584         .vhost_net_set_mtu = NULL,
1585         .vhost_set_iotlb_callback = NULL,
1586         .vhost_send_device_iotlb_msg = NULL,
1587         .vhost_dev_start = vhost_vdpa_dev_start,
1588         .vhost_get_device_id = vhost_vdpa_get_device_id,
1589         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1590         .vhost_force_iommu = vhost_vdpa_force_iommu,
1591         .vhost_set_config_call = vhost_vdpa_set_config_call,
1592         .vhost_reset_status = vhost_vdpa_reset_status,
1593 };
1594