xref: /qemu/hw/virtio/vhost-vdpa.c (revision 032e4d686e9c6f8ae3b9206c720ae3614e28d8a3)
/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

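/*
 * Filter out sections the vhost-vdpa backend cannot or should not map:
 * anything that is neither plain RAM nor an IOMMU region, protected
 * memory, RAM device regions (MMIO), and the spurious high mappings
 * described below.
 */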
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
            memory_region_is_protected(section->mr) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
            memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

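/*
 * Install an IOVA -> host virtual address mapping of @size bytes by
 * writing a VHOST_IOTLB_UPDATE message to the vhost-vdpa device fd.
 * Returns 0 on success and -EIO if the write fails.
 */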
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

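/*
 * Drop the mapping for [iova, iova + size) by sending a
 * VHOST_IOTLB_INVALIDATE message to the vhost-vdpa device fd.
 */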
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

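/*
 * Open an IOTLB batch: updates sent after VHOST_IOTLB_BATCH_BEGIN are
 * expected to be applied together once the matching VHOST_IOTLB_BATCH_END
 * is written in vhost_vdpa_listener_commit().
 */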
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

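/*
 * Send VHOST_IOTLB_BATCH_BEGIN at most once per listener transaction,
 * and only when the backend advertised VHOST_BACKEND_F_IOTLB_BATCH.
 * The flag is cleared again in vhost_vdpa_listener_commit().
 */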
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

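/*
 * Memory listener commit hook: if a batch was opened, close it with
 * VHOST_IOTLB_BATCH_END so the queued updates take effect, and reset the
 * batch state for the next transaction.
 */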
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

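/*
 * Memory listener region_add hook: translate a new RAM section into an
 * IOVA -> host virtual address mapping and install it with
 * vhost_vdpa_dma_map().  The IOVA used is the section's guest physical
 * address.
 */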
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost-vdpa: DMA map failed");
        goto fail;
    }

    return;

fail:
    /*
     * A failed DMA mapping leaves the device with an incomplete view of
     * guest memory, and there is no error path back to the caller here,
     * so the best we can do is report the problem.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

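/*
 * Memory listener region_del hook: remove the mapping that region_add
 * installed for this section and drop the memory region reference.
 */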
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost-vdpa: DMA unmap failed");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

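/*
 * Thin wrapper around ioctl() on the vhost-vdpa device fd; failures are
 * converted to -errno like the other vhost backends.
 */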
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

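/*
 * Read-modify-write helper for the device status register: OR the given
 * bits into the current status via VHOST_VDPA_GET_STATUS/SET_STATUS.
 */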
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

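/*
 * Backend init: wire up the vhost_vdpa state and memory listener, select
 * the v2 IOTLB message format, and announce the driver to the device
 * (ACKNOWLEDGE | DRIVER), mirroring the start of the virtio handshake.
 */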
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

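/*
 * Map the per-queue doorbell page exposed by the vhost-vdpa device fd
 * (page queue_index of its mmap'able area) and wire it up as the queue's
 * host notifier memory region, so guest kicks reach the device without a
 * trip through QEMU.
 */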
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

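/*
 * vhost-vdpa does not consume the classic vhost memory table; mappings
 * are driven by the memory listener instead.  This hook only traces the
 * table and rejects one with non-zero padding.
 */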
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

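/*
 * Set the virtio feature bits, then follow the virtio handshake: set
 * FEATURES_OK and read the status back to confirm the device accepted
 * the features.
 */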
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

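/*
 * Negotiate the vhost backend features: of what the kernel offers, keep
 * only IOTLB_MSG_V2 and IOTLB_BATCH, program that set back into the
 * device, and record it in dev->backend_cap for the listener's batching
 * logic.
 */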
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return -EFAULT;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

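/*
 * Enable every virtqueue owned by this vhost_dev via
 * VHOST_VDPA_SET_VRING_ENABLE.
 */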
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

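/*
 * Start/stop the device.  On start: register the memory listener (which
 * populates the device IOTLB), set up host notifiers, enable the rings,
 * set DRIVER_OK and verify the device accepted it.  On stop: reset the
 * device, restore ACKNOWLEDGE | DRIVER, and tear the notifiers and the
 * listener back down.
 */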
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;
        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

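/*
 * For vhost-vdpa the ring addresses handed to the backend are guest
 * physical addresses (vq->*_phys) rather than QEMU virtual addresses;
 * the device resolves them through the IOVA mappings installed by the
 * memory listener.
 */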
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                    struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

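/*
 * vhost backend ops table for vhost-vdpa.  Callbacks left NULL are
 * features this backend does not provide (e.g. vring endianness swaps,
 * device IOTLB messages, MTU updates).
 */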
const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};