/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
            memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

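/*
 * Map [iova, iova + size) to the host virtual address @vaddr in the device
 * IOTLB by writing a VHOST_IOTLB_UPDATE message to the vhost-vdpa fd.
 */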
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

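/*
 * Remove the mapping for [iova, iova + size) from the device IOTLB with a
 * VHOST_IOTLB_INVALIDATE message.
 */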
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

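/*
 * When the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, bracket the
 * mapping updates of a listener transaction with BATCH_BEGIN/BATCH_END
 * messages so the kernel can commit them in one batch.
 */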
static void vhost_vdpa_listener_begin(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

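/*
 * Map the RAM backing a newly added section into the device IOTLB.  The
 * range is clamped to target page boundaries before mapping.
 */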
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

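/*
 * Unmap the page-aligned range covered by a removed section and drop the
 * reference taken on the memory region in region_add.
 */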
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping, so we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .begin = vhost_vdpa_listener_begin,
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

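/* Forward a vhost ioctl straight to the vhost-vdpa character device. */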
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    return ioctl(fd, request, arg);
}

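/*
 * Read-modify-write helper: OR @status into the current device status.
 * A failing GET aborts the update; the result of the SET is not checked.
 */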
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

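/*
 * Backend init: remember the vhost_dev, cache the device features, set up
 * the IOTLB memory listener and move the device to ACKNOWLEDGE | DRIVER.
 */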
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
{
    struct vhost_vdpa *v;
    uint64_t features;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    vhost_vdpa_call(dev, VHOST_GET_FEATURES, &features);
    dev->backend_features = features;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

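/*
 * mmap the doorbell page for @queue_index from the vhost-vdpa fd and expose
 * it to the guest as a host notifier memory region, so that virtqueue kicks
 * can reach the device directly instead of going through an eventfd.
 */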
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

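/*
 * The memory table itself is not sent to the device: mappings are installed
 * incrementally by the memory listener.  Only trace the regions and reject
 * non-zero padding.
 */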
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

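/*
 * After setting the virtio features, FEATURES_OK is set and the status is
 * read back to confirm the device accepted the features.
 */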
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    int ret;
    uint8_t status = 0;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

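/*
 * Negotiate backend (protocol) features.  Only IOTLB message v2 and IOTLB
 * batching are acked; anything else the kernel offers is masked out.
 */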
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

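/* Mark every virtqueue of this device as enabled. */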
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

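/*
 * Config space accesses go through a struct vhost_vdpa_config header
 * (offset and length) followed by the payload bytes.
 */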
static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

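/*
 * Start: register the IOTLB listener, map the host notifiers, enable the
 * rings and set DRIVER_OK.  Stop: reset the device back to
 * ACKNOWLEDGE | DRIVER and tear the notifiers and listener down again.
 */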
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;
        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

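/*
 * vDPA devices use guest physical addresses for the rings, so report the
 * *_phys values rather than userspace virtual addresses.
 */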
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};
693