/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
        mem_region.size = int128_get64(llsize) - 1;
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);

        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shadow_data) {
        vhost_iova_tree_remove(v->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;

}

static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova,
        int128_get64(int128_sub(llend, int128_one())));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v);
    /*
     * The unmap ioctl doesn't accept a full 64-bit span, so check for it
     * and split the request in two if needed.
     */
    if (int128_eq(llsize, int128_2_64())) {
        llsize = int128_rshift(llsize, 1);
        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   int128_get64(llsize));

        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, int128_get64(llsize), ret);
        }
        iova += int128_get64(llsize);
    }
    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));

    if (ret) {
        error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                     "0x%" HWADDR_PRIx ") = %d (%m)",
                     v, iova, int128_get64(llsize), ret);
    }

    memory_region_unref(section->mr);
}
/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on the addnop().
349 */ 350 static const MemoryListener vhost_vdpa_memory_listener = { 351 .name = "vhost-vdpa", 352 .commit = vhost_vdpa_listener_commit, 353 .region_add = vhost_vdpa_listener_region_add, 354 .region_del = vhost_vdpa_listener_region_del, 355 }; 356 357 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request, 358 void *arg) 359 { 360 struct vhost_vdpa *v = dev->opaque; 361 int fd = v->device_fd; 362 int ret; 363 364 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); 365 366 ret = ioctl(fd, request, arg); 367 return ret < 0 ? -errno : ret; 368 } 369 370 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) 371 { 372 uint8_t s; 373 int ret; 374 375 trace_vhost_vdpa_add_status(dev, status); 376 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s); 377 if (ret < 0) { 378 return ret; 379 } 380 381 s |= status; 382 383 ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); 384 if (ret < 0) { 385 return ret; 386 } 387 388 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s); 389 if (ret < 0) { 390 return ret; 391 } 392 393 if (!(s & status)) { 394 return -EIO; 395 } 396 397 return 0; 398 } 399 400 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range) 401 { 402 int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range); 403 404 return ret < 0 ? -errno : 0; 405 } 406 407 /* 408 * The use of this function is for requests that only need to be 409 * applied once. Typically such request occurs at the beginning 410 * of operation, and before setting up queues. It should not be 411 * used for request that performs operation until all queues are 412 * set, which would need to check dev->vq_index_end instead. 413 */ 414 static bool vhost_vdpa_first_dev(struct vhost_dev *dev) 415 { 416 struct vhost_vdpa *v = dev->opaque; 417 418 return v->index == 0; 419 } 420 421 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev, 422 uint64_t *features) 423 { 424 int ret; 425 426 ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); 427 trace_vhost_vdpa_get_features(dev, *features); 428 return ret; 429 } 430 431 static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v) 432 { 433 g_autoptr(GPtrArray) shadow_vqs = NULL; 434 435 shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); 436 for (unsigned n = 0; n < hdev->nvqs; ++n) { 437 VhostShadowVirtqueue *svq; 438 439 svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque); 440 g_ptr_array_add(shadow_vqs, svq); 441 } 442 443 v->shadow_vqs = g_steal_pointer(&shadow_vqs); 444 } 445 446 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) 447 { 448 struct vhost_vdpa *v; 449 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); 450 trace_vhost_vdpa_init(dev, opaque); 451 int ret; 452 453 v = opaque; 454 v->dev = dev; 455 dev->opaque = opaque ; 456 v->listener = vhost_vdpa_memory_listener; 457 v->msg_type = VHOST_IOTLB_MSG_V2; 458 vhost_vdpa_init_svq(dev, v); 459 460 error_propagate(&dev->migration_blocker, v->migration_blocker); 461 if (!vhost_vdpa_first_dev(dev)) { 462 return 0; 463 } 464 465 /* 466 * If dev->shadow_vqs_enabled at initialization that means the device has 467 * been started with x-svq=on, so don't block migration 468 */ 469 if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) { 470 /* We don't have dev->features yet */ 471 uint64_t features; 472 ret = vhost_vdpa_get_dev_features(dev, &features); 473 if (unlikely(ret)) { 474 error_setg_errno(errp, -ret, "Could not get device features"); 475 
            return ret;
        }
        vhost_svq_valid_features(features, &dev->migration_blocker);
    }

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot disable discarding of RAM");
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid repeated updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid repeated updates of the address space
     * topology.
585 */ 586 memory_region_transaction_begin(); 587 588 for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) { 589 if (vhost_vdpa_host_notifier_init(dev, i)) { 590 vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index); 591 break; 592 } 593 } 594 595 memory_region_transaction_commit(); 596 } 597 598 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev) 599 { 600 struct vhost_vdpa *v = dev->opaque; 601 size_t idx; 602 603 for (idx = 0; idx < v->shadow_vqs->len; ++idx) { 604 vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx)); 605 } 606 g_ptr_array_free(v->shadow_vqs, true); 607 } 608 609 static int vhost_vdpa_cleanup(struct vhost_dev *dev) 610 { 611 struct vhost_vdpa *v; 612 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); 613 v = dev->opaque; 614 trace_vhost_vdpa_cleanup(dev, v); 615 if (vhost_vdpa_first_dev(dev)) { 616 ram_block_discard_disable(false); 617 } 618 619 vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); 620 memory_listener_unregister(&v->listener); 621 vhost_vdpa_svq_cleanup(dev); 622 623 dev->opaque = NULL; 624 625 return 0; 626 } 627 628 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev) 629 { 630 trace_vhost_vdpa_memslots_limit(dev, INT_MAX); 631 return INT_MAX; 632 } 633 634 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, 635 struct vhost_memory *mem) 636 { 637 if (!vhost_vdpa_first_dev(dev)) { 638 return 0; 639 } 640 641 trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding); 642 if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) && 643 trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) { 644 int i; 645 for (i = 0; i < mem->nregions; i++) { 646 trace_vhost_vdpa_dump_regions(dev, i, 647 mem->regions[i].guest_phys_addr, 648 mem->regions[i].memory_size, 649 mem->regions[i].userspace_addr, 650 mem->regions[i].flags_padding); 651 } 652 } 653 if (mem->padding) { 654 return -EINVAL; 655 } 656 657 return 0; 658 } 659 660 static int vhost_vdpa_set_features(struct vhost_dev *dev, 661 uint64_t features) 662 { 663 struct vhost_vdpa *v = dev->opaque; 664 int ret; 665 666 if (!vhost_vdpa_first_dev(dev)) { 667 return 0; 668 } 669 670 if (v->shadow_vqs_enabled) { 671 if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) { 672 /* 673 * QEMU is just trying to enable or disable logging. SVQ handles 674 * this sepparately, so no need to forward this. 
675 */ 676 v->acked_features = features; 677 return 0; 678 } 679 680 v->acked_features = features; 681 682 /* We must not ack _F_LOG if SVQ is enabled */ 683 features &= ~BIT_ULL(VHOST_F_LOG_ALL); 684 } 685 686 trace_vhost_vdpa_set_features(dev, features); 687 ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features); 688 if (ret) { 689 return ret; 690 } 691 692 return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); 693 } 694 695 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) 696 { 697 uint64_t features; 698 uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | 699 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | 700 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID | 701 0x1ULL << VHOST_BACKEND_F_SUSPEND; 702 int r; 703 704 if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { 705 return -EFAULT; 706 } 707 708 features &= f; 709 710 if (vhost_vdpa_first_dev(dev)) { 711 r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); 712 if (r) { 713 return -EFAULT; 714 } 715 } 716 717 dev->backend_cap = features; 718 719 return 0; 720 } 721 722 static int vhost_vdpa_get_device_id(struct vhost_dev *dev, 723 uint32_t *device_id) 724 { 725 int ret; 726 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id); 727 trace_vhost_vdpa_get_device_id(dev, *device_id); 728 return ret; 729 } 730 731 static int vhost_vdpa_reset_device(struct vhost_dev *dev) 732 { 733 struct vhost_vdpa *v = dev->opaque; 734 int ret; 735 uint8_t status = 0; 736 737 ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status); 738 trace_vhost_vdpa_reset_device(dev, status); 739 v->suspended = false; 740 return ret; 741 } 742 743 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) 744 { 745 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); 746 747 trace_vhost_vdpa_get_vq_index(dev, idx, idx); 748 return idx; 749 } 750 751 static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev) 752 { 753 int i; 754 trace_vhost_vdpa_set_vring_ready(dev); 755 for (i = 0; i < dev->nvqs; ++i) { 756 struct vhost_vring_state state = { 757 .index = dev->vq_index + i, 758 .num = 1, 759 }; 760 vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state); 761 } 762 return 0; 763 } 764 765 static int vhost_vdpa_set_config_call(struct vhost_dev *dev, 766 int fd) 767 { 768 trace_vhost_vdpa_set_config_call(dev, fd); 769 return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd); 770 } 771 772 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config, 773 uint32_t config_len) 774 { 775 int b, len; 776 char line[QEMU_HEXDUMP_LINE_LEN]; 777 778 for (b = 0; b < config_len; b += 16) { 779 len = config_len - b; 780 qemu_hexdump_line(line, b, config, len, false); 781 trace_vhost_vdpa_dump_config(dev, line); 782 } 783 } 784 785 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data, 786 uint32_t offset, uint32_t size, 787 uint32_t flags) 788 { 789 struct vhost_vdpa_config *config; 790 int ret; 791 unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); 792 793 trace_vhost_vdpa_set_config(dev, offset, size, flags); 794 config = g_malloc(size + config_size); 795 config->off = offset; 796 config->len = size; 797 memcpy(config->buf, data, size); 798 if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) && 799 trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) { 800 vhost_vdpa_dump_config(dev, data, size); 801 } 802 ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config); 803 g_free(config); 804 return ret; 805 } 806 807 static int 
static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);

}

/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
871 */ 872 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev, 873 VhostShadowVirtqueue *svq, unsigned idx, 874 Error **errp) 875 { 876 struct vhost_vring_file file = { 877 .index = dev->vq_index + idx, 878 }; 879 const EventNotifier *event_notifier = &svq->hdev_kick; 880 int r; 881 882 r = event_notifier_init(&svq->hdev_kick, 0); 883 if (r != 0) { 884 error_setg_errno(errp, -r, "Couldn't create kick event notifier"); 885 goto err_init_hdev_kick; 886 } 887 888 r = event_notifier_init(&svq->hdev_call, 0); 889 if (r != 0) { 890 error_setg_errno(errp, -r, "Couldn't create call event notifier"); 891 goto err_init_hdev_call; 892 } 893 894 file.fd = event_notifier_get_fd(event_notifier); 895 r = vhost_vdpa_set_vring_dev_kick(dev, &file); 896 if (unlikely(r != 0)) { 897 error_setg_errno(errp, -r, "Can't set device kick fd"); 898 goto err_init_set_dev_fd; 899 } 900 901 event_notifier = &svq->hdev_call; 902 file.fd = event_notifier_get_fd(event_notifier); 903 r = vhost_vdpa_set_vring_dev_call(dev, &file); 904 if (unlikely(r != 0)) { 905 error_setg_errno(errp, -r, "Can't set device call fd"); 906 goto err_init_set_dev_fd; 907 } 908 909 return 0; 910 911 err_init_set_dev_fd: 912 event_notifier_set_handler(&svq->hdev_call, NULL); 913 914 err_init_hdev_call: 915 event_notifier_cleanup(&svq->hdev_kick); 916 917 err_init_hdev_kick: 918 return r; 919 } 920 921 /** 922 * Unmap a SVQ area in the device 923 */ 924 static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) 925 { 926 const DMAMap needle = { 927 .translated_addr = addr, 928 }; 929 const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle); 930 hwaddr size; 931 int r; 932 933 if (unlikely(!result)) { 934 error_report("Unable to find SVQ address to unmap"); 935 return; 936 } 937 938 size = ROUND_UP(result->size, qemu_real_host_page_size()); 939 r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size); 940 if (unlikely(r < 0)) { 941 error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); 942 return; 943 } 944 945 vhost_iova_tree_remove(v->iova_tree, *result); 946 } 947 948 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, 949 const VhostShadowVirtqueue *svq) 950 { 951 struct vhost_vdpa *v = dev->opaque; 952 struct vhost_vring_addr svq_addr; 953 954 vhost_svq_get_vring_addr(svq, &svq_addr); 955 956 vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr); 957 958 vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr); 959 } 960 961 /** 962 * Map the SVQ area in the device 963 * 964 * @v: Vhost-vdpa device 965 * @needle: The area to search iova 966 * @errorp: Error pointer 967 */ 968 static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle, 969 Error **errp) 970 { 971 int r; 972 973 r = vhost_iova_tree_map_alloc(v->iova_tree, needle); 974 if (unlikely(r != IOVA_OK)) { 975 error_setg(errp, "Cannot allocate iova (%d)", r); 976 return false; 977 } 978 979 r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova, 980 needle->size + 1, 981 (void *)(uintptr_t)needle->translated_addr, 982 needle->perm == IOMMU_RO); 983 if (unlikely(r != 0)) { 984 error_setg_errno(errp, -r, "Cannot map region to device"); 985 vhost_iova_tree_remove(v->iova_tree, *needle); 986 } 987 988 return r == 0; 989 } 990 991 /** 992 * Map the shadow virtqueue rings in the device 993 * 994 * @dev: The vhost device 995 * @svq: The shadow virtqueue 996 * @addr: Assigned IOVA addresses 997 * @errp: Error pointer 998 */ 999 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, 1000 const 
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
    }
    addr->used_user_addr = device_region.iova;

    return ok;
}

static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
                                 VhostShadowVirtqueue *svq, unsigned idx,
                                 Error **errp)
{
    uint16_t vq_index = dev->vq_index + idx;
    struct vhost_vring_state s = {
        .index = vq_index,
    };
    int r;

    r = vhost_vdpa_set_dev_vring_base(dev, &s);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set vring base");
        return false;
    }

    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
    return r == 0;
}

static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    Error *err = NULL;
    unsigned i;

    if (!v->shadow_vqs_enabled) {
        return true;
    }

    for (i = 0; i < v->shadow_vqs->len; ++i) {
        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        struct vhost_vring_addr addr = {
            .index = dev->vq_index + i,
        };
        int r;
        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
        if (unlikely(!ok)) {
            goto err;
        }

        vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
        if (unlikely(!ok)) {
            goto err_map;
        }

        /* Override vring GPA set by vhost subsystem */
        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
        if (unlikely(r != 0)) {
            error_setg_errno(&err, -r, "Cannot set device address");
            goto err_set_addr;
        }
    }

    return true;

err_set_addr:
    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));

err_map:
    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));

err:
    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
    for (unsigned j = 0; j < i; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
        vhost_vdpa_svq_unmap_rings(dev, svq);
        vhost_svq_stop(svq);
    }

    return false;
}

static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

        vhost_svq_stop(svq);
        vhost_vdpa_svq_unmap_rings(dev, svq);

        event_notifier_cleanup(&svq->hdev_kick);
        event_notifier_cleanup(&svq->hdev_call);
    }
}

static void vhost_vdpa_suspend(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int r;

    if (!vhost_vdpa_first_dev(dev)) {
        return;
    }

    if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
        trace_vhost_vdpa_suspend(dev);
        r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
        if (unlikely(r)) {
            error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
        } else {
            v->suspended = true;
            return;
        }
    }

    vhost_vdpa_reset_device(dev);
}

static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    bool ok;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        ok = vhost_vdpa_svqs_start(dev);
        if (unlikely(!ok)) {
            return -1;
        }
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_suspend(dev);
        vhost_vdpa_svqs_stop(dev);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        memory_listener_register(&v->listener, &address_space_memory);
        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    return 0;
}

static void vhost_vdpa_reset_status(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return;
    }

    vhost_vdpa_reset_device(dev);
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);
    memory_listener_unregister(&v->listener);
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    struct vhost_vdpa *v = dev->opaque;
    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring addr was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_vring_dev_addr(dev, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring base was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_dev_vring_base(dev, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (v->shadow_vqs_enabled) {
        ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
        return 0;
    }

    if (!v->suspended) {
        /*
         * Cannot trust the value returned by the device; let vhost recover
         * the used idx from the guest.
         */
        return -1;
    }

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;

    if (v->shadow_vqs_enabled) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
        vhost_svq_set_svq_kick_fd(svq, file->fd);
        return 0;
    } else {
        return vhost_vdpa_set_vring_dev_kick(dev, file);
    }
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;
    VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

    /* Remember last call fd because we can switch to SVQ anytime. */
    vhost_svq_set_svq_call_fd(svq, file->fd);
    if (v->shadow_vqs_enabled) {
        return 0;
    }

    return vhost_vdpa_set_vring_dev_call(dev, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret = vhost_vdpa_get_dev_features(dev, features);

    if (ret == 0) {
        /* Add SVQ logging capabilities */
        *features |= BIT_ULL(VHOST_F_LOG_ALL);
    }

    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
    .vhost_set_config_call = vhost_vdpa_set_config_call,
    .vhost_reset_status = vhost_vdpa_reset_status,
};