1 /* 2 * vhost support 3 * 4 * Copyright Red Hat, Inc. 2010 5 * 6 * Authors: 7 * Michael S. Tsirkin <mst@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qapi/error.h" 18 #include "hw/virtio/vhost.h" 19 #include "qemu/atomic.h" 20 #include "qemu/range.h" 21 #include "qemu/error-report.h" 22 #include "qemu/memfd.h" 23 #include "qemu/log.h" 24 #include "standard-headers/linux/vhost_types.h" 25 #include "hw/virtio/virtio-bus.h" 26 #include "hw/mem/memory-device.h" 27 #include "migration/blocker.h" 28 #include "migration/qemu-file-types.h" 29 #include "system/dma.h" 30 #include "trace.h" 31 32 /* enabled until disconnected backend stabilizes */ 33 #define _VHOST_DEBUG 1 34 35 #ifdef _VHOST_DEBUG 36 #define VHOST_OPS_DEBUG(retval, fmt, ...) \ 37 do { \ 38 error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ 39 strerror(-retval), -retval); \ 40 } while (0) 41 #else 42 #define VHOST_OPS_DEBUG(retval, fmt, ...) \ 43 do { } while (0) 44 #endif 45 46 static struct vhost_log *vhost_log[VHOST_BACKEND_TYPE_MAX]; 47 static struct vhost_log *vhost_log_shm[VHOST_BACKEND_TYPE_MAX]; 48 static QLIST_HEAD(, vhost_dev) vhost_log_devs[VHOST_BACKEND_TYPE_MAX]; 49 50 /* Memslots used by backends that support private memslots (without an fd). */ 51 static unsigned int used_memslots; 52 53 /* Memslots used by backends that only support shared memslots (with an fd). 
 */
static unsigned int used_shared_memslots;

/* All vhost devices currently realized, for the accessors below. */
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

/*
 * Smallest backend memslot limit across all registered vhost devices.
 * Returns UINT_MAX when no vhost device is registered.
 */
unsigned int vhost_get_max_memslots(void)
{
    unsigned int max = UINT_MAX;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        max = MIN(max, hdev->vhost_ops->vhost_backend_memslots_limit(hdev));
    }
    return max;
}

/*
 * Smallest number of free memslots across all registered vhost devices.
 * Devices that only support shared (fd-based) memslots are charged against
 * used_shared_memslots, all others against used_memslots.
 */
unsigned int vhost_get_free_memslots(void)
{
    unsigned int free = UINT_MAX;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        unsigned int cur_free;

        /*
         * NOTE(review): assumes the used_* counters never exceed the
         * backend limit r; otherwise this unsigned subtraction would
         * wrap around — verify the invariant at the call sites.
         */
        if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
            hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
            cur_free = r - used_shared_memslots;
        } else {
            cur_free = r - used_memslots;
        }
        free = MIN(free, cur_free);
    }
    return free;
}

/*
 * Scan the intersection of the section range [mfirst, mlast] and the
 * region range [rfirst, rlast] in the device's dirty log, clear each
 * dirty chunk atomically, and mark the corresponding pages dirty in the
 * MemoryRegion so migration picks them up.
 */
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *dev_log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev_log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev_log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        /* Empty intersection; nothing to sync. */
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = qatomic_xchg(from, 0);
        while (log) {
            /* Each set bit in the chunk marks one dirty VHOST_LOG_PAGE. */
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
     * incremental memory mapping API via IOTLB API. For platform that
     * does not have IOMMU, there's no need to enable this feature
     * which may cause unnecessary IOTLB miss/update transactions.
     */
    if (vdev) {
        return virtio_bus_device_iommu_enabled(vdev) &&
            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    } else {
        return false;
    }
}

/*
 * Only the first device in the per-backend-type logger list scans the
 * memory sections; the log is shared by all devices of that backend type.
 */
static inline bool vhost_dev_should_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops);
    assert(dev->vhost_ops->backend_type > VHOST_BACKEND_TYPE_NONE);
    assert(dev->vhost_ops->backend_type < VHOST_BACKEND_TYPE_MAX);

    return dev == QLIST_FIRST(&vhost_log_devs[dev->vhost_ops->backend_type]);
}

/*
 * Add (add == true) or remove (add == false) a device from the
 * per-backend-type list of logging candidates. Both directions are
 * idempotent: insertion is skipped if already linked, removal if not.
 */
static inline void vhost_dev_elect_mem_logger(struct vhost_dev *hdev, bool add)
{
    VhostBackendType backend_type;

    assert(hdev->vhost_ops);

    backend_type = hdev->vhost_ops->backend_type;
    assert(backend_type > VHOST_BACKEND_TYPE_NONE);
    assert(backend_type < VHOST_BACKEND_TYPE_MAX);

    if (add && !QLIST_IS_INSERTED(hdev, logdev_entry)) {
        if (QLIST_EMPTY(&vhost_log_devs[backend_type])) {
            QLIST_INSERT_HEAD(&vhost_log_devs[backend_type],
                              hdev, logdev_entry);
        } else {
            /*
             * The first vhost_device in the list is selected as the shared
             * logger to scan memory sections.
             * Put new entry next to the head
             * to avoid inadvertent change to the underlying logger device.
             * This is done in order to get better cache locality and to avoid
             * performance churn on the hot path for log scanning. Even when
             * new devices come and go quickly, it wouldn't end up changing
             * the active leading logger device at all.
             */
            QLIST_INSERT_AFTER(QLIST_FIRST(&vhost_log_devs[backend_type]),
                               hdev, logdev_entry);
        }
    } else if (!add && QLIST_IS_INSERTED(hdev, logdev_entry)) {
        QLIST_REMOVE(hdev, logdev_entry);
    }
}

/*
 * Sync the dirty log for one MemoryRegionSection, clipped to
 * [first, last]. Guest memory regions are only scanned by the elected
 * logger device; the used rings of this device's virtqueues are always
 * synced (translated through the IOMMU page by page when one is active).
 * Returns 0 on success, -EINVAL on IOTLB translation failure.
 */
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        /* Nothing to do unless the device is running with logging on. */
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    if (vhost_dev_should_log(dev)) {
        for (i = 0; i < dev->mem->nregions; ++i) {
            struct vhost_memory_region *reg = dev->mem->regions + i;
            vhost_dev_sync_region(dev, section, start_addr, end_addr,
                                  reg->guest_phys_addr,
                                  range_get_last(reg->guest_phys_addr,
                                                 reg->memory_size));
        }
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (!vq->used_phys && !vq->used_size) {
            continue;
        }

        if (vhost_dev_has_iommu(dev)) {
            IOMMUTLBEntry iotlb;
            hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
            hwaddr phys, s, offset;

            /* The used ring may span several IOMMU pages; walk them. */
            while (used_size) {
                rcu_read_lock();
                iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                                      used_phys,
                                                      true,
                                                      MEMTXATTRS_UNSPECIFIED);
                rcu_read_unlock();

                if (!iotlb.target_as) {
                    qemu_log_mask(LOG_GUEST_ERROR, "translation "
                                  "failure for used_iova %"PRIx64"\n",
                                  used_phys);
                    return -EINVAL;
                }

                offset = used_phys & iotlb.addr_mask;
                phys = iotlb.translated_addr + offset;

                /*
                 * Distance from start of used ring until last byte of
                 * IOMMU page.
                 */
                s = iotlb.addr_mask - offset;
                /*
                 * Size of used ring, or of the part of it until end
                 * of IOMMU page. To avoid zero result, do the adding
                 * outside of MIN().
                 */
                s = MIN(s, used_size - 1) + 1;

                vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
                                      range_get_last(phys, s));
                used_size -= s;
                used_phys += s;
            }
        } else {
            vhost_dev_sync_region(dev, section, start_addr,
                                  end_addr, vq->used_phys,
                                  range_get_last(vq->used_phys, vq->used_size));
        }
    }
    return 0;
}

/* MemoryListener .log_sync callback: sync the whole address range. */
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

/* Sync [first, last] against every section the device tracks. */
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/*
 * Number of log chunks needed to cover the highest guest-physical
 * address of any region in the device's memory table.
 */
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

/*
 * Select the vhost_ops table matching backend_type.
 * Returns 0 on success, -1 for an unknown/compiled-out backend.
 */
static int vhost_set_backend_type(struct vhost_dev *dev,
                                  VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
#ifdef CONFIG_VHOST_KERNEL
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
#endif
#ifdef CONFIG_VHOST_USER
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
#endif
#ifdef CONFIG_VHOST_VDPA
    case VHOST_BACKEND_TYPE_VDPA:
        dev->vhost_ops = &vdpa_ops;
        break;
#endif
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    if (r == 0) {
        /* The selected ops table must agree with the requested type. */
        assert(dev->vhost_ops->backend_type == backend_type);
    }

    return r;
}

/*
 * Allocate a zeroed dirty log of 'size' chunks with refcount 1.
 * When 'share' is set the log lives in a sealed memfd (log->fd) so it
 * can be mapped by another process; otherwise it is plain heap memory
 * and log->fd stays -1. Returns NULL if memfd allocation fails.
 */
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

/*
 * Get the per-backend-type cached log (shared or private variant),
 * allocating a new one if the cache is empty or the size differs.
 * NOTE(review): if vhost_log_alloc() fails, NULL is cached and
 * returned without the callers checking — verify against call sites.
 */
static struct vhost_log *vhost_log_get(VhostBackendType backend_type,
                                       uint64_t size, bool share)
{
    struct vhost_log *log;

    assert(backend_type > VHOST_BACKEND_TYPE_NONE);
    assert(backend_type < VHOST_BACKEND_TYPE_MAX);

    log = share ?
        vhost_log_shm[backend_type] : vhost_log[backend_type];

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm[backend_type] = log;
        } else {
            vhost_log[backend_type] = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

/*
 * Drop the device's reference on its dirty log. On the last reference,
 * optionally sync the range the old log covered, then free the log and
 * clear the per-backend cache entry. Also withdraws the device from the
 * logger election and resets dev->log/dev->log_size.
 */
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;
    VhostBackendType backend_type;

    if (!log) {
        return;
    }

    assert(dev->vhost_ops);
    backend_type = dev->vhost_ops->backend_type;

    if (backend_type == VHOST_BACKEND_TYPE_NONE ||
        backend_type >= VHOST_BACKEND_TYPE_MAX) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log[backend_type] == log) {
            g_free(log->log);
            vhost_log[backend_type] = NULL;
        } else if (vhost_log_shm[backend_type] == log) {
            /* Shared logs were memfd-allocated; free them the same way. */
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm[backend_type] = NULL;
        }

        g_free(log);
    }

    vhost_dev_elect_mem_logger(dev, false);
    dev->log = NULL;
    dev->log_size = 0;
}

/* True when the backend requires the log to live in shareable memory. */
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

/*
 * Switch the device to a log of 'size' chunks: tell the backend about
 * the new log base first, then release the old log.
 */
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(dev->vhost_ops->backend_type,
                                          size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG(r,
                        "vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

/*
 * Map a guest-physical range for QEMU access. With a vIOMMU the vring
 * addresses are IOVAs that the backend translates itself, so the
 * address is returned unchanged instead of being mapped here.
 */
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, bool is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

/* Counterpart of vhost_memory_map(); a no-op in the vIOMMU case. */
static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

/*
 * Check one ring part against one memory region.
 * Returns 0 if the ring does not intersect the region, or intersects
 * with an unchanged HVA<->GPA mapping; -ENOMEM if the ring is only
 * partially covered; -EBUSY if the backing host address moved.
 */
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        /* No overlap with this region at all. */
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

/*
 * Verify that the desc/avail/used rings of every started virtqueue are
 * still correctly mapped relative to the given region. Skipped entirely
 * when a vIOMMU is in use (ring addresses are IOVAs then).
 */
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    /* Indexed by j below: 0 = desc, 1 = avail, 2 = used. */
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            /* Queue not started; nothing to verify. */
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

/*
 * vhost_section: identify sections needed for vhost access
 *
 * We only care about RAM sections here (where virtqueue and guest
 * internals accessed by virtio might live).
 */
static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
        uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
        uint8_t handled_dirty;

        /*
         * Kernel based vhost doesn't handle any block which is doing
         * dirty-tracking other than migration for which it has
         * specific logging support. However for TCG the kernel never
         * gets involved anyway so we can also ignore its
         * self-modifying code detection flags. However a vhost-user
         * client could still confuse a TCG guest if it re-writes
         * executable memory that has already been translated.
         */
        handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
            (1 << DIRTY_MEMORY_CODE);

        if (dirty_mask & ~handled_dirty) {
            trace_vhost_reject_section(mr->name, 1);
            return false;
        }

        /*
         * Some backends (like vhost-user) can only handle memory regions
         * that have an fd (can be mapped into a different process). Filter
         * the ones without an fd out, if requested.
         *
         * TODO: we might have to limit to MAP_SHARED as well.
         */
        if (memory_region_get_fd(section->mr) < 0 &&
            dev->vhost_ops->vhost_backend_no_private_memslots &&
            dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
            trace_vhost_reject_section(mr->name, 2);
            return false;
        }

        trace_vhost_section(mr->name);
        return true;
    } else {
        trace_vhost_reject_section(mr->name, 3);
        return false;
    }
}

/*
 * MemoryListener .begin callback: start collecting a fresh list of
 * sections; vhost_region_addnop() appends to it, vhost_commit() swaps
 * it in.
 */
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

/*
 * MemoryListener .commit callback: adopt the sections gathered since
 * vhost_begin(), rebuild the vhost memory-region table, push it to the
 * backend (resizing the dirty log around the update when logging), and
 * finally drop the references held on the old section list.
 */
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        for (i = 0; i < n_old_sections; i++) {
            if (!MemoryRegionSection_eq(&old_sections[i],
                                        &dev->mem_sections[i])) {
                changed = true;
                break;
            }
        }
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;

    /* Account the slots against the matching global counter. */
    if (dev->vhost_ops->vhost_backend_no_private_memslots &&
        dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
        used_shared_memslots = dev->mem->nregions;
    } else {
        used_memslots = dev->mem->nregions;
    }

    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size = int128_get64(mrs->size);
        cur_vmr->userspace_addr =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
741 */ 742 static void vhost_region_add_section(struct vhost_dev *dev, 743 MemoryRegionSection *section) 744 { 745 bool need_add = true; 746 uint64_t mrs_size = int128_get64(section->size); 747 uint64_t mrs_gpa = section->offset_within_address_space; 748 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) + 749 section->offset_within_region; 750 RAMBlock *mrs_rb = section->mr->ram_block; 751 752 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size, 753 mrs_host); 754 755 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) { 756 /* Round the section to it's page size */ 757 /* First align the start down to a page boundary */ 758 size_t mrs_page = qemu_ram_pagesize(mrs_rb); 759 uint64_t alignage = mrs_host & (mrs_page - 1); 760 if (alignage) { 761 mrs_host -= alignage; 762 mrs_size += alignage; 763 mrs_gpa -= alignage; 764 } 765 /* Now align the size up to a page boundary */ 766 alignage = mrs_size & (mrs_page - 1); 767 if (alignage) { 768 mrs_size += mrs_page - alignage; 769 } 770 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, 771 mrs_size, mrs_host); 772 } 773 774 if (dev->n_tmp_sections && !section->unmergeable) { 775 /* Since we already have at least one section, lets see if 776 * this extends it; since we're scanning in order, we only 777 * have to look at the last one, and the FlatView that calls 778 * us shouldn't have overlaps. 
779 */ 780 MemoryRegionSection *prev_sec = dev->tmp_sections + 781 (dev->n_tmp_sections - 1); 782 uint64_t prev_gpa_start = prev_sec->offset_within_address_space; 783 uint64_t prev_size = int128_get64(prev_sec->size); 784 uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size); 785 uint64_t prev_host_start = 786 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) + 787 prev_sec->offset_within_region; 788 uint64_t prev_host_end = range_get_last(prev_host_start, prev_size); 789 790 if (mrs_gpa <= (prev_gpa_end + 1)) { 791 /* OK, looks like overlapping/intersecting - it's possible that 792 * the rounding to page sizes has made them overlap, but they should 793 * match up in the same RAMBlock if they do. 794 */ 795 if (mrs_gpa < prev_gpa_start) { 796 error_report("%s:Section '%s' rounded to %"PRIx64 797 " prior to previous '%s' %"PRIx64, 798 __func__, section->mr->name, mrs_gpa, 799 prev_sec->mr->name, prev_gpa_start); 800 /* A way to cleanly fail here would be better */ 801 return; 802 } 803 /* Offset from the start of the previous GPA to this GPA */ 804 size_t offset = mrs_gpa - prev_gpa_start; 805 806 if (prev_host_start + offset == mrs_host && 807 section->mr == prev_sec->mr && !prev_sec->unmergeable) { 808 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size); 809 need_add = false; 810 prev_sec->offset_within_address_space = 811 MIN(prev_gpa_start, mrs_gpa); 812 prev_sec->offset_within_region = 813 MIN(prev_host_start, mrs_host) - 814 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr); 815 prev_sec->size = int128_make64(max_end - MIN(prev_host_start, 816 mrs_host)); 817 trace_vhost_region_add_section_merge(section->mr->name, 818 int128_get64(prev_sec->size), 819 prev_sec->offset_within_address_space, 820 prev_sec->offset_within_region); 821 } else { 822 /* adjoining regions are fine, but overlapping ones with 823 * different blocks/offsets shouldn't happen 824 */ 825 if (mrs_gpa != prev_gpa_end + 1) { 826 error_report("%s: Overlapping but not 
coherent sections " 827 "at %"PRIx64, 828 __func__, mrs_gpa); 829 return; 830 } 831 } 832 } 833 } 834 835 if (need_add) { 836 ++dev->n_tmp_sections; 837 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections, 838 dev->n_tmp_sections); 839 dev->tmp_sections[dev->n_tmp_sections - 1] = *section; 840 /* The flatview isn't stable and we don't use it, making it NULL 841 * means we can memcmp the list. 842 */ 843 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL; 844 memory_region_ref(section->mr); 845 } 846 } 847 848 /* Used for both add and nop callbacks */ 849 static void vhost_region_addnop(MemoryListener *listener, 850 MemoryRegionSection *section) 851 { 852 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 853 memory_listener); 854 855 if (!vhost_section(dev, section)) { 856 return; 857 } 858 vhost_region_add_section(dev, section); 859 } 860 861 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 862 { 863 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n); 864 struct vhost_dev *hdev = iommu->hdev; 865 hwaddr iova = iotlb->iova + iommu->iommu_offset; 866 867 if (vhost_backend_invalidate_device_iotlb(hdev, iova, 868 iotlb->addr_mask + 1)) { 869 error_report("Fail to invalidate device iotlb"); 870 } 871 } 872 873 static void vhost_iommu_region_add(MemoryListener *listener, 874 MemoryRegionSection *section) 875 { 876 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 877 iommu_listener); 878 struct vhost_iommu *iommu; 879 Int128 end; 880 int iommu_idx; 881 IOMMUMemoryRegion *iommu_mr; 882 883 if (!memory_region_is_iommu(section->mr)) { 884 return; 885 } 886 887 iommu_mr = IOMMU_MEMORY_REGION(section->mr); 888 889 iommu = g_malloc0(sizeof(*iommu)); 890 end = int128_add(int128_make64(section->offset_within_region), 891 section->size); 892 end = int128_sub(end, int128_one()); 893 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, 894 MEMTXATTRS_UNSPECIFIED); 895 
    /* Device-IOTLB events when the vdev uses them, plain unmaps otherwise. */
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        dev->vdev->device_iotlb_enabled ?
                        IOMMU_NOTIFIER_DEVIOTLB_UNMAP :
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n,
                                          &error_fatal);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

/*
 * iommu_listener .region_del callback: unregister and free the notifier
 * that vhost_iommu_region_add() installed for this section, if any.
 */
static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

/*
 * Re-register all IOMMU notifiers of a running vhost device so their
 * event type matches the vdev's current device_iotlb_enabled setting.
 */
void vhost_toggle_device_iotlb(VirtIODevice *vdev)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    struct vhost_dev *dev;
    struct vhost_iommu *iommu;

    if (vdev->vhost_started) {
        dev = vdc->get_vhost(vdev);
    } else {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
        iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
                IOMMU_NOTIFIER_DEVIOTLB_UNMAP : IOMMU_NOTIFIER_UNMAP;
        memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
                                              &error_fatal);
    }
}

/*
 * Push one virtqueue's ring addresses to the backend, with dirty
 * logging flagged on or off. Returns the backend's result (< 0 on
 * failure).
 */
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr;
    int r;
    memset(&addr, 0, sizeof(struct vhost_vring_addr));

    if (dev->vhost_ops->vhost_vq_get_addr) {
        /* Backend (e.g. vDPA) supplies the addresses itself. */
        r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
            return r;
        }
    } else {
        addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
        addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
        addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
    }
    addr.index = idx;
    addr.log_guest_addr = vq->used_phys;
    addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
    r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
    }
    return r;
}

/*
 * Negotiate features with the backend, optionally adding
 * VHOST_F_LOG_ALL for dirty logging, and stripping/forcing
 * VIRTIO_F_IOMMU_PLATFORM depending on the IOMMU situation.
 */
static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    if (!vhost_dev_has_iommu(dev)) {
        features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
    }
    if (dev->vhost_ops->vhost_force_iommu) {
        if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
            features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
        }
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_features failed");
        goto out;
    }
    if (dev->vhost_ops->vhost_set_backend_cap) {
        r = dev->vhost_ops->vhost_set_backend_cap(dev);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
            goto out;
        }
    }

out:
    return r;
}

/*
 * Enable or disable dirty logging on the backend: renegotiate features
 * and reprogram every started virtqueue, rolling back on failure.
 */
static int vhost_dev_set_log(struct
vhost_dev *dev, bool enable_log) 1019 { 1020 int r, i, idx; 1021 hwaddr addr; 1022 1023 r = vhost_dev_set_features(dev, enable_log); 1024 if (r < 0) { 1025 goto err_features; 1026 } 1027 for (i = 0; i < dev->nvqs; ++i) { 1028 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); 1029 addr = virtio_queue_get_desc_addr(dev->vdev, idx); 1030 if (!addr) { 1031 /* 1032 * The queue might not be ready for start. If this 1033 * is the case there is no reason to continue the process. 1034 * The similar logic is used by the vhost_virtqueue_start() 1035 * routine. 1036 */ 1037 continue; 1038 } 1039 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, 1040 enable_log); 1041 if (r < 0) { 1042 goto err_vq; 1043 } 1044 } 1045 1046 /* 1047 * At log start we select our vhost_device logger that will scan the 1048 * memory sections and skip for the others. This is possible because 1049 * the log is shared amongst all vhost devices for a given type of 1050 * backend. 1051 */ 1052 vhost_dev_elect_mem_logger(dev, enable_log); 1053 1054 return 0; 1055 err_vq: 1056 for (; i >= 0; --i) { 1057 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); 1058 addr = virtio_queue_get_desc_addr(dev->vdev, idx); 1059 if (!addr) { 1060 continue; 1061 } 1062 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, 1063 dev->log_enabled); 1064 } 1065 vhost_dev_set_features(dev, dev->log_enabled); 1066 err_features: 1067 return r; 1068 } 1069 1070 static int vhost_migration_log(MemoryListener *listener, bool enable) 1071 { 1072 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 1073 memory_listener); 1074 int r; 1075 if (enable == dev->log_enabled) { 1076 return 0; 1077 } 1078 if (!dev->started) { 1079 dev->log_enabled = enable; 1080 return 0; 1081 } 1082 1083 r = 0; 1084 if (!enable) { 1085 r = vhost_dev_set_log(dev, false); 1086 if (r < 0) { 1087 goto check_dev_state; 1088 } 1089 vhost_log_put(dev, false); 1090 } else { 1091 vhost_dev_log_resize(dev, 
vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            goto check_dev_state;
        }
    }

check_dev_state:
    dev->log_enabled = enable;
    /*
     * vhost-user-* devices could change their state during log
     * initialization due to disconnect. So check dev state after
     * vhost communication.
     */
    if (!dev->started) {
        /*
         * Since device is in the stopped state, it is okay for
         * migration. Return success.
         */
        r = 0;
    }
    if (r) {
        /* An error occurred. */
        dev->log_enabled = false;
    }

    return r;
}

/*
 * Global dirty-log start callback: without a dirty log, migration of a
 * running vhost device cannot proceed, so failure here is fatal.
 */
static bool vhost_log_global_start(MemoryListener *listener, Error **errp)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
    return true;
}

/* Global dirty-log stop callback; failure is fatal, as above. */
static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

/* Per-section log start: intentionally a no-op for now. */
static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

/* Per-section log stop: intentionally a no-op for now. */
static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    /* VIRTIO 1.0+ devices are always little-endian by spec. */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#if HOST_BIG_ENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

/* Tell the backend which endianness to use for a legacy vring. */
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    int r;
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
    }
    return r;
}

/*
 * Translate a guest physical address to the backend's userspace address
 * using the device's memory table.  On success, *uaddr is the mapped
 * address and *len the remaining length of the containing region;
 * returns -EFAULT if no region covers @gpa.
 */
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

/*
 * Handle an IOTLB miss reported by the backend: look up the IOVA in the
 * device's IOMMU address space and push the resulting translation back
 * via an IOTLB update.  Returns 0 on success, negative errno otherwise.
 */
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    RCU_READ_LOCK_GUARD();

    trace_vhost_iotlb_miss(dev, 1);

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write,
                                          MEMTXATTRS_UNSPECIFIED);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 3);
            error_report("Fail to lookup the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        /* Clamp the reply to the IOMMU page containing the IOVA. */
        len =
MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 4);
            error_report("Fail to update device iotlb");
            goto out;
        }
    }

    trace_vhost_iotlb_miss(dev, 2);

out:
    return ret;
}

/*
 * Start one virtqueue in the backend: program ring size, last-avail
 * index, (legacy) endianness and ring addresses, map the rings into
 * QEMU, and wire up the kick/call eventfds.  Returns 0 on success
 * (including the "queue not ready yet" case), negative errno on error.
 */
int vhost_virtqueue_start(struct vhost_dev *dev,
                          struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq,
                          unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
        return r;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
        return r;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return r;
        }
    }

    /* Map the three ring areas; each mapping must cover the full size. */
    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, false);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, false);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, true);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        /* No guest notifier for this vector: detach the call eventfd. */
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

/*
 * Stop one virtqueue: retrieve the last-avail index from the backend
 * (or restore it internally if the backend is gone), then unmap the
 * rings.  Returns the backend's get_vring_base result.
 */
int vhost_virtqueue_stop(struct vhost_dev *dev,
                         struct VirtIODevice *vdev,
                         struct vhost_virtqueue *vq,
                         unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return 0;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
 */
    if (vhost_needs_vring_endian(vdev)) {
        /* Best effort: a failure here is not propagated on the stop path. */
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    /* Only the used ring may have been written by the backend. */
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
    return r;
}

/*
 * Set the busy-poll timeout for one vring; returns -EINVAL if the
 * backend does not support busy polling.
 */
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

/* Handler for the per-vring error eventfd: just report the event. */
static void vhost_virtqueue_error_notifier(EventNotifier *n)
{
    struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
                                              error_notifier);
    struct vhost_dev *dev = vq->dev;
    int index = vq - dev->vqs;

    if (event_notifier_test_and_clear(n) && dev->vdev) {
        VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
                        dev->vq_index + index);
    }
}

/*
 * One-time init of a vring's notifiers: the masked "call" notifier and,
 * if the backend supports it, the error notifier.
 */
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd =
event_notifier_get_wfd(&vq->masked_notifier); 1466 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); 1467 if (r) { 1468 VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed"); 1469 goto fail_call; 1470 } 1471 1472 vq->dev = dev; 1473 1474 if (dev->vhost_ops->vhost_set_vring_err) { 1475 r = event_notifier_init(&vq->error_notifier, 0); 1476 if (r < 0) { 1477 goto fail_call; 1478 } 1479 1480 file.fd = event_notifier_get_fd(&vq->error_notifier); 1481 r = dev->vhost_ops->vhost_set_vring_err(dev, &file); 1482 if (r) { 1483 VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed"); 1484 goto fail_err; 1485 } 1486 1487 event_notifier_set_handler(&vq->error_notifier, 1488 vhost_virtqueue_error_notifier); 1489 } 1490 1491 return 0; 1492 1493 fail_err: 1494 event_notifier_cleanup(&vq->error_notifier); 1495 fail_call: 1496 event_notifier_cleanup(&vq->masked_notifier); 1497 return r; 1498 } 1499 1500 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) 1501 { 1502 event_notifier_cleanup(&vq->masked_notifier); 1503 if (vq->dev->vhost_ops->vhost_set_vring_err) { 1504 event_notifier_set_handler(&vq->error_notifier, NULL); 1505 event_notifier_cleanup(&vq->error_notifier); 1506 } 1507 } 1508 1509 int vhost_dev_init(struct vhost_dev *hdev, void *opaque, 1510 VhostBackendType backend_type, uint32_t busyloop_timeout, 1511 Error **errp) 1512 { 1513 unsigned int used, reserved, limit; 1514 uint64_t features; 1515 int i, r, n_initialized_vqs = 0; 1516 1517 hdev->vdev = NULL; 1518 hdev->migration_blocker = NULL; 1519 1520 r = vhost_set_backend_type(hdev, backend_type); 1521 assert(r >= 0); 1522 1523 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp); 1524 if (r < 0) { 1525 goto fail; 1526 } 1527 1528 r = hdev->vhost_ops->vhost_set_owner(hdev); 1529 if (r < 0) { 1530 error_setg_errno(errp, -r, "vhost_set_owner failed"); 1531 goto fail; 1532 } 1533 1534 r = hdev->vhost_ops->vhost_get_features(hdev, &features); 1535 if (r < 0) { 1536 error_setg_errno(errp, -r, "vhost_get_features 
failed"); 1537 goto fail; 1538 } 1539 1540 limit = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); 1541 if (limit < MEMORY_DEVICES_SAFE_MAX_MEMSLOTS && 1542 memory_devices_memslot_auto_decision_active()) { 1543 error_setg(errp, "some memory device (like virtio-mem)" 1544 " decided how many memory slots to use based on the overall" 1545 " number of memory slots; this vhost backend would further" 1546 " restricts the overall number of memory slots"); 1547 error_append_hint(errp, "Try plugging this vhost backend before" 1548 " plugging such memory devices.\n"); 1549 r = -EINVAL; 1550 goto fail; 1551 } 1552 1553 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) { 1554 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); 1555 if (r < 0) { 1556 error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i); 1557 goto fail; 1558 } 1559 } 1560 1561 if (busyloop_timeout) { 1562 for (i = 0; i < hdev->nvqs; ++i) { 1563 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 1564 busyloop_timeout); 1565 if (r < 0) { 1566 error_setg_errno(errp, -r, "Failed to set busyloop timeout"); 1567 goto fail_busyloop; 1568 } 1569 } 1570 } 1571 1572 hdev->features = features; 1573 1574 hdev->memory_listener = (MemoryListener) { 1575 .name = "vhost", 1576 .begin = vhost_begin, 1577 .commit = vhost_commit, 1578 .region_add = vhost_region_addnop, 1579 .region_nop = vhost_region_addnop, 1580 .log_start = vhost_log_start, 1581 .log_stop = vhost_log_stop, 1582 .log_sync = vhost_log_sync, 1583 .log_global_start = vhost_log_global_start, 1584 .log_global_stop = vhost_log_global_stop, 1585 .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND 1586 }; 1587 1588 hdev->iommu_listener = (MemoryListener) { 1589 .name = "vhost-iommu", 1590 .region_add = vhost_iommu_region_add, 1591 .region_del = vhost_iommu_region_del, 1592 }; 1593 1594 if (hdev->migration_blocker == NULL) { 1595 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { 1596 
error_setg(&hdev->migration_blocker, 1597 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); 1598 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) { 1599 error_setg(&hdev->migration_blocker, 1600 "Migration disabled: failed to allocate shared memory"); 1601 } 1602 } 1603 1604 if (hdev->migration_blocker != NULL) { 1605 r = migrate_add_blocker_normal(&hdev->migration_blocker, errp); 1606 if (r < 0) { 1607 goto fail_busyloop; 1608 } 1609 } 1610 1611 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); 1612 hdev->n_mem_sections = 0; 1613 hdev->mem_sections = NULL; 1614 hdev->log = NULL; 1615 hdev->log_size = 0; 1616 hdev->log_enabled = false; 1617 hdev->started = false; 1618 memory_listener_register(&hdev->memory_listener, &address_space_memory); 1619 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); 1620 1621 /* 1622 * The listener we registered properly updated the corresponding counter. 1623 * So we can trust that these values are accurate. 1624 */ 1625 if (hdev->vhost_ops->vhost_backend_no_private_memslots && 1626 hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) { 1627 used = used_shared_memslots; 1628 } else { 1629 used = used_memslots; 1630 } 1631 /* 1632 * We assume that all reserved memslots actually require a real memslot 1633 * in our vhost backend. This might not be true, for example, if the 1634 * memslot would be ROM. If ever relevant, we can optimize for that -- 1635 * but we'll need additional information about the reservations. 
1636 */ 1637 reserved = memory_devices_get_reserved_memslots(); 1638 if (used + reserved > limit) { 1639 error_setg(errp, "vhost backend memory slots limit (%d) is less" 1640 " than current number of used (%d) and reserved (%d)" 1641 " memory slots for memory devices.", limit, used, reserved); 1642 r = -EINVAL; 1643 goto fail_busyloop; 1644 } 1645 1646 return 0; 1647 1648 fail_busyloop: 1649 if (busyloop_timeout) { 1650 while (--i >= 0) { 1651 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); 1652 } 1653 } 1654 fail: 1655 hdev->nvqs = n_initialized_vqs; 1656 vhost_dev_cleanup(hdev); 1657 return r; 1658 } 1659 1660 void vhost_dev_cleanup(struct vhost_dev *hdev) 1661 { 1662 int i; 1663 1664 trace_vhost_dev_cleanup(hdev); 1665 1666 for (i = 0; i < hdev->nvqs; ++i) { 1667 vhost_virtqueue_cleanup(hdev->vqs + i); 1668 } 1669 if (hdev->mem) { 1670 /* those are only safe after successful init */ 1671 memory_listener_unregister(&hdev->memory_listener); 1672 QLIST_REMOVE(hdev, entry); 1673 } 1674 migrate_del_blocker(&hdev->migration_blocker); 1675 g_free(hdev->mem); 1676 g_free(hdev->mem_sections); 1677 if (hdev->vhost_ops) { 1678 hdev->vhost_ops->vhost_backend_cleanup(hdev); 1679 } 1680 assert(!hdev->log); 1681 1682 memset(hdev, 0, sizeof(struct vhost_dev)); 1683 } 1684 1685 void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, 1686 VirtIODevice *vdev, 1687 unsigned int nvqs) 1688 { 1689 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1690 int i, r; 1691 1692 /* 1693 * Batch all the host notifiers in a single transaction to avoid 1694 * quadratic time complexity in address_space_update_ioeventfds(). 
 */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        /* Cleanup failure is fatal; the report above fires first. */
        assert(r >= 0);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; ++i) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        return r;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            /* Roll back only the i notifiers already bound. */
            memory_region_transaction_commit();
            vhost_dev_disable_notifiers_nvqs(hdev, vdev, i);
            return r;
        }
    }

    memory_region_transaction_commit();

    return 0;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    /* @n is an absolute vq index; vqs[] is relative to vq_index. */
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        /* Route "call" events to our internal masked notifier. */
        file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
    } else {
        /* Route "call" events directly to the guest notifier. */
        file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        error_report("vhost_set_vring_call failed %d", -r);
    }
}

/* Test and clear a pending (masked) config-change notification. */
bool vhost_config_pending(struct vhost_dev *hdev)
{
    assert(hdev->vhost_ops);
    if ((hdev->started == false) ||
        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
        return false;
    }

    EventNotifier *notifier =
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
    return event_notifier_test_and_clear(notifier);
}

/* Mask/unmask config-change interrupts from the backend. */
void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool
mask)
{
    int fd;
    int r;
    EventNotifier *notifier =
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
    EventNotifier *config_notifier = &vdev->config_notifier;
    assert(hdev->vhost_ops);

    /* No-op unless the device runs and the backend supports config calls. */
    if ((hdev->started == false) ||
        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
        return;
    }
    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        fd = event_notifier_get_fd(notifier);
    } else {
        fd = event_notifier_get_fd(config_notifier);
    }
    r = hdev->vhost_ops->vhost_set_config_call(hdev, fd);
    if (r < 0) {
        error_report("vhost_set_config_call failed %d", -r);
    }
}

/* Detach the config-change eventfd from the backend (fd == -1). */
static void vhost_stop_config_intr(struct vhost_dev *dev)
{
    int fd = -1;
    assert(dev->vhost_ops);
    if (dev->vhost_ops->vhost_set_config_call) {
        dev->vhost_ops->vhost_set_config_call(dev, fd);
    }
}

/* Attach the device's config notifier to the backend; on success, kick
 * it once so a config change that raced with setup is not lost. */
static void vhost_start_config_intr(struct vhost_dev *dev)
{
    int r;

    assert(dev->vhost_ops);
    int fd = event_notifier_get_fd(&dev->vdev->config_notifier);
    if (dev->vhost_ops->vhost_set_config_call) {
        r = dev->vhost_ops->vhost_set_config_call(dev, fd);
        if (!r) {
            event_notifier_set(&dev->vdev->config_notifier);
        }
    }
}

/* Mask @features down to the bits in @feature_bits that the backend
 * supports (hdev->features); returns the filtered feature set. */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

/* Record the guest-acked bits of @features (restricted to
 * @feature_bits) into hdev->acked_features. */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
} 1889 } 1890 1891 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, 1892 uint32_t config_len, Error **errp) 1893 { 1894 assert(hdev->vhost_ops); 1895 1896 if (hdev->vhost_ops->vhost_get_config) { 1897 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len, 1898 errp); 1899 } 1900 1901 error_setg(errp, "vhost_get_config not implemented"); 1902 return -ENOSYS; 1903 } 1904 1905 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, 1906 uint32_t offset, uint32_t size, uint32_t flags) 1907 { 1908 assert(hdev->vhost_ops); 1909 1910 if (hdev->vhost_ops->vhost_set_config) { 1911 return hdev->vhost_ops->vhost_set_config(hdev, data, offset, 1912 size, flags); 1913 } 1914 1915 return -ENOSYS; 1916 } 1917 1918 void vhost_dev_set_config_notifier(struct vhost_dev *hdev, 1919 const VhostDevConfigOps *ops) 1920 { 1921 hdev->config_ops = ops; 1922 } 1923 1924 void vhost_dev_free_inflight(struct vhost_inflight *inflight) 1925 { 1926 if (inflight && inflight->addr) { 1927 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); 1928 inflight->addr = NULL; 1929 inflight->fd = -1; 1930 } 1931 } 1932 1933 int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) 1934 { 1935 int r; 1936 1937 if (hdev->vhost_ops->vhost_get_inflight_fd == NULL || 1938 hdev->vhost_ops->vhost_set_inflight_fd == NULL) { 1939 return 0; 1940 } 1941 1942 hdev->vdev = vdev; 1943 1944 r = vhost_dev_set_features(hdev, hdev->log_enabled); 1945 if (r < 0) { 1946 VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed"); 1947 return r; 1948 } 1949 1950 return 0; 1951 } 1952 1953 int vhost_dev_set_inflight(struct vhost_dev *dev, 1954 struct vhost_inflight *inflight) 1955 { 1956 int r; 1957 1958 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { 1959 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); 1960 if (r) { 1961 VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed"); 1962 return r; 1963 } 1964 } 1965 1966 return 0; 1967 } 1968 1969 
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, 1970 struct vhost_inflight *inflight) 1971 { 1972 int r; 1973 1974 if (dev->vhost_ops->vhost_get_inflight_fd) { 1975 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); 1976 if (r) { 1977 VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed"); 1978 return r; 1979 } 1980 } 1981 1982 return 0; 1983 } 1984 1985 static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) 1986 { 1987 if (!hdev->vhost_ops->vhost_set_vring_enable) { 1988 return 0; 1989 } 1990 1991 /* 1992 * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not 1993 * been negotiated, the rings start directly in the enabled state, and 1994 * .vhost_set_vring_enable callback will fail since 1995 * VHOST_USER_SET_VRING_ENABLE is not supported. 1996 */ 1997 if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER && 1998 !virtio_has_feature(hdev->backend_features, 1999 VHOST_USER_F_PROTOCOL_FEATURES)) { 2000 return 0; 2001 } 2002 2003 return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); 2004 } 2005 2006 /* 2007 * Host notifiers must be enabled at this point. 2008 * 2009 * If @vrings is true, this function will enable all vrings before starting the 2010 * device. If it is false, the vring initialization is left to be done by the 2011 * caller. 
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    trace_vhost_dev_start(hdev, vdev->name, vrings);

    vdev->vhost_started = true;
    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    /* Set up the masked config-change notifier before anything can fire. */
    r = event_notifier_init(
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "event_notifier_init failed");
        goto fail_vq;
    }
    event_notifier_test_and_clear(
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
    if (!vdev->use_guest_notifier_mask) {
        vhost_config_mask(hdev, vdev, true);
    }
    if (hdev->log_enabled) {
        uint64_t log_base;

        /* Allocate (or reuse) the dirty log and hand it to the backend. */
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->vhost_ops->backend_type,
                                  hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
            goto fail_log;
        }
        vhost_dev_elect_mem_logger(hdev, true);
    }
    if (vrings) {
        r = vhost_dev_set_vring_enable(hdev, true);
        if (r) {
            goto fail_log;
        }
    }
    if (hdev->vhost_ops->vhost_dev_start) {
        r = hdev->vhost_ops->vhost_dev_start(hdev, true);
        if (r) {
            goto fail_start;
        }
    }
    if (vhost_dev_has_iommu(hdev) &&
        hdev->vhost_ops->vhost_set_iotlb_callback) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly,
         * vhost-kernel code requires for this.*/
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            r = vhost_device_iotlb_miss(hdev, vq->used_phys, true);
            if (r) {
                goto fail_iotlb;
            }
        }
    }
    vhost_start_config_intr(hdev);
    return 0;
    /* Unwind in strict reverse order of the setup steps above. */
fail_iotlb:
    if (vhost_dev_has_iommu(hdev) &&
        hdev->vhost_ops->vhost_set_iotlb_callback) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
    }
    if (hdev->vhost_ops->vhost_dev_start) {
        hdev->vhost_ops->vhost_dev_start(hdev, false);
    }
fail_start:
    if (vrings) {
        vhost_dev_set_vring_enable(hdev, false);
    }
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

fail_mem:
    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_unregister(&hdev->iommu_listener);
    }
fail_features:
    vdev->vhost_started = false;
    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point.
*/ 2139 int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) 2140 { 2141 int i; 2142 int rc = 0; 2143 2144 /* should only be called after backend is connected */ 2145 assert(hdev->vhost_ops); 2146 event_notifier_test_and_clear( 2147 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); 2148 event_notifier_test_and_clear(&vdev->config_notifier); 2149 event_notifier_cleanup( 2150 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); 2151 2152 trace_vhost_dev_stop(hdev, vdev->name, vrings); 2153 2154 if (hdev->vhost_ops->vhost_dev_start) { 2155 hdev->vhost_ops->vhost_dev_start(hdev, false); 2156 } 2157 if (vrings) { 2158 vhost_dev_set_vring_enable(hdev, false); 2159 } 2160 for (i = 0; i < hdev->nvqs; ++i) { 2161 rc |= vhost_virtqueue_stop(hdev, 2162 vdev, 2163 hdev->vqs + i, 2164 hdev->vq_index + i); 2165 } 2166 if (hdev->vhost_ops->vhost_reset_status) { 2167 hdev->vhost_ops->vhost_reset_status(hdev); 2168 } 2169 2170 if (vhost_dev_has_iommu(hdev)) { 2171 if (hdev->vhost_ops->vhost_set_iotlb_callback) { 2172 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); 2173 } 2174 memory_listener_unregister(&hdev->iommu_listener); 2175 } 2176 vhost_stop_config_intr(hdev); 2177 vhost_log_put(hdev, true); 2178 hdev->started = false; 2179 vdev->vhost_started = false; 2180 hdev->vdev = NULL; 2181 return rc; 2182 } 2183 2184 int vhost_net_set_backend(struct vhost_dev *hdev, 2185 struct vhost_vring_file *file) 2186 { 2187 if (hdev->vhost_ops->vhost_net_set_backend) { 2188 return hdev->vhost_ops->vhost_net_set_backend(hdev, file); 2189 } 2190 2191 return -ENOSYS; 2192 } 2193 2194 int vhost_reset_device(struct vhost_dev *hdev) 2195 { 2196 if (hdev->vhost_ops->vhost_reset_device) { 2197 return hdev->vhost_ops->vhost_reset_device(hdev); 2198 } 2199 2200 return -ENOSYS; 2201 } 2202 2203 bool vhost_supports_device_state(struct vhost_dev *dev) 2204 { 2205 if (dev->vhost_ops->vhost_supports_device_state) { 2206 return 
dev->vhost_ops->vhost_supports_device_state(dev);
    }

    return false;
}

/*
 * Hand a pipe file descriptor to the backend for device-state transfer
 * in the given @direction and @phase.  Callers treat ownership of @fd
 * as transferred to the backend; the backend may return a replacement
 * fd through @reply_fd (callers switch over when *reply_fd >= 0).
 * Returns -ENOSYS and sets @errp if the transport has no such op.
 */
int vhost_set_device_state_fd(struct vhost_dev *dev,
                              VhostDeviceStateDirection direction,
                              VhostDeviceStatePhase phase,
                              int fd,
                              int *reply_fd,
                              Error **errp)
{
    if (dev->vhost_ops->vhost_set_device_state_fd) {
        return dev->vhost_ops->vhost_set_device_state_fd(dev, direction, phase,
                                                         fd, reply_fd, errp);
    }

    error_setg(errp,
               "vhost transport does not support migration state transfer");
    return -ENOSYS;
}

/*
 * Ask the backend whether a previously initiated state transfer
 * completed successfully.  Returns -ENOSYS and sets @errp if the
 * transport has no such op.
 */
int vhost_check_device_state(struct vhost_dev *dev, Error **errp)
{
    if (dev->vhost_ops->vhost_check_device_state) {
        return dev->vhost_ops->vhost_check_device_state(dev, errp);
    }

    error_setg(errp,
               "vhost transport does not support migration state transfer");
    return -ENOSYS;
}

/*
 * Save the back-end's internal device state into @f.
 *
 * The state is streamed from the backend through a pipe and written to
 * @f as a sequence of chunks, each a big-endian 32-bit length followed
 * by that many payload bytes; a zero length terminates the stream.
 *
 * Returns 0 on success, a negative error code on failure (@errp set).
 */
int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp)
{
    ERRP_GUARD();
    /* Maximum chunk size in which to transfer the state */
    const size_t chunk_size = 1 * 1024 * 1024;
    g_autofree void *transfer_buf = NULL;
    g_autoptr(GError) g_err = NULL;
    int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1;
    int ret;

    /* [0] for reading (our end), [1] for writing (back-end's end) */
    if (!g_unix_open_pipe(pipe_fds, FD_CLOEXEC, &g_err)) {
        error_setg(errp, "Failed to set up state transfer pipe: %s",
                   g_err->message);
        ret = -EINVAL;
        goto fail;
    }

    read_fd = pipe_fds[0];
    write_fd = pipe_fds[1];

    /*
     * VHOST_TRANSFER_STATE_PHASE_STOPPED means the device must be stopped.
     * Ideally, it is suspended, but SUSPEND/RESUME currently do not exist for
     * vhost-user, so just check that it is stopped at all.
     */
    assert(!dev->started);

    /*
     * Transfer ownership of write_fd to the back-end.
     * NOTE(review): write_fd is never closed locally, even when the call
     * below fails — presumably the transport takes ownership of the fd
     * unconditionally; confirm against vhost_set_device_state_fd()
     * implementations.
     */
    ret = vhost_set_device_state_fd(dev,
                                    VHOST_TRANSFER_STATE_DIRECTION_SAVE,
                                    VHOST_TRANSFER_STATE_PHASE_STOPPED,
                                    write_fd,
                                    &reply_fd,
                                    errp);
    if (ret < 0) {
        error_prepend(errp, "Failed to initiate state transfer: ");
        goto fail;
    }

    /* If the back-end wishes to use a different pipe, switch over */
    if (reply_fd >= 0) {
        close(read_fd);
        read_fd = reply_fd;
    }

    transfer_buf = g_malloc(chunk_size);

    /*
     * Drain the pipe, forwarding each chunk to @f as a be32 length
     * prefix plus payload; a read() of 0 (EOF) ends the stream and is
     * recorded as a zero-length terminator chunk.
     */
    while (true) {
        ssize_t read_ret;

        read_ret = RETRY_ON_EINTR(read(read_fd, transfer_buf, chunk_size));
        if (read_ret < 0) {
            /* Capture errno before any other call can clobber it */
            ret = -errno;
            error_setg_errno(errp, -ret, "Failed to receive state");
            goto fail;
        }

        assert(read_ret <= chunk_size);
        qemu_put_be32(f, read_ret);

        if (read_ret == 0) {
            /* EOF */
            break;
        }

        qemu_put_buffer(f, transfer_buf, read_ret);
    }

    /*
     * Back-end will not really care, but be clean and close our end of the pipe
     * before inquiring the back-end about whether transfer was successful
     */
    close(read_fd);
    read_fd = -1;

    /* Also, verify that the device is still stopped */
    assert(!dev->started);

    ret = vhost_check_device_state(dev, errp);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    /* read_fd is -1 once closed (or once ownership was handed off) */
    if (read_fd >= 0) {
        close(read_fd);
    }

    return ret;
}

/*
 * Counterpart to vhost_save_backend_state(): read the chunked device
 * state back from @f (be32 length prefix + payload per chunk, zero
 * length terminates) and feed it to the back-end through a pipe.
 *
 * Returns 0 on success, a negative error code on failure (@errp set).
 */
int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp)
{
    ERRP_GUARD();
    size_t transfer_buf_size = 0;
    g_autofree void *transfer_buf = NULL;
    g_autoptr(GError) g_err = NULL;
    int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1;
    int ret;

    /* [0] for reading (back-end's end), [1] for writing (our end) */
    if (!g_unix_open_pipe(pipe_fds, FD_CLOEXEC, &g_err)) {
        error_setg(errp, "Failed to set up state transfer pipe: %s",
                   g_err->message);
        ret = -EINVAL;
        goto fail;
    }

    read_fd = pipe_fds[0];
    write_fd = pipe_fds[1];

    /*
     * VHOST_TRANSFER_STATE_PHASE_STOPPED means the device must be stopped.
     * Ideally, it is suspended, but SUSPEND/RESUME currently do not exist for
     * vhost-user, so just check that it is stopped at all.
     */
    assert(!dev->started);

    /* Transfer ownership of read_fd to the back-end */
    ret = vhost_set_device_state_fd(dev,
                                    VHOST_TRANSFER_STATE_DIRECTION_LOAD,
                                    VHOST_TRANSFER_STATE_PHASE_STOPPED,
                                    read_fd,
                                    &reply_fd,
                                    errp);
    if (ret < 0) {
        error_prepend(errp, "Failed to initiate state transfer: ");
        goto fail;
    }

    /* If the back-end wishes to use a different pipe, switch over */
    if (reply_fd >= 0) {
        close(write_fd);
        write_fd = reply_fd;
    }

    while (true) {
        /*
         * Chunk length comes from the migration stream and may be up to
         * UINT32_MAX; transfer_buf below is grown to match.
         * NOTE(review): there is no upper bound other than the be32
         * range — consider whether a sanity limit is warranted.
         */
        size_t this_chunk_size = qemu_get_be32(f);
        ssize_t write_ret;
        const uint8_t *transfer_pointer;

        if (this_chunk_size == 0) {
            /* End of state */
            break;
        }

        /* Grow the staging buffer only when a larger chunk arrives */
        if (transfer_buf_size < this_chunk_size) {
            transfer_buf = g_realloc(transfer_buf, this_chunk_size);
            transfer_buf_size = this_chunk_size;
        }

        if (qemu_get_buffer(f, transfer_buf, this_chunk_size) <
                this_chunk_size)
        {
            error_setg(errp, "Failed to read state");
            ret = -EINVAL;
            goto fail;
        }

        /* write() may accept fewer bytes than asked; loop until done */
        transfer_pointer = transfer_buf;
        while (this_chunk_size > 0) {
            write_ret = RETRY_ON_EINTR(
                write(write_fd, transfer_pointer, this_chunk_size)
            );
            if (write_ret < 0) {
                /* Capture errno before any other call can clobber it */
                ret = -errno;
                error_setg_errno(errp, -ret, "Failed to send state");
                goto fail;
            } else if (write_ret == 0) {
                error_setg(errp, "Failed to send state: Connection is closed");
                ret = -ECONNRESET;
                goto fail;
            }

            assert(write_ret <= this_chunk_size);
            this_chunk_size -= write_ret;
            transfer_pointer += write_ret;
        }
    }

    /*
     * Close our end, thus ending transfer, before inquiring the back-end about
     * whether transfer was successful
     */
    close(write_fd);
    write_fd = -1;

    /* Also, verify that the device is still stopped */
    assert(!dev->started);

    ret = vhost_check_device_state(dev, errp);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    /* write_fd is -1 once closed (or once ownership was handed off) */
    if (write_fd >= 0) {
        close(write_fd);
    }

    return ret;
}