/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
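        /* atomic_xchg fetches the chunk and clears it in one operation, so
         * bits the backend sets concurrently from here on land in the
         * zeroed chunk and are picked up by the next sync instead of being
         * lost. */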
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
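/* Unassign distinguishes four cases per existing region: the removed range
 * covers it entirely (drop), covers its tail (shrink), covers its head
 * (shift), or punches a hole in the middle (split in two). */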
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
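/* Two regions merge only if they are contiguous in both guest-physical
 * and userspace addresses; the backend can additionally veto a merge
 * through vhost_backend_can_merge(). */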
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
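/* The dirty log is either plain process memory or, when the backend
 * requires a shareable log (vhost_requires_shm_log, e.g. an out-of-process
 * vhost-user backend), an fd-backed memfd allocation. */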
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Clear these only after the sync above: it relies on dev->log_size
     * still describing the log being released. */
    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
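/* The backend was handed userspace pointers into the vrings, so a memory
 * layout change must not remap a ring that is in use: check that every
 * affected ring is still mapped at the address we originally gave out. */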
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
        ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr,
                                    start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
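/* Runs at the end of a memory transaction: push the changes accumulated
 * by region_add/region_del to the backend in one mem-table update. The
 * dirty log is grown before the update and only shrunk after it, so the
 * backend never logs outside the buffer it currently knows about. */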
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}
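/* Toggle VHOST_F_LOG_ALL together with per-vring logging; if any vring
 * fails to switch, the ones already switched are rolled back to the
 * device's current logging state. */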
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
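/* Backends lacking VHOST_SET_VRING_ENDIAN support are expected to fail
 * with ENOTTY; report that case as "not supported" rather than as a
 * generic error. */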
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
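/* vhost_get_vring_base fetches the last avail index (and, at least for the
 * kernel backend, stops the vring as a side effect); the index is pushed
 * back into virtio so qemu can resume processing where vhost left off. */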
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, which is what legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
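/* Bring up a vhost device: initialise the backend, take ownership, query
 * its feature set, give each virtqueue a call eventfd, and register a
 * memory listener so memslots and dirty logging track guest memory. */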
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;
fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
    i = hdev->nvqs;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    /* The device is only added to vhost_devices on success, so there is
     * no list entry to remove here. */
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}
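/* Undo vhost_dev_init(). hdev->mem is only set once init has succeeded,
 * so the check below treats it as a "fully initialised" marker. */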
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    assert(!hdev->log);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->ioeventfd_started) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
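/* While masked, the backend's call eventfd is pointed at the vq's
 * masked_notifier instead of the guest notifier; pending events collect
 * there and are retrieved with vhost_virtqueue_pending() after unmask. */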
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}