/*
 * vhost shadow virtqueue
 *
 * SPDX-FileCopyrightText: Red Hat, Inc. 2021
 * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "linux-headers/linux/vhost.h"

/**
 * Validate the transport device features that both the guest can use with the
 * SVQ and the SVQ can use with the device.
 *
 * @features: The features
 * @errp: Error pointer
 */
bool vhost_svq_valid_features(uint64_t features, Error **errp)
{
    bool ok = true;
    uint64_t svq_features = features;

    for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END;
         ++b) {
        switch (b) {
        case VIRTIO_F_ANY_LAYOUT:
            continue;

        case VIRTIO_F_ACCESS_PLATFORM:
            /* SVQ trusts the host's IOMMU to translate addresses */
        case VIRTIO_F_VERSION_1:
            /* SVQ trusts that the guest vring is little endian */
            if (!(svq_features & BIT_ULL(b))) {
                svq_features |= BIT_ULL(b);
                ok = false;
            }
            continue;

        default:
            if (svq_features & BIT_ULL(b)) {
                svq_features &= ~BIT_ULL(b);
                ok = false;
            }
        }
    }

    if (!ok) {
        error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64
                   ", ok: 0x%"PRIx64, features, svq_features);
    }
    return ok;
}

/**
 * Number of descriptors that the SVQ can make available from the guest.
 *
 * @svq: The svq
 */
static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
    return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
}

/**
 * Translate addresses between qemu's virtual address and the SVQ IOVA
 *
 * @svq: Shadow VirtQueue
 * @addrs: Destination array for the translated SVQ IOVA addresses
 * @iovec: Source qemu's VA addresses
 * @num: Length of iovec and minimum length of addrs
 */
static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
                                     hwaddr *addrs, const struct iovec *iovec,
                                     size_t num)
{
    if (num == 0) {
        return true;
    }

    for (size_t i = 0; i < num; ++i) {
        DMAMap needle = {
            .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
            .size = iovec[i].iov_len,
        };
        Int128 needle_last, map_last;
        size_t off;

        const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
        /*
         * Map cannot be NULL since iova map contains all guest space and
         * qemu already has a physical address mapped
         */
        if (unlikely(!map)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid address 0x%"HWADDR_PRIx" given by guest",
                          needle.translated_addr);
            return false;
        }

        off = needle.translated_addr - map->translated_addr;
        addrs[i] = map->iova + off;

        needle_last = int128_add(int128_make64(needle.translated_addr),
                                 int128_make64(iovec[i].iov_len));
        map_last = int128_make64(map->translated_addr + map->size);
        if (unlikely(int128_gt(needle_last, map_last))) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Guest buffer expands over iova range");
            return false;
        }
    }

    return true;
}

/**
 * Write descriptors to the SVQ vring
 *
 * @svq: The shadow virtqueue
 * @sg: Array of SVQ IOVA addresses, one per iovec entry
 * @iovec: The iovec from the guest
 * @num: iovec length
 * @more_descs: True if more descriptors follow in the chain
 * @write: True for device-writable descriptors
 */
static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
                                    const struct iovec *iovec, size_t num,
                                    bool more_descs, bool write)
{
    uint16_t i = svq->free_head, last = svq->free_head;
    unsigned n;
    uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
    vring_desc_t *descs = svq->vring.desc;

    if (num == 0) {
        return;
    }

    for (n = 0; n < num; n++) {
        if (more_descs || (n + 1 < num)) {
            descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
            descs[i].next = cpu_to_le16(svq->desc_next[i]);
        } else {
            descs[i].flags = flags;
        }
        descs[i].addr = cpu_to_le64(sg[n]);
        descs[i].len = cpu_to_le32(iovec[n].iov_len);

        last = i;
        i = cpu_to_le16(svq->desc_next[i]);
    }

    svq->free_head = le16_to_cpu(svq->desc_next[last]);
}

static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
                                VirtQueueElement *elem, unsigned *head)
{
    unsigned avail_idx;
    vring_avail_t *avail = svq->vring.avail;
    bool ok;
    g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num));

    *head = svq->free_head;

    /* We need some descriptors here */
    if (unlikely(!elem->out_num && !elem->in_num)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Guest provided element with no descriptors");
        return false;
    }

    ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num);
    if (unlikely(!ok)) {
        return false;
    }
    vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num,
                            elem->in_num > 0, false);

    ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num);
    if (unlikely(!ok)) {
        return false;
    }

    vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true);

    /*
     * Put the entry in the available array (but don't update avail->idx until
     * the write barrier below).
     */
    avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1);
    avail->ring[avail_idx] = cpu_to_le16(*head);
    svq->shadow_avail_idx++;

    /* Update the avail index after writing the descriptors */
    smp_wmb();
    avail->idx = cpu_to_le16(svq->shadow_avail_idx);

    return true;
}

static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
{
    unsigned qemu_head;
    bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
    if (unlikely(!ok)) {
        return false;
    }

    svq->ring_id_maps[qemu_head] = elem;
    return true;
}

static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
    /*
     * We need to expose the available array entries before checking the used
     * flags
     */
    smp_mb();
    if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) {
        return;
    }

    event_notifier_set(&svq->hdev_kick);
}

/**
 * Forward available buffers.
 *
 * @svq: Shadow VirtQueue
 *
 * Note that this function does not guarantee that all guest's available
 * buffers are available to the device in SVQ avail ring. The guest may have
 * exposed a GPA / GIOVA contiguous buffer, but it may not be contiguous in
 * qemu vaddr.
 *
 * If that happens, guest's kick notifications will be disabled until the
 * device uses some buffers.
 */
static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
{
    /* Clear event notifier */
    event_notifier_test_and_clear(&svq->svq_kick);

    /* Forward to the device as many available buffers as possible */
    do {
        virtio_queue_set_notification(svq->vq, false);

        while (true) {
            VirtQueueElement *elem;
            bool ok;

            if (svq->next_guest_avail_elem) {
                elem = g_steal_pointer(&svq->next_guest_avail_elem);
            } else {
                elem = virtqueue_pop(svq->vq, sizeof(*elem));
            }

            if (!elem) {
                break;
            }

            if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
                /*
                 * This condition is possible since a contiguous buffer in GPA
                 * does not imply a contiguous buffer in qemu's VA
                 * scatter-gather segments. If that happens, the buffer exposed
                 * to the device needs to be a chain of descriptors at this
                 * moment.
                 *
                 * SVQ cannot hold more available buffers if we are here:
                 * queue the current guest descriptor and ignore further kicks
                 * until some elements are used.
                 */
                svq->next_guest_avail_elem = elem;
                return;
            }

            ok = vhost_svq_add(svq, elem);
            if (unlikely(!ok)) {
                /* VQ is broken, just return and ignore any other kicks */
                return;
            }
            vhost_svq_kick(svq);
        }

        virtio_queue_set_notification(svq->vq, true);
    } while (!virtio_queue_empty(svq->vq));
}

/**
 * Handle guest's kick.
 *
 * @n: guest kick event notifier, the one that guest set to notify svq.
 */
static void vhost_handle_guest_kick_notifier(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
    event_notifier_test_and_clear(n);
    vhost_handle_guest_kick(svq);
}

/**
 * Check whether the device has marked more buffers as used since the last
 * time SVQ read the used index.
 *
 * @svq: The svq
 */
static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
{
    if (svq->last_used_idx != svq->shadow_used_idx) {
        return true;
    }

    svq->shadow_used_idx = cpu_to_le16(svq->vring.used->idx);

    return svq->last_used_idx != svq->shadow_used_idx;
}

/**
 * Enable vhost device calls after disabling them.
 *
 * @svq: The svq
 *
 * Returns false if there are pending used buffers from the vhost device,
 * avoiding the possible race between SVQ checking for more work and enabling
 * callbacks. Returns true if the SVQ used vring has no more pending buffers.
 */
static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
{
    svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
    /* Make sure the flag is written before the read of used_idx */
    smp_mb();
    return !vhost_svq_more_used(svq);
}

static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
{
    svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
}

/**
 * Get the next used buffer from the SVQ used ring.
 *
 * @svq: The svq
 * @len: Output for the length written by the device
 *
 * Returns the guest element that originated the used descriptor, or NULL if
 * there are no more used buffers or the device returned an invalid index.
 */
static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                           uint32_t *len)
{
    const vring_used_t *used = svq->vring.used;
    vring_used_elem_t used_elem;
    uint16_t last_used;

    if (!vhost_svq_more_used(svq)) {
        return NULL;
    }

    /* Only get used array entries after they have been exposed by dev */
    smp_rmb();
    last_used = svq->last_used_idx & (svq->vring.num - 1);
    used_elem.id = le32_to_cpu(used->ring[last_used].id);
    used_elem.len = le32_to_cpu(used->ring[last_used].len);

    svq->last_used_idx++;
    if (unlikely(used_elem.id >= svq->vring.num)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
                      svq->vdev->name, used_elem.id);
        return NULL;
    }

    if (unlikely(!svq->ring_id_maps[used_elem.id])) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "Device %s says index %u is used, but it was not available",
            svq->vdev->name, used_elem.id);
        return NULL;
    }

    svq->desc_next[used_elem.id] = svq->free_head;
    svq->free_head = used_elem.id;

    *len = used_elem.len;
    return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
}

/**
 * Forward used buffers from the SVQ used ring to the guest virtqueue.
 *
 * @svq: The svq
 * @check_for_avail_queue: If true, also try to make available to the device
 *                         any guest buffer queued while the SVQ was full.
 */
static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                            bool check_for_avail_queue)
{
    VirtQueue *vq = svq->vq;

    /* Forward as many used buffers as possible. */
    do {
        unsigned i = 0;

        vhost_svq_disable_notification(svq);
        while (true) {
            uint32_t len;
            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
            if (!elem) {
                break;
            }

            if (unlikely(i >= svq->vring.num)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                         "More than %u used buffers obtained in a %u size SVQ",
                         i, svq->vring.num);
                virtqueue_fill(vq, elem, len, i);
                virtqueue_flush(vq, i);
                return;
            }
            virtqueue_fill(vq, elem, len, i++);
        }

        virtqueue_flush(vq, i);
        event_notifier_set(&svq->svq_call);

        if (check_for_avail_queue && svq->next_guest_avail_elem) {
            /*
             * Avail ring was full when vhost_svq_flush was called, so it's a
             * good moment to make more descriptors available if possible.
             */
            vhost_handle_guest_kick(svq);
        }
    } while (!vhost_svq_enable_notification(svq));
}

/**
 * Forward used buffers.
 *
 * @n: hdev call event notifier, the one that device set to notify svq.
 *
 * Note that we are not making any buffers available in the loop, so there is
 * no way it runs more than virtqueue size times.
 */
static void vhost_svq_handle_call(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
                                             hdev_call);
    event_notifier_test_and_clear(n);
    vhost_svq_flush(svq, true);
}

/**
 * Set the call notifier for the SVQ to call the guest
 *
 * @svq: Shadow virtqueue
 * @call_fd: call notifier
 *
 * Called on BQL context.
 */
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
{
    if (call_fd == VHOST_FILE_UNBIND) {
        /*
         * Make event_notifier_set fail if it is called while handling a
         * device call.
         *
         * SVQ still needs device notifications, since it needs to keep
         * forwarding used buffers even with the unbind.
         */
        memset(&svq->svq_call, 0, sizeof(svq->svq_call));
    } else {
        event_notifier_init_fd(&svq->svq_call, call_fd);
    }
}

/**
 * Get the shadow vq vring address.
 * @svq: Shadow virtqueue
 * @addr: Destination to store address
 */
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                              struct vhost_vring_addr *addr)
{
    addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
    addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
    addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}

size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
{
    size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
    size_t avail_size = offsetof(vring_avail_t, ring) +
                        sizeof(uint16_t) * svq->vring.num;

    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
}

size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
    size_t used_size = offsetof(vring_used_t, ring) +
                       sizeof(vring_used_elem_t) * svq->vring.num;
    return ROUND_UP(used_size, qemu_real_host_page_size());
}

/**
 * Set a new file descriptor for the guest to kick the SVQ and notify for avail
 *
 * @svq: The svq
 * @svq_kick_fd: The svq kick fd
 *
 * Note that the SVQ will never close the old file descriptor.
 */
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
{
    EventNotifier *svq_kick = &svq->svq_kick;
    bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
    bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;

    if (poll_stop) {
        event_notifier_set_handler(svq_kick, NULL);
    }

    /*
     * event_notifier_set_handler already checks for guest's notifications if
     * they arrive at the new file descriptor in the switch, so there is no
     * need to explicitly check for them.
     */
    if (poll_start) {
        event_notifier_init_fd(svq_kick, svq_kick_fd);
        event_notifier_set(svq_kick);
        event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
    }
}

/**
 * Start the shadow virtqueue operation.
 *
 * @svq: Shadow Virtqueue
 * @vdev: VirtIO device
 * @vq: Virtqueue to shadow
 */
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq)
{
    size_t desc_size, driver_size, device_size;

    svq->next_guest_avail_elem = NULL;
    svq->shadow_avail_idx = 0;
    svq->shadow_used_idx = 0;
    svq->last_used_idx = 0;
    svq->vdev = vdev;
    svq->vq = vq;

    svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
    driver_size = vhost_svq_driver_area_size(svq);
    device_size = vhost_svq_device_area_size(svq);
    svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
    desc_size = sizeof(vring_desc_t) * svq->vring.num;
    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
    memset(svq->vring.desc, 0, driver_size);
    svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
    memset(svq->vring.used, 0, device_size);
    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
    svq->desc_next = g_new0(uint16_t, svq->vring.num);
    for (unsigned i = 0; i < svq->vring.num - 1; i++) {
        svq->desc_next[i] = cpu_to_le16(i + 1);
    }
}

/**
 * Stop the shadow virtqueue operation.
 * @svq: Shadow Virtqueue
 */
void vhost_svq_stop(VhostShadowVirtqueue *svq)
{
    event_notifier_set_handler(&svq->svq_kick, NULL);
    g_autofree VirtQueueElement *next_avail_elem = NULL;

    if (!svq->vq) {
        return;
    }

    /* Send all pending used descriptors to guest */
    vhost_svq_flush(svq, false);

    for (unsigned i = 0; i < svq->vring.num; ++i) {
        g_autofree VirtQueueElement *elem = NULL;
        elem = g_steal_pointer(&svq->ring_id_maps[i]);
        if (elem) {
            virtqueue_detach_element(svq->vq, elem, 0);
        }
    }

    next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
    if (next_avail_elem) {
        virtqueue_detach_element(svq->vq, next_avail_elem, 0);
    }
    svq->vq = NULL;
    g_free(svq->desc_next);
    g_free(svq->ring_id_maps);
    qemu_vfree(svq->vring.desc);
    qemu_vfree(svq->vring.used);
}

/**
 * Creates vhost shadow virtqueue, and instructs the vhost device to use the
 * shadow methods and file descriptors.
 *
 * @iova_tree: Tree to perform descriptor translations
 *
 * Returns the new virtqueue or NULL.
 *
 * In case of error, reason is reported through error_report.
 */
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
{
    g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_report("Couldn't create kick event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_report("Couldn't create call event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_call;
    }

    event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
    svq->iova_tree = iova_tree;
    return g_steal_pointer(&svq);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return NULL;
}

/**
 * Free the resources of the shadow virtqueue.
 *
 * @pvq: gpointer to SVQ so it can be used by autofree functions.
 */
void vhost_svq_free(gpointer pvq)
{
    VhostShadowVirtqueue *vq = pvq;
    vhost_svq_stop(vq);
    event_notifier_cleanup(&vq->hdev_kick);
    event_notifier_set_handler(&vq->hdev_call, NULL);
    event_notifier_cleanup(&vq->hdev_call);
    g_free(vq);
}
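
/*
 * Illustrative-only sketch (never called from this file): one possible
 * lifecycle of a shadow virtqueue using the public helpers above. The
 * function name, the guest_kick_fd/guest_call_fd parameters, and the call
 * ordering are assumptions made for illustration; in QEMU the vhost backend
 * (e.g. vhost-vdpa) performs the equivalent wiring, and the driver/device
 * area sizes and vring addresses would actually be passed to the device.
 */
static G_GNUC_UNUSED void vhost_svq_lifecycle_example(VhostIOVATree *iova_tree,
                                                      VirtIODevice *vdev,
                                                      VirtQueue *vq,
                                                      int guest_kick_fd,
                                                      int guest_call_fd)
{
    struct vhost_vring_addr addr;
    VhostShadowVirtqueue *svq = vhost_svq_new(iova_tree);

    if (!svq) {
        return;
    }

    /* Start shadowing vq: allocates the shadow vrings and resets indexes */
    vhost_svq_start(svq, vdev, vq);

    /* Addresses and sizes the vhost device would be configured with */
    vhost_svq_get_vring_addr(svq, &addr);
    (void)vhost_svq_driver_area_size(svq);
    (void)vhost_svq_device_area_size(svq);
    (void)addr;

    /* Route guest notifications through the SVQ */
    vhost_svq_set_svq_kick_fd(svq, guest_kick_fd);
    vhost_svq_set_svq_call_fd(svq, guest_call_fd);

    /* ... device runs, SVQ forwards available and used buffers ... */

    /* Tear down: return pending elements to the guest and free resources */
    vhost_svq_stop(svq);
    vhost_svq_free(svq);
}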