/*
 * vhost shadow virtqueue
 *
 * SPDX-FileCopyrightText: Red Hat, Inc. 2021
 * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "linux-headers/linux/vhost.h"

/**
 * Validate the transport device features that both the guest can use with the
 * SVQ and the SVQ can use with the device.
 *
 * @features: The features
 * @errp: Error pointer
 */
bool vhost_svq_valid_features(uint64_t features, Error **errp)
{
    bool ok = true;
    uint64_t svq_features = features;

    for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END;
         ++b) {
        switch (b) {
        case VIRTIO_F_ANY_LAYOUT:
            continue;

        case VIRTIO_F_ACCESS_PLATFORM:
            /* SVQ trusts the host's IOMMU to translate addresses */
        case VIRTIO_F_VERSION_1:
            /* SVQ trusts that the guest vring is little endian */
            if (!(svq_features & BIT_ULL(b))) {
                svq_features |= BIT_ULL(b);
                ok = false;
            }
            continue;

        default:
            if (svq_features & BIT_ULL(b)) {
                svq_features &= ~BIT_ULL(b);
                ok = false;
            }
        }
    }

    if (!ok) {
        error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64
                         ", ok: 0x%"PRIx64, features, svq_features);
    }
    return ok;
}

/**
 * Number of descriptors that the SVQ can make available from the guest.
 *
 * @svq: The svq
 */
static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
    return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
}
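
/*
 * Worked example of the arithmetic above (illustrative numbers, not from the
 * original source): with vring.num = 256, shadow_avail_idx = 300 and
 * shadow_used_idx = 100, 200 descriptors are in flight, so 256 - 200 = 56
 * slots remain. Both indices are free-running uint16_t counters, so the
 * subtraction stays correct across wrap-around.
 */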

/**
 * Translate addresses between QEMU's virtual address space and the SVQ IOVA
 * space.
 *
 * @svq: Shadow VirtQueue
 * @addrs: Destination array for the translated SVQ IOVA addresses
 * @iovec: Source addresses in QEMU's VA
 * @num: Length of iovec and minimum length of addrs
 */
static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
                                     hwaddr *addrs, const struct iovec *iovec,
                                     size_t num)
{
    if (num == 0) {
        return true;
    }

    for (size_t i = 0; i < num; ++i) {
        DMAMap needle = {
            .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
            .size = iovec[i].iov_len,
        };
        Int128 needle_last, map_last;
        size_t off;

        const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
        /*
         * The map cannot be NULL: the IOVA tree contains all of the guest's
         * memory, and QEMU already has this address mapped
         */
        if (unlikely(!map)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid address 0x%"HWADDR_PRIx" given by guest",
                          needle.translated_addr);
            return false;
        }

        off = needle.translated_addr - map->translated_addr;
        addrs[i] = map->iova + off;

        needle_last = int128_add(int128_make64(needle.translated_addr),
                                 int128_make64(iovec[i].iov_len));
        map_last = int128_make64(map->translated_addr + map->size);
        if (unlikely(int128_gt(needle_last, map_last))) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Guest buffer extends beyond the mapped IOVA range");
            return false;
        }
    }

    return true;
}
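
/*
 * Illustrative translation (hypothetical numbers): given a map with
 * translated_addr = 0x7f0000000000, iova = 0x1000 and size = 0x4000, a guest
 * buffer at QEMU VA 0x7f0000001000 yields off = 0x1000, so the device is
 * given IOVA 0x2000. The trailing check rejects buffers that start inside
 * the map but run past its end.
 */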

/**
 * Write descriptors to the SVQ vring
 *
 * @svq: The shadow virtqueue
 * @sg: Scratch array for the translated hwaddr of each iovec entry
 * @iovec: The iovec from the guest
 * @num: iovec length
 * @more_descs: True if more descriptors follow in the same chain
 * @write: True if they are device-writable descriptors
 *
 * Returns true on success, false otherwise (logging the error).
 */
static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
                                        const struct iovec *iovec, size_t num,
                                        bool more_descs, bool write)
{
    uint16_t i = svq->free_head, last = svq->free_head;
    unsigned n;
    uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
    vring_desc_t *descs = svq->vring.desc;
    bool ok;

    if (num == 0) {
        return true;
    }

    ok = vhost_svq_translate_addr(svq, sg, iovec, num);
    if (unlikely(!ok)) {
        return false;
    }

    for (n = 0; n < num; n++) {
        if (more_descs || (n + 1 < num)) {
            descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
            /* desc_next entries are already stored in little endian */
            descs[i].next = svq->desc_next[i];
        } else {
            descs[i].flags = flags;
        }
        descs[i].addr = cpu_to_le64(sg[n]);
        descs[i].len = cpu_to_le32(iovec[n].iov_len);

        last = i;
        i = le16_to_cpu(svq->desc_next[i]);
    }

    svq->free_head = le16_to_cpu(svq->desc_next[last]);
    return true;
}
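
/*
 * Sketch of the resulting layout (illustrative): for a guest element with two
 * out iovecs and one in iovec, the two calls to this function from
 * vhost_svq_add_split() link three descriptors through desc_next, e.g.
 * d1 (NEXT) -> d2 (NEXT) -> d3 (WRITE), where d1 is the chain head later
 * published in the avail ring and only the in descriptor is device-writable.
 */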

static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
                                VirtQueueElement *elem, unsigned *head)
{
    unsigned avail_idx;
    vring_avail_t *avail = svq->vring.avail;
    bool ok;
    g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num));

    *head = svq->free_head;

    /* We need some descriptors here */
    if (unlikely(!elem->out_num && !elem->in_num)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Guest provided element with no descriptors");
        return false;
    }

    ok = vhost_svq_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num,
                                     elem->in_num > 0, false);
    if (unlikely(!ok)) {
        return false;
    }

    ok = vhost_svq_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false,
                                     true);
    if (unlikely(!ok)) {
        return false;
    }

    /*
     * Put the entry in the available array, but do not update avail->idx
     * until the write barrier below has synchronized the descriptors.
     */
    avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1);
    avail->ring[avail_idx] = cpu_to_le16(*head);
    svq->shadow_avail_idx++;

    /* Update the avail index after writing the descriptors */
    smp_wmb();
    avail->idx = cpu_to_le16(svq->shadow_avail_idx);

    return true;
}
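
/*
 * Note on ordering: the smp_wmb() above pairs with the read barrier that the
 * vhost device issues between loading avail->idx and loading the ring entry
 * and descriptors, so the device can never observe the new index before the
 * descriptor contents are visible.
 */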

static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
    /*
     * We need to expose the available array entries before checking the used
     * flags
     */
    smp_mb();
    if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) {
        return;
    }

    event_notifier_set(&svq->hdev_kick);
}
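
/*
 * The smp_mb() above pairs with the device's barrier between writing
 * VRING_USED_F_NO_NOTIFY and re-checking avail->idx: either the device sees
 * the newly exposed entries, or SVQ sees the request to suppress
 * notifications, so a needed kick is never lost.
 */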

/**
 * Add an element to an SVQ.
 *
 * The caller must check that there are enough slots for the new element. This
 * function takes ownership of the element: on any failure other than -ENOSPC,
 * the element is freed.
 *
 * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
 */
static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
{
    unsigned qemu_head;
    unsigned ndescs = elem->in_num + elem->out_num;
    bool ok;

    if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
        return -ENOSPC;
    }

    ok = vhost_svq_add_split(svq, elem, &qemu_head);
    if (unlikely(!ok)) {
        g_free(elem);
        return -EINVAL;
    }

    svq->ring_id_maps[qemu_head] = elem;
    vhost_svq_kick(svq);
    return 0;
}

/**
 * Forward available buffers.
 *
 * @svq: Shadow VirtQueue
 *
 * Note that this function does not guarantee that all of the guest's
 * available buffers are made available to the device in the SVQ avail ring.
 * The guest may have exposed a GPA / GIOVA contiguous buffer, but it may not
 * be contiguous in QEMU's VA.
 *
 * If that happens, guest's kick notifications will be disabled until the
 * device uses some buffers.
 */
static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
{
    /* Clear event notifier */
    event_notifier_test_and_clear(&svq->svq_kick);

    /* Forward to the device as many available buffers as possible */
    do {
        virtio_queue_set_notification(svq->vq, false);

        while (true) {
            VirtQueueElement *elem;
            int r;

            if (svq->next_guest_avail_elem) {
                elem = g_steal_pointer(&svq->next_guest_avail_elem);
            } else {
                elem = virtqueue_pop(svq->vq, sizeof(*elem));
            }

            if (!elem) {
                break;
            }

            r = vhost_svq_add(svq, elem);
            if (unlikely(r != 0)) {
                if (r == -ENOSPC) {
                    /*
                     * This condition is possible since a buffer contiguous in
                     * GPA does not imply that it is contiguous in QEMU's VA,
                     * so the buffer exposed to the device may need a longer
                     * chain of descriptors than the guest used.
                     *
                     * SVQ cannot hold more available buffers if we are here:
                     * queue the current guest descriptor and ignore kicks
                     * until some elements are used.
                     */
                    svq->next_guest_avail_elem = elem;
                }

                /* VQ is full or broken, just return and ignore kicks */
                return;
            }
        }

        virtio_queue_set_notification(svq->vq, true);
    } while (!virtio_queue_empty(svq->vq));
}
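
/*
 * The notification toggling above is the usual virtio suppression pattern:
 * guest kicks stay disabled while the loop drains the avail ring, and the
 * virtio_queue_empty() re-check after re-enabling them closes the race with
 * a buffer that the guest made available in between.
 */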

/**
 * Handle guest's kick.
 *
 * @n: guest kick event notifier, the one that the guest set to notify the SVQ.
 */
static void vhost_handle_guest_kick_notifier(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
    event_notifier_test_and_clear(n);
    vhost_handle_guest_kick(svq);
}

static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
{
    uint16_t *used_idx = &svq->vring.used->idx;
    if (svq->last_used_idx != svq->shadow_used_idx) {
        return true;
    }

    svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx);

    return svq->last_used_idx != svq->shadow_used_idx;
}
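
/*
 * shadow_used_idx caches the device's used->idx, so the common case (no new
 * used buffers) is answered without touching the shared ring again; the
 * volatile read forces a fresh load of the device-written index once the
 * cached value is exhausted.
 */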

/**
 * Enable vhost device calls after disabling them.
 *
 * @svq: The svq
 *
 * Returns false if there are pending used buffers from the vhost device,
 * avoiding the possible races between SVQ checking for more work and enabling
 * callbacks. True if the SVQ used vring has no more pending buffers.
 */
static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
{
    svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
    /* Make sure the flag is written before the read of used_idx */
    smp_mb();
    return !vhost_svq_more_used(svq);
}
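
/*
 * The smp_mb() in vhost_svq_enable_notification() orders the flags write
 * before the used_idx read: either the device sees that interrupts were
 * re-enabled and sends a call, or the pending used buffer is reported through
 * the return value and the caller loops again instead of missing it.
 */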

static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
{
    svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
}

static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
                                             uint16_t num, uint16_t i)
{
    for (uint16_t j = 0; j < (num - 1); ++j) {
        i = le16_to_cpu(svq->desc_next[i]);
    }

    return i;
}

static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                           uint32_t *len)
{
    const vring_used_t *used = svq->vring.used;
    vring_used_elem_t used_elem;
    uint16_t last_used, last_used_chain, num;

    if (!vhost_svq_more_used(svq)) {
        return NULL;
    }

    /* Only get used array entries after they have been exposed by dev */
    smp_rmb();
    last_used = svq->last_used_idx & (svq->vring.num - 1);
    used_elem.id = le32_to_cpu(used->ring[last_used].id);
    used_elem.len = le32_to_cpu(used->ring[last_used].len);

    svq->last_used_idx++;
    if (unlikely(used_elem.id >= svq->vring.num)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
                      svq->vdev->name, used_elem.id);
        return NULL;
    }

    if (unlikely(!svq->ring_id_maps[used_elem.id])) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "Device %s says index %u is used, but it was not available",
            svq->vdev->name, used_elem.id);
        return NULL;
    }

    num = svq->ring_id_maps[used_elem.id]->in_num +
          svq->ring_id_maps[used_elem.id]->out_num;
    last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
    /* desc_next entries are stored in little endian */
    svq->desc_next[last_used_chain] = cpu_to_le16(svq->free_head);
    svq->free_head = used_elem.id;

    *len = used_elem.len;
    return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
}
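
/*
 * Free-list maintenance sketch (illustrative): if used_elem.id is 3 and its
 * chain is 3 -> 4 -> 5, vhost_svq_last_desc_of_chain() returns 5, desc_next[5]
 * is pointed at the old free_head and free_head becomes 3, splicing the whole
 * chain back onto the free list in one step.
 */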

static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                            bool check_for_avail_queue)
{
    VirtQueue *vq = svq->vq;

    /* Forward as many used buffers as possible. */
    do {
        unsigned i = 0;

        vhost_svq_disable_notification(svq);
        while (true) {
            uint32_t len;
            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
            if (!elem) {
                break;
            }

            if (unlikely(i >= svq->vring.num)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                         "More than %u used buffers obtained in a %u size SVQ",
                         i, svq->vring.num);
                virtqueue_fill(vq, elem, len, i);
                virtqueue_flush(vq, i);
                return;
            }
            virtqueue_fill(vq, elem, len, i++);
        }

        virtqueue_flush(vq, i);
        event_notifier_set(&svq->svq_call);

        if (check_for_avail_queue && svq->next_guest_avail_elem) {
            /*
             * Avail ring was full when vhost_svq_flush was called, so it's a
             * good moment to make more descriptors available if possible.
             */
            vhost_handle_guest_kick(svq);
        }
    } while (!vhost_svq_enable_notification(svq));
}
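
/*
 * The outer do/while re-runs the flush whenever
 * vhost_svq_enable_notification() reports used buffers that arrived while
 * notifications were disabled, so no completion is left unprocessed between
 * the last check and the re-enable.
 */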

/**
 * Forward used buffers.
 *
 * @n: hdev call event notifier, the one that the device set to notify svq.
 *
 * Note that we are not making any buffers available in the loop, so there is
 * no way that it runs more than virtqueue size times.
 */
static void vhost_svq_handle_call(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
                                             hdev_call);
    event_notifier_test_and_clear(n);
    vhost_svq_flush(svq, true);
}

/**
 * Set the call notifier for the SVQ to call the guest
 *
 * @svq: Shadow virtqueue
 * @call_fd: call notifier
 *
 * Called on BQL context.
 */
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
{
    if (call_fd == VHOST_FILE_UNBIND) {
        /*
         * Zero the notifier so that event_notifier_set fails if it is called
         * while handling a device call.
         *
         * SVQ still needs device notifications, since it needs to keep
         * forwarding used buffers even with the unbind.
         */
        memset(&svq->svq_call, 0, sizeof(svq->svq_call));
    } else {
        event_notifier_init_fd(&svq->svq_call, call_fd);
    }
}

/**
 * Get the shadow vq vring address.
 * @svq: Shadow virtqueue
 * @addr: Destination to store address
 */
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                              struct vhost_vring_addr *addr)
{
    addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
    addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
    addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}

size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
{
    size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
    size_t avail_size = offsetof(vring_avail_t, ring) +
                                             sizeof(uint16_t) * svq->vring.num;

    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
}

size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
    size_t used_size = offsetof(vring_used_t, ring) +
                                    sizeof(vring_used_elem_t) * svq->vring.num;
    return ROUND_UP(used_size, qemu_real_host_page_size());
}
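
/*
 * Worked example of the size math (illustrative, assuming 4 KiB host pages):
 * with vring.num = 256, the driver area needs 256 * 16 = 4096 bytes of
 * descriptors plus 4 + 256 * 2 = 516 bytes of avail ring, rounded up to
 * 8192 bytes; the device area needs 4 + 256 * 8 = 2052 bytes of used ring,
 * rounded up to 4096 bytes.
 */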

/**
 * Set a new file descriptor for the guest to kick the SVQ and notify of
 * available buffers.
 *
 * @svq: The svq
 * @svq_kick_fd: The svq kick fd
 *
 * Note that the SVQ will never close the old file descriptor.
 */
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
{
    EventNotifier *svq_kick = &svq->svq_kick;
    bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
    bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;

    if (poll_stop) {
        event_notifier_set_handler(svq_kick, NULL);
    }

    /*
     * event_notifier_set_handler already checks for guest notifications that
     * arrive at the new file descriptor during the switch, so there is no
     * need to check for them explicitly.
     */
    if (poll_start) {
        event_notifier_init_fd(svq_kick, svq_kick_fd);
        event_notifier_set(svq_kick);
        event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
    }
}

/**
 * Start the shadow virtqueue operation.
 *
 * @svq: Shadow Virtqueue
 * @vdev: VirtIO device
 * @vq: Virtqueue to shadow
 */
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq)
{
    size_t desc_size, driver_size, device_size;

    svq->next_guest_avail_elem = NULL;
    svq->shadow_avail_idx = 0;
    svq->shadow_used_idx = 0;
    svq->last_used_idx = 0;
    svq->vdev = vdev;
    svq->vq = vq;

    svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
    driver_size = vhost_svq_driver_area_size(svq);
    device_size = vhost_svq_device_area_size(svq);
    svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
    desc_size = sizeof(vring_desc_t) * svq->vring.num;
    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
    memset(svq->vring.desc, 0, driver_size);
    svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
    memset(svq->vring.used, 0, device_size);
    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
    svq->desc_next = g_new0(uint16_t, svq->vring.num);
    for (unsigned i = 0; i < svq->vring.num - 1; i++) {
        svq->desc_next[i] = cpu_to_le16(i + 1);
    }
}
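
/*
 * Resulting memory layout: the driver area is one page-aligned block holding
 * the descriptor table immediately followed by the avail ring, and the used
 * ring lives in its own page-aligned block; these are the addresses that
 * vhost_svq_get_vring_addr() later hands to the vhost device.
 */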

/**
 * Stop the shadow virtqueue operation.
 * @svq: Shadow Virtqueue
 */
void vhost_svq_stop(VhostShadowVirtqueue *svq)
{
    event_notifier_set_handler(&svq->svq_kick, NULL);
    g_autofree VirtQueueElement *next_avail_elem = NULL;

    if (!svq->vq) {
        return;
    }

    /* Send all pending used descriptors to guest */
    vhost_svq_flush(svq, false);

    for (unsigned i = 0; i < svq->vring.num; ++i) {
        g_autofree VirtQueueElement *elem = NULL;
        elem = g_steal_pointer(&svq->ring_id_maps[i]);
        if (elem) {
            virtqueue_detach_element(svq->vq, elem, 0);
        }
    }

    next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
    if (next_avail_elem) {
        virtqueue_detach_element(svq->vq, next_avail_elem, 0);
    }
    svq->vq = NULL;
    g_free(svq->desc_next);
    g_free(svq->ring_id_maps);
    qemu_vfree(svq->vring.desc);
    qemu_vfree(svq->vring.used);
}

/**
 * Creates the vhost shadow virtqueue, whose shadow methods and file
 * descriptors the vhost device is later instructed to use.
 *
 * @iova_tree: Tree to perform descriptors translations
 *
 * Returns the new virtqueue or NULL.
 *
 * In case of error, the reason is reported through error_report.
 */
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
{
    g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_report("Couldn't create kick event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_report("Couldn't create call event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_call;
    }

    event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
    svq->iova_tree = iova_tree;
    return g_steal_pointer(&svq);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return NULL;
}
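
/*
 * Typical lifecycle from a caller's point of view (a minimal sketch, not
 * taken from this file: the vhost-vdpa code also maps the rings and swaps
 * the device's kick/call file descriptors, and guest_kick_fd below is a
 * placeholder name):
 *
 *     VhostShadowVirtqueue *svq = vhost_svq_new(iova_tree);
 *     if (!svq) {
 *         return;
 *     }
 *     vhost_svq_start(svq, vdev, vq);
 *     vhost_svq_set_svq_kick_fd(svq, guest_kick_fd);
 *     ...
 *     vhost_svq_stop(svq);
 *     vhost_svq_free(svq);
 */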

/**
 * Free the resources of the shadow virtqueue.
 *
 * @pvq: gpointer to SVQ so it can be used by autofree functions.
 */
void vhost_svq_free(gpointer pvq)
{
    VhostShadowVirtqueue *vq = pvq;
    vhost_svq_stop(vq);
    event_notifier_cleanup(&vq->hdev_kick);
    event_notifier_set_handler(&vq->hdev_call, NULL);
    event_notifier_cleanup(&vq->hdev_call);
    g_free(vq);
}