/*
 * vhost shadow virtqueue
 *
 * SPDX-FileCopyrightText: Red Hat, Inc. 2021
 * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "linux-headers/linux/vhost.h"

/**
 * Validate the transport device features that both the guest can use with the
 * SVQ and the SVQ can use with the device.
 *
 * @features: The device features
 * @errp: Error pointer
 */
bool vhost_svq_valid_features(uint64_t features, Error **errp)
{
    bool ok = true;
    uint64_t svq_features = features;

    for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END;
         ++b) {
        switch (b) {
        case VIRTIO_F_ANY_LAYOUT:
            continue;

        case VIRTIO_F_ACCESS_PLATFORM:
            /* SVQ trusts the host's IOMMU to translate addresses */
        case VIRTIO_F_VERSION_1:
            /* SVQ trusts that the guest vring is little endian */
            if (!(svq_features & BIT_ULL(b))) {
                svq_features |= BIT_ULL(b);
                ok = false;
            }
            continue;

        default:
            if (svq_features & BIT_ULL(b)) {
                svq_features &= ~BIT_ULL(b);
                ok = false;
            }
        }
    }

    if (!ok) {
        error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64
                         ", ok: 0x%"PRIx64, features, svq_features);
    }
    return ok;
}

/**
 * Number of descriptors that the SVQ can make available from the guest.
 *
 * @svq: The svq
 */
static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
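    /*
     * shadow_avail_idx - shadow_used_idx is the number of buffers that SVQ has
     * made available to the device and the device has not marked as used yet,
     * so the rest of the ring is free.
     */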
    return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
}

/**
 * Translate addresses between QEMU's virtual addresses and the SVQ IOVA
 *
 * @svq: Shadow VirtQueue
 * @addrs: Destination array for the translated IOVA addresses
 * @iovec: Source iovec with QEMU's VA addresses
 * @num: Length of iovec and minimum length of addrs
 */
static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
                                     hwaddr *addrs, const struct iovec *iovec,
                                     size_t num)
{
    if (num == 0) {
        return true;
    }

    for (size_t i = 0; i < num; ++i) {
        DMAMap needle = {
            .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
            .size = iovec[i].iov_len,
        };
        Int128 needle_last, map_last;
        size_t off;

        const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
        /*
         * The map cannot be NULL: the IOVA tree contains all the guest memory,
         * and QEMU already has its physical addresses mapped.
         */
        if (unlikely(!map)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid address 0x%"HWADDR_PRIx" given by guest",
                          needle.translated_addr);
            return false;
        }

        off = needle.translated_addr - map->translated_addr;
        addrs[i] = map->iova + off;

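        /*
         * Check that the translated buffer does not run past the end of the
         * mapping: SVQ does not split a buffer across IOVA tree entries.
         */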
        needle_last = int128_add(int128_make64(needle.translated_addr),
                                 int128_make64(iovec[i].iov_len));
        map_last = int128_make64(map->translated_addr + map->size);
        if (unlikely(int128_gt(needle_last, map_last))) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Guest buffer expands over iova range");
            return false;
        }
    }

    return true;
}

/**
 * Write descriptors to the SVQ vring
 *
 * @svq: The shadow virtqueue
 * @sg: Scratch array to store the translated IOVA addresses
 * @iovec: The iovec from the guest
 * @num: iovec length
 * @more_descs: True if more descriptors come in the chain
 * @write: True if they are writable descriptors
 *
 * Returns true on success, false otherwise after logging the error.
 */
static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
                                        const struct iovec *iovec, size_t num,
                                        bool more_descs, bool write)
{
    uint16_t i = svq->free_head, last = svq->free_head;
    unsigned n;
    uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
    vring_desc_t *descs = svq->vring.desc;
    bool ok;

    if (num == 0) {
        return true;
    }

    ok = vhost_svq_translate_addr(svq, sg, iovec, num);
    if (unlikely(!ok)) {
        return false;
    }

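    /*
     * Walk the free descriptor list, filling one descriptor per iovec entry.
     * Every descriptor but the last of the chain keeps VRING_DESC_F_NEXT set,
     * and the last one only if the caller signals more descriptors to come.
     */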
    for (n = 0; n < num; n++) {
        if (more_descs || (n + 1 < num)) {
            descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
            descs[i].next = cpu_to_le16(svq->desc_next[i]);
        } else {
            descs[i].flags = flags;
        }
        descs[i].addr = cpu_to_le64(sg[n]);
        descs[i].len = cpu_to_le32(iovec[n].iov_len);

        last = i;
        i = cpu_to_le16(svq->desc_next[i]);
    }

    svq->free_head = le16_to_cpu(svq->desc_next[last]);
    return true;
}

static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
                                const struct iovec *out_sg, size_t out_num,
                                const struct iovec *in_sg, size_t in_num,
                                unsigned *head)
{
    unsigned avail_idx;
    vring_avail_t *avail = svq->vring.avail;
    bool ok;
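    /* sgs is reused for the out and the in translations, so MAX() entries suffice */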
    g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num));

    *head = svq->free_head;

    /* We need some descriptors here */
    if (unlikely(!out_num && !in_num)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Guest provided element with no descriptors");
        return false;
    }

    ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
                                     false);
    if (unlikely(!ok)) {
        return false;
    }

    ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
    if (unlikely(!ok)) {
        return false;
    }

    /*
     * Put the entry in the available array, but do not update avail->idx until
     * the write barrier below makes the descriptor writes visible.
     */
    avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1);
    avail->ring[avail_idx] = cpu_to_le16(*head);
    svq->shadow_avail_idx++;

    /* Update the avail index after writing the descriptors */
    smp_wmb();
    avail->idx = cpu_to_le16(svq->shadow_avail_idx);

    return true;
}

static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
    /*
     * We need to expose the available array entries before checking the used
     * flags
     */
    smp_mb();
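    /* The device does not want kick notifications: it will poll the avail ring */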
    if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) {
        return;
    }

    event_notifier_set(&svq->hdev_kick);
}

/**
 * Add an element to an SVQ.
 *
 * The caller must check that there are enough slots for the new element. This
 * function takes ownership of the element: on any failure other than -ENOSPC,
 * the element is freed.
 *
 * Returns -EINVAL if the element is invalid, -ENOSPC if the device queue is full
 */
static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
                          size_t out_num, const struct iovec *in_sg,
                          size_t in_num, VirtQueueElement *elem)
{
    unsigned qemu_head;
    unsigned ndescs = in_num + out_num;
    bool ok;

    if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
        return -ENOSPC;
    }

    ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
    if (unlikely(!ok)) {
        g_free(elem);
        return -EINVAL;
    }

    svq->ring_id_maps[qemu_head] = elem;
    vhost_svq_kick(svq);
    return 0;
}

/* Convenience wrapper to add a guest's element to SVQ */
static int vhost_svq_add_element(VhostShadowVirtqueue *svq,
                                 VirtQueueElement *elem)
{
    return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg,
                         elem->in_num, elem);
}

/**
 * Forward available buffers.
 *
 * @svq: Shadow VirtQueue
 *
 * Note that this function does not guarantee that all of the guest's available
 * buffers are made available to the device in the SVQ avail ring. The guest
 * may have exposed a GPA / GIOVA contiguous buffer that is not contiguous in
 * QEMU's vaddr.
 *
 * If that happens, the guest's kick notifications will be disabled until the
 * device uses some buffers.
 */
static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
{
    /* Clear event notifier */
    event_notifier_test_and_clear(&svq->svq_kick);

    /* Forward to the device as many available buffers as possible */
    do {
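        /*
         * Disable further guest notifications while the VirtQueue is drained;
         * the trailing virtio_queue_empty() check closes the race with a
         * buffer made available after the last pop.
         */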
        virtio_queue_set_notification(svq->vq, false);

        while (true) {
            VirtQueueElement *elem;
            int r;

            if (svq->next_guest_avail_elem) {
                elem = g_steal_pointer(&svq->next_guest_avail_elem);
            } else {
                elem = virtqueue_pop(svq->vq, sizeof(*elem));
            }

            if (!elem) {
                break;
            }

            r = vhost_svq_add_element(svq, elem);
            if (unlikely(r != 0)) {
                if (r == -ENOSPC) {
                    /*
                     * This condition is possible since a contiguous buffer in
                     * GPA does not imply a contiguous buffer in qemu's VA
                     * scatter-gather segments. If that happens, the buffer
                     * exposed to the device needs to be a chain of descriptors
                     * at this moment.
                     *
                     * SVQ cannot hold more available buffers if we are here:
                     * queue the current guest descriptor and ignore kicks
                     * until some elements are used.
                     */
                    svq->next_guest_avail_elem = elem;
                }

                /* VQ is full or broken, just return and ignore kicks */
                return;
            }
        }

        virtio_queue_set_notification(svq->vq, true);
    } while (!virtio_queue_empty(svq->vq));
}

/**
 * Handle guest's kick.
 *
 * @n: guest kick event notifier, the one that guest set to notify svq.
 */
static void vhost_handle_guest_kick_notifier(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
    event_notifier_test_and_clear(n);
    vhost_handle_guest_kick(svq);
}

static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
{
    uint16_t *used_idx = &svq->vring.used->idx;
    if (svq->last_used_idx != svq->shadow_used_idx) {
        return true;
    }

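    /* Refresh the shadow copy with a single volatile read of the device's used index */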
    svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx);

    return svq->last_used_idx != svq->shadow_used_idx;
}

/**
 * Enable vhost device calls after disabling them.
 *
 * @svq: The svq
 *
 * It returns false if there are pending used buffers from the vhost device,
 * avoiding the possible races between SVQ checking for more work and enabling
 * callbacks. True if SVQ used vring has no more pending buffers.
 */
static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
{
    svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
    /* Make sure the flag is written before the read of used_idx */
    smp_mb();
    return !vhost_svq_more_used(svq);
}

static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
{
    svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
}

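/**
 * Get the index of the last descriptor of a chain
 *
 * @svq: Shadow virtqueue
 * @num: Number of descriptors in the chain
 * @i: Index of the head descriptor of the chain
 */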
static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
                                             uint16_t num, uint16_t i)
{
    for (uint16_t j = 0; j < (num - 1); ++j) {
        i = le16_to_cpu(svq->desc_next[i]);
    }

    return i;
}

static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                           uint32_t *len)
{
    const vring_used_t *used = svq->vring.used;
    vring_used_elem_t used_elem;
    uint16_t last_used, last_used_chain, num;

    if (!vhost_svq_more_used(svq)) {
        return NULL;
    }

    /* Only get used array entries after they have been exposed by dev */
    smp_rmb();
    last_used = svq->last_used_idx & (svq->vring.num - 1);
    used_elem.id = le32_to_cpu(used->ring[last_used].id);
    used_elem.len = le32_to_cpu(used->ring[last_used].len);

    svq->last_used_idx++;
    if (unlikely(used_elem.id >= svq->vring.num)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
                      svq->vdev->name, used_elem.id);
        return NULL;
    }

    if (unlikely(!svq->ring_id_maps[used_elem.id])) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "Device %s says index %u is used, but it was not available",
            svq->vdev->name, used_elem.id);
        return NULL;
    }

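    /*
     * Put the whole descriptor chain back at the head of the free list: its
     * tail now points to the previous free head.
     */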
    num = svq->ring_id_maps[used_elem.id]->in_num +
          svq->ring_id_maps[used_elem.id]->out_num;
    last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
    svq->desc_next[last_used_chain] = svq->free_head;
    svq->free_head = used_elem.id;

    *len = used_elem.len;
    return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
}

static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                            bool check_for_avail_queue)
{
    VirtQueue *vq = svq->vq;

    /* Forward as many used buffers as possible. */
    do {
        unsigned i = 0;

        vhost_svq_disable_notification(svq);
        while (true) {
            uint32_t len;
            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
            if (!elem) {
                break;
            }

            if (unlikely(i >= svq->vring.num)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                         "More than %u used buffers obtained in a %u size SVQ",
                         i, svq->vring.num);
                virtqueue_fill(vq, elem, len, i);
                virtqueue_flush(vq, i);
                return;
            }
            virtqueue_fill(vq, elem, len, i++);
        }

        virtqueue_flush(vq, i);
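        /* Signal the guest's call notifier now that used buffers are flushed */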
        event_notifier_set(&svq->svq_call);

        if (check_for_avail_queue && svq->next_guest_avail_elem) {
            /*
             * Avail ring was full when vhost_svq_flush was called, so it's a
             * good moment to make more descriptors available if possible.
             */
            vhost_handle_guest_kick(svq);
        }
    } while (!vhost_svq_enable_notification(svq));
}

/**
 * Forward used buffers.
 *
 * @n: hdev call event notifier, the one that the device set to notify SVQ.
 *
 * Note that we are not making any buffers available in the loop, so there is
 * no way that it runs more than virtqueue-size times.
 */
static void vhost_svq_handle_call(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
                                             hdev_call);
    event_notifier_test_and_clear(n);
    vhost_svq_flush(svq, true);
}

/**
 * Set the call notifier for the SVQ to call the guest
 *
 * @svq: Shadow virtqueue
 * @call_fd: call notifier
 *
 * Called on BQL context.
 */
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
{
    if (call_fd == VHOST_FILE_UNBIND) {
        /*
         * Make event_notifier_set fail if it is called while handling a
         * device call.
         *
         * SVQ still needs device notifications, since it needs to keep
         * forwarding used buffers even with the unbind.
         */
        memset(&svq->svq_call, 0, sizeof(svq->svq_call));
    } else {
        event_notifier_init_fd(&svq->svq_call, call_fd);
    }
}

/**
 * Get the shadow vq vring address.
 * @svq: Shadow virtqueue
 * @addr: Destination to store address
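 *
 * Note that the returned addresses are QEMU virtual addresses of the shadow
 * vring, not guest addresses.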
 */
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                              struct vhost_vring_addr *addr)
{
    addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
    addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
    addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}

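/**
 * Get the size of the driver area (descriptor table plus avail ring), rounded
 * up to the host page size so it can be mapped to the vhost device.
 *
 * @svq: Shadow virtqueue
 */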
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
{
    size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
    size_t avail_size = offsetof(vring_avail_t, ring) +
                                             sizeof(uint16_t) * svq->vring.num;

    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
}

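/**
 * Get the size of the device area (used ring), rounded up to the host page
 * size.
 *
 * @svq: Shadow virtqueue
 */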
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
    size_t used_size = offsetof(vring_used_t, ring) +
                                    sizeof(vring_used_elem_t) * svq->vring.num;
    return ROUND_UP(used_size, qemu_real_host_page_size());
}

/**
 * Set a new file descriptor for the guest to kick the SVQ and notify for avail
 *
 * @svq: The svq
 * @svq_kick_fd: The svq kick fd
 *
 * Note that the SVQ will never close the old file descriptor.
 */
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
{
    EventNotifier *svq_kick = &svq->svq_kick;
    bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
    bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;

    if (poll_stop) {
        event_notifier_set_handler(svq_kick, NULL);
    }

    /*
     * event_notifier_set_handler already checks for guest's notifications if
     * they arrive at the new file descriptor in the switch, so there is no
     * need to explicitly check for them.
     */
    if (poll_start) {
        event_notifier_init_fd(svq_kick, svq_kick_fd);
        event_notifier_set(svq_kick);
        event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
    }
}

/**
 * Start the shadow virtqueue operation.
 *
 * @svq: Shadow Virtqueue
 * @vdev: VirtIO device
 * @vq: Virtqueue to shadow
 */
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq)
{
    size_t desc_size, driver_size, device_size;

    svq->next_guest_avail_elem = NULL;
    svq->shadow_avail_idx = 0;
    svq->shadow_used_idx = 0;
    svq->last_used_idx = 0;
    svq->vdev = vdev;
    svq->vq = vq;

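    /*
     * The driver area (descriptor table + avail ring) and the device area
     * (used ring) are allocated as separate page-aligned blocks so that each
     * can be mapped to the vhost device on its own.
     */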
    svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
    driver_size = vhost_svq_driver_area_size(svq);
    device_size = vhost_svq_device_area_size(svq);
    svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
    desc_size = sizeof(vring_desc_t) * svq->vring.num;
    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
    memset(svq->vring.desc, 0, driver_size);
    svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
    memset(svq->vring.used, 0, device_size);
    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
    svq->desc_next = g_new0(uint16_t, svq->vring.num);
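    /* Build the initial free list: each descriptor points to the next one */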
    for (unsigned i = 0; i < svq->vring.num - 1; i++) {
        svq->desc_next[i] = cpu_to_le16(i + 1);
    }
}

/**
 * Stop the shadow virtqueue operation.
 * @svq: Shadow Virtqueue
 */
void vhost_svq_stop(VhostShadowVirtqueue *svq)
{
    event_notifier_set_handler(&svq->svq_kick, NULL);
    g_autofree VirtQueueElement *next_avail_elem = NULL;

    if (!svq->vq) {
        return;
    }

    /* Send all pending used descriptors to guest */
    vhost_svq_flush(svq, false);

    for (unsigned i = 0; i < svq->vring.num; ++i) {
        g_autofree VirtQueueElement *elem = NULL;
        elem = g_steal_pointer(&svq->ring_id_maps[i]);
        if (elem) {
            virtqueue_detach_element(svq->vq, elem, 0);
        }
    }

    next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
    if (next_avail_elem) {
        virtqueue_detach_element(svq->vq, next_avail_elem, 0);
    }
    svq->vq = NULL;
    g_free(svq->desc_next);
    g_free(svq->ring_id_maps);
    qemu_vfree(svq->vring.desc);
    qemu_vfree(svq->vring.used);
}

/**
 * Creates a vhost shadow virtqueue, and instructs the vhost device to use the
 * shadow methods and file descriptors.
 *
 * @iova_tree: Tree to perform descriptor translations
 *
 * Returns the new virtqueue or NULL.
 *
 * In case of error, the reason is reported through error_report.
 */
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
{
    g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_report("Couldn't create kick event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_report("Couldn't create call event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_call;
    }

    event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
    svq->iova_tree = iova_tree;
    return g_steal_pointer(&svq);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return NULL;
}

/**
 * Free the resources of the shadow virtqueue.
 *
 * @pvq: gpointer to SVQ so it can be used by autofree functions.
 */
void vhost_svq_free(gpointer pvq)
{
    VhostShadowVirtqueue *vq = pvq;
    vhost_svq_stop(vq);
    event_notifier_cleanup(&vq->hdev_kick);
    event_notifier_set_handler(&vq->hdev_call, NULL);
    event_notifier_cleanup(&vq->hdev_call);
    g_free(vq);
}
695