/*
 * vhost-vdpa.h
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H

#include <gmodule.h>

#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"

/*
 * ASID dedicated to mapping the guest's addresses.  If SVQ is disabled it
 * maps GPA to QEMU's IOVA.  If SVQ is enabled, the SVQ vrings are also
 * mapped here.
 */
#define VHOST_VDPA_GUEST_PA_ASID 0
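
/*
 * Illustrative sketch (cvq_group, device_fd and the target ASID value are
 * placeholders for this example): a virtqueue group can be moved out of the
 * guest PA ASID with the VHOST_VDPA_SET_GROUP_ASID ioctl, as is done for a
 * shadowed CVQ.
 *
 *     struct vhost_vring_state asid = {
 *         .index = cvq_group,  // virtqueue group to reassign
 *         .num = 1,            // target ASID != VHOST_VDPA_GUEST_PA_ASID
 *     };
 *     if (ioctl(device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid) < 0) {
 *         // the device may not support multiple address spaces
 *     }
 */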

typedef struct VhostVDPAHostNotifier {
    MemoryRegion mr;
    void *addr;
} VhostVDPAHostNotifier;

typedef enum SVQTransitionState {
    SVQ_TSTATE_DISABLING = -1,
    SVQ_TSTATE_DONE,
    SVQ_TSTATE_ENABLING
} SVQTransitionState;

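/*
 * Illustrative sketch (the "s" pointer is a placeholder for a
 * VhostVDPAShared, defined below): callers can gate work on a transition
 * still being in flight.
 *
 *     if (s->svq_switching != SVQ_TSTATE_DONE) {
 *         // an SVQ enable/disable switch is still in progress
 *     }
 */
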
/* Info shared by all vhost_vdpa device models */
typedef struct vhost_vdpa_shared {
    int device_fd;
    MemoryListener listener;
    struct vhost_vdpa_iova_range iova_range;
    QLIST_HEAD(, vdpa_iommu) iommu_list;

    /*
     * IOVA mapping used by the Shadow Virtqueue.
     *
     * It is shared among all ASIDs for simplicity, whether CVQ shares the
     * ASID with the guest or not:
     * - The memory listener needs access to the guest's memory addresses
     *   allocated in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs, so
     *   collisions between them are not a concern.  The guest's translations
     *   are still validated with virtio virtqueue_pop, so there is no risk
     *   of the guest accessing memory it shouldn't.
     *
     * Allocating an IOVA tree per ASID is doable, but it complicates the
     * code and is not worth it for the moment.  A lookup sketch follows
     * this struct definition.
     */
    VhostIOVATree *iova_tree;

    /* Copy of backend features */
    uint64_t backend_cap;

    bool iotlb_batch_begin_sent;

    /*
     * The memory listener has been registered, so DMA maps have been sent to
     * the device.
     */
    bool listener_registered;

    /* vDPA must use shadow addresses (not GPA) as IOTLB keys for data queues */
    bool shadow_data;

    /* Whether an SVQ switch is in progress or has already completed */
    SVQTransitionState svq_switching;
} VhostVDPAShared;
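
/*
 * Illustrative sketch (vaddr and len are placeholders; DMAMap comes from
 * "qemu/iova-tree.h" via the vhost-iova-tree header included above):
 * looking up the IOVA of a host address already mapped in the shared tree,
 * as the shadow virtqueue does when translating descriptor addresses.
 *
 *     const DMAMap needle = {
 *         .translated_addr = (hwaddr)(uintptr_t)vaddr,
 *         .size = len - 1,
 *     };
 *     const DMAMap *map = vhost_iova_tree_find_iova(s->iova_tree, &needle);
 *     if (map) {
 *         hwaddr off = needle.translated_addr - map->translated_addr;
 *         hwaddr iova = map->iova + off;
 *     }
 */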

typedef struct vhost_vdpa {
    int index;
    uint32_t address_space_id;
    uint64_t acked_features;
    bool shadow_vqs_enabled;
    /* Device suspended successfully */
    bool suspended;
    VhostVDPAShared *shared;
    GPtrArray *shadow_vqs;
    const VhostShadowVirtqueueOps *shadow_vq_ops;
    void *shadow_vq_ops_opaque;
    struct vhost_dev *dev;
    Error *migration_blocker;
    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
    IOMMUNotifier n;
} VhostVDPA;

int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
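
/*
 * Illustrative sketch (the device path and error handling are assumptions
 * for this example): querying the device's usable IOVA window before
 * creating any mappings.  vhost_vdpa_get_iova_range() returns 0 on success.
 *
 *     struct vhost_vdpa_iova_range range;
 *     int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *     if (fd >= 0 && vhost_vdpa_get_iova_range(fd, &range) == 0) {
 *         // DMA mappings must fall within [range.first, range.last]
 *     }
 */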

int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly);
int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                         hwaddr size);
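
/*
 * Illustrative sketch (iova, size and buf are placeholders): a map/unmap
 * pair in the default guest PA ASID.  Both calls return 0 on success and a
 * negative error code on failure.
 *
 *     if (vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, size,
 *                            buf, false) == 0) {
 *         // ... the device may now DMA to buf through iova ...
 *         vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, size);
 *     }
 */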

typedef struct vdpa_iommu {
    VhostVDPAShared *dev_shared;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vdpa_iommu) iommu_next;
} VDPAIOMMUState;


#endif