#ifndef KVM__VIRTIO_H
#define KVM__VIRTIO_H

#include <endian.h>

#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/virtio_config.h>
#include <sys/uio.h>

#include "kvm/barrier.h"
#include "kvm/kvm.h"

#define VIRTIO_IRQ_LOW		0
#define VIRTIO_IRQ_HIGH		1

#define VIRTIO_PCI_O_CONFIG	0
#define VIRTIO_PCI_O_MSIX	1

#define VIRTIO_ENDIAN_LE	(1 << 0)
#define VIRTIO_ENDIAN_BE	(1 << 1)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define VIRTIO_ENDIAN_HOST	VIRTIO_ENDIAN_LE
#else
#define VIRTIO_ENDIAN_HOST	VIRTIO_ENDIAN_BE
#endif

/* Reserved status bits */
#define VIRTIO_CONFIG_S_MASK \
	(VIRTIO_CONFIG_S_ACKNOWLEDGE |	\
	 VIRTIO_CONFIG_S_DRIVER |	\
	 VIRTIO_CONFIG_S_DRIVER_OK |	\
	 VIRTIO_CONFIG_S_FEATURES_OK |	\
	 VIRTIO_CONFIG_S_NEEDS_RESET |	\
	 VIRTIO_CONFIG_S_FAILED)

/* Kvmtool status bits */
/* Start the device */
#define VIRTIO__STATUS_START	(1 << 8)
/* Stop the device */
#define VIRTIO__STATUS_STOP	(1 << 9)
/* Initialize the config */
#define VIRTIO__STATUS_CONFIG	(1 << 10)

/*
 * Guest-supplied location of a virtqueue's rings. Exactly one of the two
 * union layouts is meaningful, selected by @legacy: the legacy (pre-1.0)
 * page-frame description, or the modern split 64-bit addresses given as
 * lo/hi 32-bit halves for each of the three ring areas.
 */
struct vring_addr {
	bool legacy;
	union {
		/* Legacy description: ring location as a page frame number */
		struct {
			u32 pfn;	/* page frame number of the ring */
			u32 align;	/* ring alignment */
			u32 pgsize;	/* guest page size used to scale pfn */
		};
		/* Modern description: separate desc/avail/used addresses */
		struct {
			u32 desc_lo;
			u32 desc_hi;
			u32 avail_lo;
			u32 avail_hi;
			u32 used_lo;
			u32 used_hi;
		};
	};
};

/*
 * Host-side state for one virtqueue: the mapped vring, the guest-provided
 * ring address, our private consumption cursor, and vhost plumbing.
 */
struct virt_queue {
	struct vring	vring;		/* host mapping of the guest's ring */
	struct vring_addr vring_addr;	/* where the guest placed the ring */
	/* The last_avail_idx field is an index to ->ring of struct vring_avail.
	   It's where we assume the next request index is at. */
	u16		last_avail_idx;
	u16		last_used_signalled;	/* used idx at last guest signal */
	u16		endian;		/* VIRTIO_ENDIAN_LE or VIRTIO_ENDIAN_BE */
	bool		use_event_idx;	/* publish avail event idx (see virt_queue__available) */
	bool		enabled;
	struct virtio_device *vdev;	/* owning device */

	/* vhost IRQ handling */
	int		gsi;
	int		irqfd;
	int		index;		/* queue index within the device */
};

/*
 * The default policy is not to cope with the guest endianness.
 * It also helps not breaking archs that do not care about supporting
 * such a configuration.
 */
#ifndef VIRTIO_RING_ENDIAN
#define VIRTIO_RING_ENDIAN	VIRTIO_ENDIAN_HOST
#endif

#if VIRTIO_RING_ENDIAN != VIRTIO_ENDIAN_HOST

/* Convert a 16-bit value from guest ring byte order to host byte order. */
static inline u16 virtio_guest_to_host_u16(u16 endian, u16 val)
{
	if (endian == VIRTIO_ENDIAN_LE)
		return le16toh(val);

	return be16toh(val);
}

/* Convert a 16-bit value from host byte order to guest ring byte order. */
static inline u16 virtio_host_to_guest_u16(u16 endian, u16 val)
{
	if (endian == VIRTIO_ENDIAN_LE)
		return htole16(val);

	return htobe16(val);
}

/* Convert a 32-bit value from guest ring byte order to host byte order. */
static inline u32 virtio_guest_to_host_u32(u16 endian, u32 val)
{
	if (endian == VIRTIO_ENDIAN_LE)
		return le32toh(val);

	return be32toh(val);
}

/* Convert a 32-bit value from host byte order to guest ring byte order. */
static inline u32 virtio_host_to_guest_u32(u16 endian, u32 val)
{
	if (endian == VIRTIO_ENDIAN_LE)
		return htole32(val);

	return htobe32(val);
}

/* Convert a 64-bit value from guest ring byte order to host byte order. */
static inline u64 virtio_guest_to_host_u64(u16 endian, u64 val)
{
	if (endian == VIRTIO_ENDIAN_LE)
		return le64toh(val);

	return be64toh(val);
}

/* Convert a 64-bit value from host byte order to guest ring byte order. */
static inline u64 virtio_host_to_guest_u64(u16 endian, u64 val)
{
	if (endian == VIRTIO_ENDIAN_LE)
		return htole64(val);

	return htobe64(val);
}

#else

/* Host and guest byte order always match here: no conversion needed. */
static inline u16 virtio_guest_to_host_u16(u16 endian, u16 val)
{
	return val;
}
/* Host and guest byte order always match here: no conversion needed. */
static inline u16 virtio_host_to_guest_u16(u16 endian, u16 val)
{
	return val;
}
/* Host and guest byte order always match here: no conversion needed. */
static inline u32 virtio_guest_to_host_u32(u16 endian, u32 val)
{
	return val;
}
/* Host and guest byte order always match here: no conversion needed. */
static inline u32 virtio_host_to_guest_u32(u16 endian, u32 val)
{
	return val;
}
/* Host and guest byte order always match here: no conversion needed. */
static inline u64 virtio_guest_to_host_u64(u16 endian, u64 val)
{
	return val;
}
/* Host and guest byte order always match here: no conversion needed. */
static inline u64 virtio_host_to_guest_u64(u16 endian, u64 val)
{
	return val;
}

#endif

/*
 * Consume the next entry from the avail ring: return the descriptor head
 * index (converted to host endianness) and advance last_avail_idx. Callers
 * must have seen virt_queue__available() return true first.
 */
static inline u16 virt_queue__pop(struct virt_queue *queue)
{
	__u16 guest_idx;

	/*
	 * The guest updates the avail index after writing the ring entry.
	 * Ensure that we read the updated entry once virt_queue__available()
	 * observes the new index.
	 */
	rmb();

	/*
	 * last_avail_idx is a free-running 16-bit counter; the modulo maps it
	 * into the ring. The post-increment advances our cursor.
	 */
	guest_idx = queue->vring.avail->ring[queue->last_avail_idx++ % queue->vring.num];

	/* Ring contents are stored in guest byte order. */
	return virtio_guest_to_host_u16(queue->endian, guest_idx);
}

/* Return a pointer to descriptor @desc_ndx in the queue's descriptor table. */
static inline struct vring_desc *virt_queue__get_desc(struct virt_queue *queue, u16 desc_ndx)
{
	struct vring_desc *table = queue->vring.desc;

	return table + desc_ndx;
}

/*
 * Return true if the guest has placed buffers on the avail ring that we have
 * not yet popped. When event-index suppression is negotiated, this also
 * publishes our current position so the guest knows whether to notify us.
 */
static inline bool virt_queue__available(struct virt_queue *vq)
{
	/* Convert our cursor to guest byte order for direct ring comparison. */
	u16 last_avail_idx = virtio_host_to_guest_u16(vq->endian, vq->last_avail_idx);

	/* The guest may not have configured this queue yet. */
	if (!vq->vring.avail)
		return 0;

	if (vq->use_event_idx) {
		/* Tell the guest which avail index we have consumed up to. */
		vring_avail_event(&vq->vring) = last_avail_idx;
		/*
		 * After the driver writes a new avail index, it reads the event
		 * index to see if we need any notification. Ensure that it
		 * reads the updated index, or else we'll miss the notification.
		 */
		mb();
	}

	/* Both sides of the comparison are in guest byte order. */
	return vq->vring.avail->idx != last_avail_idx;
}

void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump);
struct vring_used_elem *virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head, u32 len, u16 offset);
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);

bool virtio_queue__should_signal(struct virt_queue *vq);
u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[],
			u16 *out, u16 *in, struct kvm *kvm);
u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[],
			     u16 *out, u16 *in, u16 head, struct kvm *kvm);
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out);
int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off);

/* Transport over which a virtio device is exposed to the guest. */
enum virtio_trans {
	VIRTIO_PCI,		/* modern (1.0+) virtio-pci */
	VIRTIO_PCI_LEGACY,	/* legacy (0.9.x) virtio-pci */
	VIRTIO_MMIO,		/* modern virtio-mmio */
	VIRTIO_MMIO_LEGACY,	/* legacy virtio-mmio */
};

/* Per-device state shared by all virtio transports. */
struct virtio_device {
	bool			legacy;		/* legacy (pre-1.0) transport in use */
	bool			use_vhost;	/* queues handled by vhost in-kernel */
	void			*virtio;	/* transport-private data */
	struct virtio_ops	*ops;		/* device callbacks (see below) */
	u16			endian;		/* VIRTIO_ENDIAN_LE or VIRTIO_ENDIAN_BE */
	u64			features;	/* negotiated feature bits */
	u32			status;		/* device status register contents */
};

/*
 * Callbacks a virtio device type implements. @dev is the device-specific
 * object each backend registered at init time.
 */
struct virtio_ops {
	/* Return a pointer to the device config space. */
	u8 *(*get_config)(struct kvm *kvm, void *dev);
	/* Size in bytes of the device config space. */
	size_t (*get_config_size)(struct kvm *kvm, void *dev);
	/* Feature bits the host offers to the guest. */
	u64 (*get_host_features)(struct kvm *kvm, void *dev);
	/* Number of virtqueues this device exposes. */
	unsigned int (*get_vq_count)(struct kvm *kvm, void *dev);
	/* Set up / tear down virtqueue @vq. */
	int (*init_vq)(struct kvm *kvm, void *dev, u32 vq);
	void (*exit_vq)(struct kvm *kvm, void *dev, u32 vq);
	/* Guest kicked virtqueue @vq: process pending buffers. */
	int (*notify_vq)(struct kvm *kvm, void *dev, u32 vq);
	/* Look up the virt_queue object for @vq. */
	struct virt_queue *(*get_vq)(struct kvm *kvm, void *dev, u32 vq);
	/* Query/set the ring size of @vq. */
	int (*get_size_vq)(struct kvm *kvm, void *dev, u32 vq);
	int (*set_size_vq)(struct kvm *kvm, void *dev, u32 vq, int size);
	/* Interrupt routing for @vq (GSI number / eventfd). */
	void (*notify_vq_gsi)(struct kvm *kvm, void *dev, u32 vq, u32 gsi);
	void (*notify_vq_eventfd)(struct kvm *kvm, void *dev, u32 vq, u32 efd);
	/* Inject a queue or config-change interrupt into the guest. */
	int (*signal_vq)(struct kvm *kvm, struct virtio_device *vdev, u32 queueid);
	int (*signal_config)(struct kvm *kvm, struct virtio_device *vdev);
	/* Device status register was written (includes kvmtool status bits). */
	void (*notify_status)(struct kvm *kvm, void *dev, u32 status);
	/* Transport-level device lifetime. */
	int (*init)(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		    int device_id, int subsys_id, int class);
	int (*exit)(struct kvm *kvm, struct virtio_device *vdev);
	int (*reset)(struct kvm *kvm, struct virtio_device *vdev);
};

int __must_check virtio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
			     struct virtio_ops *ops, enum virtio_trans trans,
			     int device_id, int subsys_id, int class);
void virtio_exit(struct kvm *kvm, struct virtio_device *vdev);
int virtio_compat_add_message(const char *device, const char *config);
const char *virtio_trans_name(enum virtio_trans trans);
void virtio_init_device_vq(struct kvm *kvm, struct virtio_device *vdev,
			   struct virt_queue *vq, size_t nr_descs);
void virtio_exit_vq(struct kvm *kvm, struct virtio_device *vdev, void *dev,
		    int num);
bool virtio_access_config(struct kvm *kvm, struct virtio_device *vdev, void *dev,
			  unsigned long offset, void *data, size_t size,
			  bool is_write);
void virtio_set_guest_features(struct kvm *kvm, struct virtio_device *vdev,
			       void *dev, u64 features);
void virtio_notify_status(struct kvm *kvm, struct virtio_device *vdev,
			  void *dev, u8 status);
void virtio_vhost_init(struct kvm *kvm, int vhost_fd);
void virtio_vhost_set_vring(struct kvm *kvm, int vhost_fd, u32 index,
			    struct virt_queue *queue);
void virtio_vhost_set_vring_kick(struct kvm *kvm, int vhost_fd,
				 u32 index, int event_fd);
void virtio_vhost_set_vring_irqfd(struct kvm *kvm, u32 gsi,
				  struct virt_queue *queue);
void virtio_vhost_reset_vring(struct kvm *kvm, int vhost_fd, u32 index,
			      struct virt_queue *queue);
int virtio_vhost_set_features(int vhost_fd, u64 features);

int virtio_transport_parser(const struct option *opt, const char *arg, int unset);

#endif /* KVM__VIRTIO_H */