#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

#include "kvm/guest_compat.h"
#include "kvm/barrier.h"
#include "kvm/virtio.h"
#include "kvm/virtio-pci.h"
#include "kvm/virtio-mmio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem = &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
	used_elem->id = head;
	used_elem->len = len;

	/*
	 * Use wmb to ensure the used element carries head and len before we
	 * advance idx: the guest may read the element as soon as it sees the
	 * new index.
	 */
	wmb();
	queue->vring.used->idx++;

	/*
	 * Use wmb to ensure the increased idx is visible before we signal the
	 * guest. Without it the guest may ignore the interrupt because it
	 * doesn't see an updated idx yet.
	 */
	wmb();

	return used_elem;
}

/*
 * Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain, or max if we're at
 * the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off end of descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	return next;
}

u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[],
			     u16 *out, u16 *in, u16 head, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 idx;
	u16 max;

	idx = head;
	*out = *in = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
		/* The head points at a separate table of indirect descriptors. */
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
	}

	do {
		/* Translate this descriptor into an iovec entry. */
		iov[*out + *in].iov_len = desc[idx].len;
		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc[idx].addr);
		/* If this is an input (guest-writable) descriptor, increment that count. */
		if (desc[idx].flags & VRING_DESC_F_WRITE)
			(*in)++;
		else
			(*out)++;
	} while ((idx = next_desc(desc, idx, max)) != max);

	return head;
}

u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	u16 head;

	head = virt_queue__pop(vq);

	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
}
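/*
 * A minimal sketch of how a device model consumes buffers with the helpers
 * above, assuming a hypothetical process_iov() that does the device-specific
 * work and returns the number of bytes written, a hypothetical queue index
 * vq_id, and an iov[] array sized for the queue:
 *
 *	while (virt_queue__available(vq)) {
 *		u16 out, in;
 *		u16 head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
 *		u32 len  = process_iov(iov, out, in);
 *
 *		virt_queue__set_used_elem(vq, head, len);
 *		if (virtio_queue__should_signal(vq))
 *			vdev->ops->signal_vq(kvm, vdev, vq_id);
 *	}
 */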
/* in and out are relative to guest */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		desc = virt_queue__get_desc(queue, idx);
		if (desc->flags & VRING_DESC_F_WRITE) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm,
								  desc->addr);
			in_iov[*in].iov_len = desc->len;
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm,
								    desc->addr);
			out_iov[*out].iov_len = desc->len;
			(*out)++;
		}
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}

int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
	if (msix) {
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}
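/*
 * With VIRTIO_RING_F_EVENT_IDX negotiated, the guest publishes in
 * vring_used_event() the used index after which it wants an interrupt, and
 * vring_need_event() (from linux/virtio_ring.h) reduces to
 *
 *	(u16)(new_idx - event_idx - 1) < (u16)(new_idx - old_idx)
 *
 * i.e. signal only if event_idx falls within the batch of entries used since
 * the last signal. Worked example: old_idx = 5, new_idx = 8, event_idx = 6
 * gives (8 - 6 - 1) = 1 < (8 - 5) = 3, so we signal; event_idx = 9 gives
 * 65534 < 3 (mod 2^16), so we stay quiet.
 */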
bool virtio_queue__should_signal(struct virt_queue *vq)
{
	u16 old_idx, new_idx, event_idx;

	old_idx = vq->last_used_signalled;
	new_idx = vq->vring.used->idx;
	event_idx = vring_used_event(&vq->vring);

	if (vring_need_event(event_idx, new_idx, old_idx)) {
		vq->last_used_signalled = new_idx;
		return true;
	}

	return false;
}

int virtio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		struct virtio_ops *ops, enum virtio_trans trans,
		int device_id, int subsys_id, int class)
{
	void *virtio;

	switch (trans) {
	case VIRTIO_PCI:
		virtio = calloc(1, sizeof(struct virtio_pci));
		if (!virtio)
			return -ENOMEM;
		vdev->virtio = virtio;
		vdev->ops = ops;
		vdev->ops->signal_vq = virtio_pci__signal_vq;
		vdev->ops->signal_config = virtio_pci__signal_config;
		vdev->ops->init = virtio_pci__init;
		vdev->ops->exit = virtio_pci__exit;
		return vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
	case VIRTIO_MMIO:
		virtio = calloc(1, sizeof(struct virtio_mmio));
		if (!virtio)
			return -ENOMEM;
		vdev->virtio = virtio;
		vdev->ops = ops;
		vdev->ops->signal_vq = virtio_mmio_signal_vq;
		vdev->ops->signal_config = virtio_mmio_signal_config;
		vdev->ops->init = virtio_mmio_init;
		vdev->ops->exit = virtio_mmio_exit;
		return vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
	default:
		return -1;
	}
}

int virtio_compat_add_message(const char *device, const char *config)
{
	int len = 1024;
	int compat_id;
	char *title;
	char *desc;

	title = malloc(len);
	if (!title)
		return -ENOMEM;

	desc = malloc(len);
	if (!desc) {
		free(title);
		return -ENOMEM;
	}

	snprintf(title, len, "%s device was not detected.", device);
	snprintf(desc, len, "While you have requested a %s device, "
			    "the guest kernel did not initialize it.\n"
			    "\tPlease make sure that the guest kernel was "
			    "compiled with %s=y enabled in its .config.",
			    device, config);

	compat_id = compat__add_message(title, desc);

	free(desc);
	free(title);

	return compat_id;
}
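/*
 * A minimal sketch of registering a device through virtio_init(), assuming a
 * hypothetical device type my_dev, callback table my_ops, and example IDs;
 * a real device passes its virtio device ID and PCI subsystem/class values:
 *
 *	static struct my_dev mdev;
 *	static struct virtio_ops my_ops = {
 *		// device-specific callbacks
 *	};
 *
 *	int my_dev__init(struct kvm *kvm)
 *	{
 *		return virtio_init(kvm, &mdev, &mdev.vdev, &my_ops, VIRTIO_PCI,
 *				   MY_DEVICE_ID, MY_SUBSYS_ID, MY_PCI_CLASS);
 *	}
 */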