#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>

#include "kvm/barrier.h"

#include "kvm/kvm.h"
#include "kvm/virtio.h"

/*
 * Publish a completed buffer to the guest: fill the next slot of the used
 * ring with the chain's head index and the number of bytes written, then
 * advance the used index.
 */
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem	= &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
	used_elem->id	= head;
	used_elem->len	= len;

	/*
	 * Use wmb to ensure that the used element has been updated with
	 * head and len. We need a wmb here since we can't advance idx
	 * unless we're ready to pass the used element to the guest.
	 */
	wmb();
	queue->vring.used->idx++;

	/*
	 * Use wmb to ensure the used idx has been increased before we
	 * signal the guest. Without a wmb here the guest may ignore the
	 * queue since it won't see an updated idx.
	 */
	wmb();

	return used_elem;
}
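/*
 * Illustrative sketch, not part of the original file: the wmb() calls above
 * pair with a mirror-image read ordering on the guest side, roughly as
 * below. example__guest_pop_used() and the caller-supplied last_used
 * cursor are hypothetical; a real guest uses its virtio driver's own
 * bookkeeping.
 */
#if 0
static struct vring_used_elem *example__guest_pop_used(struct vring *vring,
						       u16 *last_used)
{
	/* Nothing new published by the host? */
	if (*last_used == vring->used->idx)
		return NULL;

	/* Pair with the host-side wmb(): read the element only after idx. */
	rmb();

	return &vring->used->ring[(*last_used)++ % vring->num];
}
#endif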
/*
 * Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off the end of the descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	return next;
}

/*
 * Walk the descriptor chain starting at head, translating each guest
 * address and filling iov[]. Readable (driver-to-device) buffers are
 * counted in *out, writable ones in *in. An indirect descriptor table is
 * followed transparently.
 */
u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 idx;
	u16 max;

	idx = head;
	*out = *in = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
		/* Switch to the indirect table this descriptor points at. */
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
	}

	do {
		/* Grab the current descriptor and translate its address. */
		iov[*out + *in].iov_len = desc[idx].len;
		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc[idx].addr);
		/* If this is an input descriptor, increment that count. */
		if (desc[idx].flags & VRING_DESC_F_WRITE)
			(*in)++;
		else
			(*out)++;
	} while ((idx = next_desc(desc, idx, max)) != max);

	return head;
}

u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	u16 head;

	head = virt_queue__pop(vq);

	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
}

/* in and out are relative to guest */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		desc = virt_queue__get_desc(queue, idx);
		if (desc->flags & VRING_DESC_F_WRITE) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm,
								  desc->addr);
			in_iov[*in].iov_len = desc->len;
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm,
								    desc->addr);
			out_iov[*out].iov_len = desc->len;
			(*out)++;
		}
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}

/*
 * With MSI-X enabled, the first 4 bytes of the device-specific region hold
 * the MSI-X vector registers, so config space accesses beyond them are
 * shifted down by 4 bytes.
 */
int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
	if (msix) {
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}

/*
 * Interrupt suppression via used_event (VIRTIO_RING_F_EVENT_IDX): only
 * signal the guest if it asked to be notified somewhere in the range of
 * used entries published since the last signal.
 */
bool virtio_queue__should_signal(struct virt_queue *vq)
{
	u16 old_idx, new_idx, event_idx;

	old_idx		= vq->last_used_signalled;
	new_idx		= vq->vring.used->idx;
	event_idx	= vring_used_event(&vq->vring);

	if (vring_need_event(event_idx, new_idx, old_idx)) {
		vq->last_used_signalled = new_idx;
		return true;
	}

	return false;
}
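/*
 * Illustrative sketch, not part of the original file: a typical device-side
 * service loop built from the helpers above. example__process() and
 * example__signal_guest() are hypothetical stand-ins for the device's
 * payload handling and interrupt injection; virt_queue__available() is the
 * availability check from kvm/virtio.h, and the iov[] bound is arbitrary.
 */
#if 0
static void example__service_queue(struct kvm *kvm, struct virt_queue *vq)
{
	struct iovec iov[256];	/* hypothetical per-queue bound */
	u16 out, in, head;
	u32 len;

	while (virt_queue__available(vq)) {
		/* Pop a chain and map it into iov[] ... */
		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
		/* ... let the device consume/produce the data ... */
		len = example__process(iov, out, in);
		/* ... then hand the buffer back to the guest. */
		virt_queue__set_used_elem(vq, head, len);
	}

	if (virtio_queue__should_signal(vq))
		example__signal_guest(kvm, vq);
}
#endif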