#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>

#include "kvm/barrier.h"

#include "kvm/kvm.h"
#include "kvm/virtio.h"

/*
 * Publish a completed buffer (head, len) in the used ring and advance the
 * used index so the guest can see it.
 */
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem	= &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
	used_elem->id	= head;
	used_elem->len	= len;

	/*
	 * Use a write barrier to ensure the used element has been written
	 * with head and len before idx is advanced; we must not pass the
	 * element to the guest until it is complete.
	 */
	wmb();
	queue->vring.used->idx++;

	/*
	 * Use a write barrier to ensure the increased used idx is visible
	 * before we signal the guest. Without it the guest may ignore the
	 * notification, since it won't see the updated idx.
	 */
	wmb();

	return used_elem;
}

/*
 * Walk the descriptor chain of the next available buffer and fill iov[].
 * *out counts guest-readable (device-input) descriptors, *in counts
 * guest-writable ones. Returns the head descriptor index.
 */
u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;

	do {
		desc				= virt_queue__get_desc(queue, idx);
		iov[*out + *in].iov_base	= guest_flat_to_host(kvm, desc->addr);
		iov[*out + *in].iov_len		= desc->len;
		if (desc->flags & VRING_DESC_F_WRITE)
			(*in)++;
		else
			(*out)++;
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}

/*
 * Like virt_queue__get_iov(), but splits the chain into separate iovecs:
 * in and out are relative to the guest (in = guest-writable descriptors,
 * out = guest-readable descriptors).
 */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	u16 head, idx;
	struct vring_desc *desc;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		desc = virt_queue__get_desc(queue, idx);
		if (desc->flags & VRING_DESC_F_WRITE) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm,
								  desc->addr);
			in_iov[*in].iov_len = desc->len;
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm,
								    desc->addr);
			out_iov[*out].iov_len = desc->len;
			(*out)++;
		}
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}

/*
 * Raise the queue's interrupt unless the guest asked for notification
 * suppression via VRING_AVAIL_F_NO_INTERRUPT.
 */
void virt_queue__trigger_irq(struct virt_queue *vq, int irq, u8 *isr, struct kvm *kvm)
{
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
		return;

	if (*isr == VIRTIO_IRQ_LOW) {
		*isr = VIRTIO_IRQ_HIGH;
		kvm__irq_line(kvm, irq, VIRTIO_IRQ_HIGH);
	}
}

/*
 * Map a config-space offset onto the region it falls in (MSI-X vector,
 * high feature bits, or device-specific config), adjusting *config_off
 * for any leading words that were skipped.
 */
int virtio__get_dev_specific_field(int offset, bool msix, bool features_hi, u32 *config_off)
{
	if (msix) {
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	if (features_hi) {
		if (offset < 4)
			return VIRTIO_PCI_O_FEATURES;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}
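
/*
 * Usage sketch: a device model would typically combine the helpers above
 * in its queue handler roughly as follows -- drain the available ring,
 * service each buffer chain, publish the used element, then raise the
 * interrupt. process_request() stands in for a device-specific handler
 * and is assumed here, not provided by this file; virt_queue__available()
 * is assumed to be the avail-ring check declared in kvm/virtio.h.
 *
 *	while (virt_queue__available(vq)) {
 *		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
 *		len  = process_request(iov, out, in);
 *		virt_queue__set_used_elem(vq, head, len);
 *	}
 *	virt_queue__trigger_irq(vq, irq, isr, kvm);
 */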