139d6af07SAsias He #ifndef KVM__VIRTIO_H 239d6af07SAsias He #define KVM__VIRTIO_H 339d6af07SAsias He 439d6af07SAsias He #include <linux/virtio_ring.h> 5b8f43678SSasha Levin #include <linux/virtio_pci.h> 639d6af07SAsias He 73fdf659dSSasha Levin #include <linux/types.h> 839d6af07SAsias He #include <sys/uio.h> 939d6af07SAsias He 1039d6af07SAsias He #include "kvm/kvm.h" 1139d6af07SAsias He 127f5ffaf5SAsias He #define VIRTIO_IRQ_LOW 0 137f5ffaf5SAsias He #define VIRTIO_IRQ_HIGH 1 147f5ffaf5SAsias He 15c3a79fa1SSasha Levin #define VIRTIO_PCI_O_CONFIG 0 16c3a79fa1SSasha Levin #define VIRTIO_PCI_O_MSIX 1 17c3a79fa1SSasha Levin #define VIRTIO_PCI_O_FEATURES 2 18c3a79fa1SSasha Levin 1939d6af07SAsias He struct virt_queue { 2039d6af07SAsias He struct vring vring; 213fdf659dSSasha Levin u32 pfn; 2239d6af07SAsias He /* The last_avail_idx field is an index to ->ring of struct vring_avail. 2339d6af07SAsias He It's where we assume the next request index is at. */ 243fdf659dSSasha Levin u16 last_avail_idx; 2539d6af07SAsias He }; 2639d6af07SAsias He 273fdf659dSSasha Levin static inline u16 virt_queue__pop(struct virt_queue *queue) 2839d6af07SAsias He { 2939d6af07SAsias He return queue->vring.avail->ring[queue->last_avail_idx++ % queue->vring.num]; 3039d6af07SAsias He } 3139d6af07SAsias He 323fdf659dSSasha Levin static inline struct vring_desc *virt_queue__get_desc(struct virt_queue *queue, u16 desc_ndx) 3339d6af07SAsias He { 3439d6af07SAsias He return &queue->vring.desc[desc_ndx]; 3539d6af07SAsias He } 3639d6af07SAsias He 3739d6af07SAsias He static inline bool virt_queue__available(struct virt_queue *vq) 3839d6af07SAsias He { 39412aa73fSAmos Kong if (!vq->vring.avail) 40412aa73fSAmos Kong return 0; 41*24a6fb44SSasha Levin 42*24a6fb44SSasha Levin vring_avail_event(&vq->vring) = vq->last_avail_idx; 4339d6af07SAsias He return vq->vring.avail->idx != vq->last_avail_idx; 4439d6af07SAsias He } 4539d6af07SAsias He 46aaf0b445SSasha Levin /* 47aaf0b445SSasha Levin * Warning: on 32-bit hosts, shifting pfn left may cause a truncation of pfn values 48aaf0b445SSasha Levin * higher than 4GB - thus, pointing to the wrong area in guest virtual memory space 49aaf0b445SSasha Levin * and breaking the virt queue which owns this pfn. 50aaf0b445SSasha Levin */ 51aaf0b445SSasha Levin static inline void *guest_pfn_to_host(struct kvm *kvm, u32 pfn) 52aaf0b445SSasha Levin { 53b8f43678SSasha Levin return guest_flat_to_host(kvm, (unsigned long)pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT); 54aaf0b445SSasha Levin } 55aaf0b445SSasha Levin 56*24a6fb44SSasha Levin static inline int virtio_queue__should_signal(struct virt_queue *vq) 57*24a6fb44SSasha Levin { 58*24a6fb44SSasha Levin return vring_used_event(&vq->vring) <= vq->vring.used->idx; 59*24a6fb44SSasha Levin } 60*24a6fb44SSasha Levin 613fdf659dSSasha Levin struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len); 6239d6af07SAsias He 633fdf659dSSasha Levin u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm); 6408861bcfSAneesh Kumar K.V u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue, 6508861bcfSAneesh Kumar K.V struct iovec in_iov[], struct iovec out_iov[], 6608861bcfSAneesh Kumar K.V u16 *in, u16 *out); 67c3a79fa1SSasha Levin int virtio__get_dev_specific_field(int offset, bool msix, bool features_hi, u32 *config_off); 68c3a79fa1SSasha Levin 6939d6af07SAsias He #endif /* KVM__VIRTIO_H */ 70