#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>
#include <stdlib.h>

#include "kvm/guest_compat.h"
#include "kvm/barrier.h"
#include "kvm/virtio.h"
#include "kvm/virtio-pci.h"
#include "kvm/virtio-mmio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

const char *virtio_trans_name(enum virtio_trans trans)
{
	if (trans == VIRTIO_PCI)
		return "pci";
	else if (trans == VIRTIO_MMIO)
		return "mmio";
	return "unknown";
}

void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump)
{
	u16 idx = virtio_guest_to_host_u16(queue, queue->vring.used->idx);

	/*
	 * Use wmb to assure that used elem was updated with head and len.
	 * We need a wmb here since we can't advance idx unless we're ready
	 * to pass the used element to the guest.
	 */
	wmb();
	idx += jump;
	queue->vring.used->idx = virtio_host_to_guest_u16(queue, idx);
}

struct vring_used_elem *
virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head,
				    u32 len, u16 offset)
{
	struct vring_used_elem *used_elem;
	u16 idx = virtio_guest_to_host_u16(queue, queue->vring.used->idx);

	idx += offset;
	used_elem = &queue->vring.used->ring[idx % queue->vring.num];
	used_elem->id = virtio_host_to_guest_u32(queue, head);
	used_elem->len = virtio_host_to_guest_u32(queue, len);

	return used_elem;
}

struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem = virt_queue__set_used_elem_no_update(queue, head, len, 0);
	virt_queue__used_idx_advance(queue, 1);

	return used_elem;
}

static inline bool virt_desc__test_flag(struct virt_queue *vq,
					struct vring_desc *desc, u16 flag)
{
	return !!(virtio_guest_to_host_u16(vq, desc->flags) & flag);
}

/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or max if we're at the
 * end.
 */
static unsigned next_desc(struct virt_queue *vq, struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!virt_desc__test_flag(vq, &desc[i], VRING_DESC_F_NEXT))
		return max;

	next = virtio_guest_to_host_u16(vq, desc[i].next);

	/* Ensure they're not leading us off end of descriptors. */
	return min(next, max);
}

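/*
 * Translate the descriptor chain starting at 'head' into host iovecs:
 * device-writable buffers are counted in 'in', device-readable ones in
 * 'out'. An indirect descriptor table, if present, is walked in place of
 * the main ring. Returns 'head' so the caller can push it to the used ring.
 */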
u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 idx;
	u16 max;

	idx = head;
	*out = *in = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	if (virt_desc__test_flag(vq, &desc[idx], VRING_DESC_F_INDIRECT)) {
		max = virtio_guest_to_host_u32(vq, desc[idx].len) / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, virtio_guest_to_host_u64(vq, desc[idx].addr));
		idx = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out + *in].iov_len = virtio_guest_to_host_u32(vq, desc[idx].len);
		iov[*out + *in].iov_base = guest_flat_to_host(kvm,
							      virtio_guest_to_host_u64(vq, desc[idx].addr));
		/* If this is an input descriptor, increment that count. */
		if (virt_desc__test_flag(vq, &desc[idx], VRING_DESC_F_WRITE))
			(*in)++;
		else
			(*out)++;
	} while ((idx = next_desc(vq, desc, idx, max)) != max);

	return head;
}

u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	u16 head;

	head = virt_queue__pop(vq);

	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
}

/* in and out are relative to guest */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		u64 addr;
		desc = virt_queue__get_desc(queue, idx);
		addr = virtio_guest_to_host_u64(queue, desc->addr);
		if (virt_desc__test_flag(queue, desc, VRING_DESC_F_WRITE)) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm, addr);
			in_iov[*in].iov_len = virtio_guest_to_host_u32(queue, desc->len);
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm, addr);
			out_iov[*out].iov_len = virtio_guest_to_host_u32(queue, desc->len);
			(*out)++;
		}
		if (virt_desc__test_flag(queue, desc, VRING_DESC_F_NEXT))
			idx = virtio_guest_to_host_u16(queue, desc->next);
		else
			break;
	} while (1);

	return head;
}

void virtio_init_device_vq(struct kvm *kvm, struct virtio_device *vdev,
			   struct virt_queue *vq, size_t nr_descs)
{
	struct vring_addr *addr = &vq->vring_addr;

	vq->endian = vdev->endian;
	vq->use_event_idx = (vdev->features & VIRTIO_RING_F_EVENT_IDX);
	vq->enabled = true;

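	/*
	 * Legacy (virtio 0.9) transports hand us a single page frame number
	 * for the whole ring, which vring_init() lays out; modern (virtio
	 * 1.0) transports pass the descriptor, available and used ring
	 * addresses separately, split into 32-bit halves.
	 */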
	if (addr->legacy) {
		unsigned long base = (u64)addr->pfn * addr->pgsize;
		void *p = guest_flat_to_host(kvm, base);

		vring_init(&vq->vring, nr_descs, p, addr->align);
	} else {
		u64 desc = (u64)addr->desc_hi << 32 | addr->desc_lo;
		u64 avail = (u64)addr->avail_hi << 32 | addr->avail_lo;
		u64 used = (u64)addr->used_hi << 32 | addr->used_lo;

		vq->vring = (struct vring) {
			.desc	= guest_flat_to_host(kvm, desc),
			.used	= guest_flat_to_host(kvm, used),
			.avail	= guest_flat_to_host(kvm, avail),
			.num	= nr_descs,
		};
	}
}

void virtio_exit_vq(struct kvm *kvm, struct virtio_device *vdev,
		    void *dev, int num)
{
	struct virt_queue *vq = vdev->ops->get_vq(kvm, dev, num);

	if (vq->enabled && vdev->ops->exit_vq)
		vdev->ops->exit_vq(kvm, dev, num);
	memset(vq, 0, sizeof(*vq));
}

int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
	if (msix) {
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}

bool virtio_queue__should_signal(struct virt_queue *vq)
{
	u16 old_idx, new_idx, event_idx;

	/*
	 * Use mb to assure used idx has been increased before we signal the
	 * guest, and we don't read a stale value for used_event. Without a mb
	 * here we might not send a notification that we need to send, or the
	 * guest may ignore the queue since it won't see an updated idx.
	 */
	mb();

	if (!vq->use_event_idx) {
		/*
		 * When VIRTIO_RING_F_EVENT_IDX isn't negotiated, interrupt the
		 * guest if it didn't explicitly request to be left alone.
		 */
		return !(virtio_guest_to_host_u16(vq, vq->vring.avail->flags) &
			 VRING_AVAIL_F_NO_INTERRUPT);
	}

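	/*
	 * With VIRTIO_RING_F_EVENT_IDX, only signal when the used index has
	 * moved past the event index that the guest published in the avail
	 * ring since we last signalled it.
	 */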
	old_idx = vq->last_used_signalled;
	new_idx = virtio_guest_to_host_u16(vq, vq->vring.used->idx);
	event_idx = virtio_guest_to_host_u16(vq, vring_used_event(&vq->vring));

	if (vring_need_event(event_idx, new_idx, old_idx)) {
		vq->last_used_signalled = new_idx;
		return true;
	}

	return false;
}

void virtio_set_guest_features(struct kvm *kvm, struct virtio_device *vdev,
			       void *dev, u32 features)
{
	/* TODO: fail negotiation if features & ~host_features */

	vdev->features = features;
	vdev->ops->set_guest_features(kvm, dev, features);
}

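/*
 * Maintain the extended status bits (VIRTIO__STATUS_START/STOP/CONFIG) on
 * top of the standard status byte written by the guest, and forward the
 * combined value to the device's notify_status() callback.
 */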
void virtio_notify_status(struct kvm *kvm, struct virtio_device *vdev,
			  void *dev, u8 status)
{
	u32 ext_status = status;

	vdev->status &= ~VIRTIO_CONFIG_S_MASK;
	vdev->status |= status;

	/* Add a few hints to help devices */
	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(vdev->status & VIRTIO__STATUS_START)) {
		vdev->status |= VIRTIO__STATUS_START;
		ext_status |= VIRTIO__STATUS_START;

	} else if (!status && (vdev->status & VIRTIO__STATUS_START)) {
		vdev->status &= ~VIRTIO__STATUS_START;
		ext_status |= VIRTIO__STATUS_STOP;

		/*
		 * Reset virtqueues and stop all traffic now, so that the
		 * device can safely reset the backend in notify_status().
		 */
		vdev->ops->reset(kvm, vdev);
	}
	if (!status)
		ext_status |= VIRTIO__STATUS_CONFIG;

	if (vdev->ops->notify_status)
		vdev->ops->notify_status(kvm, dev, ext_status);
}

bool virtio_access_config(struct kvm *kvm, struct virtio_device *vdev,
			  void *dev, unsigned long offset, void *data,
			  size_t size, bool is_write)
{
	void *in, *out, *config;
	size_t config_size = vdev->ops->get_config_size(kvm, dev);

	if (WARN_ONCE(offset + size > config_size,
		      "Config access offset (%lu) is beyond config size (%zu)\n",
		      offset, config_size))
		return false;

	config = vdev->ops->get_config(kvm, dev) + offset;

	in = is_write ? data : config;
	out = is_write ? config : data;

	switch (size) {
	case 1:
		*(u8 *)out = *(u8 *)in;
		break;
	case 2:
		*(u16 *)out = *(u16 *)in;
		break;
	case 4:
		*(u32 *)out = *(u32 *)in;
		break;
	case 8:
		*(u64 *)out = *(u64 *)in;
		break;
	default:
		WARN_ONCE(1, "%s: invalid access size\n", __func__);
		return false;
	}

	return true;
}

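/*
 * Allocate the transport-specific state (virtio-pci or virtio-mmio), hook
 * the transport's callbacks into the device ops and let the transport
 * initialize the device.
 */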
int virtio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		struct virtio_ops *ops, enum virtio_trans trans,
		int device_id, int subsys_id, int class)
{
	void *virtio;
	int r;

	switch (trans) {
	case VIRTIO_PCI:
		virtio = calloc(sizeof(struct virtio_pci), 1);
		if (!virtio)
			return -ENOMEM;
		vdev->virtio = virtio;
		vdev->ops = ops;
		vdev->ops->signal_vq = virtio_pci__signal_vq;
		vdev->ops->signal_config = virtio_pci__signal_config;
		vdev->ops->init = virtio_pci__init;
		vdev->ops->exit = virtio_pci__exit;
		vdev->ops->reset = virtio_pci__reset;
		r = vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
		break;
	case VIRTIO_MMIO:
		virtio = calloc(sizeof(struct virtio_mmio), 1);
		if (!virtio)
			return -ENOMEM;
		vdev->virtio = virtio;
		vdev->ops = ops;
		vdev->ops->signal_vq = virtio_mmio_signal_vq;
		vdev->ops->signal_config = virtio_mmio_signal_config;
		vdev->ops->init = virtio_mmio_init;
		vdev->ops->exit = virtio_mmio_exit;
		vdev->ops->reset = virtio_mmio_reset;
		r = vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
		break;
	default:
		r = -1;
	}

	return r;
}

int virtio_compat_add_message(const char *device, const char *config)
{
	int len = 1024;
	int compat_id;
	char *title;
	char *desc;

	title = malloc(len);
	if (!title)
		return -ENOMEM;

	desc = malloc(len);
	if (!desc) {
		free(title);
		return -ENOMEM;
	}

	snprintf(title, len, "%s device was not detected.", device);
	snprintf(desc, len, "While you have requested a %s device, "
		 "the guest kernel did not initialize it.\n"
		 "\tPlease make sure that the guest kernel was "
		 "compiled with %s=y enabled in .config.",
		 device, config);

	compat_id = compat__add_message(title, desc);

	free(desc);
	free(title);

	return compat_id;
}