#include "kvm/ioport.h"
#include "kvm/virtio.h"
#include "kvm/virtio-mmio.h"

#include <linux/virtio_mmio.h>

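/* Resolve the virtqueue currently selected by the guest via QUEUE_SEL. */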
#define vmmio_selected_vq(vdev, vmmio) \
	(vdev)->ops->get_vq((vmmio)->kvm, (vmmio)->dev, (vmmio)->hdr.queue_sel)

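/* Handle a guest read from the legacy virtio-mmio register block. */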
static void virtio_mmio_config_in(struct kvm_cpu *vcpu,
				  u64 addr, void *data, u32 len,
				  struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	struct virt_queue *vq;
	u32 val = 0;

	switch (addr) {
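	/* These registers are mirrored straight from the cached MMIO header. */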
	case VIRTIO_MMIO_MAGIC_VALUE:
	case VIRTIO_MMIO_VERSION:
	case VIRTIO_MMIO_DEVICE_ID:
	case VIRTIO_MMIO_VENDOR_ID:
	case VIRTIO_MMIO_STATUS:
	case VIRTIO_MMIO_INTERRUPT_STATUS:
		ioport__write32(data, *(u32 *)(((void *)&vmmio->hdr) + addr));
		break;
	case VIRTIO_MMIO_DEVICE_FEATURES:
		if (vmmio->hdr.host_features_sel == 0)
			val = vdev->ops->get_host_features(vmmio->kvm,
							   vmmio->dev);
		ioport__write32(data, val);
		break;
	case VIRTIO_MMIO_QUEUE_PFN:
		vq = vmmio_selected_vq(vdev, vmmio);
		ioport__write32(data, vq->vring_addr.pfn);
		break;
	case VIRTIO_MMIO_QUEUE_NUM_MAX:
		val = vdev->ops->get_size_vq(vmmio->kvm, vmmio->dev,
					     vmmio->hdr.queue_sel);
		ioport__write32(data, val);
		break;
	default:
		break;
	}
}

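/* Handle a guest write to the legacy virtio-mmio register block. */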
static void virtio_mmio_config_out(struct kvm_cpu *vcpu,
				   u64 addr, void *data, u32 len,
				   struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	struct kvm *kvm = vmmio->kvm;
	unsigned int vq_count = vdev->ops->get_vq_count(kvm, vmmio->dev);
	struct virt_queue *vq;
	u32 val = 0;

	switch (addr) {
	case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
	case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
		val = ioport__read32(data);
		*(u32 *)(((void *)&vmmio->hdr) + addr) = val;
		break;
	case VIRTIO_MMIO_QUEUE_SEL:
		val = ioport__read32(data);
		if (val >= vq_count) {
			WARN_ONCE(1, "QUEUE_SEL value (%u) is larger than VQ count (%u)\n",
				  val, vq_count);
			break;
		}
		*(u32 *)(((void *)&vmmio->hdr) + addr) = val;
		break;
	case VIRTIO_MMIO_STATUS:
		vmmio->hdr.status = ioport__read32(data);
		if (!vmmio->hdr.status) /* Sample endianness on reset */
			vdev->endian = kvm_cpu__get_endianness(vcpu);
		virtio_notify_status(kvm, vdev, vmmio->dev, vmmio->hdr.status);
		break;
	case VIRTIO_MMIO_DRIVER_FEATURES:
		if (vmmio->hdr.guest_features_sel == 0) {
			val = ioport__read32(data);
			virtio_set_guest_features(vmmio->kvm, vdev,
						  vmmio->dev, val);
		}
		break;
	case VIRTIO_MMIO_GUEST_PAGE_SIZE:
		val = ioport__read32(data);
		vmmio->hdr.guest_page_size = val;
		break;
	case VIRTIO_MMIO_QUEUE_NUM:
		val = ioport__read32(data);
		vmmio->hdr.queue_num = val;
		vdev->ops->set_size_vq(vmmio->kvm, vmmio->dev,
				       vmmio->hdr.queue_sel, val);
		break;
	case VIRTIO_MMIO_QUEUE_ALIGN:
		val = ioport__read32(data);
		vmmio->hdr.queue_align = val;
		break;
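	/*
	 * A non-zero PFN places the legacy vring in guest memory and brings
	 * the selected queue up; writing zero tears it back down.
	 */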
	case VIRTIO_MMIO_QUEUE_PFN:
		val = ioport__read32(data);
		if (val) {
			vq = vmmio_selected_vq(vdev, vmmio);
			vq->vring_addr = (struct vring_addr) {
				.legacy = true,
				.pfn	= val,
				.align	= vmmio->hdr.queue_align,
				.pgsize	= vmmio->hdr.guest_page_size,
			};
			virtio_mmio_init_vq(kvm, vdev, vmmio->hdr.queue_sel);
		} else {
			virtio_mmio_exit_vq(kvm, vdev, vmmio->hdr.queue_sel);
		}
		break;
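	/* Guest kick: validate the queue index before notifying the device. */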
	case VIRTIO_MMIO_QUEUE_NOTIFY:
		val = ioport__read32(data);
		if (val >= vq_count) {
			WARN_ONCE(1, "QUEUE_NOTIFY value (%u) is larger than VQ count (%u)\n",
				  val, vq_count);
			break;
		}
		vdev->ops->notify_vq(vmmio->kvm, vmmio->dev, val);
		break;
	case VIRTIO_MMIO_INTERRUPT_ACK:
		val = ioport__read32(data);
		vmmio->hdr.interrupt_state &= ~val;
		break;
	default:
		break;
	}
}

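/* MMIO trap handler for the legacy virtio-mmio transport. */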
void virtio_mmio_legacy_callback(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				 u32 len, u8 is_write, void *ptr)
{
	struct virtio_device *vdev = ptr;
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 offset = addr - vmmio->addr;

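	/* Offsets at or above VIRTIO_MMIO_CONFIG hit device-specific config space. */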
	if (offset >= VIRTIO_MMIO_CONFIG) {
		offset -= VIRTIO_MMIO_CONFIG;
		virtio_access_config(vmmio->kvm, vdev, vmmio->dev, offset, data,
				     len, is_write);
		return;
	}

	if (is_write)
		virtio_mmio_config_out(vcpu, offset, data, len, ptr);
	else
		virtio_mmio_config_in(vcpu, offset, data, len, ptr);
}