1 #include "kvm/virtio-pci.h" 2 3 #include "kvm/ioport.h" 4 #include "kvm/kvm.h" 5 #include "kvm/virtio-pci-dev.h" 6 #include "kvm/irq.h" 7 #include "kvm/virtio.h" 8 #include "kvm/ioeventfd.h" 9 10 #include <sys/ioctl.h> 11 #include <linux/virtio_pci.h> 12 #include <linux/byteorder.h> 13 #include <string.h> 14 15 static void virtio_pci__ioevent_callback(struct kvm *kvm, void *param) 16 { 17 struct virtio_pci_ioevent_param *ioeventfd = param; 18 struct virtio_pci *vpci = ioeventfd->vdev->virtio; 19 20 ioeventfd->vdev->ops->notify_vq(kvm, vpci->dev, ioeventfd->vq); 21 } 22 23 static int virtio_pci__init_ioeventfd(struct kvm *kvm, struct virtio_device *vdev, u32 vq) 24 { 25 struct ioevent ioevent; 26 struct virtio_pci *vpci = vdev->virtio; 27 int r; 28 29 vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) { 30 .vdev = vdev, 31 .vq = vq, 32 }; 33 34 ioevent = (struct ioevent) { 35 .io_addr = vpci->base_addr + VIRTIO_PCI_QUEUE_NOTIFY, 36 .io_len = sizeof(u16), 37 .fn = virtio_pci__ioevent_callback, 38 .fn_ptr = &vpci->ioeventfds[vq], 39 .datamatch = vq, 40 .fn_kvm = kvm, 41 .fd = eventfd(0, 0), 42 }; 43 44 if (vdev->use_vhost) 45 /* 46 * Vhost will poll the eventfd in host kernel side, 47 * no need to poll in userspace. 48 */ 49 r = ioeventfd__add_event(&ioevent, true, false); 50 else 51 /* Need to poll in userspace. */ 52 r = ioeventfd__add_event(&ioevent, true, true); 53 if (r) 54 return r; 55 56 if (vdev->ops->notify_vq_eventfd) 57 vdev->ops->notify_vq_eventfd(kvm, vpci->dev, vq, ioevent.fd); 58 59 return 0; 60 } 61 62 static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci) 63 { 64 return vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_ENABLE); 65 } 66 67 static bool virtio_pci__specific_io_in(struct kvm *kvm, struct virtio_device *vdev, u16 port, 68 void *data, int size, int offset) 69 { 70 u32 config_offset; 71 struct virtio_pci *vpci = vdev->virtio; 72 int type = virtio__get_dev_specific_field(offset - 20, 73 virtio_pci__msix_enabled(vpci), 74 &config_offset); 75 if (type == VIRTIO_PCI_O_MSIX) { 76 switch (offset) { 77 case VIRTIO_MSI_CONFIG_VECTOR: 78 ioport__write16(data, vpci->config_vector); 79 break; 80 case VIRTIO_MSI_QUEUE_VECTOR: 81 ioport__write16(data, vpci->vq_vector[vpci->queue_selector]); 82 break; 83 }; 84 85 return true; 86 } else if (type == VIRTIO_PCI_O_CONFIG) { 87 u8 cfg; 88 89 cfg = vdev->ops->get_config(kvm, vpci->dev)[config_offset]; 90 ioport__write8(data, cfg); 91 return true; 92 } 93 94 return false; 95 } 96 97 static bool virtio_pci__io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size) 98 { 99 unsigned long offset; 100 bool ret = true; 101 struct virtio_device *vdev; 102 struct virtio_pci *vpci; 103 u32 val; 104 105 vdev = ioport->priv; 106 vpci = vdev->virtio; 107 offset = port - vpci->base_addr; 108 109 switch (offset) { 110 case VIRTIO_PCI_HOST_FEATURES: 111 val = vdev->ops->get_host_features(kvm, vpci->dev); 112 ioport__write32(data, val); 113 break; 114 case VIRTIO_PCI_QUEUE_PFN: 115 val = vdev->ops->get_pfn_vq(kvm, vpci->dev, vpci->queue_selector); 116 ioport__write32(data, val); 117 break; 118 case VIRTIO_PCI_QUEUE_NUM: 119 val = vdev->ops->get_size_vq(kvm, vpci->dev, vpci->queue_selector); 120 ioport__write16(data, val); 121 break; 122 case VIRTIO_PCI_STATUS: 123 ioport__write8(data, vpci->status); 124 break; 125 case VIRTIO_PCI_ISR: 126 ioport__write8(data, vpci->isr); 127 kvm__irq_line(kvm, vpci->pci_hdr.irq_line, VIRTIO_IRQ_LOW); 128 vpci->isr = VIRTIO_IRQ_LOW; 129 break; 130 default: 131 ret = virtio_pci__specific_io_in(kvm, vdev, port, data, size, offset); 132 break; 133 }; 134 135 return ret; 136 } 137 138 static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_device *vdev, u16 port, 139 void *data, int size, int offset) 140 { 141 struct virtio_pci *vpci = vdev->virtio; 142 u32 config_offset, gsi, vec; 143 int type = virtio__get_dev_specific_field(offset - 20, virtio_pci__msix_enabled(vpci), 144 &config_offset); 145 if (type == VIRTIO_PCI_O_MSIX) { 146 switch (offset) { 147 case VIRTIO_MSI_CONFIG_VECTOR: 148 vec = vpci->config_vector = ioport__read16(data); 149 150 gsi = irq__add_msix_route(kvm, &vpci->msix_table[vec].msg); 151 152 vpci->config_gsi = gsi; 153 break; 154 case VIRTIO_MSI_QUEUE_VECTOR: 155 vec = vpci->vq_vector[vpci->queue_selector] = ioport__read16(data); 156 157 gsi = irq__add_msix_route(kvm, &vpci->msix_table[vec].msg); 158 vpci->gsis[vpci->queue_selector] = gsi; 159 if (vdev->ops->notify_vq_gsi) 160 vdev->ops->notify_vq_gsi(kvm, vpci->dev, 161 vpci->queue_selector, gsi); 162 break; 163 }; 164 165 return true; 166 } else if (type == VIRTIO_PCI_O_CONFIG) { 167 vdev->ops->get_config(kvm, vpci->dev)[config_offset] = *(u8 *)data; 168 169 return true; 170 } 171 172 return false; 173 } 174 175 static bool virtio_pci__io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size) 176 { 177 unsigned long offset; 178 bool ret = true; 179 struct virtio_device *vdev; 180 struct virtio_pci *vpci; 181 u32 val; 182 183 vdev = ioport->priv; 184 vpci = vdev->virtio; 185 offset = port - vpci->base_addr; 186 187 switch (offset) { 188 case VIRTIO_PCI_GUEST_FEATURES: 189 val = ioport__read32(data); 190 vdev->ops->set_guest_features(kvm, vpci->dev, val); 191 break; 192 case VIRTIO_PCI_QUEUE_PFN: 193 val = ioport__read32(data); 194 virtio_pci__init_ioeventfd(kvm, vdev, vpci->queue_selector); 195 vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector, val); 196 break; 197 case VIRTIO_PCI_QUEUE_SEL: 198 vpci->queue_selector = ioport__read16(data); 199 break; 200 case VIRTIO_PCI_QUEUE_NOTIFY: 201 val = ioport__read16(data); 202 vdev->ops->notify_vq(kvm, vpci->dev, val); 203 break; 204 case VIRTIO_PCI_STATUS: 205 vpci->status = ioport__read8(data); 206 break; 207 default: 208 ret = virtio_pci__specific_io_out(kvm, vdev, port, data, size, offset); 209 break; 210 }; 211 212 return ret; 213 } 214 215 static struct ioport_operations virtio_pci__io_ops = { 216 .io_in = virtio_pci__io_in, 217 .io_out = virtio_pci__io_out, 218 }; 219 220 static void virtio_pci__mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr) 221 { 222 struct virtio_pci *vpci = ptr; 223 void *table; 224 u32 offset; 225 226 if (addr > vpci->msix_io_block + PCI_IO_SIZE) { 227 table = &vpci->msix_pba; 228 offset = vpci->msix_io_block + PCI_IO_SIZE; 229 } else { 230 table = &vpci->msix_table; 231 offset = vpci->msix_io_block; 232 } 233 234 if (is_write) 235 memcpy(table + addr - offset, data, len); 236 else 237 memcpy(data, table + addr - offset, len); 238 } 239 240 static void virtio_pci__signal_msi(struct kvm *kvm, struct virtio_pci *vpci, int vec) 241 { 242 struct kvm_msi msi = { 243 .address_lo = vpci->msix_table[vec].msg.address_lo, 244 .address_hi = vpci->msix_table[vec].msg.address_hi, 245 .data = vpci->msix_table[vec].msg.data, 246 }; 247 248 ioctl(kvm->vm_fd, KVM_SIGNAL_MSI, &msi); 249 } 250 251 int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq) 252 { 253 struct virtio_pci *vpci = vdev->virtio; 254 int tbl = vpci->vq_vector[vq]; 255 256 if (virtio_pci__msix_enabled(vpci)) { 257 if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) || 258 vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) { 259 260 vpci->msix_pba |= 1 << tbl; 261 return 0; 262 } 263 264 if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI) 265 virtio_pci__signal_msi(kvm, vpci, vpci->vq_vector[vq]); 266 else 267 kvm__irq_trigger(kvm, vpci->gsis[vq]); 268 } else { 269 vpci->isr = VIRTIO_IRQ_HIGH; 270 kvm__irq_trigger(kvm, vpci->pci_hdr.irq_line); 271 } 272 return 0; 273 } 274 275 int virtio_pci__signal_config(struct kvm *kvm, struct virtio_device *vdev) 276 { 277 struct virtio_pci *vpci = vdev->virtio; 278 int tbl = vpci->config_vector; 279 280 if (virtio_pci__msix_enabled(vpci)) { 281 if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) || 282 vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) { 283 284 vpci->msix_pba |= 1 << tbl; 285 return 0; 286 } 287 288 if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI) 289 virtio_pci__signal_msi(kvm, vpci, vpci->config_vector); 290 else 291 kvm__irq_trigger(kvm, vpci->config_gsi); 292 } else { 293 vpci->isr = VIRTIO_PCI_ISR_CONFIG; 294 kvm__irq_trigger(kvm, vpci->pci_hdr.irq_line); 295 } 296 297 return 0; 298 } 299 300 int virtio_pci__init(struct kvm *kvm, void *dev, struct virtio_device *vdev, 301 int device_id, int subsys_id, int class) 302 { 303 struct virtio_pci *vpci = vdev->virtio; 304 u8 pin, line, ndev; 305 int r; 306 307 vpci->dev = dev; 308 vpci->msix_io_block = pci_get_io_space_block(PCI_IO_SIZE * 2); 309 310 r = ioport__register(kvm, IOPORT_EMPTY, &virtio_pci__io_ops, IOPORT_SIZE, vdev); 311 if (r < 0) 312 return r; 313 314 vpci->base_addr = (u16)r; 315 r = kvm__register_mmio(kvm, vpci->msix_io_block, PCI_IO_SIZE, false, 316 virtio_pci__mmio_callback, vpci); 317 if (r < 0) 318 goto free_ioport; 319 320 vpci->pci_hdr = (struct pci_device_header) { 321 .vendor_id = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET), 322 .device_id = cpu_to_le16(device_id), 323 .header_type = PCI_HEADER_TYPE_NORMAL, 324 .revision_id = 0, 325 .class[0] = class & 0xff, 326 .class[1] = (class >> 8) & 0xff, 327 .class[2] = (class >> 16) & 0xff, 328 .subsys_vendor_id = cpu_to_le16(PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET), 329 .subsys_id = cpu_to_le16(subsys_id), 330 .bar[0] = cpu_to_le32(vpci->base_addr 331 | PCI_BASE_ADDRESS_SPACE_IO), 332 .bar[1] = cpu_to_le32(vpci->msix_io_block 333 | PCI_BASE_ADDRESS_SPACE_MEMORY), 334 .status = cpu_to_le16(PCI_STATUS_CAP_LIST), 335 .capabilities = (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr, 336 .bar_size[0] = IOPORT_SIZE, 337 .bar_size[1] = PCI_IO_SIZE, 338 .bar_size[3] = PCI_IO_SIZE, 339 }; 340 341 vpci->pci_hdr.msix.cap = PCI_CAP_ID_MSIX; 342 vpci->pci_hdr.msix.next = 0; 343 /* 344 * We at most have VIRTIO_PCI_MAX_VQ entries for virt queue, 345 * VIRTIO_PCI_MAX_CONFIG entries for config. 346 * 347 * To quote the PCI spec: 348 * 349 * System software reads this field to determine the 350 * MSI-X Table Size N, which is encoded as N-1. 351 * For example, a returned value of "00000000011" 352 * indicates a table size of 4. 353 */ 354 vpci->pci_hdr.msix.ctrl = cpu_to_le16(VIRTIO_PCI_MAX_VQ + VIRTIO_PCI_MAX_CONFIG - 1); 355 356 /* 357 * Both table and PBA could be mapped on the same BAR, but for now 358 * we're not in short of BARs 359 */ 360 vpci->pci_hdr.msix.table_offset = cpu_to_le32(1); /* Use BAR 1 */ 361 vpci->pci_hdr.msix.pba_offset = cpu_to_le32(1 | PCI_IO_SIZE); /* Use BAR 3 */ 362 vpci->config_vector = 0; 363 364 r = irq__register_device(subsys_id, &ndev, &pin, &line); 365 if (r < 0) 366 goto free_mmio; 367 368 if (kvm__supports_extension(kvm, KVM_CAP_SIGNAL_MSI)) 369 vpci->features |= VIRTIO_PCI_F_SIGNAL_MSI; 370 371 vpci->pci_hdr.irq_pin = pin; 372 vpci->pci_hdr.irq_line = line; 373 r = pci__register(&vpci->pci_hdr, ndev); 374 if (r < 0) 375 goto free_ioport; 376 377 return 0; 378 379 free_mmio: 380 kvm__deregister_mmio(kvm, vpci->msix_io_block); 381 free_ioport: 382 ioport__unregister(kvm, vpci->base_addr); 383 return r; 384 } 385 386 int virtio_pci__exit(struct kvm *kvm, struct virtio_device *vdev) 387 { 388 struct virtio_pci *vpci = vdev->virtio; 389 int i; 390 391 kvm__deregister_mmio(kvm, vpci->msix_io_block); 392 ioport__unregister(kvm, vpci->base_addr); 393 394 for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++) 395 ioeventfd__del_event(vpci->base_addr + VIRTIO_PCI_QUEUE_NOTIFY, i); 396 397 return 0; 398 } 399