1 #include "kvm/virtio-pci.h" 2 3 #include "kvm/ioport.h" 4 #include "kvm/kvm.h" 5 #include "kvm/kvm-cpu.h" 6 #include "kvm/virtio-pci-dev.h" 7 #include "kvm/irq.h" 8 #include "kvm/virtio.h" 9 #include "kvm/ioeventfd.h" 10 11 #include <sys/ioctl.h> 12 #include <linux/virtio_pci.h> 13 #include <linux/byteorder.h> 14 #include <string.h> 15 16 static void virtio_pci__ioevent_callback(struct kvm *kvm, void *param) 17 { 18 struct virtio_pci_ioevent_param *ioeventfd = param; 19 struct virtio_pci *vpci = ioeventfd->vdev->virtio; 20 21 ioeventfd->vdev->ops->notify_vq(kvm, vpci->dev, ioeventfd->vq); 22 } 23 24 static int virtio_pci__init_ioeventfd(struct kvm *kvm, struct virtio_device *vdev, u32 vq) 25 { 26 struct ioevent ioevent; 27 struct virtio_pci *vpci = vdev->virtio; 28 int i, r, flags = 0; 29 int fds[2]; 30 31 vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) { 32 .vdev = vdev, 33 .vq = vq, 34 }; 35 36 ioevent = (struct ioevent) { 37 .fn = virtio_pci__ioevent_callback, 38 .fn_ptr = &vpci->ioeventfds[vq], 39 .datamatch = vq, 40 .fn_kvm = kvm, 41 }; 42 43 /* 44 * Vhost will poll the eventfd in host kernel side, otherwise we 45 * need to poll in userspace. 46 */ 47 if (!vdev->use_vhost) 48 flags |= IOEVENTFD_FLAG_USER_POLL; 49 50 /* ioport */ 51 ioevent.io_addr = vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY; 52 ioevent.io_len = sizeof(u16); 53 ioevent.fd = fds[0] = eventfd(0, 0); 54 r = ioeventfd__add_event(&ioevent, flags | IOEVENTFD_FLAG_PIO); 55 if (r) 56 return r; 57 58 /* mmio */ 59 ioevent.io_addr = vpci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY; 60 ioevent.io_len = sizeof(u16); 61 ioevent.fd = fds[1] = eventfd(0, 0); 62 r = ioeventfd__add_event(&ioevent, flags); 63 if (r) 64 goto free_ioport_evt; 65 66 if (vdev->ops->notify_vq_eventfd) 67 for (i = 0; i < 2; ++i) 68 vdev->ops->notify_vq_eventfd(kvm, vpci->dev, vq, 69 fds[i]); 70 return 0; 71 72 free_ioport_evt: 73 ioeventfd__del_event(vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, vq); 74 return r; 75 } 76 77 static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci) 78 { 79 return vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_ENABLE); 80 } 81 82 static bool virtio_pci__specific_io_in(struct kvm *kvm, struct virtio_device *vdev, u16 port, 83 void *data, int size, int offset) 84 { 85 u32 config_offset; 86 struct virtio_pci *vpci = vdev->virtio; 87 int type = virtio__get_dev_specific_field(offset - 20, 88 virtio_pci__msix_enabled(vpci), 89 &config_offset); 90 if (type == VIRTIO_PCI_O_MSIX) { 91 switch (offset) { 92 case VIRTIO_MSI_CONFIG_VECTOR: 93 ioport__write16(data, vpci->config_vector); 94 break; 95 case VIRTIO_MSI_QUEUE_VECTOR: 96 ioport__write16(data, vpci->vq_vector[vpci->queue_selector]); 97 break; 98 }; 99 100 return true; 101 } else if (type == VIRTIO_PCI_O_CONFIG) { 102 u8 cfg; 103 104 cfg = vdev->ops->get_config(kvm, vpci->dev)[config_offset]; 105 ioport__write8(data, cfg); 106 return true; 107 } 108 109 return false; 110 } 111 112 static bool virtio_pci__io_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size) 113 { 114 unsigned long offset; 115 bool ret = true; 116 struct virtio_device *vdev; 117 struct virtio_pci *vpci; 118 struct kvm *kvm; 119 u32 val; 120 121 kvm = vcpu->kvm; 122 vdev = ioport->priv; 123 vpci = vdev->virtio; 124 offset = port - vpci->port_addr; 125 126 switch (offset) { 127 case VIRTIO_PCI_HOST_FEATURES: 128 val = vdev->ops->get_host_features(kvm, vpci->dev); 129 ioport__write32(data, val); 130 break; 131 case VIRTIO_PCI_QUEUE_PFN: 132 val = vdev->ops->get_pfn_vq(kvm, 
					    vpci->dev, vpci->queue_selector);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		val = vdev->ops->get_size_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write16(data, val);
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, vpci->status);
		break;
	case VIRTIO_PCI_ISR:
		/* The ISR is read-to-clear: deassert the legacy line */
		ioport__write8(data, vpci->isr);
		kvm__irq_line(kvm, vpci->legacy_irq_line, VIRTIO_IRQ_LOW);
		vpci->isr = VIRTIO_IRQ_LOW;
		break;
	default:
		ret = virtio_pci__specific_io_in(kvm, vdev, port, data, size, offset);
		break;
	}

	return ret;
}

static void update_msix_map(struct virtio_pci *vpci,
			    struct msix_table *msix_entry, u32 vecnum)
{
	u32 gsi, i;

	/* Find the GSI number used for that vector */
	if (vecnum == vpci->config_vector) {
		gsi = vpci->config_gsi;
	} else {
		for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++)
			if (vpci->vq_vector[i] == vecnum)
				break;
		if (i == VIRTIO_PCI_MAX_VQ)
			return;
		gsi = vpci->gsis[i];
	}

	if (gsi == 0)
		return;

	msix_entry = &msix_entry[vecnum];
	irq__update_msix_route(vpci->kvm, gsi, &msix_entry->msg);
}

static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_device *vdev, u16 port,
					void *data, int size, int offset)
{
	struct virtio_pci *vpci = vdev->virtio;
	u32 config_offset, vec;
	int gsi;
	/* Same layout as on the read side: skip the 20-byte common header */
	int type = virtio__get_dev_specific_field(offset - 20, virtio_pci__msix_enabled(vpci),
						  &config_offset);
	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			vec = vpci->config_vector = ioport__read16(data);
			if (vec == VIRTIO_MSI_NO_VECTOR)
				break;

			gsi = irq__add_msix_route(kvm,
						  &vpci->msix_table[vec].msg);
			if (gsi >= 0)
				vpci->config_gsi = gsi;
			break;
		case VIRTIO_MSI_QUEUE_VECTOR:
			vec = ioport__read16(data);
			vpci->vq_vector[vpci->queue_selector] = vec;

			if (vec == VIRTIO_MSI_NO_VECTOR)
				break;

			gsi = irq__add_msix_route(kvm,
						  &vpci->msix_table[vec].msg);
			if (gsi < 0)
				break;
			vpci->gsis[vpci->queue_selector] = gsi;
			if (vdev->ops->notify_vq_gsi)
				vdev->ops->notify_vq_gsi(kvm, vpci->dev,
							 vpci->queue_selector,
							 gsi);
			break;
		}

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		vdev->ops->get_config(kvm, vpci->dev)[config_offset] = *(u8 *)data;

		return true;
	}

	return false;
}

static bool virtio_pci__io_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_device *vdev;
	struct virtio_pci *vpci;
	struct kvm *kvm;
	u32 val;

	kvm = vcpu->kvm;
	vdev = ioport->priv;
	vpci = vdev->virtio;
	offset = port - vpci->port_addr;

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		val = ioport__read32(data);
		vdev->ops->set_guest_features(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = ioport__read32(data);
		virtio_pci__init_ioeventfd(kvm, vdev, vpci->queue_selector);
		vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector,
				   1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT,
				   VIRTIO_PCI_VRING_ALIGN, val);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		vpci->queue_selector = ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		val = ioport__read16(data);
		vdev->ops->notify_vq(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_STATUS:
		vpci->status = ioport__read8(data);
		if (!vpci->status) /* Sample endianness on reset */
			vdev->endian = kvm_cpu__get_endianness(vcpu);
		if (vdev->ops->notify_status)
			vdev->ops->notify_status(kvm, vpci->dev, vpci->status);
		break;
	default:
		ret = virtio_pci__specific_io_out(kvm, vdev, port, data, size, offset);
		break;
	}

	return ret;
}

static struct ioport_operations virtio_pci__io_ops = {
	.io_in	= virtio_pci__io_in,
	.io_out	= virtio_pci__io_out,
};

static void virtio_pci__msix_mmio_callback(struct kvm_cpu *vcpu,
					   u64 addr, u8 *data, u32 len,
					   u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	struct msix_table *table;
	int vecnum;
	size_t offset;

	/* The PBA starts right after the vector table, at offset PCI_IO_SIZE */
	if (addr >= vpci->msix_io_block + PCI_IO_SIZE) {
		/* The PBA is read-only */
		if (is_write)
			return;
		table = (struct msix_table *)&vpci->msix_pba;
		offset = addr - (vpci->msix_io_block + PCI_IO_SIZE);
	} else {
		table = vpci->msix_table;
		offset = addr - vpci->msix_io_block;
	}
	vecnum = offset / sizeof(struct msix_table);
	offset = offset % sizeof(struct msix_table);

	if (!is_write) {
		memcpy(data, (void *)&table[vecnum] + offset, len);
		return;
	}

	memcpy((void *)&table[vecnum] + offset, data, len);

	/* Did we just update the address or payload? */
	if (offset < offsetof(struct msix_table, ctrl))
		update_msix_map(vpci, table, vecnum);
}

static void virtio_pci__signal_msi(struct kvm *kvm, struct virtio_pci *vpci, int vec)
{
	struct kvm_msi msi = {
		.address_lo	= vpci->msix_table[vec].msg.address_lo,
		.address_hi	= vpci->msix_table[vec].msg.address_hi,
		.data		= vpci->msix_table[vec].msg.data,
	};

	ioctl(kvm->vm_fd, KVM_SIGNAL_MSI, &msi);
}

int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct virtio_pci *vpci = vdev->virtio;
	int tbl = vpci->vq_vector[vq];

	if (virtio_pci__msix_enabled(vpci) && tbl != VIRTIO_MSI_NO_VECTOR) {
		/* A masked vector is only logged as pending in the PBA */
		if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) ||
		    vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) {

			vpci->msix_pba |= 1ULL << tbl;
			return 0;
		}

		if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
			virtio_pci__signal_msi(kvm, vpci, vpci->vq_vector[vq]);
		else
			kvm__irq_trigger(kvm, vpci->gsis[vq]);
	} else {
		vpci->isr = VIRTIO_IRQ_HIGH;
		kvm__irq_trigger(kvm, vpci->legacy_irq_line);
	}
	return 0;
}

int virtio_pci__signal_config(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_pci *vpci = vdev->virtio;
	int tbl = vpci->config_vector;

	if (virtio_pci__msix_enabled(vpci) && tbl != VIRTIO_MSI_NO_VECTOR) {
		if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) ||
		    vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) {

			vpci->msix_pba |= 1ULL << tbl;
			return 0;
		}

		if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
			virtio_pci__signal_msi(kvm, vpci, tbl);
		else
			kvm__irq_trigger(kvm, vpci->config_gsi);
	} else {
		vpci->isr = VIRTIO_PCI_ISR_CONFIG;
		kvm__irq_trigger(kvm, vpci->legacy_irq_line);
	}

	return 0;
}

static void virtio_pci__io_mmio_callback(struct kvm_cpu *vcpu,
					 u64 addr, u8 *data, u32 len,
					 u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	/* The MMIO BAR mirrors the I/O BAR; forward to the ioport emulation */
	int direction = is_write ?
		KVM_EXIT_IO_OUT : KVM_EXIT_IO_IN;
	u16 port = vpci->port_addr + (addr & (IOPORT_SIZE - 1));

	kvm__emulate_io(vcpu, port, data, direction, len, 1);
}

int virtio_pci__init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		     int device_id, int subsys_id, int class)
{
	struct virtio_pci *vpci = vdev->virtio;
	int r;

	vpci->kvm = kvm;
	vpci->dev = dev;

	r = ioport__register(kvm, IOPORT_EMPTY, &virtio_pci__io_ops, IOPORT_SIZE, vdev);
	if (r < 0)
		return r;
	vpci->port_addr = (u16)r;

	vpci->mmio_addr = pci_get_io_space_block(IOPORT_SIZE);
	r = kvm__register_mmio(kvm, vpci->mmio_addr, IOPORT_SIZE, false,
			       virtio_pci__io_mmio_callback, vpci);
	if (r < 0)
		goto free_ioport;

	vpci->msix_io_block = pci_get_io_space_block(PCI_IO_SIZE * 2);
	r = kvm__register_mmio(kvm, vpci->msix_io_block, PCI_IO_SIZE * 2, false,
			       virtio_pci__msix_mmio_callback, vpci);
	if (r < 0)
		goto free_mmio;

	vpci->pci_hdr = (struct pci_device_header) {
		.vendor_id		= cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET),
		.device_id		= cpu_to_le16(device_id),
		.command		= PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
		.header_type		= PCI_HEADER_TYPE_NORMAL,
		.revision_id		= 0,
		.class[0]		= class & 0xff,
		.class[1]		= (class >> 8) & 0xff,
		.class[2]		= (class >> 16) & 0xff,
		.subsys_vendor_id	= cpu_to_le16(PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET),
		.subsys_id		= cpu_to_le16(subsys_id),
		.bar[0]			= cpu_to_le32(vpci->mmio_addr
						| PCI_BASE_ADDRESS_SPACE_MEMORY),
		.bar[1]			= cpu_to_le32(vpci->port_addr
						| PCI_BASE_ADDRESS_SPACE_IO),
		.bar[2]			= cpu_to_le32(vpci->msix_io_block
						| PCI_BASE_ADDRESS_SPACE_MEMORY),
		.status			= cpu_to_le16(PCI_STATUS_CAP_LIST),
		.capabilities		= (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr,
		.bar_size[0]		= cpu_to_le32(IOPORT_SIZE),
		.bar_size[1]		= cpu_to_le32(IOPORT_SIZE),
		.bar_size[2]		= cpu_to_le32(PCI_IO_SIZE * 2),
	};

	vpci->dev_hdr = (struct device_header) {
		.bus_type	= DEVICE_BUS_PCI,
		.data		= &vpci->pci_hdr,
	};

	vpci->pci_hdr.msix.cap = PCI_CAP_ID_MSIX;
	vpci->pci_hdr.msix.next = 0;
	/*
	 * We have at most VIRTIO_PCI_MAX_VQ entries for virt queues and
	 * VIRTIO_PCI_MAX_CONFIG entries for config.
	 *
	 * To quote the PCI spec:
	 *
	 *	System software reads this field to determine the
	 *	MSI-X Table Size N, which is encoded as N-1.
	 *	For example, a returned value of "00000000011"
	 *	indicates a table size of 4.
	 */
	vpci->pci_hdr.msix.ctrl = cpu_to_le16(VIRTIO_PCI_MAX_VQ + VIRTIO_PCI_MAX_CONFIG - 1);

	/*
	 * Both the table and the PBA live in BAR 2; the low bits of each
	 * offset register encode the BIR, the rest the offset within
	 * that BAR.
	 */
	vpci->pci_hdr.msix.table_offset = cpu_to_le32(2);		/* BIR 2, offset 0 */
	vpci->pci_hdr.msix.pba_offset = cpu_to_le32(2 | PCI_IO_SIZE);	/* BIR 2, offset PCI_IO_SIZE */
	vpci->config_vector = 0;

	if (kvm__supports_extension(kvm, KVM_CAP_SIGNAL_MSI))
		vpci->features |= VIRTIO_PCI_F_SIGNAL_MSI;

	r = device__register(&vpci->dev_hdr);
	if (r < 0)
		goto free_msix_mmio;

	/* save the IRQ that device__register() has allocated */
	vpci->legacy_irq_line = vpci->pci_hdr.irq_line;

	return 0;

free_msix_mmio:
	kvm__deregister_mmio(kvm, vpci->msix_io_block);
free_mmio:
	kvm__deregister_mmio(kvm, vpci->mmio_addr);
free_ioport:
	ioport__unregister(kvm, vpci->port_addr);
	return r;
}

int virtio_pci__exit(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_pci *vpci = vdev->virtio;
	int i;

	kvm__deregister_mmio(kvm, vpci->mmio_addr);
	kvm__deregister_mmio(kvm, vpci->msix_io_block);
	ioport__unregister(kvm, vpci->port_addr);

	for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++) {
		ioeventfd__del_event(vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
		ioeventfd__del_event(vpci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
	}

	return 0;
}
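
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * how a device backend would drive this transport. "my_dev", "my_ops"
 * and "my_dev__init" are hypothetical names, and the ID/class constants
 * are merely examples; a real backend supplies a complete struct
 * virtio_ops (notify_vq(), init_vq(), get_config(), ...) and its own
 * PCI device/subsystem IDs.
 */
#if 0
static struct virtio_pci my_vpci;
static struct virtio_ops my_ops;	/* hypothetical backend callbacks */

static struct virtio_device my_vdev = {
	.virtio	= &my_vpci,
	.ops	= &my_ops,
};

static int my_dev__init(struct kvm *kvm, void *my_dev)
{
	int r;

	/* Registers the I/O, MMIO and MSI-X BARs and the PCI device */
	r = virtio_pci__init(kvm, my_dev, &my_vdev,
			     PCI_DEVICE_ID_VIRTIO_BLK,	/* example IDs */
			     VIRTIO_ID_BLOCK, PCI_CLASS_BLK);
	if (r < 0)
		return r;

	/* Later, when the backend has returned used buffers on vq 0: */
	return virtio_pci__signal_vq(kvm, &my_vdev, 0);
}
#endif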