/*
 * QEMU PCI bus manager
 *
 * Copyright (c) 2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "net/net.h"
#include "system/numa.h"
#include "system/runstate.h"
#include "system/system.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "trace.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/hotplug.h"
#include "hw/boards.h"
#include "hw/nvram/fw_cfg.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "pci-internal.h"

#include "hw/xen/xen.h"
#include "hw/i386/kvm/xen_evtchn.h"

//#define DEBUG_PCI
#ifdef DEBUG_PCI
# define PCI_DPRINTF(format, ...)       printf(format, ## __VA_ARGS__)
#else
# define PCI_DPRINTF(format, ...)       do { } while (0)
#endif

bool pci_available = true;

static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset_hold(Object *obj, ResetType type);
static bool pcie_has_upstream_port(PCIDevice *dev);

static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));

    visit_type_uint8(v, name, &busnr, errp);
}

static const PropertyInfo prop_pci_busnr = {
    .name = "busnr",
    .get = prop_pci_busnr_get,
};

static const Property pci_props[] = {
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
    DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                    QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
    DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
                       max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
    DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
                    QEMU_PCIE_EXT_TAG_BITNR, true),
    { .name = "busnr", .info = &prop_pci_busnr },
};

static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};

static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
{
    return a - b;
}

static GSequence *pci_acpi_index_list(void)
{
    static GSequence *used_acpi_index_list;

    if (!used_acpi_index_list) {
        used_acpi_index_list = g_sequence_new(NULL);
    }
    return used_acpi_index_list;
}

static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}

static void pcibus_machine_done(Notifier *notifier, void *data)
{
    PCIBus *bus = container_of(notifier, PCIBus, machine_done);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_init_bus_master(bus->devices[i]);
        }
    }
}

static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);

    bus->machine_done.notify = pcibus_machine_done;
    qemu_add_machine_init_done_notifier(&bus->machine_done);

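    /*
     * Note: vmstate_register_any() below ties the bus to the
     * vmstate_pcibus description defined above (nirq plus the per-line
     * irq_count array), so the INTx assertion counters migrate with
     * the bus.
     */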
    vmstate_register_any(NULL, &vmstate_pcibus, bus);
}

static void pcie_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    Error *local_err = NULL;

    pci_bus_realize(qbus, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * A PCI-E bus can support extended config space if it's the root
     * bus, or if the bus/bridge above it does as well
     */
    if (pci_bus_is_root(bus)) {
        bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
    } else {
        PCIBus *parent_bus = pci_get_bus(bus->parent_dev);

        if (pci_bus_allows_extended_config_space(parent_bus)) {
            bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
        }
    }
}

static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);

    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}

static int pcibus_num(PCIBus *bus)
{
    if (pci_bus_is_root(bus)) {
        return 0; /* pci host bridge */
    }
    return bus->parent_dev->config[PCI_SECONDARY_BUS];
}

static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}

bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
                                        PCIBus *bus,
                                        Error **errp)
{
    Object *obj;

    if (!bus) {
        return true;
    }
    obj = OBJECT(bus);

    return fw_cfg_add_file_from_generator(fw_cfg, obj->parent,
                                          object_get_canonical_path_component(obj),
                                          "etc/extra-pci-roots", errp);
}

static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp)
{
    PCIBus *bus = PCI_BUS(obj);
    GByteArray *byte_array;
    uint64_t extra_hosts = 0;

    if (!bus) {
        return NULL;
    }

    QLIST_FOREACH(bus, &bus->child, sibling) {
        /* look for expander root buses */
        if (pci_bus_is_root(bus)) {
            extra_hosts++;
        }
    }

    if (!extra_hosts) {
        return NULL;
    }
    extra_hosts = cpu_to_le64(extra_hosts);

    byte_array = g_byte_array_new();
    g_byte_array_append(byte_array,
                        (const void *)&extra_hosts, sizeof(extra_hosts));

    return byte_array;
}

static void pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    PCIBusClass *pbc = PCI_BUS_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass);

    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;

    rc->phases.hold = pcibus_reset_hold;

    pbc->bus_num = pcibus_num;
    pbc->numa_node = pcibus_numa_node;

    fwgc->get_data = pci_bus_fw_cfg_gen_data;
}

static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
        { }
    }
};

static const TypeInfo cxl_interface_info = {
    .name = INTERFACE_CXL_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name = INTERFACE_PCIE_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo conventional_pci_interface_info = {
    .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent = TYPE_INTERFACE,
};

static void pcie_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}

static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};

static void pci_update_mappings(PCIDevice *d);
static void pci_irq_handler(void *opaque, int irq_num, int level);
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
static void pci_del_option_rom(PCIDevice *pdev);

static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;

PCIHostStateList pci_host_bridges;

int pci_bar(PCIDevice *d, int reg)
{
    uint8_t type;

    /* PCIe virtual functions do not have their own BARs */
    assert(!pci_is_vf(d));

    if (reg != PCI_ROM_SLOT)
        return PCI_BASE_ADDRESS_0 + reg * 4;

    type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
}

static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}

static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}

static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        int dev_irq = irq_num;
        bus = pci_get_bus(pci_dev);
        assert(bus->map_irq);
        irq_num = bus->map_irq(pci_dev, irq_num);
        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        if (bus->set_irq)
            break;
        pci_dev = bus->parent_dev;
    }
    pci_bus_change_irq_level(bus, irq_num, change);
}

int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}

/* Update interrupt status bit in config space on interrupt
 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
{
    if (dev->irq_state) {
        dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
    } else {
        dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    }
}

void pci_device_deassert_intx(PCIDevice *dev)
{
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_irq_handler(dev, i, 0);
    }
}

static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}

static void pci_reset_regions(PCIDevice *dev)
{
    int r;
    if (pci_is_vf(dev)) {
        return;
    }

    for (r = 0; r < PCI_NUM_REGIONS; ++r) {
        PCIIORegion *region = &dev->io_regions[r];
        if (!region->size) {
            continue;
        }

        if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(dev->config + pci_bar(dev, r), region->type);
        } else {
            pci_set_long(dev->config + pci_bar(dev, r), region->type);
        }
    }
}

static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                                 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                                 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
    pcie_sriov_pf_reset(dev);
}

/*
 * This function is called on RST# assert and on FLR
 * (i.e. when PCI_EXP_DEVCTL_BCR_FLR is set).
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}

/*
 * Trigger pci bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have already been device_cold_reset-ed.
 */
static void pcibus_reset_hold(Object *obj, ResetType type)
{
    PCIBus *bus = PCI_BUS(obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }

    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}

static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}

PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = pci_get_bus(d);

    while (!pci_bus_is_root(bus)) {
        d = bus->parent_dev;
        assert(d != NULL);

        bus = pci_get_bus(d);
    }

    return bus;
}

const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);

    assert(host_bridge->bus == rootbus);

    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }

    return rootbus->qbus.name;
}

bool pci_bus_bypass_iommu(PCIBus *bus)
{
    PCIBus *rootbus = bus;
    PCIHostState *host_bridge;

    if (!pci_bus_is_root(bus)) {
        rootbus = pci_device_root_bus(bus->parent_dev);
    }

    host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);

    assert(host_bridge->bus == rootbus);

    return host_bridge->bypass_iommu;
}

static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *mem, MemoryRegion *io,
                                       uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = mem;
    bus->address_space_io = io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}

static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}

bool pci_bus_is_express(const PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}

void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *mem, MemoryRegion *io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}

PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *mem, MemoryRegion *io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
    return bus;
}

void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}

void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    g_free(bus->irq_count);
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}
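
/*
 * Usage sketch (illustrative only, not taken from this file): a host
 * bridge typically pairs pci_bus_irqs() with pci_bus_map_irqs(), or
 * uses pci_register_root_bus() below, e.g.
 *
 *     pci_bus_irqs(bus, board_set_irq, board_pic_state, 4);
 *     pci_bus_map_irqs(bus, pci_swizzle_map_irq_fn);
 *
 * where board_set_irq()/board_pic_state are hypothetical board-specific
 * names: the set_irq callback drives the physical interrupt input
 * selected by map_irq, and irq_count[] above tracks how many devices
 * currently assert each of the nirq lines.
 */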

void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}

void pci_bus_irqs_cleanup(PCIBus *bus)
{
    bus->set_irq = NULL;
    bus->map_irq = NULL;
    bus->irq_opaque = NULL;
    bus->nirq = 0;
    g_free(bus->irq_count);
    bus->irq_count = NULL;
}

PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                              pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                              void *irq_opaque,
                              MemoryRegion *mem, MemoryRegion *io,
                              uint8_t devfn_min, int nirq,
                              const char *typename)
{
    PCIBus *bus;

    bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
    pci_bus_map_irqs(bus, map_irq);
    return bus;
}

void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}

int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}

/* Returns the min and max bus numbers of a PCI bus hierarchy */
void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
{
    int i;
    *min_bus = *max_bus = pci_bus_num(bus);

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        PCIDevice *dev = bus->devices[i];

        if (dev && IS_PCI_BRIDGE(dev)) {
            *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
            *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
        }
    }
}

int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}

static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}

/* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field, JSONWriter *vmdesc)
{
    const uint8_t **v = pv;
    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);

    return 0;
}

static const VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get = get_pci_config_device,
    .put = put_pci_config_device,
};

static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);
    uint32_t irq_state[PCI_NUM_PINS];
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        irq_state[i] = qemu_get_be32(f);
        if (irq_state[i] != 0x1 && irq_state[i] != 0) {
            fprintf(stderr, "irq state %d: must be 0 or 1.\n",
                    irq_state[i]);
            return -EINVAL;
        }
    }

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_set_irq_state(s, i, irq_state[i]);
    }

    return 0;
}

static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    int i;
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        qemu_put_be32(f, pci_irq_state(s, i));
    }

    return 0;
}

static const VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};

static bool migrate_is_pcie(void *opaque, int version_id)
{
    return pci_is_express((PCIDevice *)opaque);
}

static bool migrate_is_not_pcie(void *opaque, int version_id)
{
    return !pci_is_express((PCIDevice *)opaque);
}

static int pci_post_load(void *opaque, int version_id)
{
    pcie_sriov_pf_post_load(opaque);
    return 0;
}

const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .post_load = pci_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};


void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}

int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;
    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
    return ret;
}

static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
{
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_default_sub_vendor_id);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 pci_default_sub_device_id);
}

/*
 * Parse [[<domain>:]<bus>:]<slot> when funcp == NULL, otherwise
 * [[<domain>:]<bus>:]<slot>.<func>; return -1 on error.
 */
static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
                             unsigned int *slotp, unsigned int *funcp)
{
    const char *p;
    char *e;
    unsigned long val;
    unsigned long dom = 0, bus = 0;
    unsigned int slot = 0;
    unsigned int func = 0;

    p = addr;
    val = strtoul(p, &e, 16);
    if (e == p)
        return -1;
    if (*e == ':') {
        bus = val;
        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;
        if (*e == ':') {
            dom = bus;
            bus = val;
            p = e + 1;
            val = strtoul(p, &e, 16);
            if (e == p)
                return -1;
        }
    }

    slot = val;

    if (funcp != NULL) {
        if (*e != '.')
            return -1;

        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;

        func = val;
    }

    /* if funcp == NULL func is 0 */
    if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
        return -1;

    if (*e)
        return -1;

    *domp = dom;
    *busp = bus;
    *slotp = slot;
    if (funcp != NULL)
        *funcp = func;
    return 0;
}

static void pci_init_cmask(PCIDevice *dev)
{
    pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
    pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
    dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    dev->cmask[PCI_REVISION_ID] = 0xff;
    dev->cmask[PCI_CLASS_PROG] = 0xff;
    pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
    dev->cmask[PCI_HEADER_TYPE] = 0xff;
    dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
}

static void pci_init_wmask(PCIDevice *dev)
{
    int config_size = pci_config_size(dev);

    dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
    dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
    pci_set_word(dev->wmask + PCI_COMMAND,
                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                 PCI_COMMAND_INTX_DISABLE);
    pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);

    memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
           config_size - PCI_CONFIG_HEADER_SIZE);
}

static void pci_init_w1cmask(PCIDevice *dev)
{
    /*
     * Note: It's okay to set w1cmask even for readonly bits as
     * long as their value is hardwired to 0.
     */
    pci_set_word(dev->w1cmask + PCI_STATUS,
                 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
                 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
                 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
}

static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LATENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);

    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);

    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);

    /* Supported memory and i/o types */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);

    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}

static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    /*
     * The multifunction bit is interpreted in two ways:
     * - all functions set the bit to 1.
     *   Example: Intel X53
     * - only function 0 sets the bit; the other functions (> 0)
     *   may leave it at 0.
     *   Example: PIIX3 (also in qemu), PIIX4 (also in qemu), ICH10,
     *
     * So the OS (at least Linux) checks the bit only on function 0
     * and ignores it on functions > 0.
     *
     * The check below allows both interpretations.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_setg(errp, "PCI: single function device can't be populated "
                       "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return;
        }
        return;
    }

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
            return;
        }
    }
}

static void pci_config_alloc(PCIDevice *pci_dev)
{
    int config_size = pci_config_size(pci_dev);

    pci_dev->config = g_malloc0(config_size);
    pci_dev->cmask = g_malloc0(config_size);
    pci_dev->wmask = g_malloc0(config_size);
    pci_dev->w1cmask = g_malloc0(config_size);
    pci_dev->used = g_malloc0(config_size);
}

static void pci_config_free(PCIDevice *pci_dev)
{
    g_free(pci_dev->config);
    g_free(pci_dev->cmask);
    g_free(pci_dev->wmask);
    g_free(pci_dev->w1cmask);
    g_free(pci_dev->used);
}

static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);

    if (xen_mode == XEN_EMULATE) {
        xen_evtchn_remove_pci_device(pci_dev);
    }
    if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
        memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                    &pci_dev->bus_master_enable_region);
    }
    address_space_destroy(&pci_dev->bus_master_as);
}

/* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
{
    uint8_t bus_n;
    uint16_t result;

    switch (cache->type) {
    case PCI_REQ_ID_BDF:
        result = pci_get_bdf(cache->dev);
        break;
    case PCI_REQ_ID_SECONDARY_BUS:
        bus_n = pci_dev_bus_num(cache->dev);
        result = PCI_BUILD_BDF(bus_n, 0);
        break;
    default:
        error_report("Invalid PCI requester ID cache type: %d",
                     cache->type);
        exit(1);
        break;
    }

    return result;
}

/* Parse bridges up to the root complex and return the requester ID
 * cache for a specific device. For a pure PCIe topology, the cached
 * result is exactly the same as the BDF of the device itself.
 * However, several tricks are required when the system mixes in
 * legacy PCI devices and PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type), not the requester ID,
 * since the bus number might change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using the secondary bus
                 * number of the parent bridge with a zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3). */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream. When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices. If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}

uint16_t pci_requester_id(PCIDevice *dev)
{
    return pci_req_id_cache_extract(&dev->requester_id_cache);
}

static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
{
    return !(bus->devices[devfn]);
}

static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
{
    return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
}

uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
{
    return bus->slot_reserved_mask;
}

void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask |= mask;
}

void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask &= ~mask;
}

/* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                   bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        for (devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
             devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    }

    /*
     * Populating function 0 triggers a scan from the guest that
     * exposes other non-zero functions. Hence we need to ensure that
     * function 0 wasn't added yet.
     */
    if (dev->hotplugged && !pci_is_vf(pci_dev) &&
        pci_get_function_0(pci_dev)) {
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);

        return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);
    pci_dev->bus_master_as.max_bounce_buffer_size =
        pci_dev->max_bounce_buffer_size;

    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}

static void pci_unregister_io_regions(PCIDevice *pci_dev)
{
    PCIIORegion *r;
    int i;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &pci_dev->io_regions[i];
        if (!r->size || r->addr == PCI_BAR_UNMAPPED)
            continue;
        memory_region_del_subregion(r->address_space, r->memory);
    }

    pci_unregister_vga(pci_dev);
}

static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);

    if (pc->exit) {
        pc->exit(pci_dev);
    }

    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * clean up acpi-index so it can be reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}

void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    assert(!r->size);
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}

static void pci_update_vga(PCIDevice *pci_dev)
{
    uint16_t cmd;

    if (!pci_dev->has_vga) {
        return;
    }

    cmd = pci_get_word(pci_dev->config + PCI_COMMAND);

    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
                              cmd & PCI_COMMAND_MEMORY);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
                              cmd & PCI_COMMAND_IO);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
                              cmd & PCI_COMMAND_IO);
}

void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    pci_update_vga(pci_dev);
}

void pci_unregister_vga(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (!pci_dev->has_vga) {
        return;
    }

    memory_region_del_subregion(bus->address_space_mem,
                                pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
    pci_dev->has_vga = false;
}

pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
{
    return pci_dev->io_regions[region_num].addr;
}

static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}

pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this workaround.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this workaround.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * The OS is allowed to set a BAR beyond its addressable
     * bits. For example, a 32 bit OS can set a 64bit BAR
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}

static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        if (!d->enabled) {
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}

static inline int pci_irq_disabled(PCIDevice *d)
{
    return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}

/* Called after the interrupt disable bit in config space has been updated;
 * asserts/deasserts interrupts if necessary.
 * Takes the original value of the interrupt disable bit (before the update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
{
    int i, disabled = pci_irq_disabled(d);
    if (disabled == was_irq_disabled)
        return;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        int state = pci_irq_state(d, i);
        pci_change_irq_level(d, i, disabled ? -state : state);
    }
}

uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;

    assert(address + len <= pci_config_size(d));

    if (pci_is_express_downstream_port(d) &&
        ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
        pcie_sync_bridge_lnk(d);
    }
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}

void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    assert(addr + l <= pci_config_size(d));

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }
    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND))
        pci_update_mappings(d);

    if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->enabled);
    }

    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
    pcie_sriov_config_write(d, addr, val_in, l);
}

/***********************************************************/
/* generic PCI irq support */

/* 0 <= irq_num <= 3. level must be 0 or 1 */
static void pci_irq_handler(void *opaque, int irq_num, int level)
{
    PCIDevice *pci_dev = opaque;
    int change;

    assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
    assert(level == 0 || level == 1);
    change = level - pci_irq_state(pci_dev, irq_num);
    if (!change)
        return;

    pci_set_irq_state(pci_dev, irq_num, level);
    pci_update_irq_status(pci_dev);
    if (pci_irq_disabled(pci_dev))
        return;
    pci_change_irq_level(pci_dev, irq_num, change);
}

qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
    int intx = pci_intx(pci_dev);
    assert(0 <= intx && intx < PCI_NUM_PINS);

    return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
}

void pci_set_irq(PCIDevice *pci_dev, int level)
{
    int intx = pci_intx(pci_dev);
    pci_irq_handler(pci_dev, intx, level);
}

/* Special hooks used by device assignment */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
{
    assert(pci_bus_is_root(bus));
    bus->route_intx_to_irq = route_intx_to_irq;
}

PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    do {
        int dev_irq = pin;
        bus = pci_get_bus(dev);
        pin = bus->map_irq(dev, pin);
        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
                            pci_bus_is_root(bus) ? "root-complex"
"root-complex" 1758 : DEVICE(bus->parent_dev)->canonical_path); 1759 dev = bus->parent_dev; 1760 } while (dev); 1761 1762 if (!bus->route_intx_to_irq) { 1763 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", 1764 object_get_typename(OBJECT(bus->qbus.parent))); 1765 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; 1766 } 1767 1768 return bus->route_intx_to_irq(bus->irq_opaque, pin); 1769 } 1770 1771 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new) 1772 { 1773 return old->mode != new->mode || old->irq != new->irq; 1774 } 1775 1776 void pci_bus_fire_intx_routing_notifier(PCIBus *bus) 1777 { 1778 PCIDevice *dev; 1779 PCIBus *sec; 1780 int i; 1781 1782 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1783 dev = bus->devices[i]; 1784 if (dev && dev->intx_routing_notifier) { 1785 dev->intx_routing_notifier(dev); 1786 } 1787 } 1788 1789 QLIST_FOREACH(sec, &bus->child, sibling) { 1790 pci_bus_fire_intx_routing_notifier(sec); 1791 } 1792 } 1793 1794 void pci_device_set_intx_routing_notifier(PCIDevice *dev, 1795 PCIINTxRoutingNotifier notifier) 1796 { 1797 dev->intx_routing_notifier = notifier; 1798 } 1799 1800 /* 1801 * PCI-to-PCI bridge specification 1802 * 9.1: Interrupt routing. Table 9-1 1803 * 1804 * the PCI Express Base Specification, Revision 2.1 1805 * 2.2.8.1: INTx interrupt signaling - Rules 1806 * the Implementation Note 1807 * Table 2-20 1808 */ 1809 /* 1810 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD 1811 * 0-origin unlike PCI interrupt pin register. 1812 */ 1813 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) 1814 { 1815 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); 1816 } 1817 1818 /***********************************************************/ 1819 /* monitor info on PCI */ 1820 1821 static const pci_class_desc pci_class_descriptions[] = 1822 { 1823 { 0x0001, "VGA controller", "display"}, 1824 { 0x0100, "SCSI controller", "scsi"}, 1825 { 0x0101, "IDE controller", "ide"}, 1826 { 0x0102, "Floppy controller", "fdc"}, 1827 { 0x0103, "IPI controller", "ipi"}, 1828 { 0x0104, "RAID controller", "raid"}, 1829 { 0x0106, "SATA controller"}, 1830 { 0x0107, "SAS controller"}, 1831 { 0x0180, "Storage controller"}, 1832 { 0x0200, "Ethernet controller", "ethernet"}, 1833 { 0x0201, "Token Ring controller", "token-ring"}, 1834 { 0x0202, "FDDI controller", "fddi"}, 1835 { 0x0203, "ATM controller", "atm"}, 1836 { 0x0280, "Network controller"}, 1837 { 0x0300, "VGA controller", "display", 0x00ff}, 1838 { 0x0301, "XGA controller"}, 1839 { 0x0302, "3D controller"}, 1840 { 0x0380, "Display controller"}, 1841 { 0x0400, "Video controller", "video"}, 1842 { 0x0401, "Audio controller", "sound"}, 1843 { 0x0402, "Phone"}, 1844 { 0x0403, "Audio controller", "sound"}, 1845 { 0x0480, "Multimedia controller"}, 1846 { 0x0500, "RAM controller", "memory"}, 1847 { 0x0501, "Flash controller", "flash"}, 1848 { 0x0580, "Memory controller"}, 1849 { 0x0600, "Host bridge", "host"}, 1850 { 0x0601, "ISA bridge", "isa"}, 1851 { 0x0602, "EISA bridge", "eisa"}, 1852 { 0x0603, "MC bridge", "mca"}, 1853 { 0x0604, "PCI bridge", "pci-bridge"}, 1854 { 0x0605, "PCMCIA bridge", "pcmcia"}, 1855 { 0x0606, "NUBUS bridge", "nubus"}, 1856 { 0x0607, "CARDBUS bridge", "cardbus"}, 1857 { 0x0608, "RACEWAY bridge"}, 1858 { 0x0680, "Bridge"}, 1859 { 0x0700, "Serial port", "serial"}, 1860 { 0x0701, "Parallel port", "parallel"}, 1861 { 0x0800, "Interrupt controller", "interrupt-controller"}, 1862 { 0x0801, "DMA controller", "dma-controller"}, 1863 { 0x0802, "Timer", "timer"}, 1864 { 0x0803, "RTC", 
"rtc"}, 1865 { 0x0900, "Keyboard", "keyboard"}, 1866 { 0x0901, "Pen", "pen"}, 1867 { 0x0902, "Mouse", "mouse"}, 1868 { 0x0A00, "Dock station", "dock", 0x00ff}, 1869 { 0x0B00, "i386 cpu", "cpu", 0x00ff}, 1870 { 0x0c00, "Firewire controller", "firewire"}, 1871 { 0x0c01, "Access bus controller", "access-bus"}, 1872 { 0x0c02, "SSA controller", "ssa"}, 1873 { 0x0c03, "USB controller", "usb"}, 1874 { 0x0c04, "Fibre channel controller", "fibre-channel"}, 1875 { 0x0c05, "SMBus"}, 1876 { 0, NULL} 1877 }; 1878 1879 void pci_for_each_device_under_bus_reverse(PCIBus *bus, 1880 pci_bus_dev_fn fn, 1881 void *opaque) 1882 { 1883 PCIDevice *d; 1884 int devfn; 1885 1886 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1887 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; 1888 if (d) { 1889 fn(bus, d, opaque); 1890 } 1891 } 1892 } 1893 1894 void pci_for_each_device_reverse(PCIBus *bus, int bus_num, 1895 pci_bus_dev_fn fn, void *opaque) 1896 { 1897 bus = pci_find_bus_nr(bus, bus_num); 1898 1899 if (bus) { 1900 pci_for_each_device_under_bus_reverse(bus, fn, opaque); 1901 } 1902 } 1903 1904 void pci_for_each_device_under_bus(PCIBus *bus, 1905 pci_bus_dev_fn fn, void *opaque) 1906 { 1907 PCIDevice *d; 1908 int devfn; 1909 1910 for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1911 d = bus->devices[devfn]; 1912 if (d) { 1913 fn(bus, d, opaque); 1914 } 1915 } 1916 } 1917 1918 void pci_for_each_device(PCIBus *bus, int bus_num, 1919 pci_bus_dev_fn fn, void *opaque) 1920 { 1921 bus = pci_find_bus_nr(bus, bus_num); 1922 1923 if (bus) { 1924 pci_for_each_device_under_bus(bus, fn, opaque); 1925 } 1926 } 1927 1928 const pci_class_desc *get_class_desc(int class) 1929 { 1930 const pci_class_desc *desc; 1931 1932 desc = pci_class_descriptions; 1933 while (desc->desc && class != desc->class) { 1934 desc++; 1935 } 1936 1937 return desc; 1938 } 1939 1940 void pci_init_nic_devices(PCIBus *bus, const char *default_model) 1941 { 1942 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, 1943 "virtio", "virtio-net-pci"); 1944 } 1945 1946 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, 1947 const char *alias, const char *devaddr) 1948 { 1949 NICInfo *nd = qemu_find_nic_info(model, true, alias); 1950 int dom, busnr, devfn; 1951 PCIDevice *pci_dev; 1952 unsigned slot; 1953 PCIBus *bus; 1954 1955 if (!nd) { 1956 return false; 1957 } 1958 1959 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { 1960 error_report("Invalid PCI device address %s for device %s", 1961 devaddr, model); 1962 exit(1); 1963 } 1964 1965 if (dom != 0) { 1966 error_report("No support for non-zero PCI domains"); 1967 exit(1); 1968 } 1969 1970 devfn = PCI_DEVFN(slot, 0); 1971 1972 bus = pci_find_bus_nr(rootbus, busnr); 1973 if (!bus) { 1974 error_report("Invalid PCI device address %s for device %s", 1975 devaddr, model); 1976 exit(1); 1977 } 1978 1979 pci_dev = pci_new(devfn, model); 1980 qdev_set_nic_properties(&pci_dev->qdev, nd); 1981 pci_realize_and_unref(pci_dev, bus, &error_fatal); 1982 return true; 1983 } 1984 1985 PCIDevice *pci_vga_init(PCIBus *bus) 1986 { 1987 vga_interface_created = true; 1988 switch (vga_interface_type) { 1989 case VGA_CIRRUS: 1990 return pci_create_simple(bus, -1, "cirrus-vga"); 1991 case VGA_QXL: 1992 return pci_create_simple(bus, -1, "qxl-vga"); 1993 case VGA_STD: 1994 return pci_create_simple(bus, -1, "VGA"); 1995 case VGA_VMWARE: 1996 return pci_create_simple(bus, -1, "vmware-svga"); 1997 case VGA_VIRTIO: 1998 return pci_create_simple(bus, -1, 
"virtio-vga"); 1999 case VGA_NONE: 2000 default: /* Other non-PCI types. Checking for unsupported types is already 2001 done in vl.c. */ 2002 return NULL; 2003 } 2004 } 2005 2006 /* Whether a given bus number is in range of the secondary 2007 * bus of the given bridge device. */ 2008 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num) 2009 { 2010 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & 2011 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ && 2012 dev->config[PCI_SECONDARY_BUS] <= bus_num && 2013 bus_num <= dev->config[PCI_SUBORDINATE_BUS]; 2014 } 2015 2016 /* Whether a given bus number is in a range of a root bus */ 2017 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num) 2018 { 2019 int i; 2020 2021 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 2022 PCIDevice *dev = bus->devices[i]; 2023 2024 if (dev && IS_PCI_BRIDGE(dev)) { 2025 if (pci_secondary_bus_in_range(dev, bus_num)) { 2026 return true; 2027 } 2028 } 2029 } 2030 2031 return false; 2032 } 2033 2034 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) 2035 { 2036 PCIBus *sec; 2037 2038 if (!bus) { 2039 return NULL; 2040 } 2041 2042 if (pci_bus_num(bus) == bus_num) { 2043 return bus; 2044 } 2045 2046 /* Consider all bus numbers in range for the host pci bridge. */ 2047 if (!pci_bus_is_root(bus) && 2048 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { 2049 return NULL; 2050 } 2051 2052 /* try child bus */ 2053 for (; bus; bus = sec) { 2054 QLIST_FOREACH(sec, &bus->child, sibling) { 2055 if (pci_bus_num(sec) == bus_num) { 2056 return sec; 2057 } 2058 /* PXB buses assumed to be children of bus 0 */ 2059 if (pci_bus_is_root(sec)) { 2060 if (pci_root_bus_in_range(sec, bus_num)) { 2061 break; 2062 } 2063 } else { 2064 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { 2065 break; 2066 } 2067 } 2068 } 2069 } 2070 2071 return NULL; 2072 } 2073 2074 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, 2075 pci_bus_fn end, void *parent_state) 2076 { 2077 PCIBus *sec; 2078 void *state; 2079 2080 if (!bus) { 2081 return; 2082 } 2083 2084 if (begin) { 2085 state = begin(bus, parent_state); 2086 } else { 2087 state = parent_state; 2088 } 2089 2090 QLIST_FOREACH(sec, &bus->child, sibling) { 2091 pci_for_each_bus_depth_first(sec, begin, end, state); 2092 } 2093 2094 if (end) { 2095 end(bus, state); 2096 } 2097 } 2098 2099 2100 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn) 2101 { 2102 bus = pci_find_bus_nr(bus, bus_num); 2103 2104 if (!bus) 2105 return NULL; 2106 2107 return bus->devices[devfn]; 2108 } 2109 2110 #define ONBOARD_INDEX_MAX (16 * 1024 - 1) 2111 2112 static void pci_qdev_realize(DeviceState *qdev, Error **errp) 2113 { 2114 PCIDevice *pci_dev = (PCIDevice *)qdev; 2115 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 2116 ObjectClass *klass = OBJECT_CLASS(pc); 2117 Error *local_err = NULL; 2118 bool is_default_rom; 2119 uint16_t class_id; 2120 2121 /* 2122 * capped by systemd (see: udev-builtin-net_id.c) 2123 * as it's the only known user honor it to avoid users 2124 * misconfigure QEMU and then wonder why acpi-index doesn't work 2125 */ 2126 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) { 2127 error_setg(errp, "acpi-index should be less or equal to %u", 2128 ONBOARD_INDEX_MAX); 2129 return; 2130 } 2131 2132 /* 2133 * make sure that acpi-index is unique across all present PCI devices 2134 */ 2135 if (pci_dev->acpi_index) { 2136 GSequence *used_indexes = pci_acpi_index_list(); 2137 2138 if (g_sequence_lookup(used_indexes, 2139 
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exists", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /*
     * Initialize cap_present for pci_is_express() and pci_config_size().
     * Note that hybrid PCIs are not set automatically and need to manage
     * QEMU_PCI_CAP_EXPRESS manually.
     */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL) {
        return;
    }

    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    /*
     * A PCIe Downstream Port that does not have ARI Forwarding enabled must
     * associate only Device 0 with the device attached to the bus
     * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
     * sec 7.3.1).
     * With ARI, PCI_SLOT() can return a non-zero value, as the traditional
     * 5-bit Device Number and 3-bit Function Number fields in its associated
     * Routing IDs, Requester IDs and Completer IDs are interpreted as a
     * single 8-bit Function Number.  Hence, ignore ARI capable devices.
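     *
     * As an illustration (not a real device): devfn encodes
     * (slot << 3) | function, so devfn 0x10 is slot 2, function 0 for a
     * conventional Function, whereas an ARI-capable Function treats the
     * same eight bits as Function Number 16 with an implied Device Number
     * of 0.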
2191 */ 2192 if (pci_is_express(pci_dev) && 2193 !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) && 2194 pcie_has_upstream_port(pci_dev) && 2195 PCI_SLOT(pci_dev->devfn)) { 2196 warn_report("PCI: slot %d is not valid for %s," 2197 " parent device only allows plugging into slot 0.", 2198 PCI_SLOT(pci_dev->devfn), pci_dev->name); 2199 } 2200 2201 if (pci_dev->failover_pair_id) { 2202 if (!pci_bus_is_express(pci_get_bus(pci_dev))) { 2203 error_setg(errp, "failover primary device must be on " 2204 "PCIExpress bus"); 2205 pci_qdev_unrealize(DEVICE(pci_dev)); 2206 return; 2207 } 2208 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE); 2209 if (class_id != PCI_CLASS_NETWORK_ETHERNET) { 2210 error_setg(errp, "failover primary device is not an " 2211 "Ethernet device"); 2212 pci_qdev_unrealize(DEVICE(pci_dev)); 2213 return; 2214 } 2215 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) 2216 || (PCI_FUNC(pci_dev->devfn) != 0)) { 2217 error_setg(errp, "failover: primary device must be in its own " 2218 "PCI slot"); 2219 pci_qdev_unrealize(DEVICE(pci_dev)); 2220 return; 2221 } 2222 qdev->allow_unplug_during_migration = true; 2223 } 2224 2225 /* rom loading */ 2226 is_default_rom = false; 2227 if (pci_dev->romfile == NULL && pc->romfile != NULL) { 2228 pci_dev->romfile = g_strdup(pc->romfile); 2229 is_default_rom = true; 2230 } 2231 2232 pci_add_option_rom(pci_dev, is_default_rom, &local_err); 2233 if (local_err) { 2234 error_propagate(errp, local_err); 2235 pci_qdev_unrealize(DEVICE(pci_dev)); 2236 return; 2237 } 2238 2239 pci_set_power(pci_dev, true); 2240 2241 pci_dev->msi_trigger = pci_msi_trigger; 2242 } 2243 2244 static PCIDevice *pci_new_internal(int devfn, bool multifunction, 2245 const char *name) 2246 { 2247 DeviceState *dev; 2248 2249 dev = qdev_new(name); 2250 qdev_prop_set_int32(dev, "addr", devfn); 2251 qdev_prop_set_bit(dev, "multifunction", multifunction); 2252 return PCI_DEVICE(dev); 2253 } 2254 2255 PCIDevice *pci_new_multifunction(int devfn, const char *name) 2256 { 2257 return pci_new_internal(devfn, true, name); 2258 } 2259 2260 PCIDevice *pci_new(int devfn, const char *name) 2261 { 2262 return pci_new_internal(devfn, false, name); 2263 } 2264 2265 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp) 2266 { 2267 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); 2268 } 2269 2270 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn, 2271 const char *name) 2272 { 2273 PCIDevice *dev = pci_new_multifunction(devfn, name); 2274 pci_realize_and_unref(dev, bus, &error_fatal); 2275 return dev; 2276 } 2277 2278 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name) 2279 { 2280 PCIDevice *dev = pci_new(devfn, name); 2281 pci_realize_and_unref(dev, bus, &error_fatal); 2282 return dev; 2283 } 2284 2285 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size) 2286 { 2287 int offset = PCI_CONFIG_HEADER_SIZE; 2288 int i; 2289 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) { 2290 if (pdev->used[i]) 2291 offset = i + 1; 2292 else if (i - offset + 1 == size) 2293 return offset; 2294 } 2295 return 0; 2296 } 2297 2298 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id, 2299 uint8_t *prev_p) 2300 { 2301 uint8_t next, prev; 2302 2303 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) 2304 return 0; 2305 2306 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2307 prev = next + PCI_CAP_LIST_NEXT) 2308 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) 2309 break; 2310 2311 
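    /*
     * Here "next" is the config-space offset of the matching capability, or
     * 0 if the list was exhausted; "prev" is the offset of the list pointer
     * that links (or would link) to it, which callers such as
     * pci_del_capability() use to unlink the entry.
     */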
if (prev_p) 2312 *prev_p = prev; 2313 return next; 2314 } 2315 2316 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset) 2317 { 2318 uint8_t next, prev, found = 0; 2319 2320 if (!(pdev->used[offset])) { 2321 return 0; 2322 } 2323 2324 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); 2325 2326 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2327 prev = next + PCI_CAP_LIST_NEXT) { 2328 if (next <= offset && next > found) { 2329 found = next; 2330 } 2331 } 2332 return found; 2333 } 2334 2335 /* Patch the PCI vendor and device ids in a PCI rom image if necessary. 2336 This is needed for an option rom which is used for more than one device. */ 2337 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) 2338 { 2339 uint16_t vendor_id; 2340 uint16_t device_id; 2341 uint16_t rom_vendor_id; 2342 uint16_t rom_device_id; 2343 uint16_t rom_magic; 2344 uint16_t pcir_offset; 2345 uint8_t checksum; 2346 2347 /* Words in rom data are little endian (like in PCI configuration), 2348 so they can be read / written with pci_get_word / pci_set_word. */ 2349 2350 /* Only a valid rom will be patched. */ 2351 rom_magic = pci_get_word(ptr); 2352 if (rom_magic != 0xaa55) { 2353 PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic); 2354 return; 2355 } 2356 pcir_offset = pci_get_word(ptr + 0x18); 2357 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { 2358 PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset); 2359 return; 2360 } 2361 2362 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); 2363 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); 2364 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); 2365 rom_device_id = pci_get_word(ptr + pcir_offset + 6); 2366 2367 PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile, 2368 vendor_id, device_id, rom_vendor_id, rom_device_id); 2369 2370 checksum = ptr[6]; 2371 2372 if (vendor_id != rom_vendor_id) { 2373 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */ 2374 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); 2375 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); 2376 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); 2377 ptr[6] = checksum; 2378 pci_set_word(ptr + pcir_offset + 4, vendor_id); 2379 } 2380 2381 if (device_id != rom_device_id) { 2382 /* Patch device id and checksum (at offset 6 for etherboot roms). */ 2383 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); 2384 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); 2385 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); 2386 ptr[6] = checksum; 2387 pci_set_word(ptr + pcir_offset + 6, device_id); 2388 } 2389 } 2390 2391 /* Add an option rom for the device */ 2392 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, 2393 Error **errp) 2394 { 2395 int64_t size = 0; 2396 g_autofree char *path = NULL; 2397 char name[32]; 2398 const VMStateDescription *vmsd; 2399 2400 /* 2401 * In case of incoming migration ROM will come with migration stream, no 2402 * reason to load the file. Neither we want to fail if local ROM file 2403 * mismatches with specified romsize. 2404 */ 2405 bool load_file = !runstate_check(RUN_STATE_INMIGRATE); 2406 2407 if (!pdev->romfile || !strlen(pdev->romfile)) { 2408 return; 2409 } 2410 2411 if (!pdev->rom_bar) { 2412 /* 2413 * Load rom via fw_cfg instead of creating a rom bar, 2414 * for 0.11 compatibility. 
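         * (This path is selected by giving the device a "rombar=0" property,
         * e.g. "-device e1000,rombar=0".)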
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

        /*
         * Hot-plugged devices can't use the option ROM
         * if the rom bar is disabled.
         */
        if (DEVICE(pdev)->hotplugged) {
            error_setg(errp, "Hot-plugged device without ROM bar"
                       " can't have an option ROM");
            return;
        }

        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return;
    }

    if (load_file || pdev->romsize == UINT32_MAX) {
        path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
        if (path == NULL) {
            path = g_strdup(pdev->romfile);
        }

        size = get_image_size(path);
        if (size < 0) {
            error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
            return;
        } else if (size == 0) {
            error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
            return;
        } else if (size > 2 * GiB) {
            error_setg(errp,
                       "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                       pdev->romfile);
            return;
        }
        if (pdev->romsize != UINT32_MAX) {
            if (size > pdev->romsize) {
                error_setg(errp, "romfile \"%s\" (%u bytes) "
                           "is too large for ROM size %u",
                           pdev->romfile, (uint32_t)size, pdev->romsize);
                return;
            }
        } else {
            pdev->romsize = pow2ceil(size);
        }
    }

    vmsd = qdev_get_vmsd(DEVICE(pdev));
    snprintf(name, sizeof(name), "%s.rom",
             vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));

    pdev->has_rom = true;
    memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
                           &error_fatal);

    if (load_file) {
        void *ptr = memory_region_get_ram_ptr(&pdev->rom);

        if (load_image_size(path, ptr, size) < 0) {
            error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
            return;
        }

        if (is_default_rom) {
            /* Only the default rom images will be patched (if needed). */
            pci_patch_ids(pdev, ptr, size);
        }
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
}

static void pci_del_option_rom(PCIDevice *pdev)
{
    if (!pdev->has_rom) {
        return;
    }

    vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
    pdev->has_rom = false;
}

/*
 * On success, pci_add_capability() returns a positive value that is the
 * offset of the PCI capability.
 * On failure, it sets an error and returns a negative error code.
 */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size,
                       Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        /* out of PCI config space is programming error */
        assert(offset);
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these.
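         * For example, if an existing capability occupies offsets
         * 0x40..0x4f, a request to add another capability at offset 0x48
         * is rejected below with -EINVAL.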
*/ 2523 for (i = offset; i < offset + size; i++) { 2524 overlapping_cap = pci_find_capability_at_offset(pdev, i); 2525 if (overlapping_cap) { 2526 error_setg(errp, "%s:%02x:%02x.%x " 2527 "Attempt to add PCI capability %x at offset " 2528 "%x overlaps existing capability %x at offset %x", 2529 pci_root_bus_path(pdev), pci_dev_bus_num(pdev), 2530 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2531 cap_id, offset, overlapping_cap, i); 2532 return -EINVAL; 2533 } 2534 } 2535 } 2536 2537 config = pdev->config + offset; 2538 config[PCI_CAP_LIST_ID] = cap_id; 2539 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; 2540 pdev->config[PCI_CAPABILITY_LIST] = offset; 2541 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; 2542 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); 2543 /* Make capability read-only by default */ 2544 memset(pdev->wmask + offset, 0, size); 2545 /* Check capability by default */ 2546 memset(pdev->cmask + offset, 0xFF, size); 2547 return offset; 2548 } 2549 2550 /* Unlink capability from the pci config space. */ 2551 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) 2552 { 2553 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev); 2554 if (!offset) 2555 return; 2556 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; 2557 /* Make capability writable again */ 2558 memset(pdev->wmask + offset, 0xff, size); 2559 memset(pdev->w1cmask + offset, 0, size); 2560 /* Clear cmask as device-specific registers can't be checked */ 2561 memset(pdev->cmask + offset, 0, size); 2562 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); 2563 2564 if (!pdev->config[PCI_CAPABILITY_LIST]) 2565 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; 2566 } 2567 2568 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id) 2569 { 2570 return pci_find_capability_list(pdev, cap_id, NULL); 2571 } 2572 2573 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) 2574 { 2575 PCIDevice *d = (PCIDevice *)dev; 2576 const char *name = NULL; 2577 const pci_class_desc *desc = pci_class_descriptions; 2578 int class = pci_get_word(d->config + PCI_CLASS_DEVICE); 2579 2580 while (desc->desc && 2581 (class & ~desc->fw_ign_bits) != 2582 (desc->class & ~desc->fw_ign_bits)) { 2583 desc++; 2584 } 2585 2586 if (desc->desc) { 2587 name = desc->fw_name; 2588 } 2589 2590 if (name) { 2591 pstrcpy(buf, len, name); 2592 } else { 2593 snprintf(buf, len, "pci%04x,%04x", 2594 pci_get_word(d->config + PCI_VENDOR_ID), 2595 pci_get_word(d->config + PCI_DEVICE_ID)); 2596 } 2597 2598 return buf; 2599 } 2600 2601 static char *pcibus_get_fw_dev_path(DeviceState *dev) 2602 { 2603 PCIDevice *d = (PCIDevice *)dev; 2604 char name[33]; 2605 int has_func = !!PCI_FUNC(d->devfn); 2606 2607 return g_strdup_printf("%s@%x%s%.*x", 2608 pci_dev_fw_name(dev, name, sizeof(name)), 2609 PCI_SLOT(d->devfn), 2610 has_func ? "," : "", 2611 has_func, 2612 PCI_FUNC(d->devfn)); 2613 } 2614 2615 static char *pcibus_get_dev_path(DeviceState *dev) 2616 { 2617 PCIDevice *d = container_of(dev, PCIDevice, qdev); 2618 PCIDevice *t; 2619 int slot_depth; 2620 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. 2621 * 00 is added here to make this format compatible with 2622 * domain:Bus:Slot.Func for systems without nested PCI bridges. 2623 * Slot.Function list specifies the slot and function numbers for all 2624 * devices on the path from root to the specific device. 
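     * For example, assuming a root bus path of "0000:00", a function 02:01.0
     * sitting behind a bridge at 00:1e.0 is rendered as "0000:00:1e.0:01.0".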
*/ 2625 const char *root_bus_path; 2626 int root_bus_len; 2627 char slot[] = ":SS.F"; 2628 int slot_len = sizeof slot - 1 /* For '\0' */; 2629 int path_len; 2630 char *path, *p; 2631 int s; 2632 2633 root_bus_path = pci_root_bus_path(d); 2634 root_bus_len = strlen(root_bus_path); 2635 2636 /* Calculate # of slots on path between device and root. */; 2637 slot_depth = 0; 2638 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2639 ++slot_depth; 2640 } 2641 2642 path_len = root_bus_len + slot_len * slot_depth; 2643 2644 /* Allocate memory, fill in the terminating null byte. */ 2645 path = g_malloc(path_len + 1 /* For '\0' */); 2646 path[path_len] = '\0'; 2647 2648 memcpy(path, root_bus_path, root_bus_len); 2649 2650 /* Fill in slot numbers. We walk up from device to root, so need to print 2651 * them in the reverse order, last to first. */ 2652 p = path + path_len; 2653 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2654 p -= slot_len; 2655 s = snprintf(slot, sizeof slot, ":%02x.%x", 2656 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); 2657 assert(s == slot_len); 2658 memcpy(p, slot, slot_len); 2659 } 2660 2661 return path; 2662 } 2663 2664 static int pci_qdev_find_recursive(PCIBus *bus, 2665 const char *id, PCIDevice **pdev) 2666 { 2667 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); 2668 if (!qdev) { 2669 return -ENODEV; 2670 } 2671 2672 /* roughly check if given qdev is pci device */ 2673 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) { 2674 *pdev = PCI_DEVICE(qdev); 2675 return 0; 2676 } 2677 return -EINVAL; 2678 } 2679 2680 int pci_qdev_find_device(const char *id, PCIDevice **pdev) 2681 { 2682 PCIHostState *host_bridge; 2683 int rc = -ENODEV; 2684 2685 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { 2686 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); 2687 if (!tmp) { 2688 rc = 0; 2689 break; 2690 } 2691 if (tmp != -ENODEV) { 2692 rc = tmp; 2693 } 2694 } 2695 2696 return rc; 2697 } 2698 2699 MemoryRegion *pci_address_space(PCIDevice *dev) 2700 { 2701 return pci_get_bus(dev)->address_space_mem; 2702 } 2703 2704 MemoryRegion *pci_address_space_io(PCIDevice *dev) 2705 { 2706 return pci_get_bus(dev)->address_space_io; 2707 } 2708 2709 static void pci_device_class_init(ObjectClass *klass, void *data) 2710 { 2711 DeviceClass *k = DEVICE_CLASS(klass); 2712 2713 k->realize = pci_qdev_realize; 2714 k->unrealize = pci_qdev_unrealize; 2715 k->bus_type = TYPE_PCI_BUS; 2716 device_class_set_props(k, pci_props); 2717 object_class_property_set_description( 2718 klass, "x-max-bounce-buffer-size", 2719 "Maximum buffer size allocated for bounce buffers used for mapped " 2720 "access to indirect DMA memory"); 2721 } 2722 2723 static void pci_device_class_base_init(ObjectClass *klass, void *data) 2724 { 2725 if (!object_class_is_abstract(klass)) { 2726 ObjectClass *conventional = 2727 object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE); 2728 ObjectClass *pcie = 2729 object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE); 2730 ObjectClass *cxl = 2731 object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE); 2732 assert(conventional || pcie || cxl); 2733 } 2734 } 2735 2736 /* 2737 * Get IOMMU root bus, aliased bus and devfn of a PCI device 2738 * 2739 * IOMMU root bus is needed by all call sites to call into iommu_ops. 2740 * For call sites which don't need aliased BDF, passing NULL to 2741 * aliased_[bus|devfn] is allowed. 2742 * 2743 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device. 
 *
 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
 *
 * @aliased_devfn: return aliased devfn of the PCI device, optional.
 */
static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
                                           PCIBus **piommu_bus,
                                           PCIBus **aliased_bus,
                                           int *aliased_devfn)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    int devfn = dev->devfn;

    while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen
         * from the IOMMU, due to topology limitations.  The IOMMU relies on
         * a requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts.  Instead, the
         * PCIe-to-PCI bridge creates and accepts transactions on behalf of
         * downstream devices.  When doing so, all downstream devices are
         * masked (aliased) behind a single requester ID.  The requester ID
         * used depends on the type of the bridge device.  Proper
         * PCIe-to-PCI bridges, with a PCIe capability indicating such,
         * follow the guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge
         * specification, where the bridge uses the secondary bus as the
         * bridge portion of the requester ID and a devfn of 00.0.  For
         * other bridges, typically those found on the root complex such as
         * the dmi-to-pci-bridge, we follow the convention of typical
         * bare-metal hardware, which uses the requester ID of the bridge
         * itself.  There are device-specific exceptions to these rules, but
         * these are the defaults that the Linux kernel uses when determining
         * DMA aliases itself, and they are believed to hold for the bare
         * metal equivalents of the devices emulated in QEMU.
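         *
         * For example (hypothetical topology): a conventional device at
         * 02:03.0 behind a proper PCIe-to-PCI bridge whose secondary bus is
         * 2 is seen by the IOMMU as requester 02:00.0, while the same device
         * behind a dmi-to-pci-bridge at 00:1e.0 is aliased to the bridge's
         * own requester ID, 00:1e.0.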
2781 */ 2782 if (!pci_bus_is_express(iommu_bus)) { 2783 PCIDevice *parent = iommu_bus->parent_dev; 2784 2785 if (pci_is_express(parent) && 2786 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { 2787 devfn = PCI_DEVFN(0, 0); 2788 bus = iommu_bus; 2789 } else { 2790 devfn = parent->devfn; 2791 bus = parent_bus; 2792 } 2793 } 2794 2795 iommu_bus = parent_bus; 2796 } 2797 2798 assert(0 <= devfn && devfn < PCI_DEVFN_MAX); 2799 assert(iommu_bus); 2800 2801 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) { 2802 iommu_bus = NULL; 2803 } 2804 2805 *piommu_bus = iommu_bus; 2806 2807 if (aliased_bus) { 2808 *aliased_bus = bus; 2809 } 2810 2811 if (aliased_devfn) { 2812 *aliased_devfn = devfn; 2813 } 2814 } 2815 2816 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) 2817 { 2818 PCIBus *bus; 2819 PCIBus *iommu_bus; 2820 int devfn; 2821 2822 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 2823 if (iommu_bus) { 2824 return iommu_bus->iommu_ops->get_address_space(bus, 2825 iommu_bus->iommu_opaque, devfn); 2826 } 2827 return &address_space_memory; 2828 } 2829 2830 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod, 2831 Error **errp) 2832 { 2833 PCIBus *iommu_bus, *aliased_bus; 2834 int aliased_devfn; 2835 2836 /* set_iommu_device requires device's direct BDF instead of aliased BDF */ 2837 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, 2838 &aliased_bus, &aliased_devfn); 2839 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) { 2840 hiod->aliased_bus = aliased_bus; 2841 hiod->aliased_devfn = aliased_devfn; 2842 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev), 2843 iommu_bus->iommu_opaque, 2844 dev->devfn, hiod, errp); 2845 } 2846 return true; 2847 } 2848 2849 void pci_device_unset_iommu_device(PCIDevice *dev) 2850 { 2851 PCIBus *iommu_bus; 2852 2853 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); 2854 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) { 2855 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev), 2856 iommu_bus->iommu_opaque, 2857 dev->devfn); 2858 } 2859 } 2860 2861 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) 2862 { 2863 /* 2864 * If called, pci_setup_iommu() should provide a minimum set of 2865 * useful callbacks for the bus. 
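     *
     * A minimal sketch of a caller, with illustrative names only:
     *
     *     static AddressSpace *my_get_as(PCIBus *bus, void *opaque, int devfn)
     *     {
     *         return &address_space_memory;
     *     }
     *
     *     static const PCIIOMMUOps my_iommu_ops = {
     *         .get_address_space = my_get_as,
     *     };
     *
     *     pci_setup_iommu(bus, &my_iommu_ops, opaque);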
2866 */ 2867 assert(ops); 2868 assert(ops->get_address_space); 2869 2870 bus->iommu_ops = ops; 2871 bus->iommu_opaque = opaque; 2872 } 2873 2874 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) 2875 { 2876 Range *range = opaque; 2877 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND); 2878 int i; 2879 2880 if (!(cmd & PCI_COMMAND_MEMORY)) { 2881 return; 2882 } 2883 2884 if (IS_PCI_BRIDGE(dev)) { 2885 pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); 2886 pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); 2887 2888 base = MAX(base, 0x1ULL << 32); 2889 2890 if (limit >= base) { 2891 Range pref_range; 2892 range_set_bounds(&pref_range, base, limit); 2893 range_extend(range, &pref_range); 2894 } 2895 } 2896 for (i = 0; i < PCI_NUM_REGIONS; ++i) { 2897 PCIIORegion *r = &dev->io_regions[i]; 2898 pcibus_t lob, upb; 2899 Range region_range; 2900 2901 if (!r->size || 2902 (r->type & PCI_BASE_ADDRESS_SPACE_IO) || 2903 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) { 2904 continue; 2905 } 2906 2907 lob = pci_bar_address(dev, i, r->type, r->size); 2908 upb = lob + r->size - 1; 2909 if (lob == PCI_BAR_UNMAPPED) { 2910 continue; 2911 } 2912 2913 lob = MAX(lob, 0x1ULL << 32); 2914 2915 if (upb >= lob) { 2916 range_set_bounds(®ion_range, lob, upb); 2917 range_extend(range, ®ion_range); 2918 } 2919 } 2920 } 2921 2922 void pci_bus_get_w64_range(PCIBus *bus, Range *range) 2923 { 2924 range_make_empty(range); 2925 pci_for_each_device_under_bus(bus, pci_dev_get_w64, range); 2926 } 2927 2928 static bool pcie_has_upstream_port(PCIDevice *dev) 2929 { 2930 PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev)); 2931 2932 /* Device associated with an upstream port. 2933 * As there are several types of these, it's easier to check the 2934 * parent device: upstream ports are always connected to 2935 * root or downstream ports. 2936 */ 2937 return parent_dev && 2938 pci_is_express(parent_dev) && 2939 parent_dev->exp.exp_cap && 2940 (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT || 2941 pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM); 2942 } 2943 2944 PCIDevice *pci_get_function_0(PCIDevice *pci_dev) 2945 { 2946 PCIBus *bus = pci_get_bus(pci_dev); 2947 2948 if(pcie_has_upstream_port(pci_dev)) { 2949 /* With an upstream PCIe port, we only support 1 device at slot 0 */ 2950 return bus->devices[0]; 2951 } else { 2952 /* Other bus types might support multiple devices at slots 0-31 */ 2953 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)]; 2954 } 2955 } 2956 2957 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) 2958 { 2959 MSIMessage msg; 2960 if (msix_enabled(dev)) { 2961 msg = msix_get_message(dev, vector); 2962 } else if (msi_enabled(dev)) { 2963 msg = msi_get_message(dev, vector); 2964 } else { 2965 /* Should never happen */ 2966 error_report("%s: unknown interrupt type", __func__); 2967 abort(); 2968 } 2969 return msg; 2970 } 2971 2972 void pci_set_power(PCIDevice *d, bool state) 2973 { 2974 /* 2975 * Don't change the enabled state of VFs when powering on/off the device. 2976 * 2977 * When powering on, VFs must not be enabled immediately but they must 2978 * wait until the guest configures SR-IOV. 2979 * When powering off, their corresponding PFs will be reset and disable 2980 * VFs. 
2981 */ 2982 if (!pci_is_vf(d)) { 2983 pci_set_enabled(d, state); 2984 } 2985 } 2986 2987 void pci_set_enabled(PCIDevice *d, bool state) 2988 { 2989 if (d->enabled == state) { 2990 return; 2991 } 2992 2993 d->enabled = state; 2994 pci_update_mappings(d); 2995 memory_region_set_enabled(&d->bus_master_enable_region, 2996 (pci_get_word(d->config + PCI_COMMAND) 2997 & PCI_COMMAND_MASTER) && d->enabled); 2998 if (qdev_is_realized(&d->qdev)) { 2999 pci_device_reset(d); 3000 } 3001 } 3002 3003 static const TypeInfo pci_device_type_info = { 3004 .name = TYPE_PCI_DEVICE, 3005 .parent = TYPE_DEVICE, 3006 .instance_size = sizeof(PCIDevice), 3007 .abstract = true, 3008 .class_size = sizeof(PCIDeviceClass), 3009 .class_init = pci_device_class_init, 3010 .class_base_init = pci_device_class_base_init, 3011 }; 3012 3013 static void pci_register_types(void) 3014 { 3015 type_register_static(&pci_bus_info); 3016 type_register_static(&pcie_bus_info); 3017 type_register_static(&cxl_bus_info); 3018 type_register_static(&conventional_pci_interface_info); 3019 type_register_static(&cxl_interface_info); 3020 type_register_static(&pcie_interface_info); 3021 type_register_static(&pci_device_type_info); 3022 } 3023 3024 type_init(pci_register_types) 3025