/*
 * QEMU PCI bus manager
 *
 * Copyright (c) 2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "net/net.h"
#include "system/numa.h"
#include "system/runstate.h"
#include "system/system.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "trace.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/hotplug.h"
#include "hw/boards.h"
#include "hw/nvram/fw_cfg.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "pci-internal.h"

#include "hw/xen/xen.h"
#include "hw/i386/kvm/xen_evtchn.h"

/* True in this translation unit; presumably cleared by stub builds -- confirm. */
bool pci_available = true;

/* Forward declarations for bus-class callbacks defined later in this file. */
static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset_hold(Object *obj, ResetType type);
static bool pcie_has_upstream_port(PCIDevice *dev);

/*
 * Getter for the read-only "busnr" property: visits the number of the
 * bus the device currently sits on.
 */
static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));

    visit_type_uint8(v, name, &busnr, errp);
}

/* Property type backing "busnr"; get-only, so the property is immutable. */
static const PropertyInfo prop_pci_busnr = {
    .type = "busnr",
    .get = prop_pci_busnr_get,
};

/* qdev properties attached to every PCI device (continued below). */
static const Property pci_props[] = {
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
    DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                    QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
    DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
                       max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
    DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf),
    DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
                    QEMU_PCIE_EXT_TAG_BITNR, true),
    { .name = "busnr", .info = &prop_pci_busnr },
};

/*
 * Migration state for a PCI bus: the per-line INTx assertion counters.
 * nirq must match exactly between source and destination.
 */
static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * GCompareDataFunc ordering two pointer-encoded integers by raw pointer
 * difference (GLib-style; relies on values being encoded into pointers).
 */
static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
{
    return a - b;
}

/* Lazily-created global sequence tracking acpi-index values in use. */
static GSequence *pci_acpi_index_list(void)
{
    static GSequence *used_acpi_index_list;

    if (!used_acpi_index_list) {
        used_acpi_index_list = g_sequence_new(NULL);
    }
    return used_acpi_index_list;
}

/*
 * Create the bus-master DMA region for a device: an alias of the root of
 * the device's IOMMU address space, left disabled until the guest sets
 * PCI_COMMAND_MASTER (see the config-space write path).
 */
static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}

/*
 * machine-init-done notifier: set up the bus-master DMA alias for every
 * device present on this bus once machine initialization has finished.
 */
static void pcibus_machine_done(Notifier *notifier, void *data)
{
    PCIBus *bus = container_of(notifier, PCIBus, machine_done);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_init_bus_master(bus->devices[i]);
        }
    }
}

/* BusClass::realize - register the machine-done notifier and vmstate. */
static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);

    bus->machine_done.notify = pcibus_machine_done;
    qemu_add_machine_init_done_notifier(&bus->machine_done);

    vmstate_register_any(NULL, &vmstate_pcibus, bus);
}

/* BusClass::realize for PCIe buses: base realize plus extended-config flag. */
static void pcie_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    Error *local_err = NULL;

    pci_bus_realize(qbus, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * A PCI-E bus can support extended config space if it's the root
     * bus, or if the bus/bridge above it does as well
     */
    if (pci_bus_is_root(bus)) {
        bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
    } else {
        PCIBus *parent_bus = pci_get_bus(bus->parent_dev);

        if (pci_bus_allows_extended_config_space(parent_bus)) {
            bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
        }
    }
}

/* BusClass::unrealize - mirror of pci_bus_realize. */
static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);

    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}

/* Bus number: 0 for a root bus, else the bridge's secondary-bus register. */
static int pcibus_num(PCIBus *bus)
{
    if (pci_bus_is_root(bus)) {
        return 0; /* pci host bridge */
    }
    return bus->parent_dev->config[PCI_SECONDARY_BUS];
}

/* Default numa_node hook: plain PCI buses have no NUMA affinity. */
static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}

/*
 * Publish "etc/extra-pci-roots" in fw_cfg for @bus via its
 * FW_CFG_DATA_GENERATOR interface.  Returns true on success, and also
 * when @bus is NULL (nothing to publish).
 */
bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
                                        PCIBus *bus,
                                        Error **errp)
{
    Object *obj;

    if (!bus) {
        return true;
    }
    obj = OBJECT(bus);

    return fw_cfg_add_file_from_generator(fw_cfg,
                                          obj->parent,
                                          object_get_canonical_path_component(obj),
                                          "etc/extra-pci-roots", errp);
}

/*
 * FW_CFG_DATA_GENERATOR callback: count the expander root buses that are
 * children of this bus and return the count as a little-endian uint64_t,
 * or NULL when there are none (so no fw_cfg file gets created).
 */
static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp)
{
    PCIBus *bus = PCI_BUS(obj);
    GByteArray *byte_array;
    uint64_t extra_hosts = 0;

    if (!bus) {
        return NULL;
    }

    QLIST_FOREACH(bus, &bus->child, sibling) {
        /* look for expander root buses */
        if (pci_bus_is_root(bus)) {
            extra_hosts++;
        }
    }

    if (!extra_hosts) {
        return NULL;
    }
    extra_hosts = cpu_to_le64(extra_hosts);

    byte_array = g_byte_array_new();
    g_byte_array_append(byte_array,
                        (const void *)&extra_hosts, sizeof(extra_hosts));

    return byte_array;
}

/* Wire up the TYPE_PCI_BUS class callbacks defined in this file. */
static void pci_bus_class_init(ObjectClass *klass, const void *data)
{
    BusClass *k = BUS_CLASS(klass);
    PCIBusClass *pbc = PCI_BUS_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass);

    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;

    rc->phases.hold = pcibus_reset_hold;

    pbc->bus_num = pcibus_num;
    pbc->numa_node = pcibus_numa_node;

    fwgc->get_data = pci_bus_fw_cfg_gen_data;
}

static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
        { }
    }
};

/* Marker interfaces used to classify devices as CXL / PCIe / conventional. */
static const TypeInfo cxl_interface_info = {
    .name = INTERFACE_CXL_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name = INTERFACE_PCIE_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo conventional_pci_interface_info = {
    .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent = TYPE_INTERFACE,
};

static void pcie_bus_class_init(ObjectClass *klass, const void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}

static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

/* CXL buses reuse the PCIe realize path. */
static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};

static void pci_update_mappings(PCIDevice *d);
static void pci_irq_handler(void *opaque, int irq_num, int level);
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
static void pci_del_option_rom(PCIDevice *pdev);

/* Defaults written into the subsystem ID registers of every device. */
static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;

/* Global list of registered PCI host bridges. */
PCIHostStateList pci_host_bridges;

/*
 * Config-space offset of BAR @reg.  The expansion ROM BAR (PCI_ROM_SLOT)
 * lives at a different offset in bridge (header type 1) devices.
 */
int pci_bar(PCIDevice *d, int reg)
{
    uint8_t type;

    /* PCIe virtual functions do not have their own BARs */
    assert(!pci_is_vf(d));

    if (reg != PCI_ROM_SLOT)
        return PCI_BASE_ADDRESS_0 + reg * 4;

    type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    return type == PCI_HEADER_TYPE_BRIDGE ?
        PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
}

/* Current level (0 or 1) of INTx pin @irq_num as latched in irq_state. */
static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

/* Latch the level of INTx pin @irq_num into the per-device bitmap. */
static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}

/*
 * Apply @change to the shared-line counter and (de)assert the line toward
 * the interrupt controller; the line is high while any device drives it.
 */
static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}

/*
 * Walk up through P2P bridges, remapping the INTx pin with each bus's
 * map_irq hook, until a bus that owns the interrupt lines (set_irq set)
 * is found; then apply the level change there.
 */
static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        int dev_irq = irq_num;
        bus = pci_get_bus(pci_dev);
        assert(bus->map_irq);
        irq_num = bus->map_irq(pci_dev, irq_num);
        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        if (bus->set_irq)
            break;
        pci_dev = bus->parent_dev;
    }
    pci_bus_change_irq_level(bus, irq_num, change);
}

/* 1 if any device currently asserts line @irq_num on @bus, else 0. */
int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}

/* Update interrupt status bit in config space on interrupt
 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
{
    if (dev->irq_state) {
        dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
    } else {
        dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    }
}

/* Drive all four INTx pins of @dev low. */
void pci_device_deassert_intx(PCIDevice *dev)
{
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_irq_handler(dev, i, 0);
    }
}

/* Deliver an MSI/MSI-X message as a DMA write in the device's AS. */
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}

/*
 * Register and track a PM capability. If wmask is also enabled for the power
 * state field of the pmcsr register, guest writes may change the device PM
 * state. BAR access is only enabled while the device is in the D0 state.
 * Return the capability offset or negative error code.
 */
int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp)
{
    int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp);

    if (cap < 0) {
        return cap;
    }

    d->pm_cap = cap;
    d->cap_present |= QEMU_PCI_CAP_PM;

    return cap;
}

/* Current D-state from PMCSR; 0 (D0) when no PM capability is present. */
static uint8_t pci_pm_state(PCIDevice *d)
{
    uint16_t pmcsr;

    if (!(d->cap_present & QEMU_PCI_CAP_PM)) {
        return 0;
    }

    pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL);

    return pmcsr & PCI_PM_CTRL_STATE_MASK;
}

/*
 * Update the PM capability state based on the new value stored in config
 * space respective to the old, pre-write state provided. If the new value
 * is rejected (unsupported or invalid transition) restore the old value.
 * Return the resulting PM state.
 */
static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old)
{
    uint16_t pmc;
    uint8_t new;

    /* Only act when the write actually touched the PMCSR power-state byte. */
    if (!(d->cap_present & QEMU_PCI_CAP_PM) ||
        !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) {
        return old;
    }

    new = pci_pm_state(d);
    if (new == old) {
        return old;
    }

    pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC);

    /*
     * Transitions to D1 & D2 are only allowed if supported. Devices may
     * only transition to higher D-states or to D0.
     */
    if ((!(pmc & PCI_PM_CAP_D1) && new == 1) ||
        (!(pmc & PCI_PM_CAP_D2) && new == 2) ||
        (old && new && new < old)) {
        /* Rejected: rewrite the old state into PMCSR. */
        pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL,
                                     PCI_PM_CTRL_STATE_MASK);
        pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL,
                                   old);
        trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d),
                                    PCI_SLOT(d->devfn), PCI_FUNC(d->devfn),
                                    old, new);
        return old;
    }

    trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
                            PCI_FUNC(d->devfn), old, new);
    return new;
}

/*
 * Reset every implemented BAR register back to just its type bits
 * (i.e. base address 0).  VFs are skipped: they have no BARs of their own.
 */
static void pci_reset_regions(PCIDevice *dev)
{
    int r;
    if (pci_is_vf(dev)) {
        return;
    }

    for (r = 0; r < PCI_NUM_REGIONS; ++r) {
        PCIIORegion *region = &dev->io_regions[r];
        if (!region->size) {
            continue;
        }

        if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(dev->config + pci_bar(dev, r), region->type);
        } else {
            pci_set_long(dev->config + pci_bar(dev, r), region->type);
        }
    }
}

/*
 * Device-level reset: deassert INTx, clear guest-writable config bits,
 * return PM state to D0, reset BARs/mappings and MSI/MSI-X/SR-IOV state.
 */
static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                              pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                              pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    /* Default PM state is D0 */
    if (dev->cap_present & QEMU_PCI_CAP_PM) {
        pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL,
                                     PCI_PM_CTRL_STATE_MASK);
    }
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
    pcie_sriov_pf_reset(dev);
}

/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}

/*
 * Trigger pci bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have been reset device_cold_reset-ed already.
 */
static void pcibus_reset_hold(Object *obj, ResetType type)
{
    PCIBus *bus = PCI_BUS(obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }

    /* After resetting every device, no INTx line may remain asserted. */
    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}

/* Add a host bridge to the global pci_host_bridges list. */
static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}

/* Walk up through P2P bridges to the root bus of @d's hierarchy. */
PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = pci_get_bus(d);

    while (!pci_bus_is_root(bus)) {
        d = bus->parent_dev;
        assert(d != NULL);

        bus = pci_get_bus(d);
    }

    return bus;
}

/*
 * Canonical path of the root bus above @dev: delegates to the host
 * bridge's root_bus_path hook when provided, else the qbus name.
 */
const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);

    assert(host_bridge->bus == rootbus);

    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }

    return rootbus->qbus.name;
}

/* Whether the host bridge above @bus is configured to bypass the IOMMU. */
bool pci_bus_bypass_iommu(PCIBus *bus)
{
    PCIBus *rootbus = bus;
    PCIHostState *host_bridge;

    if (!pci_bus_is_root(bus)) {
        rootbus = pci_device_root_bus(bus->parent_dev);
    }

    host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);

    assert(host_bridge->bus == rootbus);

    return host_bridge->bypass_iommu;
}

/* Shared root-bus setup used by both the in-place and allocating paths. */
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *mem, MemoryRegion *io,
                                       uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = mem;
    bus->address_space_io = io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}

static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}

bool pci_bus_is_express(const PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}

/* Initialize a caller-allocated root bus in place. */
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *mem, MemoryRegion *io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}

/* Allocate and initialize a new root bus. */
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *mem, MemoryRegion *io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
    return bus;
}

void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}

/*
 * Attach the interrupt-line callbacks and (re)allocate the per-line
 * assertion counters.  Any previous counters are released first.
 */
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    g_free(bus->irq_count);
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}

void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}

void pci_bus_irqs_cleanup(PCIBus *bus)
{
    bus->set_irq = NULL;
    bus->map_irq = NULL;
    bus->irq_opaque = NULL;
    bus->nirq = 0;
    g_free(bus->irq_count);
    bus->irq_count = NULL;
}

/* Convenience wrapper: create a root bus and wire up its IRQ hooks. */
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                              pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                              void *irq_opaque,
                              MemoryRegion *mem, MemoryRegion *io,
                              uint8_t devfn_min, int nirq,
                              const char *typename)
{
    PCIBus *bus;

    bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
    pci_bus_map_irqs(bus, map_irq);
    return bus;
}

void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}

int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}

/* Returns the min and max bus numbers of a PCI bus hierarchy */
void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
{
    int i;
    *min_bus = *max_bus = pci_bus_num(bus);

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        PCIDevice *dev = bus->devices[i];

        if (dev && IS_PCI_BRIDGE(dev)) {
            *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
            *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
        }
    }
}

int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}

/*
 * Incoming-migration handler for the config-space buffer.  Any byte that
 * differs from the local device in a cmask-checked, non-guest-writable
 * position fails the load with -EINVAL; otherwise the buffer is adopted
 * and derived state (BAR mappings, bridge windows, bus mastering) rebuilt.
 */
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    /* Re-enable bus mastering according to the restored COMMAND register. */
    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}

/* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field, JSONWriter *vmdesc)
{
    const uint8_t **v = pv;
    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);

    return 0;
}

static const VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get = get_pci_config_device,
    .put = put_pci_config_device,
};

/* Load the per-pin INTx levels; every level must be 0 or 1. */
static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);
    uint32_t irq_state[PCI_NUM_PINS];
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        irq_state[i] = qemu_get_be32(f);
        if (irq_state[i] != 0x1 && irq_state[i] != 0) {
            fprintf(stderr, "irq state %d: must be 0 or 1.\n",
                    irq_state[i]);
            return -EINVAL;
        }
    }

    /* Only latch the values after the whole array validated cleanly. */
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_set_irq_state(s, i, irq_state[i]);
    }

    return 0;
}

static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    int i;
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        qemu_put_be32(f, pci_irq_state(s, i));
    }

    return 0;
}

static const VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};

/* Predicates selecting which config-space field variant migrates. */
static bool migrate_is_pcie(void *opaque, int version_id)
{
    return pci_is_express((PCIDevice *)opaque);
}

static bool migrate_is_not_pcie(void *opaque, int version_id)
{
    return !pci_is_express((PCIDevice *)opaque);
}

static int pci_post_load(void *opaque, int version_id)
{
    pcie_sriov_pf_post_load(opaque);
    return 0;
}

/*
 * Migration description shared by every PCI device: config space
 * (256 bytes for conventional PCI, 4 KiB for PCIe) plus INTx state.
 */
const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .post_load = pci_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};


void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}

int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;
    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
    return ret;
}

/* Fill in the QEMU/Red Hat default subsystem vendor/device IDs. */
static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
{
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_default_sub_vendor_id);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 pci_default_sub_device_id);
}

/*
 * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL
 *       [[<domain>:]<bus>:]<slot>.<func>, return -1 on error
 */
static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
                             unsigned int *slotp, unsigned int *funcp)
{
    const char *p;
    char *e;
    unsigned long val;
    unsigned long dom = 0, bus = 0;
    unsigned int slot = 0;
    unsigned int func = 0;

    p = addr;
    val = strtoul(p, &e, 16);
    if (e == p)
        return -1;
    if (*e == ':') {
        bus = val;
        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;
        if (*e == ':') {
            /* Three components: what we read first was really the domain. */
            dom = bus;
            bus = val;
            p = e + 1;
            val = strtoul(p, &e, 16);
            if (e == p)
                return -1;
        }
    }

    slot = val;

    if (funcp != NULL) {
        if (*e != '.')
            return -1;

        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;

        func = val;
    }

    /* if funcp == NULL func is 0 */
    if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
        return -1;

    /* Reject trailing garbage. */
    if (*e)
        return -1;

    *domp = dom;
    *busp = bus;
    *slotp = slot;
    if (funcp != NULL)
        *funcp = func;
    return 0;
}

/*
 * Bytes that must match the device on migration (see
 * get_pci_config_device): IDs, class, header type and capability pointer.
 */
static void pci_init_cmask(PCIDevice *dev)
{
    pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
    pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
    dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    dev->cmask[PCI_REVISION_ID] = 0xff;
    dev->cmask[PCI_CLASS_PROG] = 0xff;
    pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
    dev->cmask[PCI_HEADER_TYPE] = 0xff;
    dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
}

static
void pci_init_wmask(PCIDevice *dev) 1017 { 1018 int config_size = pci_config_size(dev); 1019 1020 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff; 1021 dev->wmask[PCI_INTERRUPT_LINE] = 0xff; 1022 pci_set_word(dev->wmask + PCI_COMMAND, 1023 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | 1024 PCI_COMMAND_INTX_DISABLE); 1025 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR); 1026 1027 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff, 1028 config_size - PCI_CONFIG_HEADER_SIZE); 1029 } 1030 1031 static void pci_init_w1cmask(PCIDevice *dev) 1032 { 1033 /* 1034 * Note: It's okay to set w1cmask even for readonly bits as 1035 * long as their value is hardwired to 0. 1036 */ 1037 pci_set_word(dev->w1cmask + PCI_STATUS, 1038 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | 1039 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | 1040 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY); 1041 } 1042 1043 static void pci_init_mask_bridge(PCIDevice *d) 1044 { 1045 /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and 1046 PCI_SEC_LATENCY_TIMER */ 1047 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); 1048 1049 /* base and limit */ 1050 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; 1051 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; 1052 pci_set_word(d->wmask + PCI_MEMORY_BASE, 1053 PCI_MEMORY_RANGE_MASK & 0xffff); 1054 pci_set_word(d->wmask + PCI_MEMORY_LIMIT, 1055 PCI_MEMORY_RANGE_MASK & 0xffff); 1056 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, 1057 PCI_PREF_RANGE_MASK & 0xffff); 1058 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, 1059 PCI_PREF_RANGE_MASK & 0xffff); 1060 1061 /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */ 1062 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); 1063 1064 /* Supported memory and i/o types */ 1065 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; 1066 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; 1067 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, 1068 
PCI_PREF_RANGE_TYPE_64); 1069 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, 1070 PCI_PREF_RANGE_TYPE_64); 1071 1072 /* 1073 * TODO: Bridges default to 10-bit VGA decoding but we currently only 1074 * implement 16-bit decoding (no alias support). 1075 */ 1076 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, 1077 PCI_BRIDGE_CTL_PARITY | 1078 PCI_BRIDGE_CTL_SERR | 1079 PCI_BRIDGE_CTL_ISA | 1080 PCI_BRIDGE_CTL_VGA | 1081 PCI_BRIDGE_CTL_VGA_16BIT | 1082 PCI_BRIDGE_CTL_MASTER_ABORT | 1083 PCI_BRIDGE_CTL_BUS_RESET | 1084 PCI_BRIDGE_CTL_FAST_BACK | 1085 PCI_BRIDGE_CTL_DISCARD | 1086 PCI_BRIDGE_CTL_SEC_DISCARD | 1087 PCI_BRIDGE_CTL_DISCARD_SERR); 1088 /* Below does not do anything as we never set this bit, put here for 1089 * completeness. */ 1090 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, 1091 PCI_BRIDGE_CTL_DISCARD_STATUS); 1092 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; 1093 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; 1094 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, 1095 PCI_PREF_RANGE_TYPE_MASK); 1096 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, 1097 PCI_PREF_RANGE_TYPE_MASK); 1098 } 1099 1100 static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) 1101 { 1102 uint8_t slot = PCI_SLOT(dev->devfn); 1103 uint8_t func; 1104 1105 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1106 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; 1107 } 1108 1109 /* SR/IOV is not handled here. */ 1110 if (pci_is_vf(dev)) { 1111 return; 1112 } 1113 1114 /* 1115 * multifunction bit is interpreted in two ways as follows. 1116 * - all functions must set the bit to 1. 1117 * Example: Intel X53 1118 * - function 0 must set the bit, but the rest function (> 0) 1119 * is allowed to leave the bit to 0. 1120 * Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10, 1121 * 1122 * So OS (at least Linux) checks the bit of only function 0, 1123 * and doesn't see the bit of function > 0. 
1124 * 1125 * The below check allows both interpretation. 1126 */ 1127 if (PCI_FUNC(dev->devfn)) { 1128 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)]; 1129 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { 1130 /* function 0 should set multifunction bit */ 1131 error_setg(errp, "PCI: single function device can't be populated " 1132 "in function %x.%x", slot, PCI_FUNC(dev->devfn)); 1133 return; 1134 } 1135 return; 1136 } 1137 1138 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1139 return; 1140 } 1141 /* function 0 indicates single function, so function > 0 must be NULL */ 1142 for (func = 1; func < PCI_FUNC_MAX; ++func) { 1143 PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)]; 1144 if (device && !pci_is_vf(device)) { 1145 error_setg(errp, "PCI: %x.0 indicates single function, " 1146 "but %x.%x is already populated.", 1147 slot, slot, func); 1148 return; 1149 } 1150 } 1151 } 1152 1153 static void pci_config_alloc(PCIDevice *pci_dev) 1154 { 1155 int config_size = pci_config_size(pci_dev); 1156 1157 pci_dev->config = g_malloc0(config_size); 1158 pci_dev->cmask = g_malloc0(config_size); 1159 pci_dev->wmask = g_malloc0(config_size); 1160 pci_dev->w1cmask = g_malloc0(config_size); 1161 pci_dev->used = g_malloc0(config_size); 1162 } 1163 1164 static void pci_config_free(PCIDevice *pci_dev) 1165 { 1166 g_free(pci_dev->config); 1167 g_free(pci_dev->cmask); 1168 g_free(pci_dev->wmask); 1169 g_free(pci_dev->w1cmask); 1170 g_free(pci_dev->used); 1171 } 1172 1173 static void do_pci_unregister_device(PCIDevice *pci_dev) 1174 { 1175 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL; 1176 pci_config_free(pci_dev); 1177 1178 if (xen_mode == XEN_EMULATE) { 1179 xen_evtchn_remove_pci_device(pci_dev); 1180 } 1181 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) { 1182 memory_region_del_subregion(&pci_dev->bus_master_container_region, 1183 &pci_dev->bus_master_enable_region); 1184 } 1185 address_space_destroy(&pci_dev->bus_master_as); 1186 } 

/* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
{
    uint8_t bus_n;
    uint16_t result;

    switch (cache->type) {
    case PCI_REQ_ID_BDF:
        result = pci_get_bdf(cache->dev);
        break;
    case PCI_REQ_ID_SECONDARY_BUS:
        /* cached dev sits below the bridge, so its bus number is the
         * bridge's secondary bus; requester is that bus with devfn 0 */
        bus_n = pci_dev_bus_num(cache->dev);
        result = PCI_BUILD_BDF(bus_n, 0);
        break;
    default:
        error_report("Invalid PCI requester ID cache type: %d",
                     cache->type);
        exit(1);
        break;
    }

    return result;
}

/* Parse bridges up to the root complex and return requester ID
 * cache for specific device.  For full PCIe topology, the cache
 * result would be exactly the same as getting BDF of the device.
 * However, several tricks are required when system mixed up with
 * legacy PCI devices and PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) not requester ID since
 * bus number might change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using secondary bus
                 * number of parent bridge with zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3). */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream.  When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices.  If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}

/* Return the effective requester ID (BDF) for DMA issued by @dev. */
uint16_t pci_requester_id(PCIDevice *dev)
{
    return pci_req_id_cache_extract(&dev->requester_id_cache);
}

/* True when no device occupies @devfn on @bus. */
static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
{
    return !(bus->devices[devfn]);
}

/* True when the slot containing @devfn is reserved on @bus. */
static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
{
    return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
}

uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
{
    return bus->slot_reserved_mask;
}

/* Note: ORs @mask into the existing reservation mask. */
void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask |= mask;
}

void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask &= ~mask;
}

/* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                   bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        /* auto-assign: scan function-0 slots only */
        for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
            devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    }

    /*
     * Populating function 0 triggers a scan from the guest that
     * exposes other non-zero functions. Hence we need to ensure that
     * function 0 wasn't added yet.
     */
    if (dev->hotplugged && !pci_is_vf(pci_dev) &&
        pci_get_function_0(pci_dev)) {
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);

        return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);
    pci_dev->bus_master_as.max_bounce_buffer_size =
        pci_dev->max_bounce_buffer_size;

    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}

/* Unmap every registered BAR region plus any VGA ranges. */
static void pci_unregister_io_regions(PCIDevice *pci_dev)
{
    PCIIORegion *r;
    int i;

    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &pci_dev->io_regions[i];
        if (!r->size || r->addr == PCI_BAR_UNMAPPED)
            continue;
        memory_region_del_subregion(r->address_space, r->memory);
    }

    pci_unregister_vga(pci_dev);
}

/* qdev unrealize hook: full teardown of a PCI device instance. */
static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);
    pcie_sriov_unregister_device(pci_dev);

    if (pc->exit) {
        pc->exit(pci_dev);
    }

    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * clean up acpi-index so it could reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}

/*
 * Register @memory as BAR @region_num of @pci_dev.  @size must be a
 * power of two.  For SR-IOV VFs the BAR may be mapped immediately
 * (address comes from the PF's SR-IOV capability); for ordinary
 * devices the BAR starts unmapped and the write/check masks are set
 * up so guest writes control the mapping.
 */
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    assert(!r->size);
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    if (pci_is_vf(pci_dev)) {
        PCIDevice *pf = pci_dev->exp.sriov_vf.pf;
        assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]);

        r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size);
        if (r->addr != PCI_BAR_UNMAPPED) {
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    } else {
        r->addr = PCI_BAR_UNMAPPED;

        wmask = ~(size - 1);
        if (region_num == PCI_ROM_SLOT) {
            /* ROM enable bit is writable */
            wmask |= PCI_ROM_ADDRESS_ENABLE;
        }

        addr = pci_bar(pci_dev, region_num);
        pci_set_long(pci_dev->config + addr, type);

        if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(pci_dev->wmask + addr, wmask);
            pci_set_quad(pci_dev->cmask + addr, ~0ULL);
        } else {
            pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
            pci_set_long(pci_dev->cmask + addr, 0xffffffff);
        }
    }
}

/* Enable/disable the VGA ranges according to the COMMAND register. */
static void pci_update_vga(PCIDevice *pci_dev)
{
    uint16_t cmd;

    if (!pci_dev->has_vga) {
        return;
    }

    cmd = pci_get_word(pci_dev->config + PCI_COMMAND);

    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
                              cmd & PCI_COMMAND_MEMORY);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
                              cmd & PCI_COMMAND_IO);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
                              cmd & PCI_COMMAND_IO);
}

/*
 * Map the three legacy VGA ranges (framebuffer memory plus the two
 * I/O port windows) for @pci_dev on its bus.  Region sizes must
 * match the fixed legacy sizes.
 */
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    pci_update_vga(pci_dev);
}

/* Undo pci_register_vga(); no-op when no VGA ranges are registered. */
void pci_unregister_vga(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (!pci_dev->has_vga) {
        return;
    }

    memory_region_del_subregion(bus->address_space_mem,
                                pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
    pci_dev->has_vga = false;
}

/* Current mapped address of BAR @region_num (PCI_BAR_UNMAPPED if none). */
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
{
    return pci_dev->io_regions[region_num].addr;
}

/*
 * Read the raw BAR address for @reg from config space.  For SR-IOV
 * VFs the base comes from the PF's SR-IOV BAR plus vf_num * size
 * (per VF offset/stride).  The low type bits are masked off except
 * for the ROM slot, which keeps its enable bit.
 */
static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        uint32_t vf_num = d->devfn - (pf->devfn + vf_offset);

        if (vf_num) {
            vf_num /= vf_stride;
        }

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}

/*
 * Compute the effective mapping address for a BAR, or
 * PCI_BAR_UNMAPPED when the decode is disabled (COMMAND register),
 * the value wraps/overflows, or the address is one of the values
 * treated as invalid mappings.
 */
pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}

/*
 * Re-evaluate every BAR's mapping after a config-space change and
 * remap (delete then re-add) the regions whose address changed.
 * A disabled device or non-D0 PM state unmaps everything.
 */
static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        if (!d->enabled || pci_pm_state(d)) {
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}

/* Non-zero when INTx is disabled via the COMMAND register. */
static inline int pci_irq_disabled(PCIDevice *d)
{
    return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}

/* Called after interrupt disabled field update in config space,
 * assert/deassert interrupts if necessary.
 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
{
    int i, disabled = pci_irq_disabled(d);
    if (disabled == was_irq_disabled)
        return;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        int state = pci_irq_state(d, i);
        /* negate the pending level when newly disabled; re-assert when
         * newly enabled */
        pci_change_irq_level(d, i, disabled ? -state : state);
    }
}

/*
 * Default config-space read: returns @len bytes at @address as a
 * little-endian value.  Downstream-port link status is refreshed
 * first when LNKSTA is being read.
 */
uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;

    assert(address + len <= pci_config_size(d));

    if (pci_is_express_downstream_port(d) &&
        ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
        pcie_sync_bridge_lnk(d);
    }
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}

/*
 * Default config-space write: applies the writable and W1C masks
 * byte by byte, then propagates side effects (PM state, BAR
 * remapping, INTx disable, bus mastering, MSI/MSI-X/SR-IOV).
 */
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    uint8_t new_pm_state, old_pm_state = pci_pm_state(d);
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    assert(addr + l <= pci_config_size(d));

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }

    new_pm_state = pci_pm_update(d, addr, l, old_pm_state);

    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND) ||
        !!new_pm_state != !!old_pm_state) {
        pci_update_mappings(d);
    }

    if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->enabled);
    }

    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
    pcie_sriov_config_write(d, addr, val_in, l);
}

/***********************************************************/
/* generic PCI irq support */

/* 0 <= irq_num <= 3. level must be 0 or 1 */
static void pci_irq_handler(void *opaque, int irq_num, int level)
{
    PCIDevice *pci_dev = opaque;
    int change;

    assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
    assert(level == 0 || level == 1);
    change = level - pci_irq_state(pci_dev, irq_num);
    if (!change)
        return;

    pci_set_irq_state(pci_dev, irq_num, level);
    pci_update_irq_status(pci_dev);
    /* state is recorded above even while disabled, so it can be
     * replayed when INTx is re-enabled */
    if (pci_irq_disabled(pci_dev))
        return;
    pci_change_irq_level(pci_dev, irq_num, change);
}

/* Allocate a qemu_irq wired to @pci_dev's configured INTx pin. */
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
    int intx = pci_intx(pci_dev);
    assert(0 <= intx && intx < PCI_NUM_PINS);

    return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
}

/* Drive @pci_dev's configured INTx pin to @level. */
void pci_set_irq(PCIDevice *pci_dev, int level)
{
    int intx = pci_intx(pci_dev);
    pci_irq_handler(pci_dev, intx, level);
}

/* Special hooks used by device assignment */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
{
    assert(pci_bus_is_root(bus));
    bus->route_intx_to_irq = route_intx_to_irq;
}

/*
 * Walk @dev's INTx @pin up through every bridge's map_irq to the
 * root bus and resolve the final interrupt route there.
 */
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    do {
        int dev_irq = pin;
        bus = pci_get_bus(dev);
        pin = bus->map_irq(dev, pin);
        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
                            pci_bus_is_root(bus) ? "root-complex"
                                    : DEVICE(bus->parent_dev)->canonical_path);
        dev = bus->parent_dev;
    } while (dev);

    if (!bus->route_intx_to_irq) {
        error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
                     object_get_typename(OBJECT(bus->qbus.parent)));
        return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
    }

    return bus->route_intx_to_irq(bus->irq_opaque, pin);
}

/* True when two INTx routes differ in mode or irq number. */
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
{
    return old->mode != new->mode || old->irq != new->irq;
}

/* Recursively notify every device on @bus and its children of an
 * INTx routing change. */
void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
{
    PCIDevice *dev;
    PCIBus *sec;
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        dev = bus->devices[i];
        if (dev && dev->intx_routing_notifier) {
            dev->intx_routing_notifier(dev);
        }
    }

    QLIST_FOREACH(sec, &bus->child, sibling) {
        pci_bus_fire_intx_routing_notifier(sec);
    }
}

void pci_device_set_intx_routing_notifier(PCIDevice *dev,
                                          PCIINTxRoutingNotifier notifier)
{
    dev->intx_routing_notifier = notifier;
}

/*
 * PCI-to-PCI bridge specification
 * 9.1: Interrupt routing. Table 9-1
 *
 * the PCI Express Base Specification, Revision 2.1
 * 2.2.8.1: INTx interrupt signaling - Rules
 *          the Implementation Note
 *          Table 2-20
 */
/*
 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
 * 0-origin unlike PCI interrupt pin register.
 */
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
    return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
}

/***********************************************************/
/* monitor info on PCI */

/* Class-code -> human-readable description (and fw name) table,
 * terminated by a zero class entry; consumed by get_class_desc(). */
static const pci_class_desc pci_class_descriptions[] =
{
    { 0x0001, "VGA controller", "display"},
    { 0x0100, "SCSI controller", "scsi"},
    { 0x0101, "IDE controller", "ide"},
    { 0x0102, "Floppy controller", "fdc"},
    { 0x0103, "IPI controller", "ipi"},
    { 0x0104, "RAID controller", "raid"},
    { 0x0106, "SATA controller"},
    { 0x0107, "SAS controller"},
    { 0x0180, "Storage controller"},
    { 0x0200, "Ethernet controller", "ethernet"},
    { 0x0201, "Token Ring controller", "token-ring"},
    { 0x0202, "FDDI controller", "fddi"},
    { 0x0203, "ATM controller", "atm"},
    { 0x0280, "Network controller"},
    { 0x0300, "VGA controller", "display", 0x00ff},
    { 0x0301, "XGA controller"},
    { 0x0302, "3D controller"},
    { 0x0380, "Display controller"},
    { 0x0400, "Video controller", "video"},
    { 0x0401, "Audio controller", "sound"},
    { 0x0402, "Phone"},
    { 0x0403, "Audio controller", "sound"},
    { 0x0480, "Multimedia controller"},
    { 0x0500, "RAM controller", "memory"},
    { 0x0501, "Flash controller", "flash"},
    { 0x0580, "Memory controller"},
    { 0x0600, "Host bridge", "host"},
    { 0x0601, "ISA bridge", "isa"},
    { 0x0602, "EISA bridge", "eisa"},
    { 0x0603, "MC bridge", "mca"},
    { 0x0604, "PCI bridge", "pci-bridge"},
    { 0x0605, "PCMCIA bridge", "pcmcia"},
    { 0x0606, "NUBUS bridge", "nubus"},
    { 0x0607, "CARDBUS bridge", "cardbus"},
    { 0x0608, "RACEWAY bridge"},
    { 0x0680, "Bridge"},
    { 0x0700, "Serial port", "serial"},
    { 0x0701, "Parallel port", "parallel"},
    { 0x0800, "Interrupt controller", "interrupt-controller"},
    { 0x0801, "DMA controller", "dma-controller"},
    { 0x0802, "Timer", "timer"},
    { 0x0803, "RTC", "rtc"},
    { 0x0900, "Keyboard", "keyboard"},
    { 0x0901, "Pen", "pen"},
    { 0x0902, "Mouse", "mouse"},
    { 0x0A00, "Dock station", "dock", 0x00ff},
    { 0x0B00, "i386 cpu", "cpu", 0x00ff},
    { 0x0c00, "Firewire controller", "firewire"},
    { 0x0c01, "Access bus controller", "access-bus"},
    { 0x0c02, "SSA controller", "ssa"},
    { 0x0c03, "USB controller", "usb"},
    { 0x0c04, "Fibre channel controller", "fibre-channel"},
    { 0x0c05, "SMBus"},
    { 0, NULL}
};

/* Invoke @fn for every device on @bus, highest devfn first. */
void pci_for_each_device_under_bus_reverse(PCIBus *bus,
                                           pci_bus_dev_fn fn,
                                           void *opaque)
{
    PCIDevice *d;
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
        if (d) {
            fn(bus, d, opaque);
        }
    }
}

/* Like above, but first resolves @bus_num under @bus (no-op when the
 * bus number does not exist). */
void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
                                 pci_bus_dev_fn fn, void *opaque)
{
    bus = pci_find_bus_nr(bus, bus_num);

    if (bus) {
        pci_for_each_device_under_bus_reverse(bus, fn, opaque);
    }
}

/* Invoke @fn for every device on @bus, lowest devfn first. */
void pci_for_each_device_under_bus(PCIBus *bus,
                                   pci_bus_dev_fn fn, void *opaque)
{
    PCIDevice *d;
    int devfn;

    for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        d = bus->devices[devfn];
        if (d) {
            fn(bus, d, opaque);
        }
    }
}

/* Like above, but first resolves @bus_num under @bus. */
void pci_for_each_device(PCIBus *bus, int bus_num,
                         pci_bus_dev_fn fn, void *opaque)
{
    bus = pci_find_bus_nr(bus, bus_num);

    if (bus) {
        pci_for_each_device_under_bus(bus, fn, opaque);
    }
}

/* Look up @class in pci_class_descriptions; returns the terminating
 * { 0, NULL } entry when the class is unknown. */
const pci_class_desc *get_class_desc(int class)
{
    const pci_class_desc *desc;

    desc = pci_class_descriptions;
    while (desc->desc && class != desc->class) {
        desc++;
    }

    return desc;
}

/* Instantiate all -nic/-net configured NIC devices on @bus. */
void pci_init_nic_devices(PCIBus *bus, const char *default_model)
{
    qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
                                "virtio", "virtio-net-pci");
}

/*
 * Create the NIC matching @model/@alias at the fixed PCI address
 * @devaddr ("[[dom:]bus:]slot") under @rootbus.  Returns false when
 * no matching NIC is configured; exits on an invalid address.
 */
bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
                          const char *alias, const char *devaddr)
{
    NICInfo *nd = qemu_find_nic_info(model, true, alias);
    int dom, busnr, devfn;
    PCIDevice *pci_dev;
    unsigned slot;
    PCIBus *bus;

    if (!nd) {
        return false;
    }

    if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, model);
        exit(1);
    }

    if (dom != 0) {
        error_report("No support for non-zero PCI domains");
        exit(1);
    }

    devfn = PCI_DEVFN(slot, 0);

    bus = pci_find_bus_nr(rootbus, busnr);
    if (!bus) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, model);
        exit(1);
    }

    pci_dev = pci_new(devfn, model);
    qdev_set_nic_properties(&pci_dev->qdev, nd);
    pci_realize_and_unref(pci_dev, bus, &error_fatal);
    return true;
}

/* Create the PCI VGA device selected on the command line, or NULL
 * for non-PCI/none display types. */
PCIDevice *pci_vga_init(PCIBus *bus)
{
    vga_interface_created = true;
    switch (vga_interface_type) {
    case VGA_CIRRUS:
        return pci_create_simple(bus, -1, "cirrus-vga");
    case VGA_QXL:
        return pci_create_simple(bus, -1, "qxl-vga");
    case VGA_STD:
        return pci_create_simple(bus, -1, "VGA");
    case VGA_VMWARE:
        return pci_create_simple(bus, -1, "vmware-svga");
    case VGA_VIRTIO:
        return pci_create_simple(bus, -1, "virtio-vga");
    case VGA_NONE:
    default: /* Other non-PCI types. Checking for unsupported types is already
                done in vl.c. */
        return NULL;
    }
}

/* Whether a given bus number is in range of the secondary
 * bus of the given bridge device.
 */
static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
{
    return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
             PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
        dev->config[PCI_SECONDARY_BUS] <= bus_num &&
        bus_num <= dev->config[PCI_SUBORDINATE_BUS];
}

/* Whether a given bus number is in a range of a root bus */
static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        PCIDevice *dev = bus->devices[i];

        if (dev && IS_PCI_BRIDGE(dev)) {
            if (pci_secondary_bus_in_range(dev, bus_num)) {
                return true;
            }
        }
    }

    return false;
}

/*
 * Find the bus numbered @bus_num at or below @bus.
 * Returns NULL when @bus_num is not reachable from @bus.
 */
PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
{
    PCIBus *sec;

    if (!bus) {
        return NULL;
    }

    if (pci_bus_num(bus) == bus_num) {
        return bus;
    }

    /* Consider all bus numbers in range for the host pci bridge. */
    if (!pci_bus_is_root(bus) &&
        !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
        return NULL;
    }

    /* try child bus */
    for (; bus; bus = sec) {
        QLIST_FOREACH(sec, &bus->child, sibling) {
            if (pci_bus_num(sec) == bus_num) {
                return sec;
            }
            /* PXB buses assumed to be children of bus 0 */
            if (pci_bus_is_root(sec)) {
                if (pci_root_bus_in_range(sec, bus_num)) {
                    break;
                }
            } else {
                if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
                    break;
                }
            }
        }
    }

    return NULL;
}

/*
 * Walk the bus hierarchy depth-first.  @begin is called before a bus's
 * children are visited and its return value becomes the parent state
 * passed down to the children; @end is called after the children.
 * Either callback may be NULL.
 */
void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
                                  pci_bus_fn end, void *parent_state)
{
    PCIBus *sec;
    void *state;

    if (!bus) {
        return;
    }

    if (begin) {
        state = begin(bus, parent_state);
    } else {
        state = parent_state;
    }

    QLIST_FOREACH(sec, &bus->child, sibling) {
        pci_for_each_bus_depth_first(sec, begin, end, state);
    }

    if (end) {
        end(bus, state);
    }
}


/* Look up the device at @bus_num/@devfn below @bus; NULL when absent. */
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
{
    bus = pci_find_bus_nr(bus, bus_num);

    if (!bus)
        return NULL;

    return bus->devices[devfn];
}

/* Upper bound for the acpi-index property, matching systemd's limit. */
#define ONBOARD_INDEX_MAX (16 * 1024 - 1)

/*
 * qdev realize handler shared by all PCI devices: validates properties
 * (acpi-index, romsize), registers the device in config space, runs the
 * device-specific realize hook, then loads the option ROM and powers the
 * device on.  Each failing step unwinds the steps before it.
 */
static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    ObjectClass *klass = OBJECT_CLASS(pc);
    Error *local_err = NULL;
    bool is_default_rom;
    uint16_t class_id;

    /*
     * capped by systemd (see: udev-builtin-net_id.c)
     * as it's the only known user honor it to avoid users
     * misconfigure QEMU and then wonder why acpi-index doesn't work
     */
    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
        error_setg(errp, "acpi-index should be less or equal to %u",
                   ONBOARD_INDEX_MAX);
        return;
    }

    /*
     * make sure that acpi-index is unique across all present PCI devices
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        if (g_sequence_lookup(used_indexes,
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exist", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    /* UINT32_MAX means "romsize not set"; anything else must be a power of 2 */
    if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /* initialize cap_present for pci_is_express() and pci_config_size(),
     * Note that hybrid PCIs are not set automatically and need to manage
     * QEMU_PCI_CAP_EXPRESS manually */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL)
        return;

    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    if (!pcie_sriov_register_device(pci_dev, errp)) {
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    /*
     * A PCIe Downstream Port that do not have ARI Forwarding enabled must
     * associate only Device 0 with the device attached to the bus
     * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
     * sec 7.3.1).
     * With ARI, PCI_SLOT() can return non-zero value as the traditional
     * 5-bit Device Number and 3-bit Function Number fields in its associated
     * Routing IDs, Requester IDs and Completer IDs are interpreted as a
     * single 8-bit Function Number. Hence, ignore ARI capable devices.
     */
    if (pci_is_express(pci_dev) &&
        !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
        pcie_has_upstream_port(pci_dev) &&
        PCI_SLOT(pci_dev->devfn)) {
        warn_report("PCI: slot %d is not valid for %s,"
                    " parent device only allows plugging into slot 0.",
                    PCI_SLOT(pci_dev->devfn), pci_dev->name);
    }

    if (pci_dev->failover_pair_id) {
        if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
            error_setg(errp, "failover primary device must be on "
                             "PCIExpress bus");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
        if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
            error_setg(errp, "failover primary device is not an "
                             "Ethernet device");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
            || (PCI_FUNC(pci_dev->devfn) != 0)) {
            error_setg(errp, "failover: primary device must be in its own "
                             "PCI slot");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        qdev->allow_unplug_during_migration = true;
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }

    pci_add_option_rom(pci_dev, is_default_rom, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    pci_set_power(pci_dev, true);

    pci_dev->msi_trigger = pci_msi_trigger;
}

static PCIDevice
*pci_new_internal(int devfn, bool multifunction, 2345 const char *name) 2346 { 2347 DeviceState *dev; 2348 2349 dev = qdev_new(name); 2350 qdev_prop_set_int32(dev, "addr", devfn); 2351 qdev_prop_set_bit(dev, "multifunction", multifunction); 2352 return PCI_DEVICE(dev); 2353 } 2354 2355 PCIDevice *pci_new_multifunction(int devfn, const char *name) 2356 { 2357 return pci_new_internal(devfn, true, name); 2358 } 2359 2360 PCIDevice *pci_new(int devfn, const char *name) 2361 { 2362 return pci_new_internal(devfn, false, name); 2363 } 2364 2365 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp) 2366 { 2367 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); 2368 } 2369 2370 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn, 2371 const char *name) 2372 { 2373 PCIDevice *dev = pci_new_multifunction(devfn, name); 2374 pci_realize_and_unref(dev, bus, &error_fatal); 2375 return dev; 2376 } 2377 2378 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name) 2379 { 2380 PCIDevice *dev = pci_new(devfn, name); 2381 pci_realize_and_unref(dev, bus, &error_fatal); 2382 return dev; 2383 } 2384 2385 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size) 2386 { 2387 int offset = PCI_CONFIG_HEADER_SIZE; 2388 int i; 2389 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) { 2390 if (pdev->used[i]) 2391 offset = i + 1; 2392 else if (i - offset + 1 == size) 2393 return offset; 2394 } 2395 return 0; 2396 } 2397 2398 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id, 2399 uint8_t *prev_p) 2400 { 2401 uint8_t next, prev; 2402 2403 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) 2404 return 0; 2405 2406 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2407 prev = next + PCI_CAP_LIST_NEXT) 2408 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) 2409 break; 2410 2411 if (prev_p) 2412 *prev_p = prev; 2413 return next; 2414 } 2415 2416 static uint8_t 
pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset) 2417 { 2418 uint8_t next, prev, found = 0; 2419 2420 if (!(pdev->used[offset])) { 2421 return 0; 2422 } 2423 2424 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); 2425 2426 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2427 prev = next + PCI_CAP_LIST_NEXT) { 2428 if (next <= offset && next > found) { 2429 found = next; 2430 } 2431 } 2432 return found; 2433 } 2434 2435 /* Patch the PCI vendor and device ids in a PCI rom image if necessary. 2436 This is needed for an option rom which is used for more than one device. */ 2437 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) 2438 { 2439 uint16_t vendor_id; 2440 uint16_t device_id; 2441 uint16_t rom_vendor_id; 2442 uint16_t rom_device_id; 2443 uint16_t rom_magic; 2444 uint16_t pcir_offset; 2445 uint8_t checksum; 2446 2447 /* Words in rom data are little endian (like in PCI configuration), 2448 so they can be read / written with pci_get_word / pci_set_word. */ 2449 2450 /* Only a valid rom will be patched. */ 2451 rom_magic = pci_get_word(ptr); 2452 if (rom_magic != 0xaa55) { 2453 trace_pci_bad_rom_magic(rom_magic, 0xaa55); 2454 return; 2455 } 2456 pcir_offset = pci_get_word(ptr + 0x18); 2457 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { 2458 trace_pci_bad_pcir_offset(pcir_offset); 2459 return; 2460 } 2461 2462 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); 2463 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); 2464 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); 2465 rom_device_id = pci_get_word(ptr + pcir_offset + 6); 2466 2467 trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id, 2468 rom_vendor_id, rom_device_id); 2469 2470 checksum = ptr[6]; 2471 2472 if (vendor_id != rom_vendor_id) { 2473 /* Patch vendor id and checksum (at offset 6 for etherboot roms). 
*/ 2474 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); 2475 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); 2476 trace_pci_rom_checksum_change(ptr[6], checksum); 2477 ptr[6] = checksum; 2478 pci_set_word(ptr + pcir_offset + 4, vendor_id); 2479 } 2480 2481 if (device_id != rom_device_id) { 2482 /* Patch device id and checksum (at offset 6 for etherboot roms). */ 2483 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); 2484 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); 2485 trace_pci_rom_checksum_change(ptr[6], checksum); 2486 ptr[6] = checksum; 2487 pci_set_word(ptr + pcir_offset + 6, device_id); 2488 } 2489 } 2490 2491 /* Add an option rom for the device */ 2492 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, 2493 Error **errp) 2494 { 2495 int64_t size = 0; 2496 g_autofree char *path = NULL; 2497 char name[32]; 2498 const VMStateDescription *vmsd; 2499 2500 /* 2501 * In case of incoming migration ROM will come with migration stream, no 2502 * reason to load the file. Neither we want to fail if local ROM file 2503 * mismatches with specified romsize. 2504 */ 2505 bool load_file = !runstate_check(RUN_STATE_INMIGRATE); 2506 2507 if (!pdev->romfile || !strlen(pdev->romfile)) { 2508 return; 2509 } 2510 2511 if (!pdev->rom_bar) { 2512 /* 2513 * Load rom via fw_cfg instead of creating a rom bar, 2514 * for 0.11 compatibility. 2515 */ 2516 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); 2517 2518 /* 2519 * Hot-plugged devices can't use the option ROM 2520 * if the rom bar is disabled. 
2521 */ 2522 if (DEVICE(pdev)->hotplugged) { 2523 error_setg(errp, "Hot-plugged device without ROM bar" 2524 " can't have an option ROM"); 2525 return; 2526 } 2527 2528 if (class == 0x0300) { 2529 rom_add_vga(pdev->romfile); 2530 } else { 2531 rom_add_option(pdev->romfile, -1); 2532 } 2533 return; 2534 } 2535 2536 if (pci_is_vf(pdev)) { 2537 if (pdev->rom_bar > 0) { 2538 error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF"); 2539 } 2540 2541 return; 2542 } 2543 2544 if (load_file || pdev->romsize == UINT32_MAX) { 2545 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); 2546 if (path == NULL) { 2547 path = g_strdup(pdev->romfile); 2548 } 2549 2550 size = get_image_size(path); 2551 if (size < 0) { 2552 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); 2553 return; 2554 } else if (size == 0) { 2555 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile); 2556 return; 2557 } else if (size > 2 * GiB) { 2558 error_setg(errp, 2559 "romfile \"%s\" too large (size cannot exceed 2 GiB)", 2560 pdev->romfile); 2561 return; 2562 } 2563 if (pdev->romsize != UINT_MAX) { 2564 if (size > pdev->romsize) { 2565 error_setg(errp, "romfile \"%s\" (%u bytes) " 2566 "is too large for ROM size %u", 2567 pdev->romfile, (uint32_t)size, pdev->romsize); 2568 return; 2569 } 2570 } else { 2571 pdev->romsize = pow2ceil(size); 2572 } 2573 } 2574 2575 vmsd = qdev_get_vmsd(DEVICE(pdev)); 2576 snprintf(name, sizeof(name), "%s.rom", 2577 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev))); 2578 2579 pdev->has_rom = true; 2580 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, 2581 &error_fatal); 2582 2583 if (load_file) { 2584 void *ptr = memory_region_get_ram_ptr(&pdev->rom); 2585 2586 if (load_image_size(path, ptr, size) < 0) { 2587 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); 2588 return; 2589 } 2590 2591 if (is_default_rom) { 2592 /* Only the default rom images will be patched (if needed). 
*/ 2593 pci_patch_ids(pdev, ptr, size); 2594 } 2595 } 2596 2597 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); 2598 } 2599 2600 static void pci_del_option_rom(PCIDevice *pdev) 2601 { 2602 if (!pdev->has_rom) 2603 return; 2604 2605 vmstate_unregister_ram(&pdev->rom, &pdev->qdev); 2606 pdev->has_rom = false; 2607 } 2608 2609 /* 2610 * On success, pci_add_capability() returns a positive value 2611 * that the offset of the pci capability. 2612 * On failure, it sets an error and returns a negative error 2613 * code. 2614 */ 2615 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id, 2616 uint8_t offset, uint8_t size, 2617 Error **errp) 2618 { 2619 uint8_t *config; 2620 int i, overlapping_cap; 2621 2622 if (!offset) { 2623 offset = pci_find_space(pdev, size); 2624 /* out of PCI config space is programming error */ 2625 assert(offset); 2626 } else { 2627 /* Verify that capabilities don't overlap. Note: device assignment 2628 * depends on this check to verify that the device is not broken. 2629 * Should never trigger for emulated devices, but it's helpful 2630 * for debugging these. 
*/ 2631 for (i = offset; i < offset + size; i++) { 2632 overlapping_cap = pci_find_capability_at_offset(pdev, i); 2633 if (overlapping_cap) { 2634 error_setg(errp, "%s:%02x:%02x.%x " 2635 "Attempt to add PCI capability %x at offset " 2636 "%x overlaps existing capability %x at offset %x", 2637 pci_root_bus_path(pdev), pci_dev_bus_num(pdev), 2638 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2639 cap_id, offset, overlapping_cap, i); 2640 return -EINVAL; 2641 } 2642 } 2643 } 2644 2645 config = pdev->config + offset; 2646 config[PCI_CAP_LIST_ID] = cap_id; 2647 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; 2648 pdev->config[PCI_CAPABILITY_LIST] = offset; 2649 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; 2650 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); 2651 /* Make capability read-only by default */ 2652 memset(pdev->wmask + offset, 0, size); 2653 /* Check capability by default */ 2654 memset(pdev->cmask + offset, 0xFF, size); 2655 return offset; 2656 } 2657 2658 /* Unlink capability from the pci config space. 
*/ 2659 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) 2660 { 2661 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev); 2662 if (!offset) 2663 return; 2664 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; 2665 /* Make capability writable again */ 2666 memset(pdev->wmask + offset, 0xff, size); 2667 memset(pdev->w1cmask + offset, 0, size); 2668 /* Clear cmask as device-specific registers can't be checked */ 2669 memset(pdev->cmask + offset, 0, size); 2670 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); 2671 2672 if (!pdev->config[PCI_CAPABILITY_LIST]) 2673 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; 2674 } 2675 2676 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id) 2677 { 2678 return pci_find_capability_list(pdev, cap_id, NULL); 2679 } 2680 2681 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) 2682 { 2683 PCIDevice *d = (PCIDevice *)dev; 2684 const char *name = NULL; 2685 const pci_class_desc *desc = pci_class_descriptions; 2686 int class = pci_get_word(d->config + PCI_CLASS_DEVICE); 2687 2688 while (desc->desc && 2689 (class & ~desc->fw_ign_bits) != 2690 (desc->class & ~desc->fw_ign_bits)) { 2691 desc++; 2692 } 2693 2694 if (desc->desc) { 2695 name = desc->fw_name; 2696 } 2697 2698 if (name) { 2699 pstrcpy(buf, len, name); 2700 } else { 2701 snprintf(buf, len, "pci%04x,%04x", 2702 pci_get_word(d->config + PCI_VENDOR_ID), 2703 pci_get_word(d->config + PCI_DEVICE_ID)); 2704 } 2705 2706 return buf; 2707 } 2708 2709 static char *pcibus_get_fw_dev_path(DeviceState *dev) 2710 { 2711 PCIDevice *d = (PCIDevice *)dev; 2712 char name[33]; 2713 int has_func = !!PCI_FUNC(d->devfn); 2714 2715 return g_strdup_printf("%s@%x%s%.*x", 2716 pci_dev_fw_name(dev, name, sizeof(name)), 2717 PCI_SLOT(d->devfn), 2718 has_func ? 
"," : "", 2719 has_func, 2720 PCI_FUNC(d->devfn)); 2721 } 2722 2723 static char *pcibus_get_dev_path(DeviceState *dev) 2724 { 2725 PCIDevice *d = container_of(dev, PCIDevice, qdev); 2726 PCIDevice *t; 2727 int slot_depth; 2728 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. 2729 * 00 is added here to make this format compatible with 2730 * domain:Bus:Slot.Func for systems without nested PCI bridges. 2731 * Slot.Function list specifies the slot and function numbers for all 2732 * devices on the path from root to the specific device. */ 2733 const char *root_bus_path; 2734 int root_bus_len; 2735 char slot[] = ":SS.F"; 2736 int slot_len = sizeof slot - 1 /* For '\0' */; 2737 int path_len; 2738 char *path, *p; 2739 int s; 2740 2741 root_bus_path = pci_root_bus_path(d); 2742 root_bus_len = strlen(root_bus_path); 2743 2744 /* Calculate # of slots on path between device and root. */; 2745 slot_depth = 0; 2746 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2747 ++slot_depth; 2748 } 2749 2750 path_len = root_bus_len + slot_len * slot_depth; 2751 2752 /* Allocate memory, fill in the terminating null byte. */ 2753 path = g_malloc(path_len + 1 /* For '\0' */); 2754 path[path_len] = '\0'; 2755 2756 memcpy(path, root_bus_path, root_bus_len); 2757 2758 /* Fill in slot numbers. We walk up from device to root, so need to print 2759 * them in the reverse order, last to first. 
*/ 2760 p = path + path_len; 2761 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2762 p -= slot_len; 2763 s = snprintf(slot, sizeof slot, ":%02x.%x", 2764 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); 2765 assert(s == slot_len); 2766 memcpy(p, slot, slot_len); 2767 } 2768 2769 return path; 2770 } 2771 2772 static int pci_qdev_find_recursive(PCIBus *bus, 2773 const char *id, PCIDevice **pdev) 2774 { 2775 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); 2776 if (!qdev) { 2777 return -ENODEV; 2778 } 2779 2780 /* roughly check if given qdev is pci device */ 2781 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) { 2782 *pdev = PCI_DEVICE(qdev); 2783 return 0; 2784 } 2785 return -EINVAL; 2786 } 2787 2788 int pci_qdev_find_device(const char *id, PCIDevice **pdev) 2789 { 2790 PCIHostState *host_bridge; 2791 int rc = -ENODEV; 2792 2793 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { 2794 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); 2795 if (!tmp) { 2796 rc = 0; 2797 break; 2798 } 2799 if (tmp != -ENODEV) { 2800 rc = tmp; 2801 } 2802 } 2803 2804 return rc; 2805 } 2806 2807 MemoryRegion *pci_address_space(PCIDevice *dev) 2808 { 2809 return pci_get_bus(dev)->address_space_mem; 2810 } 2811 2812 MemoryRegion *pci_address_space_io(PCIDevice *dev) 2813 { 2814 return pci_get_bus(dev)->address_space_io; 2815 } 2816 2817 static void pci_device_class_init(ObjectClass *klass, const void *data) 2818 { 2819 DeviceClass *k = DEVICE_CLASS(klass); 2820 2821 k->realize = pci_qdev_realize; 2822 k->unrealize = pci_qdev_unrealize; 2823 k->bus_type = TYPE_PCI_BUS; 2824 device_class_set_props(k, pci_props); 2825 object_class_property_set_description( 2826 klass, "x-max-bounce-buffer-size", 2827 "Maximum buffer size allocated for bounce buffers used for mapped " 2828 "access to indirect DMA memory"); 2829 } 2830 2831 static void pci_device_class_base_init(ObjectClass *klass, const void *data) 2832 { 2833 if (!object_class_is_abstract(klass)) { 2834 ObjectClass 
*conventional =
            object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
        ObjectClass *pcie =
            object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
        ObjectClass *cxl =
            object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
        assert(conventional || pcie || cxl);
    }
}

/*
 * Get IOMMU root bus, aliased bus and devfn of a PCI device
 *
 * IOMMU root bus is needed by all call sites to call into iommu_ops.
 * For call sites which don't need aliased BDF, passing NULL to
 * aliased_[bus|devfn] is allowed.
 *
 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
 *
 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
 *
 * @aliased_devfn: return aliased devfn of the PCI device, optional.
 */
static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
                                           PCIBus **piommu_bus,
                                           PCIBus **aliased_bus,
                                           int *aliased_devfn)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    int devfn = dev->devfn;

    /* Climb towards the root until a bus with iommu_ops is found. */
    while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen from
         * the IOMMU, due to topology limitations.  The IOMMU relies on a
         * requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts.  Instead, the PCIe-
         * to-PCI bridge creates and accepts transactions on behalf of down-
         * stream devices.  When doing so, all downstream devices are masked
         * (aliased) behind a single requester ID.  The requester ID used
         * depends on the format of the bridge devices.  Proper PCIe-to-PCI
         * bridges, with a PCIe capability indicating such, follow the
         * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
         * where the bridge uses the secondary bus as the bridge portion of the
         * requester ID and devfn of 00.0.  For other bridges, typically those
         * found on the root complex such as the dmi-to-pci-bridge, we follow
         * the convention of typical bare-metal hardware, which uses the
         * requester ID of the bridge itself.  There are device specific
         * exceptions to these rules, but these are the defaults that the
         * Linux kernel uses when determining DMA aliases itself and believed
         * to be true for the bare metal equivalents of the devices emulated
         * in QEMU.
         */
        if (!pci_bus_is_express(iommu_bus)) {
            PCIDevice *parent = iommu_bus->parent_dev;

            if (pci_is_express(parent) &&
                pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                devfn = PCI_DEVFN(0, 0);
                bus = iommu_bus;
            } else {
                devfn = parent->devfn;
                bus = parent_bus;
            }
        }

        iommu_bus = parent_bus;
    }

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
    assert(iommu_bus);

    if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
        iommu_bus = NULL;
    }

    *piommu_bus = iommu_bus;

    if (aliased_bus) {
        *aliased_bus = bus;
    }

    if (aliased_devfn) {
        *aliased_devfn = devfn;
    }
}

/* DMA address space for @dev; falls back to system memory without an IOMMU. */
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
    PCIBus *bus;
    PCIBus *iommu_bus;
    int devfn;

    pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
    if (iommu_bus) {
        return iommu_bus->iommu_ops->get_address_space(bus,
                                                       iommu_bus->iommu_opaque,
                                                       devfn);
    }
    return &address_space_memory;
}

bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
                                 Error **errp)
{
    PCIBus *iommu_bus, *aliased_bus;
    int aliased_devfn;

    /* set_iommu_device requires device's direct BDF instead of aliased BDF */
    pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
                                   &aliased_bus, &aliased_devfn);
    if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
        hiod->aliased_bus = aliased_bus;
        hiod->aliased_devfn = aliased_devfn;
        return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
                                                      iommu_bus->iommu_opaque,
                                                      dev->devfn, hiod, errp);
    }
    return true;
}

/* Detach the host IOMMU device previously attached for @dev, if any. */
void pci_device_unset_iommu_device(PCIDevice *dev)
{
    PCIBus *iommu_bus;

    pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
    if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
        return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
                                                        iommu_bus->iommu_opaque,
                                                        dev->devfn);
    }
}

/* Attach IOMMU callbacks to @bus; get_address_space is mandatory. */
void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
{
    /*
     * If called, pci_setup_iommu() should provide a minimum set of
     * useful callbacks for the bus.
     */
    assert(ops);
    assert(ops->get_address_space);

    bus->iommu_ops = ops;
    bus->iommu_opaque = opaque;
}

/*
 * Per-device helper for pci_bus_get_w64_range(): extend *opaque (a Range)
 * by every enabled 64-bit memory BAR and prefetchable bridge window that
 * lies above 4 GiB.
 */
static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
{
    Range *range = opaque;
    uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
    int i;

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return;
    }

    if (IS_PCI_BRIDGE(dev)) {
        pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
        pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

        /* Only the portion of the window above 4 GiB counts. */
        base = MAX(base, 0x1ULL << 32);

        if (limit >= base) {
            Range pref_range;
            range_set_bounds(&pref_range, base, limit);
            range_extend(range, &pref_range);
        }
    }
    for (i = 0; i < PCI_NUM_REGIONS; ++i) {
        PCIIORegion *r = &dev->io_regions[i];
        pcibus_t lob, upb;
        Range region_range;

        if (!r->size ||
            (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
            !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
            continue;
        }

        lob = pci_bar_address(dev, i, r->type, r->size);
        upb = lob + r->size - 1;
        if (lob == PCI_BAR_UNMAPPED) {
            continue;
        }

        lob = MAX(lob, 0x1ULL << 32);

        if (upb >= lob) {
            range_set_bounds(&region_range, lob, upb);
            range_extend(range, &region_range);
        }
    }
}

/* Compute the union of all 64-bit memory ranges used by devices on @bus. */
void pci_bus_get_w64_range(PCIBus *bus, Range *range)
{
    range_make_empty(range);
    pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
}

static bool pcie_has_upstream_port(PCIDevice *dev)
{
    PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));

    /* Device associated with an upstream port.
     * As there are several types of these, it's easier to check the
     * parent device: upstream ports are always connected to
     * root or downstream ports.
     */
    return parent_dev &&
           pci_is_express(parent_dev) &&
           parent_dev->exp.exp_cap &&
           (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
            pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
}

/* Return the function-0 device sharing @pci_dev's slot, NULL when absent. */
PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (pcie_has_upstream_port(pci_dev)) {
        /* With an upstream PCIe port, we only support 1 device at slot 0 */
        return bus->devices[0];
    } else {
        /* Other bus types might support multiple devices at slots 0-31 */
        return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
    }
}

/*
 * Fetch the MSI/MSI-X message for @vector.  Aborts if neither MSI nor
 * MSI-X is enabled on the device — callers must check beforehand.
 */
MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
{
    MSIMessage msg;
    if (msix_enabled(dev)) {
        msg = msix_get_message(dev, vector);
    } else if (msi_enabled(dev)) {
        msg = msi_get_message(dev, vector);
    } else {
        /* Should never happen */
        error_report("%s: unknown interrupt type", __func__);
        abort();
    }
    return msg;
}

void pci_set_power(PCIDevice *d, bool state)
{
    /*
     * Don't change the enabled state of VFs when powering on/off the device.
     *
     * When powering on, VFs must not be enabled immediately but they must
     * wait until the guest configures SR-IOV.
     * When powering off, their corresponding PFs will be reset and disable
     * VFs.
     */
    if (!pci_is_vf(d)) {
        pci_set_enabled(d, state);
    }
}

/*
 * Enable or disable @d: updates BAR mappings and bus-master DMA, and
 * resets the device when it is disabled (or re-enabled) after realize.
 */
void pci_set_enabled(PCIDevice *d, bool state)
{
    if (d->enabled == state) {
        return;
    }

    d->enabled = state;
    pci_update_mappings(d);
    memory_region_set_enabled(&d->bus_master_enable_region,
                              (pci_get_word(d->config + PCI_COMMAND)
                               & PCI_COMMAND_MASTER) && d->enabled);
    if (qdev_is_realized(&d->qdev)) {
        pci_device_reset(d);
    }
}

static const TypeInfo pci_device_type_info = {
    .name = TYPE_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .abstract = true,
    .class_size = sizeof(PCIDeviceClass),
    .class_init = pci_device_class_init,
    .class_base_init = pci_device_class_base_init,
};

static void pci_register_types(void)
{
    type_register_static(&pci_bus_info);
    type_register_static(&pcie_bus_info);
    type_register_static(&cxl_bus_info);
    type_register_static(&conventional_pci_interface_info);
    type_register_static(&cxl_interface_info);
    type_register_static(&pcie_interface_info);
    type_register_static(&pci_device_type_info);
}

type_init(pci_register_types)