1 /* 2 * QEMU PCI bus manager 3 * 4 * Copyright (c) 2004 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "qemu/datadir.h" 27 #include "qemu/units.h" 28 #include "hw/irq.h" 29 #include "hw/pci/pci.h" 30 #include "hw/pci/pci_bridge.h" 31 #include "hw/pci/pci_bus.h" 32 #include "hw/pci/pci_host.h" 33 #include "hw/qdev-properties.h" 34 #include "hw/qdev-properties-system.h" 35 #include "migration/qemu-file-types.h" 36 #include "migration/vmstate.h" 37 #include "net/net.h" 38 #include "system/numa.h" 39 #include "system/runstate.h" 40 #include "system/system.h" 41 #include "hw/loader.h" 42 #include "qemu/error-report.h" 43 #include "qemu/range.h" 44 #include "trace.h" 45 #include "hw/pci/msi.h" 46 #include "hw/pci/msix.h" 47 #include "hw/hotplug.h" 48 #include "hw/boards.h" 49 #include "hw/nvram/fw_cfg.h" 50 #include "qapi/error.h" 51 #include "qemu/cutils.h" 52 #include "pci-internal.h" 53 54 #include "hw/xen/xen.h" 55 #include "hw/i386/kvm/xen_evtchn.h" 56 57 bool pci_available = true; 58 59 static char *pcibus_get_dev_path(DeviceState *dev); 60 static char *pcibus_get_fw_dev_path(DeviceState *dev); 61 static void pcibus_reset_hold(Object *obj, ResetType type); 62 static bool pcie_has_upstream_port(PCIDevice *dev); 63 64 static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name, 65 void *opaque, Error **errp) 66 { 67 uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj)); 68 69 visit_type_uint8(v, name, &busnr, errp); 70 } 71 72 static const PropertyInfo prop_pci_busnr = { 73 .type = "busnr", 74 .get = prop_pci_busnr_get, 75 }; 76 77 static const Property pci_props[] = { 78 DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), 79 DEFINE_PROP_STRING("romfile", PCIDevice, romfile), 80 DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX), 81 DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1), 82 DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, 83 QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), 84 DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present, 85 QEMU_PCIE_LNKSTA_DLLLA_BITNR, true), 86 DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present, 87 QEMU_PCIE_EXTCAP_INIT_BITNR, true), 88 DEFINE_PROP_STRING("failover_pair_id", PCIDevice, 89 failover_pair_id), 90 DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), 91 DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, 92 QEMU_PCIE_ERR_UNC_MASK_BITNR, true), 93 
DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present, 94 QEMU_PCIE_ARI_NEXTFN_1_BITNR, false), 95 DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, 96 max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), 97 DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present, 98 QEMU_PCIE_EXT_TAG_BITNR, true), 99 { .name = "busnr", .info = &prop_pci_busnr }, 100 }; 101 102 static const VMStateDescription vmstate_pcibus = { 103 .name = "PCIBUS", 104 .version_id = 1, 105 .minimum_version_id = 1, 106 .fields = (const VMStateField[]) { 107 VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), 108 VMSTATE_VARRAY_INT32(irq_count, PCIBus, 109 nirq, 0, vmstate_info_int32, 110 int32_t), 111 VMSTATE_END_OF_LIST() 112 } 113 }; 114 115 static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data) 116 { 117 return a - b; 118 } 119 120 static GSequence *pci_acpi_index_list(void) 121 { 122 static GSequence *used_acpi_index_list; 123 124 if (!used_acpi_index_list) { 125 used_acpi_index_list = g_sequence_new(NULL); 126 } 127 return used_acpi_index_list; 128 } 129 130 static void pci_init_bus_master(PCIDevice *pci_dev) 131 { 132 AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); 133 134 memory_region_init_alias(&pci_dev->bus_master_enable_region, 135 OBJECT(pci_dev), "bus master", 136 dma_as->root, 0, memory_region_size(dma_as->root)); 137 memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); 138 memory_region_add_subregion(&pci_dev->bus_master_container_region, 0, 139 &pci_dev->bus_master_enable_region); 140 } 141 142 static void pcibus_machine_done(Notifier *notifier, void *data) 143 { 144 PCIBus *bus = container_of(notifier, PCIBus, machine_done); 145 int i; 146 147 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 148 if (bus->devices[i]) { 149 pci_init_bus_master(bus->devices[i]); 150 } 151 } 152 } 153 154 static void pci_bus_realize(BusState *qbus, Error **errp) 155 { 156 PCIBus *bus = PCI_BUS(qbus); 157 158 bus->machine_done.notify = pcibus_machine_done; 159 qemu_add_machine_init_done_notifier(&bus->machine_done); 160 161 vmstate_register_any(NULL, &vmstate_pcibus, bus); 162 } 163 164 static void pcie_bus_realize(BusState *qbus, Error **errp) 165 { 166 PCIBus *bus = PCI_BUS(qbus); 167 Error *local_err = NULL; 168 169 pci_bus_realize(qbus, &local_err); 170 if (local_err) { 171 error_propagate(errp, local_err); 172 return; 173 } 174 175 /* 176 * A PCI-E bus can support extended config space if it's the root 177 * bus, or if the bus/bridge above it does as well 178 */ 179 if (pci_bus_is_root(bus)) { 180 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; 181 } else { 182 PCIBus *parent_bus = pci_get_bus(bus->parent_dev); 183 184 if (pci_bus_allows_extended_config_space(parent_bus)) { 185 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; 186 } 187 } 188 } 189 190 static void pci_bus_unrealize(BusState *qbus) 191 { 192 PCIBus *bus = PCI_BUS(qbus); 193 194 qemu_remove_machine_init_done_notifier(&bus->machine_done); 195 196 vmstate_unregister(NULL, &vmstate_pcibus, bus); 197 } 198 199 static int pcibus_num(PCIBus *bus) 200 { 201 if (pci_bus_is_root(bus)) { 202 return 0; /* pci host bridge */ 203 } 204 return bus->parent_dev->config[PCI_SECONDARY_BUS]; 205 } 206 207 static uint16_t pcibus_numa_node(PCIBus *bus) 208 { 209 return NUMA_NODE_UNASSIGNED; 210 } 211 212 bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg, 213 PCIBus *bus, 214 Error **errp) 215 { 216 Object *obj; 217 218 if (!bus) { 219 return true; 220 } 221 obj = OBJECT(bus); 222 223 return 
fw_cfg_add_file_from_generator(fw_cfg, obj->parent, 224 object_get_canonical_path_component(obj), 225 "etc/extra-pci-roots", errp); 226 } 227 228 static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp) 229 { 230 PCIBus *bus = PCI_BUS(obj); 231 GByteArray *byte_array; 232 uint64_t extra_hosts = 0; 233 234 if (!bus) { 235 return NULL; 236 } 237 238 QLIST_FOREACH(bus, &bus->child, sibling) { 239 /* look for expander root buses */ 240 if (pci_bus_is_root(bus)) { 241 extra_hosts++; 242 } 243 } 244 245 if (!extra_hosts) { 246 return NULL; 247 } 248 extra_hosts = cpu_to_le64(extra_hosts); 249 250 byte_array = g_byte_array_new(); 251 g_byte_array_append(byte_array, 252 (const void *)&extra_hosts, sizeof(extra_hosts)); 253 254 return byte_array; 255 } 256 257 static void pci_bus_class_init(ObjectClass *klass, const void *data) 258 { 259 BusClass *k = BUS_CLASS(klass); 260 PCIBusClass *pbc = PCI_BUS_CLASS(klass); 261 ResettableClass *rc = RESETTABLE_CLASS(klass); 262 FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass); 263 264 k->print_dev = pcibus_dev_print; 265 k->get_dev_path = pcibus_get_dev_path; 266 k->get_fw_dev_path = pcibus_get_fw_dev_path; 267 k->realize = pci_bus_realize; 268 k->unrealize = pci_bus_unrealize; 269 270 rc->phases.hold = pcibus_reset_hold; 271 272 pbc->bus_num = pcibus_num; 273 pbc->numa_node = pcibus_numa_node; 274 275 fwgc->get_data = pci_bus_fw_cfg_gen_data; 276 } 277 278 static const TypeInfo pci_bus_info = { 279 .name = TYPE_PCI_BUS, 280 .parent = TYPE_BUS, 281 .instance_size = sizeof(PCIBus), 282 .class_size = sizeof(PCIBusClass), 283 .class_init = pci_bus_class_init, 284 .interfaces = (const InterfaceInfo[]) { 285 { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE }, 286 { } 287 } 288 }; 289 290 static const TypeInfo cxl_interface_info = { 291 .name = INTERFACE_CXL_DEVICE, 292 .parent = TYPE_INTERFACE, 293 }; 294 295 static const TypeInfo pcie_interface_info = { 296 .name = INTERFACE_PCIE_DEVICE, 297 .parent = TYPE_INTERFACE, 298 }; 299 300 static const TypeInfo conventional_pci_interface_info = { 301 .name = INTERFACE_CONVENTIONAL_PCI_DEVICE, 302 .parent = TYPE_INTERFACE, 303 }; 304 305 static void pcie_bus_class_init(ObjectClass *klass, const void *data) 306 { 307 BusClass *k = BUS_CLASS(klass); 308 309 k->realize = pcie_bus_realize; 310 } 311 312 static const TypeInfo pcie_bus_info = { 313 .name = TYPE_PCIE_BUS, 314 .parent = TYPE_PCI_BUS, 315 .class_init = pcie_bus_class_init, 316 }; 317 318 static const TypeInfo cxl_bus_info = { 319 .name = TYPE_CXL_BUS, 320 .parent = TYPE_PCIE_BUS, 321 .class_init = pcie_bus_class_init, 322 }; 323 324 static void pci_update_mappings(PCIDevice *d); 325 static void pci_irq_handler(void *opaque, int irq_num, int level); 326 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **); 327 static void pci_del_option_rom(PCIDevice *pdev); 328 329 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET; 330 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU; 331 332 PCIHostStateList pci_host_bridges; 333 334 int pci_bar(PCIDevice *d, int reg) 335 { 336 uint8_t type; 337 338 /* PCIe virtual functions do not have their own BARs */ 339 assert(!pci_is_vf(d)); 340 341 if (reg != PCI_ROM_SLOT) 342 return PCI_BASE_ADDRESS_0 + reg * 4; 343 344 type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; 345 return type == PCI_HEADER_TYPE_BRIDGE ? 
           PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
}

static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}

static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}

static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        int dev_irq = irq_num;
        bus = pci_get_bus(pci_dev);
        assert(bus->map_irq);
        irq_num = bus->map_irq(pci_dev, irq_num);
        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        if (bus->set_irq)
            break;
        pci_dev = bus->parent_dev;
    }
    pci_bus_change_irq_level(bus, irq_num, change);
}

int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}

/* Update interrupt status bit in config space on interrupt
 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
{
    if (dev->irq_state) {
        dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
    } else {
        dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    }
}

void pci_device_deassert_intx(PCIDevice *dev)
{
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_irq_handler(dev, i, 0);
    }
}

static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}

/*
 * Register and track a PM capability. If wmask is also enabled for the power
 * state field of the pmcsr register, guest writes may change the device PM
 * state. BAR access is only enabled while the device is in the D0 state.
 * Return the capability offset or negative error code.
 */
int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp)
{
    int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp);

    if (cap < 0) {
        return cap;
    }

    d->pm_cap = cap;
    d->cap_present |= QEMU_PCI_CAP_PM;

    return cap;
}

static uint8_t pci_pm_state(PCIDevice *d)
{
    uint16_t pmcsr;

    if (!(d->cap_present & QEMU_PCI_CAP_PM)) {
        return 0;
    }

    pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL);

    return pmcsr & PCI_PM_CTRL_STATE_MASK;
}
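
/*
 * Example (not part of the original file): a minimal sketch of how a
 * device's realize callback could use pci_pm_init() above.  The "foo"
 * names and the 0x60 capability offset are made up for illustration;
 * only the two helpers and the wmask update mirror code that actually
 * exists in this file.
 */
#if 0
static void foo_realize(PCIDevice *dev, Error **errp)
{
    /* Place a PM capability at a (hypothetical) config space offset. */
    if (pci_pm_init(dev, 0x60, errp) < 0) {
        return;
    }

    /*
     * Let the guest write the PowerState field of PMCSR.  Once it does,
     * pci_pm_update()/pci_update_mappings() below unmap the BARs
     * whenever the device leaves D0.
     */
    pci_set_word(dev->wmask + dev->pm_cap + PCI_PM_CTRL,
                 PCI_PM_CTRL_STATE_MASK);
}
#endif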

/*
 * Update the PM capability state based on the new value stored in config
 * space, relative to the old, pre-write state provided. If the new value
 * is rejected (unsupported or invalid transition), restore the old value.
 * Return the resulting PM state.
 */
static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old)
{
    uint16_t pmc;
    uint8_t new;

    if (!(d->cap_present & QEMU_PCI_CAP_PM) ||
        !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) {
        return old;
    }

    new = pci_pm_state(d);
    if (new == old) {
        return old;
    }

    pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC);

    /*
     * Transitions to D1 & D2 are only allowed if supported. Devices may
     * only transition to higher D-states or to D0.
     */
    if ((!(pmc & PCI_PM_CAP_D1) && new == 1) ||
        (!(pmc & PCI_PM_CAP_D2) && new == 2) ||
        (old && new && new < old)) {
        pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL,
                                     PCI_PM_CTRL_STATE_MASK);
        pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL,
                                   old);
        trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d),
                                    PCI_SLOT(d->devfn), PCI_FUNC(d->devfn),
                                    old, new);
        return old;
    }

    trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
                            PCI_FUNC(d->devfn), old, new);
    return new;
}

static void pci_reset_regions(PCIDevice *dev)
{
    int r;
    if (pci_is_vf(dev)) {
        return;
    }

    for (r = 0; r < PCI_NUM_REGIONS; ++r) {
        PCIIORegion *region = &dev->io_regions[r];
        if (!region->size) {
            continue;
        }

        if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(dev->config + pci_bar(dev, r), region->type);
        } else {
            pci_set_long(dev->config + pci_bar(dev, r), region->type);
        }
    }
}

static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                                 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                                 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    /* Default PM state is D0 */
    if (dev->cap_present & QEMU_PCI_CAP_PM) {
        pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL,
                                     PCI_PM_CTRL_STATE_MASK);
    }
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
    pcie_sriov_pf_reset(dev);
}

/*
 * This function is called on RST# and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}

/*
 * Trigger pci bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have already been device_cold_reset-ed.
 */
static void pcibus_reset_hold(Object *obj, ResetType type)
{
    PCIBus *bus = PCI_BUS(obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }

    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}

static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}

PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = pci_get_bus(d);

    while (!pci_bus_is_root(bus)) {
        d = bus->parent_dev;
        assert(d != NULL);

        bus = pci_get_bus(d);
    }

    return bus;
}

const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);

    assert(host_bridge->bus == rootbus);

    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }

    return rootbus->qbus.name;
}

bool pci_bus_bypass_iommu(PCIBus *bus)
{
    PCIBus *rootbus = bus;
    PCIHostState *host_bridge;

    if (!pci_bus_is_root(bus)) {
        rootbus = pci_device_root_bus(bus->parent_dev);
    }

    host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);

    assert(host_bridge->bus == rootbus);

    return host_bridge->bypass_iommu;
}

static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *mem, MemoryRegion *io,
                                       uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = mem;
    bus->address_space_io = io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}

static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}

bool pci_bus_is_express(const PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}

void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *mem, MemoryRegion *io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}

PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *mem, MemoryRegion *io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
    return bus;
}

void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}

void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    g_free(bus->irq_count);
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}
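
/*
 * Example (not part of the original file): a minimal sketch of how a host
 * bridge typically uses pci_bus_irqs() together with pci_bus_map_irqs()
 * below.  The foo_set_irq()/FooState names are made up; the contract of
 * passing a set_irq callback, an opaque pointer, the number of INTx lines
 * and a map_irq routine is the real one (pci_register_root_bus() further
 * down bundles the same two calls).
 */
#if 0
static void foo_set_irq(void *opaque, int irq_num, int level)
{
    FooState *s = opaque;

    /* Forward the (possibly shared) INTx level to the interrupt controller. */
    qemu_set_irq(s->isa_irq[irq_num], level);
}

static void foo_init_bus(FooState *s, PCIBus *bus)
{
    /* Four INTx lines; bus->irq_count[] tracks how many devices assert each. */
    pci_bus_irqs(bus, foo_set_irq, s, PCI_NUM_PINS);
    /* Route a device's pin to a bus-level line with the standard swizzle. */
    pci_bus_map_irqs(bus, pci_swizzle_map_irq_fn);
}
#endif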
714 715 void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq) 716 { 717 bus->map_irq = map_irq; 718 } 719 720 void pci_bus_irqs_cleanup(PCIBus *bus) 721 { 722 bus->set_irq = NULL; 723 bus->map_irq = NULL; 724 bus->irq_opaque = NULL; 725 bus->nirq = 0; 726 g_free(bus->irq_count); 727 bus->irq_count = NULL; 728 } 729 730 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name, 731 pci_set_irq_fn set_irq, pci_map_irq_fn map_irq, 732 void *irq_opaque, 733 MemoryRegion *mem, MemoryRegion *io, 734 uint8_t devfn_min, int nirq, 735 const char *typename) 736 { 737 PCIBus *bus; 738 739 bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename); 740 pci_bus_irqs(bus, set_irq, irq_opaque, nirq); 741 pci_bus_map_irqs(bus, map_irq); 742 return bus; 743 } 744 745 void pci_unregister_root_bus(PCIBus *bus) 746 { 747 pci_bus_irqs_cleanup(bus); 748 pci_root_bus_cleanup(bus); 749 } 750 751 int pci_bus_num(PCIBus *s) 752 { 753 return PCI_BUS_GET_CLASS(s)->bus_num(s); 754 } 755 756 /* Returns the min and max bus numbers of a PCI bus hierarchy */ 757 void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus) 758 { 759 int i; 760 *min_bus = *max_bus = pci_bus_num(bus); 761 762 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 763 PCIDevice *dev = bus->devices[i]; 764 765 if (dev && IS_PCI_BRIDGE(dev)) { 766 *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]); 767 *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]); 768 } 769 } 770 } 771 772 int pci_bus_numa_node(PCIBus *bus) 773 { 774 return PCI_BUS_GET_CLASS(bus)->numa_node(bus); 775 } 776 777 static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, 778 const VMStateField *field) 779 { 780 PCIDevice *s = container_of(pv, PCIDevice, config); 781 uint8_t *config; 782 int i; 783 784 assert(size == pci_config_size(s)); 785 config = g_malloc(size); 786 787 qemu_get_buffer(f, config, size); 788 for (i = 0; i < size; ++i) { 789 if ((config[i] ^ s->config[i]) & 790 s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) { 791 error_report("%s: Bad config data: i=0x%x read: %x device: %x " 792 "cmask: %x wmask: %x w1cmask:%x", __func__, 793 i, config[i], s->config[i], 794 s->cmask[i], s->wmask[i], s->w1cmask[i]); 795 g_free(config); 796 return -EINVAL; 797 } 798 } 799 memcpy(s->config, config, size); 800 801 pci_update_mappings(s); 802 if (IS_PCI_BRIDGE(s)) { 803 pci_bridge_update_mappings(PCI_BRIDGE(s)); 804 } 805 806 memory_region_set_enabled(&s->bus_master_enable_region, 807 pci_get_word(s->config + PCI_COMMAND) 808 & PCI_COMMAND_MASTER); 809 810 g_free(config); 811 return 0; 812 } 813 814 /* just put buffer */ 815 static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, 816 const VMStateField *field, JSONWriter *vmdesc) 817 { 818 const uint8_t **v = pv; 819 assert(size == pci_config_size(container_of(pv, PCIDevice, config))); 820 qemu_put_buffer(f, *v, size); 821 822 return 0; 823 } 824 825 static const VMStateInfo vmstate_info_pci_config = { 826 .name = "pci config", 827 .get = get_pci_config_device, 828 .put = put_pci_config_device, 829 }; 830 831 static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size, 832 const VMStateField *field) 833 { 834 PCIDevice *s = container_of(pv, PCIDevice, irq_state); 835 uint32_t irq_state[PCI_NUM_PINS]; 836 int i; 837 for (i = 0; i < PCI_NUM_PINS; ++i) { 838 irq_state[i] = qemu_get_be32(f); 839 if (irq_state[i] != 0x1 && irq_state[i] != 0) { 840 fprintf(stderr, "irq state %d: must be 0 or 1.\n", 841 irq_state[i]); 842 return -EINVAL; 843 } 844 } 845 846 for (i = 0; i < 
PCI_NUM_PINS; ++i) { 847 pci_set_irq_state(s, i, irq_state[i]); 848 } 849 850 return 0; 851 } 852 853 static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, 854 const VMStateField *field, JSONWriter *vmdesc) 855 { 856 int i; 857 PCIDevice *s = container_of(pv, PCIDevice, irq_state); 858 859 for (i = 0; i < PCI_NUM_PINS; ++i) { 860 qemu_put_be32(f, pci_irq_state(s, i)); 861 } 862 863 return 0; 864 } 865 866 static const VMStateInfo vmstate_info_pci_irq_state = { 867 .name = "pci irq state", 868 .get = get_pci_irq_state, 869 .put = put_pci_irq_state, 870 }; 871 872 static bool migrate_is_pcie(void *opaque, int version_id) 873 { 874 return pci_is_express((PCIDevice *)opaque); 875 } 876 877 static bool migrate_is_not_pcie(void *opaque, int version_id) 878 { 879 return !pci_is_express((PCIDevice *)opaque); 880 } 881 882 static int pci_post_load(void *opaque, int version_id) 883 { 884 pcie_sriov_pf_post_load(opaque); 885 return 0; 886 } 887 888 const VMStateDescription vmstate_pci_device = { 889 .name = "PCIDevice", 890 .version_id = 2, 891 .minimum_version_id = 1, 892 .post_load = pci_post_load, 893 .fields = (const VMStateField[]) { 894 VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), 895 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, 896 migrate_is_not_pcie, 897 0, vmstate_info_pci_config, 898 PCI_CONFIG_SPACE_SIZE), 899 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, 900 migrate_is_pcie, 901 0, vmstate_info_pci_config, 902 PCIE_CONFIG_SPACE_SIZE), 903 VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2, 904 vmstate_info_pci_irq_state, 905 PCI_NUM_PINS * sizeof(int32_t)), 906 VMSTATE_END_OF_LIST() 907 } 908 }; 909 910 911 void pci_device_save(PCIDevice *s, QEMUFile *f) 912 { 913 /* Clear interrupt status bit: it is implicit 914 * in irq_state which we are saving. 915 * This makes us compatible with old devices 916 * which never set or clear this bit. */ 917 s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; 918 vmstate_save_state(f, &vmstate_pci_device, s, NULL); 919 /* Restore the interrupt status bit. */ 920 pci_update_irq_status(s); 921 } 922 923 int pci_device_load(PCIDevice *s, QEMUFile *f) 924 { 925 int ret; 926 ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id); 927 /* Restore the interrupt status bit. 
*/ 928 pci_update_irq_status(s); 929 return ret; 930 } 931 932 static void pci_set_default_subsystem_id(PCIDevice *pci_dev) 933 { 934 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 935 pci_default_sub_vendor_id); 936 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 937 pci_default_sub_device_id); 938 } 939 940 /* 941 * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL 942 * [[<domain>:]<bus>:]<slot>.<func>, return -1 on error 943 */ 944 static int pci_parse_devaddr(const char *addr, int *domp, int *busp, 945 unsigned int *slotp, unsigned int *funcp) 946 { 947 const char *p; 948 char *e; 949 unsigned long val; 950 unsigned long dom = 0, bus = 0; 951 unsigned int slot = 0; 952 unsigned int func = 0; 953 954 p = addr; 955 val = strtoul(p, &e, 16); 956 if (e == p) 957 return -1; 958 if (*e == ':') { 959 bus = val; 960 p = e + 1; 961 val = strtoul(p, &e, 16); 962 if (e == p) 963 return -1; 964 if (*e == ':') { 965 dom = bus; 966 bus = val; 967 p = e + 1; 968 val = strtoul(p, &e, 16); 969 if (e == p) 970 return -1; 971 } 972 } 973 974 slot = val; 975 976 if (funcp != NULL) { 977 if (*e != '.') 978 return -1; 979 980 p = e + 1; 981 val = strtoul(p, &e, 16); 982 if (e == p) 983 return -1; 984 985 func = val; 986 } 987 988 /* if funcp == NULL func is 0 */ 989 if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) 990 return -1; 991 992 if (*e) 993 return -1; 994 995 *domp = dom; 996 *busp = bus; 997 *slotp = slot; 998 if (funcp != NULL) 999 *funcp = func; 1000 return 0; 1001 } 1002 1003 static void pci_init_cmask(PCIDevice *dev) 1004 { 1005 pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff); 1006 pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff); 1007 dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST; 1008 dev->cmask[PCI_REVISION_ID] = 0xff; 1009 dev->cmask[PCI_CLASS_PROG] = 0xff; 1010 pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff); 1011 dev->cmask[PCI_HEADER_TYPE] = 0xff; 1012 dev->cmask[PCI_CAPABILITY_LIST] = 0xff; 1013 } 1014 1015 static void pci_init_wmask(PCIDevice *dev) 1016 { 1017 int config_size = pci_config_size(dev); 1018 1019 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff; 1020 dev->wmask[PCI_INTERRUPT_LINE] = 0xff; 1021 pci_set_word(dev->wmask + PCI_COMMAND, 1022 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | 1023 PCI_COMMAND_INTX_DISABLE); 1024 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR); 1025 1026 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff, 1027 config_size - PCI_CONFIG_HEADER_SIZE); 1028 } 1029 1030 static void pci_init_w1cmask(PCIDevice *dev) 1031 { 1032 /* 1033 * Note: It's okay to set w1cmask even for readonly bits as 1034 * long as their value is hardwired to 0. 
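     *
     * Illustration: pci_default_write_config() below applies
     * "config &= ~(val & w1cmask)", so a guest write of 1 to one of the
     * status bits listed here (e.g. PCI_STATUS_REC_MASTER_ABORT) clears
     * that bit, while writing 0 leaves it untouched.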
1035 */ 1036 pci_set_word(dev->w1cmask + PCI_STATUS, 1037 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | 1038 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | 1039 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY); 1040 } 1041 1042 static void pci_init_mask_bridge(PCIDevice *d) 1043 { 1044 /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and 1045 PCI_SEC_LATENCY_TIMER */ 1046 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); 1047 1048 /* base and limit */ 1049 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; 1050 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; 1051 pci_set_word(d->wmask + PCI_MEMORY_BASE, 1052 PCI_MEMORY_RANGE_MASK & 0xffff); 1053 pci_set_word(d->wmask + PCI_MEMORY_LIMIT, 1054 PCI_MEMORY_RANGE_MASK & 0xffff); 1055 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, 1056 PCI_PREF_RANGE_MASK & 0xffff); 1057 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, 1058 PCI_PREF_RANGE_MASK & 0xffff); 1059 1060 /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */ 1061 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); 1062 1063 /* Supported memory and i/o types */ 1064 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; 1065 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; 1066 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, 1067 PCI_PREF_RANGE_TYPE_64); 1068 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, 1069 PCI_PREF_RANGE_TYPE_64); 1070 1071 /* 1072 * TODO: Bridges default to 10-bit VGA decoding but we currently only 1073 * implement 16-bit decoding (no alias support). 1074 */ 1075 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, 1076 PCI_BRIDGE_CTL_PARITY | 1077 PCI_BRIDGE_CTL_SERR | 1078 PCI_BRIDGE_CTL_ISA | 1079 PCI_BRIDGE_CTL_VGA | 1080 PCI_BRIDGE_CTL_VGA_16BIT | 1081 PCI_BRIDGE_CTL_MASTER_ABORT | 1082 PCI_BRIDGE_CTL_BUS_RESET | 1083 PCI_BRIDGE_CTL_FAST_BACK | 1084 PCI_BRIDGE_CTL_DISCARD | 1085 PCI_BRIDGE_CTL_SEC_DISCARD | 1086 PCI_BRIDGE_CTL_DISCARD_SERR); 1087 /* Below does not do anything as we never set this bit, put here for 1088 * completeness. */ 1089 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, 1090 PCI_BRIDGE_CTL_DISCARD_STATUS); 1091 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; 1092 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; 1093 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, 1094 PCI_PREF_RANGE_TYPE_MASK); 1095 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, 1096 PCI_PREF_RANGE_TYPE_MASK); 1097 } 1098 1099 static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) 1100 { 1101 uint8_t slot = PCI_SLOT(dev->devfn); 1102 uint8_t func; 1103 1104 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1105 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; 1106 } 1107 1108 /* 1109 * With SR/IOV and ARI, a device at function 0 need not be a multifunction 1110 * device, as it may just be a VF that ended up with function 0 in 1111 * the legacy PCI interpretation. Avoid failing in such cases: 1112 */ 1113 if (pci_is_vf(dev) && 1114 dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1115 return; 1116 } 1117 1118 /* 1119 * multifunction bit is interpreted in two ways as follows. 1120 * - all functions must set the bit to 1. 1121 * Example: Intel X53 1122 * - function 0 must set the bit, but the rest function (> 0) 1123 * is allowed to leave the bit to 0. 1124 * Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10, 1125 * 1126 * So OS (at least Linux) checks the bit of only function 0, 1127 * and doesn't see the bit of function > 0. 
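     *
     * Example: "-device e1000,addr=05.0,multifunction=on" together with
     * "-device e1000,addr=05.1" lets the guest enumerate function 05.1,
     * because function 0 advertises the multifunction bit.  Without
     * multifunction=on on function 0, populating 05.1 is rejected by the
     * check below.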
1128 * 1129 * The below check allows both interpretation. 1130 */ 1131 if (PCI_FUNC(dev->devfn)) { 1132 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)]; 1133 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { 1134 /* function 0 should set multifunction bit */ 1135 error_setg(errp, "PCI: single function device can't be populated " 1136 "in function %x.%x", slot, PCI_FUNC(dev->devfn)); 1137 return; 1138 } 1139 return; 1140 } 1141 1142 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1143 return; 1144 } 1145 /* function 0 indicates single function, so function > 0 must be NULL */ 1146 for (func = 1; func < PCI_FUNC_MAX; ++func) { 1147 if (bus->devices[PCI_DEVFN(slot, func)]) { 1148 error_setg(errp, "PCI: %x.0 indicates single function, " 1149 "but %x.%x is already populated.", 1150 slot, slot, func); 1151 return; 1152 } 1153 } 1154 } 1155 1156 static void pci_config_alloc(PCIDevice *pci_dev) 1157 { 1158 int config_size = pci_config_size(pci_dev); 1159 1160 pci_dev->config = g_malloc0(config_size); 1161 pci_dev->cmask = g_malloc0(config_size); 1162 pci_dev->wmask = g_malloc0(config_size); 1163 pci_dev->w1cmask = g_malloc0(config_size); 1164 pci_dev->used = g_malloc0(config_size); 1165 } 1166 1167 static void pci_config_free(PCIDevice *pci_dev) 1168 { 1169 g_free(pci_dev->config); 1170 g_free(pci_dev->cmask); 1171 g_free(pci_dev->wmask); 1172 g_free(pci_dev->w1cmask); 1173 g_free(pci_dev->used); 1174 } 1175 1176 static void do_pci_unregister_device(PCIDevice *pci_dev) 1177 { 1178 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL; 1179 pci_config_free(pci_dev); 1180 1181 if (xen_mode == XEN_EMULATE) { 1182 xen_evtchn_remove_pci_device(pci_dev); 1183 } 1184 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) { 1185 memory_region_del_subregion(&pci_dev->bus_master_container_region, 1186 &pci_dev->bus_master_enable_region); 1187 } 1188 address_space_destroy(&pci_dev->bus_master_as); 1189 } 1190 1191 /* Extract PCIReqIDCache into BDF format */ 1192 static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache) 1193 { 1194 uint8_t bus_n; 1195 uint16_t result; 1196 1197 switch (cache->type) { 1198 case PCI_REQ_ID_BDF: 1199 result = pci_get_bdf(cache->dev); 1200 break; 1201 case PCI_REQ_ID_SECONDARY_BUS: 1202 bus_n = pci_dev_bus_num(cache->dev); 1203 result = PCI_BUILD_BDF(bus_n, 0); 1204 break; 1205 default: 1206 error_report("Invalid PCI requester ID cache type: %d", 1207 cache->type); 1208 exit(1); 1209 break; 1210 } 1211 1212 return result; 1213 } 1214 1215 /* Parse bridges up to the root complex and return requester ID 1216 * cache for specific device. For full PCIe topology, the cache 1217 * result would be exactly the same as getting BDF of the device. 1218 * However, several tricks are required when system mixed up with 1219 * legacy PCI devices and PCIe-to-PCI bridges. 1220 * 1221 * Here we cache the proxy device (and type) not requester ID since 1222 * bus number might change from time to time. 
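 *
 * Example: a conventional PCI device behind a PCIe-to-PCI bridge whose
 * secondary bus is 3 is cached as PCI_REQ_ID_SECONDARY_BUS, so
 * pci_requester_id() later yields PCI_BUILD_BDF(3, 0) regardless of the
 * device's own slot/function.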
1223 */ 1224 static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev) 1225 { 1226 PCIDevice *parent; 1227 PCIReqIDCache cache = { 1228 .dev = dev, 1229 .type = PCI_REQ_ID_BDF, 1230 }; 1231 1232 while (!pci_bus_is_root(pci_get_bus(dev))) { 1233 /* We are under PCI/PCIe bridges */ 1234 parent = pci_get_bus(dev)->parent_dev; 1235 if (pci_is_express(parent)) { 1236 if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { 1237 /* When we pass through PCIe-to-PCI/PCIX bridges, we 1238 * override the requester ID using secondary bus 1239 * number of parent bridge with zeroed devfn 1240 * (pcie-to-pci bridge spec chap 2.3). */ 1241 cache.type = PCI_REQ_ID_SECONDARY_BUS; 1242 cache.dev = dev; 1243 } 1244 } else { 1245 /* Legacy PCI, override requester ID with the bridge's 1246 * BDF upstream. When the root complex connects to 1247 * legacy PCI devices (including buses), it can only 1248 * obtain requester ID info from directly attached 1249 * devices. If devices are attached under bridges, only 1250 * the requester ID of the bridge that is directly 1251 * attached to the root complex can be recognized. */ 1252 cache.type = PCI_REQ_ID_BDF; 1253 cache.dev = parent; 1254 } 1255 dev = parent; 1256 } 1257 1258 return cache; 1259 } 1260 1261 uint16_t pci_requester_id(PCIDevice *dev) 1262 { 1263 return pci_req_id_cache_extract(&dev->requester_id_cache); 1264 } 1265 1266 static bool pci_bus_devfn_available(PCIBus *bus, int devfn) 1267 { 1268 return !(bus->devices[devfn]); 1269 } 1270 1271 static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn) 1272 { 1273 return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn)); 1274 } 1275 1276 uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus) 1277 { 1278 return bus->slot_reserved_mask; 1279 } 1280 1281 void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask) 1282 { 1283 bus->slot_reserved_mask |= mask; 1284 } 1285 1286 void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask) 1287 { 1288 bus->slot_reserved_mask &= ~mask; 1289 } 1290 1291 /* -1 for devfn means auto assign */ 1292 static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, 1293 const char *name, int devfn, 1294 Error **errp) 1295 { 1296 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 1297 PCIConfigReadFunc *config_read = pc->config_read; 1298 PCIConfigWriteFunc *config_write = pc->config_write; 1299 Error *local_err = NULL; 1300 DeviceState *dev = DEVICE(pci_dev); 1301 PCIBus *bus = pci_get_bus(pci_dev); 1302 bool is_bridge = IS_PCI_BRIDGE(pci_dev); 1303 1304 /* Only pci bridges can be attached to extra PCI root buses */ 1305 if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) { 1306 error_setg(errp, 1307 "PCI: Only PCI/PCIe bridges can be plugged into %s", 1308 bus->parent_dev->name); 1309 return NULL; 1310 } 1311 1312 if (devfn < 0) { 1313 for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices); 1314 devfn += PCI_FUNC_MAX) { 1315 if (pci_bus_devfn_available(bus, devfn) && 1316 !pci_bus_devfn_reserved(bus, devfn)) { 1317 goto found; 1318 } 1319 } 1320 error_setg(errp, "PCI: no slot/function available for %s, all in use " 1321 "or reserved", name); 1322 return NULL; 1323 found: ; 1324 } else if (pci_bus_devfn_reserved(bus, devfn)) { 1325 error_setg(errp, "PCI: slot %d function %d not available for %s," 1326 " reserved", 1327 PCI_SLOT(devfn), PCI_FUNC(devfn), name); 1328 return NULL; 1329 } else if (!pci_bus_devfn_available(bus, devfn)) { 1330 error_setg(errp, "PCI: slot %d function %d not available for %s," 1331 " in use by %s,id=%s", 1332 PCI_SLOT(devfn), 
PCI_FUNC(devfn), name, 1333 bus->devices[devfn]->name, bus->devices[devfn]->qdev.id); 1334 return NULL; 1335 } 1336 1337 /* 1338 * Populating function 0 triggers a scan from the guest that 1339 * exposes other non-zero functions. Hence we need to ensure that 1340 * function 0 wasn't added yet. 1341 */ 1342 if (dev->hotplugged && !pci_is_vf(pci_dev) && 1343 pci_get_function_0(pci_dev)) { 1344 error_setg(errp, "PCI: slot %d function 0 already occupied by %s," 1345 " new func %s cannot be exposed to guest.", 1346 PCI_SLOT(pci_get_function_0(pci_dev)->devfn), 1347 pci_get_function_0(pci_dev)->name, 1348 name); 1349 1350 return NULL; 1351 } 1352 1353 pci_dev->devfn = devfn; 1354 pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev); 1355 pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); 1356 1357 memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev), 1358 "bus master container", UINT64_MAX); 1359 address_space_init(&pci_dev->bus_master_as, 1360 &pci_dev->bus_master_container_region, pci_dev->name); 1361 pci_dev->bus_master_as.max_bounce_buffer_size = 1362 pci_dev->max_bounce_buffer_size; 1363 1364 if (phase_check(PHASE_MACHINE_READY)) { 1365 pci_init_bus_master(pci_dev); 1366 } 1367 pci_dev->irq_state = 0; 1368 pci_config_alloc(pci_dev); 1369 1370 pci_config_set_vendor_id(pci_dev->config, pc->vendor_id); 1371 pci_config_set_device_id(pci_dev->config, pc->device_id); 1372 pci_config_set_revision(pci_dev->config, pc->revision); 1373 pci_config_set_class(pci_dev->config, pc->class_id); 1374 1375 if (!is_bridge) { 1376 if (pc->subsystem_vendor_id || pc->subsystem_id) { 1377 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 1378 pc->subsystem_vendor_id); 1379 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 1380 pc->subsystem_id); 1381 } else { 1382 pci_set_default_subsystem_id(pci_dev); 1383 } 1384 } else { 1385 /* subsystem_vendor_id/subsystem_id are only for header type 0 */ 1386 assert(!pc->subsystem_vendor_id); 1387 assert(!pc->subsystem_id); 1388 } 1389 pci_init_cmask(pci_dev); 1390 pci_init_wmask(pci_dev); 1391 pci_init_w1cmask(pci_dev); 1392 if (is_bridge) { 1393 pci_init_mask_bridge(pci_dev); 1394 } 1395 pci_init_multifunction(bus, pci_dev, &local_err); 1396 if (local_err) { 1397 error_propagate(errp, local_err); 1398 do_pci_unregister_device(pci_dev); 1399 return NULL; 1400 } 1401 1402 if (!config_read) 1403 config_read = pci_default_read_config; 1404 if (!config_write) 1405 config_write = pci_default_write_config; 1406 pci_dev->config_read = config_read; 1407 pci_dev->config_write = config_write; 1408 bus->devices[devfn] = pci_dev; 1409 pci_dev->version_id = 2; /* Current pci device vmstate version */ 1410 return pci_dev; 1411 } 1412 1413 static void pci_unregister_io_regions(PCIDevice *pci_dev) 1414 { 1415 PCIIORegion *r; 1416 int i; 1417 1418 for(i = 0; i < PCI_NUM_REGIONS; i++) { 1419 r = &pci_dev->io_regions[i]; 1420 if (!r->size || r->addr == PCI_BAR_UNMAPPED) 1421 continue; 1422 memory_region_del_subregion(r->address_space, r->memory); 1423 } 1424 1425 pci_unregister_vga(pci_dev); 1426 } 1427 1428 static void pci_qdev_unrealize(DeviceState *dev) 1429 { 1430 PCIDevice *pci_dev = PCI_DEVICE(dev); 1431 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 1432 1433 pci_unregister_io_regions(pci_dev); 1434 pci_del_option_rom(pci_dev); 1435 1436 if (pc->exit) { 1437 pc->exit(pci_dev); 1438 } 1439 1440 pci_device_deassert_intx(pci_dev); 1441 do_pci_unregister_device(pci_dev); 1442 1443 pci_dev->msi_trigger = NULL; 1444 1445 /* 1446 * clean up acpi-index so 
it could be reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}

void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    assert(!r->size);
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}

static void pci_update_vga(PCIDevice *pci_dev)
{
    uint16_t cmd;

    if (!pci_dev->has_vga) {
        return;
    }

    cmd = pci_get_word(pci_dev->config + PCI_COMMAND);

    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
                              cmd & PCI_COMMAND_MEMORY);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
                              cmd & PCI_COMMAND_IO);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
                              cmd & PCI_COMMAND_IO);
}

void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    pci_update_vga(pci_dev);
}

void pci_unregister_vga(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (!pci_dev->has_vga) {
        return;
    }

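    /* Remove the three VGA regions added by pci_register_vga() above. */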
memory_region_del_subregion(bus->address_space_mem, 1558 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]); 1559 memory_region_del_subregion(bus->address_space_io, 1560 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]); 1561 memory_region_del_subregion(bus->address_space_io, 1562 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]); 1563 pci_dev->has_vga = false; 1564 } 1565 1566 pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num) 1567 { 1568 return pci_dev->io_regions[region_num].addr; 1569 } 1570 1571 static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg, 1572 uint8_t type, pcibus_t size) 1573 { 1574 pcibus_t new_addr; 1575 if (!pci_is_vf(d)) { 1576 int bar = pci_bar(d, reg); 1577 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1578 new_addr = pci_get_quad(d->config + bar); 1579 } else { 1580 new_addr = pci_get_long(d->config + bar); 1581 } 1582 } else { 1583 PCIDevice *pf = d->exp.sriov_vf.pf; 1584 uint16_t sriov_cap = pf->exp.sriov_cap; 1585 int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4; 1586 uint16_t vf_offset = 1587 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET); 1588 uint16_t vf_stride = 1589 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE); 1590 uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride; 1591 1592 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1593 new_addr = pci_get_quad(pf->config + bar); 1594 } else { 1595 new_addr = pci_get_long(pf->config + bar); 1596 } 1597 new_addr += vf_num * size; 1598 } 1599 /* The ROM slot has a specific enable bit, keep it intact */ 1600 if (reg != PCI_ROM_SLOT) { 1601 new_addr &= ~(size - 1); 1602 } 1603 return new_addr; 1604 } 1605 1606 pcibus_t pci_bar_address(PCIDevice *d, 1607 int reg, uint8_t type, pcibus_t size) 1608 { 1609 pcibus_t new_addr, last_addr; 1610 uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); 1611 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); 1612 bool allow_0_address = mc->pci_allow_0_address; 1613 1614 if (type & PCI_BASE_ADDRESS_SPACE_IO) { 1615 if (!(cmd & PCI_COMMAND_IO)) { 1616 return PCI_BAR_UNMAPPED; 1617 } 1618 new_addr = pci_config_get_bar_addr(d, reg, type, size); 1619 last_addr = new_addr + size - 1; 1620 /* Check if 32 bit BAR wraps around explicitly. 1621 * TODO: make priorities correct and remove this work around. 1622 */ 1623 if (last_addr <= new_addr || last_addr >= UINT32_MAX || 1624 (!allow_0_address && new_addr == 0)) { 1625 return PCI_BAR_UNMAPPED; 1626 } 1627 return new_addr; 1628 } 1629 1630 if (!(cmd & PCI_COMMAND_MEMORY)) { 1631 return PCI_BAR_UNMAPPED; 1632 } 1633 new_addr = pci_config_get_bar_addr(d, reg, type, size); 1634 /* the ROM slot has a specific enable bit */ 1635 if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) { 1636 return PCI_BAR_UNMAPPED; 1637 } 1638 new_addr &= ~(size - 1); 1639 last_addr = new_addr + size - 1; 1640 /* NOTE: we do not support wrapping */ 1641 /* XXX: as we cannot support really dynamic 1642 mappings, we handle specific values as invalid 1643 mappings. */ 1644 if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED || 1645 (!allow_0_address && new_addr == 0)) { 1646 return PCI_BAR_UNMAPPED; 1647 } 1648 1649 /* Now pcibus_t is 64bit. 1650 * Check if 32 bit BAR wraps around explicitly. 1651 * Without this, PC ide doesn't work well. 1652 * TODO: remove this work around. 1653 */ 1654 if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) { 1655 return PCI_BAR_UNMAPPED; 1656 } 1657 1658 /* 1659 * OS is allowed to set BAR beyond its addressable 1660 * bits. 
For example, 32 bit OS can set 64bit bar 1661 * to >4G. Check it. TODO: we might need to support 1662 * it in the future for e.g. PAE. 1663 */ 1664 if (last_addr >= HWADDR_MAX) { 1665 return PCI_BAR_UNMAPPED; 1666 } 1667 1668 return new_addr; 1669 } 1670 1671 static void pci_update_mappings(PCIDevice *d) 1672 { 1673 PCIIORegion *r; 1674 int i; 1675 pcibus_t new_addr; 1676 1677 for(i = 0; i < PCI_NUM_REGIONS; i++) { 1678 r = &d->io_regions[i]; 1679 1680 /* this region isn't registered */ 1681 if (!r->size) 1682 continue; 1683 1684 new_addr = pci_bar_address(d, i, r->type, r->size); 1685 if (!d->enabled || pci_pm_state(d)) { 1686 new_addr = PCI_BAR_UNMAPPED; 1687 } 1688 1689 /* This bar isn't changed */ 1690 if (new_addr == r->addr) 1691 continue; 1692 1693 /* now do the real mapping */ 1694 if (r->addr != PCI_BAR_UNMAPPED) { 1695 trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d), 1696 PCI_SLOT(d->devfn), 1697 PCI_FUNC(d->devfn), 1698 i, r->addr, r->size); 1699 memory_region_del_subregion(r->address_space, r->memory); 1700 } 1701 r->addr = new_addr; 1702 if (r->addr != PCI_BAR_UNMAPPED) { 1703 trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d), 1704 PCI_SLOT(d->devfn), 1705 PCI_FUNC(d->devfn), 1706 i, r->addr, r->size); 1707 memory_region_add_subregion_overlap(r->address_space, 1708 r->addr, r->memory, 1); 1709 } 1710 } 1711 1712 pci_update_vga(d); 1713 } 1714 1715 static inline int pci_irq_disabled(PCIDevice *d) 1716 { 1717 return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE; 1718 } 1719 1720 /* Called after interrupt disabled field update in config space, 1721 * assert/deassert interrupts if necessary. 1722 * Gets original interrupt disable bit value (before update). */ 1723 static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled) 1724 { 1725 int i, disabled = pci_irq_disabled(d); 1726 if (disabled == was_irq_disabled) 1727 return; 1728 for (i = 0; i < PCI_NUM_PINS; ++i) { 1729 int state = pci_irq_state(d, i); 1730 pci_change_irq_level(d, i, disabled ? 
-state : state); 1731 } 1732 } 1733 1734 uint32_t pci_default_read_config(PCIDevice *d, 1735 uint32_t address, int len) 1736 { 1737 uint32_t val = 0; 1738 1739 assert(address + len <= pci_config_size(d)); 1740 1741 if (pci_is_express_downstream_port(d) && 1742 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { 1743 pcie_sync_bridge_lnk(d); 1744 } 1745 memcpy(&val, d->config + address, len); 1746 return le32_to_cpu(val); 1747 } 1748 1749 void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l) 1750 { 1751 uint8_t new_pm_state, old_pm_state = pci_pm_state(d); 1752 int i, was_irq_disabled = pci_irq_disabled(d); 1753 uint32_t val = val_in; 1754 1755 assert(addr + l <= pci_config_size(d)); 1756 1757 for (i = 0; i < l; val >>= 8, ++i) { 1758 uint8_t wmask = d->wmask[addr + i]; 1759 uint8_t w1cmask = d->w1cmask[addr + i]; 1760 assert(!(wmask & w1cmask)); 1761 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); 1762 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ 1763 } 1764 1765 new_pm_state = pci_pm_update(d, addr, l, old_pm_state); 1766 1767 if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || 1768 ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || 1769 ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || 1770 range_covers_byte(addr, l, PCI_COMMAND) || 1771 !!new_pm_state != !!old_pm_state) { 1772 pci_update_mappings(d); 1773 } 1774 1775 if (ranges_overlap(addr, l, PCI_COMMAND, 2)) { 1776 pci_update_irq_disabled(d, was_irq_disabled); 1777 memory_region_set_enabled(&d->bus_master_enable_region, 1778 (pci_get_word(d->config + PCI_COMMAND) 1779 & PCI_COMMAND_MASTER) && d->enabled); 1780 } 1781 1782 msi_write_config(d, addr, val_in, l); 1783 msix_write_config(d, addr, val_in, l); 1784 pcie_sriov_config_write(d, addr, val_in, l); 1785 } 1786 1787 /***********************************************************/ 1788 /* generic PCI irq support */ 1789 1790 /* 0 <= irq_num <= 3. level must be 0 or 1 */ 1791 static void pci_irq_handler(void *opaque, int irq_num, int level) 1792 { 1793 PCIDevice *pci_dev = opaque; 1794 int change; 1795 1796 assert(0 <= irq_num && irq_num < PCI_NUM_PINS); 1797 assert(level == 0 || level == 1); 1798 change = level - pci_irq_state(pci_dev, irq_num); 1799 if (!change) 1800 return; 1801 1802 pci_set_irq_state(pci_dev, irq_num, level); 1803 pci_update_irq_status(pci_dev); 1804 if (pci_irq_disabled(pci_dev)) 1805 return; 1806 pci_change_irq_level(pci_dev, irq_num, change); 1807 } 1808 1809 qemu_irq pci_allocate_irq(PCIDevice *pci_dev) 1810 { 1811 int intx = pci_intx(pci_dev); 1812 assert(0 <= intx && intx < PCI_NUM_PINS); 1813 1814 return qemu_allocate_irq(pci_irq_handler, pci_dev, intx); 1815 } 1816 1817 void pci_set_irq(PCIDevice *pci_dev, int level) 1818 { 1819 int intx = pci_intx(pci_dev); 1820 pci_irq_handler(pci_dev, intx, level); 1821 } 1822 1823 /* Special hooks used by device assignment */ 1824 void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq) 1825 { 1826 assert(pci_bus_is_root(bus)); 1827 bus->route_intx_to_irq = route_intx_to_irq; 1828 } 1829 1830 PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) 1831 { 1832 PCIBus *bus; 1833 1834 do { 1835 int dev_irq = pin; 1836 bus = pci_get_bus(dev); 1837 pin = bus->map_irq(dev, pin); 1838 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, 1839 pci_bus_is_root(bus) ? 
"root-complex" 1840 : DEVICE(bus->parent_dev)->canonical_path); 1841 dev = bus->parent_dev; 1842 } while (dev); 1843 1844 if (!bus->route_intx_to_irq) { 1845 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", 1846 object_get_typename(OBJECT(bus->qbus.parent))); 1847 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; 1848 } 1849 1850 return bus->route_intx_to_irq(bus->irq_opaque, pin); 1851 } 1852 1853 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new) 1854 { 1855 return old->mode != new->mode || old->irq != new->irq; 1856 } 1857 1858 void pci_bus_fire_intx_routing_notifier(PCIBus *bus) 1859 { 1860 PCIDevice *dev; 1861 PCIBus *sec; 1862 int i; 1863 1864 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1865 dev = bus->devices[i]; 1866 if (dev && dev->intx_routing_notifier) { 1867 dev->intx_routing_notifier(dev); 1868 } 1869 } 1870 1871 QLIST_FOREACH(sec, &bus->child, sibling) { 1872 pci_bus_fire_intx_routing_notifier(sec); 1873 } 1874 } 1875 1876 void pci_device_set_intx_routing_notifier(PCIDevice *dev, 1877 PCIINTxRoutingNotifier notifier) 1878 { 1879 dev->intx_routing_notifier = notifier; 1880 } 1881 1882 /* 1883 * PCI-to-PCI bridge specification 1884 * 9.1: Interrupt routing. Table 9-1 1885 * 1886 * the PCI Express Base Specification, Revision 2.1 1887 * 2.2.8.1: INTx interrupt signaling - Rules 1888 * the Implementation Note 1889 * Table 2-20 1890 */ 1891 /* 1892 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD 1893 * 0-origin unlike PCI interrupt pin register. 1894 */ 1895 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) 1896 { 1897 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); 1898 } 1899 1900 /***********************************************************/ 1901 /* monitor info on PCI */ 1902 1903 static const pci_class_desc pci_class_descriptions[] = 1904 { 1905 { 0x0001, "VGA controller", "display"}, 1906 { 0x0100, "SCSI controller", "scsi"}, 1907 { 0x0101, "IDE controller", "ide"}, 1908 { 0x0102, "Floppy controller", "fdc"}, 1909 { 0x0103, "IPI controller", "ipi"}, 1910 { 0x0104, "RAID controller", "raid"}, 1911 { 0x0106, "SATA controller"}, 1912 { 0x0107, "SAS controller"}, 1913 { 0x0180, "Storage controller"}, 1914 { 0x0200, "Ethernet controller", "ethernet"}, 1915 { 0x0201, "Token Ring controller", "token-ring"}, 1916 { 0x0202, "FDDI controller", "fddi"}, 1917 { 0x0203, "ATM controller", "atm"}, 1918 { 0x0280, "Network controller"}, 1919 { 0x0300, "VGA controller", "display", 0x00ff}, 1920 { 0x0301, "XGA controller"}, 1921 { 0x0302, "3D controller"}, 1922 { 0x0380, "Display controller"}, 1923 { 0x0400, "Video controller", "video"}, 1924 { 0x0401, "Audio controller", "sound"}, 1925 { 0x0402, "Phone"}, 1926 { 0x0403, "Audio controller", "sound"}, 1927 { 0x0480, "Multimedia controller"}, 1928 { 0x0500, "RAM controller", "memory"}, 1929 { 0x0501, "Flash controller", "flash"}, 1930 { 0x0580, "Memory controller"}, 1931 { 0x0600, "Host bridge", "host"}, 1932 { 0x0601, "ISA bridge", "isa"}, 1933 { 0x0602, "EISA bridge", "eisa"}, 1934 { 0x0603, "MC bridge", "mca"}, 1935 { 0x0604, "PCI bridge", "pci-bridge"}, 1936 { 0x0605, "PCMCIA bridge", "pcmcia"}, 1937 { 0x0606, "NUBUS bridge", "nubus"}, 1938 { 0x0607, "CARDBUS bridge", "cardbus"}, 1939 { 0x0608, "RACEWAY bridge"}, 1940 { 0x0680, "Bridge"}, 1941 { 0x0700, "Serial port", "serial"}, 1942 { 0x0701, "Parallel port", "parallel"}, 1943 { 0x0800, "Interrupt controller", "interrupt-controller"}, 1944 { 0x0801, "DMA controller", "dma-controller"}, 1945 { 0x0802, "Timer", "timer"}, 1946 { 0x0803, "RTC", 
"rtc"}, 1947 { 0x0900, "Keyboard", "keyboard"}, 1948 { 0x0901, "Pen", "pen"}, 1949 { 0x0902, "Mouse", "mouse"}, 1950 { 0x0A00, "Dock station", "dock", 0x00ff}, 1951 { 0x0B00, "i386 cpu", "cpu", 0x00ff}, 1952 { 0x0c00, "Firewire controller", "firewire"}, 1953 { 0x0c01, "Access bus controller", "access-bus"}, 1954 { 0x0c02, "SSA controller", "ssa"}, 1955 { 0x0c03, "USB controller", "usb"}, 1956 { 0x0c04, "Fibre channel controller", "fibre-channel"}, 1957 { 0x0c05, "SMBus"}, 1958 { 0, NULL} 1959 }; 1960 1961 void pci_for_each_device_under_bus_reverse(PCIBus *bus, 1962 pci_bus_dev_fn fn, 1963 void *opaque) 1964 { 1965 PCIDevice *d; 1966 int devfn; 1967 1968 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1969 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; 1970 if (d) { 1971 fn(bus, d, opaque); 1972 } 1973 } 1974 } 1975 1976 void pci_for_each_device_reverse(PCIBus *bus, int bus_num, 1977 pci_bus_dev_fn fn, void *opaque) 1978 { 1979 bus = pci_find_bus_nr(bus, bus_num); 1980 1981 if (bus) { 1982 pci_for_each_device_under_bus_reverse(bus, fn, opaque); 1983 } 1984 } 1985 1986 void pci_for_each_device_under_bus(PCIBus *bus, 1987 pci_bus_dev_fn fn, void *opaque) 1988 { 1989 PCIDevice *d; 1990 int devfn; 1991 1992 for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1993 d = bus->devices[devfn]; 1994 if (d) { 1995 fn(bus, d, opaque); 1996 } 1997 } 1998 } 1999 2000 void pci_for_each_device(PCIBus *bus, int bus_num, 2001 pci_bus_dev_fn fn, void *opaque) 2002 { 2003 bus = pci_find_bus_nr(bus, bus_num); 2004 2005 if (bus) { 2006 pci_for_each_device_under_bus(bus, fn, opaque); 2007 } 2008 } 2009 2010 const pci_class_desc *get_class_desc(int class) 2011 { 2012 const pci_class_desc *desc; 2013 2014 desc = pci_class_descriptions; 2015 while (desc->desc && class != desc->class) { 2016 desc++; 2017 } 2018 2019 return desc; 2020 } 2021 2022 void pci_init_nic_devices(PCIBus *bus, const char *default_model) 2023 { 2024 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, 2025 "virtio", "virtio-net-pci"); 2026 } 2027 2028 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, 2029 const char *alias, const char *devaddr) 2030 { 2031 NICInfo *nd = qemu_find_nic_info(model, true, alias); 2032 int dom, busnr, devfn; 2033 PCIDevice *pci_dev; 2034 unsigned slot; 2035 PCIBus *bus; 2036 2037 if (!nd) { 2038 return false; 2039 } 2040 2041 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { 2042 error_report("Invalid PCI device address %s for device %s", 2043 devaddr, model); 2044 exit(1); 2045 } 2046 2047 if (dom != 0) { 2048 error_report("No support for non-zero PCI domains"); 2049 exit(1); 2050 } 2051 2052 devfn = PCI_DEVFN(slot, 0); 2053 2054 bus = pci_find_bus_nr(rootbus, busnr); 2055 if (!bus) { 2056 error_report("Invalid PCI device address %s for device %s", 2057 devaddr, model); 2058 exit(1); 2059 } 2060 2061 pci_dev = pci_new(devfn, model); 2062 qdev_set_nic_properties(&pci_dev->qdev, nd); 2063 pci_realize_and_unref(pci_dev, bus, &error_fatal); 2064 return true; 2065 } 2066 2067 PCIDevice *pci_vga_init(PCIBus *bus) 2068 { 2069 vga_interface_created = true; 2070 switch (vga_interface_type) { 2071 case VGA_CIRRUS: 2072 return pci_create_simple(bus, -1, "cirrus-vga"); 2073 case VGA_QXL: 2074 return pci_create_simple(bus, -1, "qxl-vga"); 2075 case VGA_STD: 2076 return pci_create_simple(bus, -1, "VGA"); 2077 case VGA_VMWARE: 2078 return pci_create_simple(bus, -1, "vmware-svga"); 2079 case VGA_VIRTIO: 2080 return pci_create_simple(bus, -1, 
"virtio-vga"); 2081 case VGA_NONE: 2082 default: /* Other non-PCI types. Checking for unsupported types is already 2083 done in vl.c. */ 2084 return NULL; 2085 } 2086 } 2087 2088 /* Whether a given bus number is in range of the secondary 2089 * bus of the given bridge device. */ 2090 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num) 2091 { 2092 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & 2093 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ && 2094 dev->config[PCI_SECONDARY_BUS] <= bus_num && 2095 bus_num <= dev->config[PCI_SUBORDINATE_BUS]; 2096 } 2097 2098 /* Whether a given bus number is in a range of a root bus */ 2099 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num) 2100 { 2101 int i; 2102 2103 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 2104 PCIDevice *dev = bus->devices[i]; 2105 2106 if (dev && IS_PCI_BRIDGE(dev)) { 2107 if (pci_secondary_bus_in_range(dev, bus_num)) { 2108 return true; 2109 } 2110 } 2111 } 2112 2113 return false; 2114 } 2115 2116 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) 2117 { 2118 PCIBus *sec; 2119 2120 if (!bus) { 2121 return NULL; 2122 } 2123 2124 if (pci_bus_num(bus) == bus_num) { 2125 return bus; 2126 } 2127 2128 /* Consider all bus numbers in range for the host pci bridge. */ 2129 if (!pci_bus_is_root(bus) && 2130 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { 2131 return NULL; 2132 } 2133 2134 /* try child bus */ 2135 for (; bus; bus = sec) { 2136 QLIST_FOREACH(sec, &bus->child, sibling) { 2137 if (pci_bus_num(sec) == bus_num) { 2138 return sec; 2139 } 2140 /* PXB buses assumed to be children of bus 0 */ 2141 if (pci_bus_is_root(sec)) { 2142 if (pci_root_bus_in_range(sec, bus_num)) { 2143 break; 2144 } 2145 } else { 2146 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { 2147 break; 2148 } 2149 } 2150 } 2151 } 2152 2153 return NULL; 2154 } 2155 2156 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, 2157 pci_bus_fn end, void *parent_state) 2158 { 2159 PCIBus *sec; 2160 void *state; 2161 2162 if (!bus) { 2163 return; 2164 } 2165 2166 if (begin) { 2167 state = begin(bus, parent_state); 2168 } else { 2169 state = parent_state; 2170 } 2171 2172 QLIST_FOREACH(sec, &bus->child, sibling) { 2173 pci_for_each_bus_depth_first(sec, begin, end, state); 2174 } 2175 2176 if (end) { 2177 end(bus, state); 2178 } 2179 } 2180 2181 2182 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn) 2183 { 2184 bus = pci_find_bus_nr(bus, bus_num); 2185 2186 if (!bus) 2187 return NULL; 2188 2189 return bus->devices[devfn]; 2190 } 2191 2192 #define ONBOARD_INDEX_MAX (16 * 1024 - 1) 2193 2194 static void pci_qdev_realize(DeviceState *qdev, Error **errp) 2195 { 2196 PCIDevice *pci_dev = (PCIDevice *)qdev; 2197 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 2198 ObjectClass *klass = OBJECT_CLASS(pc); 2199 Error *local_err = NULL; 2200 bool is_default_rom; 2201 uint16_t class_id; 2202 2203 /* 2204 * capped by systemd (see: udev-builtin-net_id.c) 2205 * as it's the only known user honor it to avoid users 2206 * misconfigure QEMU and then wonder why acpi-index doesn't work 2207 */ 2208 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) { 2209 error_setg(errp, "acpi-index should be less or equal to %u", 2210 ONBOARD_INDEX_MAX); 2211 return; 2212 } 2213 2214 /* 2215 * make sure that acpi-index is unique across all present PCI devices 2216 */ 2217 if (pci_dev->acpi_index) { 2218 GSequence *used_indexes = pci_acpi_index_list(); 2219 2220 if (g_sequence_lookup(used_indexes, 2221 
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exists", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /*
     * Initialize cap_present for pci_is_express() and pci_config_size().
     * Note that hybrid PCI devices are not set automatically and need to
     * manage QEMU_PCI_CAP_EXPRESS manually.
     */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL)
        return;

    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    /*
     * A PCIe Downstream Port that does not have ARI Forwarding enabled must
     * associate only Device 0 with the device attached to the bus
     * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
     * sec 7.3.1).
     * With ARI, PCI_SLOT() can return a non-zero value, as the traditional
     * 5-bit Device Number and 3-bit Function Number fields in its associated
     * Routing IDs, Requester IDs and Completer IDs are interpreted as a
     * single 8-bit Function Number.  Hence, ignore ARI capable devices.
     */
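    /*
     * Illustrative example: a non-ARI device placed at addr=05.0 directly
     * under a PCIe root or downstream port only triggers the warning
     * below; the device is still realized, but only slot 0 is addressable
     * on that link.
     */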
    if (pci_is_express(pci_dev) &&
        !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
        pcie_has_upstream_port(pci_dev) &&
        PCI_SLOT(pci_dev->devfn)) {
        warn_report("PCI: slot %d is not valid for %s,"
                    " parent device only allows plugging into slot 0.",
                    PCI_SLOT(pci_dev->devfn), pci_dev->name);
    }

    if (pci_dev->failover_pair_id) {
        if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
            error_setg(errp, "failover primary device must be on "
                             "a PCI Express bus");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
        if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
            error_setg(errp, "failover primary device is not an "
                             "Ethernet device");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
            || (PCI_FUNC(pci_dev->devfn) != 0)) {
            error_setg(errp, "failover: primary device must be in its own "
                             "PCI slot");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        qdev->allow_unplug_during_migration = true;
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }

    pci_add_option_rom(pci_dev, is_default_rom, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    pci_set_power(pci_dev, true);

    pci_dev->msi_trigger = pci_msi_trigger;
}

static PCIDevice *pci_new_internal(int devfn, bool multifunction,
                                   const char *name)
{
    DeviceState *dev;

    dev = qdev_new(name);
    qdev_prop_set_int32(dev, "addr", devfn);
    qdev_prop_set_bit(dev, "multifunction", multifunction);
    return PCI_DEVICE(dev);
}

PCIDevice *pci_new_multifunction(int devfn, const char *name)
{
    return pci_new_internal(devfn, true, name);
}

PCIDevice *pci_new(int devfn, const char *name)
{
    return pci_new_internal(devfn, false, name);
}

bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
{
    return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
}

PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
                                           const char *name)
{
    PCIDevice *dev = pci_new_multifunction(devfn, name);
    pci_realize_and_unref(dev, bus, &error_fatal);
    return dev;
}

PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
    PCIDevice *dev = pci_new(devfn, name);
    pci_realize_and_unref(dev, bus, &error_fatal);
    return dev;
}

static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
{
    int offset = PCI_CONFIG_HEADER_SIZE;
    int i;
    for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
        if (pdev->used[i])
            offset = i + 1;
        else if (i - offset + 1 == size)
            return offset;
    }
    return 0;
}

static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
                                        uint8_t *prev_p)
{
    uint8_t next, prev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
        return 0;

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT)
        if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
            break;

    if (prev_p)
        *prev_p = prev;
    return next;
}

static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
{
    uint8_t next, prev, found = 0;

    if (!(pdev->used[offset])) {
        return 0;
    }

    assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT) {
        if (next <= offset && next > found) {
            found = next;
        }
    }
    return found;
}

/* Patch the PCI vendor and device ids in a PCI rom image if necessary.
   This is needed for an option rom which is used for more than one device. */
static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
{
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t rom_vendor_id;
    uint16_t rom_device_id;
    uint16_t rom_magic;
    uint16_t pcir_offset;
    uint8_t checksum;

    /* Words in rom data are little endian (like in PCI configuration),
       so they can be read / written with pci_get_word / pci_set_word. */

    /* Only a valid rom will be patched. */
    rom_magic = pci_get_word(ptr);
    if (rom_magic != 0xaa55) {
        trace_pci_bad_rom_magic(rom_magic, 0xaa55);
        return;
    }
    pcir_offset = pci_get_word(ptr + 0x18);
    if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
        trace_pci_bad_pcir_offset(pcir_offset);
        return;
    }

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
    rom_device_id = pci_get_word(ptr + pcir_offset + 6);

    trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id,
                              rom_vendor_id, rom_device_id);

    checksum = ptr[6];

    if (vendor_id != rom_vendor_id) {
        /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
        checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
        trace_pci_rom_checksum_change(ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 4, vendor_id);
    }

    if (device_id != rom_device_id) {
        /* Patch device id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
        checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
        trace_pci_rom_checksum_change(ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 6, device_id);
    }
}

/* Add an option rom for the device */
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
                               Error **errp)
{
    int64_t size = 0;
    g_autofree char *path = NULL;
    char name[32];
    const VMStateDescription *vmsd;

    /*
     * In the case of incoming migration the ROM will come with the
     * migration stream, so there is no reason to load the file.  Nor do we
     * want to fail if the local ROM file does not match the specified
     * romsize.
     */
    bool load_file = !runstate_check(RUN_STATE_INMIGRATE);

    if (!pdev->romfile || !strlen(pdev->romfile)) {
        return;
    }

    if (!pdev->rom_bar) {
        /*
         * Load rom via fw_cfg instead of creating a rom bar,
         * for 0.11 compatibility.
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

        /*
         * Hot-plugged devices can't use the option ROM
         * if the rom bar is disabled.
         */
        if (DEVICE(pdev)->hotplugged) {
            error_setg(errp, "Hot-plugged device without ROM bar"
                       " can't have an option ROM");
            return;
        }

        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return;
    }

    if (load_file || pdev->romsize == UINT32_MAX) {
        path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
        if (path == NULL) {
            path = g_strdup(pdev->romfile);
        }

        size = get_image_size(path);
        if (size < 0) {
            error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
            return;
        } else if (size == 0) {
            error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
            return;
        } else if (size > 2 * GiB) {
            error_setg(errp,
                       "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                       pdev->romfile);
            return;
        }
        if (pdev->romsize != UINT32_MAX) {
            if (size > pdev->romsize) {
                error_setg(errp, "romfile \"%s\" (%u bytes) "
                           "is too large for ROM size %u",
                           pdev->romfile, (uint32_t)size, pdev->romsize);
                return;
            }
        } else {
            pdev->romsize = pow2ceil(size);
        }
    }

    vmsd = qdev_get_vmsd(DEVICE(pdev));
    snprintf(name, sizeof(name), "%s.rom",
             vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));

    pdev->has_rom = true;
    memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
                           &error_fatal);

    if (load_file) {
        void *ptr = memory_region_get_ram_ptr(&pdev->rom);

        if (load_image_size(path, ptr, size) < 0) {
            error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
            return;
        }

        if (is_default_rom) {
            /* Only the default rom images will be patched (if needed). */
            pci_patch_ids(pdev, ptr, size);
        }
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
}

static void pci_del_option_rom(PCIDevice *pdev)
{
    if (!pdev->has_rom)
        return;

    vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
    pdev->has_rom = false;
}

/*
 * On success, pci_add_capability() returns a positive value that is the
 * offset of the PCI capability.
 * On failure, it sets an error and returns a negative error code.
 */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size,
                       Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        /* running out of PCI config space is a programming error */
        assert(offset);
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these.
         */
        for (i = offset; i < offset + size; i++) {
            overlapping_cap = pci_find_capability_at_offset(pdev, i);
            if (overlapping_cap) {
                error_setg(errp, "%s:%02x:%02x.%x "
                           "Attempt to add PCI capability %x at offset "
                           "%x overlaps existing capability %x at offset %x",
                           pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                           cap_id, offset, overlapping_cap, i);
                return -EINVAL;
            }
        }
    }

    config = pdev->config + offset;
    config[PCI_CAP_LIST_ID] = cap_id;
    config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
    pdev->config[PCI_CAPABILITY_LIST] = offset;
    pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
    /* Make capability read-only by default */
    memset(pdev->wmask + offset, 0, size);
    /* Check capability by default */
    memset(pdev->cmask + offset, 0xFF, size);
    return offset;
}

/* Unlink capability from the pci config space. */
void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
{
    uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);
    if (!offset)
        return;
    pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
    /* Make capability writable again */
    memset(pdev->wmask + offset, 0xff, size);
    memset(pdev->w1cmask + offset, 0, size);
    /* Clear cmask as device-specific registers can't be checked */
    memset(pdev->cmask + offset, 0, size);
    memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));

    if (!pdev->config[PCI_CAPABILITY_LIST])
        pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
}

uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
{
    return pci_find_capability_list(pdev, cap_id, NULL);
}

static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
{
    PCIDevice *d = (PCIDevice *)dev;
    const char *name = NULL;
    const pci_class_desc *desc = pci_class_descriptions;
    int class = pci_get_word(d->config + PCI_CLASS_DEVICE);

    while (desc->desc &&
           (class & ~desc->fw_ign_bits) !=
           (desc->class & ~desc->fw_ign_bits)) {
        desc++;
    }

    if (desc->desc) {
        name = desc->fw_name;
    }

    if (name) {
        pstrcpy(buf, len, name);
    } else {
        snprintf(buf, len, "pci%04x,%04x",
                 pci_get_word(d->config + PCI_VENDOR_ID),
                 pci_get_word(d->config + PCI_DEVICE_ID));
    }

    return buf;
}

static char *pcibus_get_fw_dev_path(DeviceState *dev)
{
    PCIDevice *d = (PCIDevice *)dev;
    char name[33];
    int has_func = !!PCI_FUNC(d->devfn);

    return g_strdup_printf("%s@%x%s%.*x",
                           pci_dev_fw_name(dev, name, sizeof(name)),
                           PCI_SLOT(d->devfn),
                           has_func ? "," : "",
                           has_func,
                           PCI_FUNC(d->devfn));
}

static char *pcibus_get_dev_path(DeviceState *dev)
{
    PCIDevice *d = container_of(dev, PCIDevice, qdev);
    PCIDevice *t;
    int slot_depth;
    /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
     * 00 is added here to make this format compatible with
     * domain:Bus:Slot.Func for systems without nested PCI bridges.
     * Slot.Function list specifies the slot and function numbers for all
     * devices on the path from root to the specific device.
     */
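    /*
     * For example (illustrative values): a device at slot 3, function 0
     * behind a bridge at 00:1e.0, on a root bus whose path is "0000:00",
     * yields the path "0000:00:1e.0:03.0".
     */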
    const char *root_bus_path;
    int root_bus_len;
    char slot[] = ":SS.F";
    int slot_len = sizeof slot - 1 /* For '\0' */;
    int path_len;
    char *path, *p;
    int s;

    root_bus_path = pci_root_bus_path(d);
    root_bus_len = strlen(root_bus_path);

    /* Calculate # of slots on path between device and root. */
    slot_depth = 0;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        ++slot_depth;
    }

    path_len = root_bus_len + slot_len * slot_depth;

    /* Allocate memory, fill in the terminating null byte. */
    path = g_malloc(path_len + 1 /* For '\0' */);
    path[path_len] = '\0';

    memcpy(path, root_bus_path, root_bus_len);

    /* Fill in slot numbers. We walk up from device to root, so need to print
     * them in the reverse order, last to first. */
    p = path + path_len;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        p -= slot_len;
        s = snprintf(slot, sizeof slot, ":%02x.%x",
                     PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
        assert(s == slot_len);
        memcpy(p, slot, slot_len);
    }

    return path;
}

static int pci_qdev_find_recursive(PCIBus *bus,
                                   const char *id, PCIDevice **pdev)
{
    DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
    if (!qdev) {
        return -ENODEV;
    }

    /* roughly check if the given qdev is a PCI device */
    if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
        *pdev = PCI_DEVICE(qdev);
        return 0;
    }
    return -EINVAL;
}

int pci_qdev_find_device(const char *id, PCIDevice **pdev)
{
    PCIHostState *host_bridge;
    int rc = -ENODEV;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
        if (!tmp) {
            rc = 0;
            break;
        }
        if (tmp != -ENODEV) {
            rc = tmp;
        }
    }

    return rc;
}

MemoryRegion *pci_address_space(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_mem;
}

MemoryRegion *pci_address_space_io(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_io;
}

static void pci_device_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);

    k->realize = pci_qdev_realize;
    k->unrealize = pci_qdev_unrealize;
    k->bus_type = TYPE_PCI_BUS;
    device_class_set_props(k, pci_props);
    object_class_property_set_description(
        klass, "x-max-bounce-buffer-size",
        "Maximum buffer size allocated for bounce buffers used for mapped "
        "access to indirect DMA memory");
}

static void pci_device_class_base_init(ObjectClass *klass, const void *data)
{
    if (!object_class_is_abstract(klass)) {
        ObjectClass *conventional =
            object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
        ObjectClass *pcie =
            object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
        ObjectClass *cxl =
            object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
        assert(conventional || pcie || cxl);
    }
}

/*
 * Get IOMMU root bus, aliased bus and devfn of a PCI device
 *
 * IOMMU root bus is needed by all call sites to call into iommu_ops.
 * For call sites which don't need the aliased BDF, passing NULL to
 * aliased_[bus|devfn] is allowed.
 *
 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
 *
 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
 *
 * @aliased_devfn: return aliased devfn of the PCI device, optional.
 */
static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
                                           PCIBus **piommu_bus,
                                           PCIBus **aliased_bus,
                                           int *aliased_devfn)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    int devfn = dev->devfn;

    while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen
         * from the IOMMU, due to topology limitations.  The IOMMU relies on
         * a requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts.  Instead, the
         * PCIe-to-PCI bridge creates and accepts transactions on behalf of
         * downstream devices.  When doing so, all downstream devices are
         * masked (aliased) behind a single requester ID.  The requester ID
         * used depends on the format of the bridge devices.  Proper
         * PCIe-to-PCI bridges, with a PCIe capability indicating such,
         * follow the guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge
         * specification, where the bridge uses the secondary bus as the
         * bridge portion of the requester ID and a devfn of 00.0.  For
         * other bridges, typically those found on the root complex such as
         * the dmi-to-pci-bridge, we follow the convention of typical
         * bare-metal hardware, which uses the requester ID of the bridge
         * itself.  There are device specific exceptions to these rules, but
         * these are the defaults that the Linux kernel uses when determining
         * DMA aliases itself and are believed to be true for the bare metal
         * equivalents of the devices emulated in QEMU.
         */
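        /*
         * Concrete illustration (made-up numbers): a conventional PCI
         * device behind a PCIe-to-PCI bridge whose secondary bus is 1 is
         * seen by the IOMMU as requester 01:00.0, whereas a device behind
         * a root-complex bridge such as the dmi-to-pci-bridge is aliased
         * to the bridge's own bus and devfn.
         */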
        if (!pci_bus_is_express(iommu_bus)) {
            PCIDevice *parent = iommu_bus->parent_dev;

            if (pci_is_express(parent) &&
                pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                devfn = PCI_DEVFN(0, 0);
                bus = iommu_bus;
            } else {
                devfn = parent->devfn;
                bus = parent_bus;
            }
        }

        iommu_bus = parent_bus;
    }

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
    assert(iommu_bus);

    if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
        iommu_bus = NULL;
    }

    *piommu_bus = iommu_bus;

    if (aliased_bus) {
        *aliased_bus = bus;
    }

    if (aliased_devfn) {
        *aliased_devfn = devfn;
    }
}

AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
    PCIBus *bus;
    PCIBus *iommu_bus;
    int devfn;

    pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
    if (iommu_bus) {
        return iommu_bus->iommu_ops->get_address_space(bus,
                                               iommu_bus->iommu_opaque, devfn);
    }
    return &address_space_memory;
}

bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
                                 Error **errp)
{
    PCIBus *iommu_bus, *aliased_bus;
    int aliased_devfn;

    /* set_iommu_device requires device's direct BDF instead of aliased BDF */
    pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
                                   &aliased_bus, &aliased_devfn);
    if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
        hiod->aliased_bus = aliased_bus;
        hiod->aliased_devfn = aliased_devfn;
        return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
                                                      iommu_bus->iommu_opaque,
                                                      dev->devfn, hiod, errp);
    }
    return true;
}

void pci_device_unset_iommu_device(PCIDevice *dev)
{
    PCIBus *iommu_bus;

    pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
    if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
        return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
                                                        iommu_bus->iommu_opaque,
                                                        dev->devfn);
    }
}

void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
{
    /*
     * If called, pci_setup_iommu() must be given a minimum set of
     * useful callbacks for the bus.
     */
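    /*
     * Typical use (illustrative; "s" and "my_iommu_ops" are hypothetical
     * names, not existing symbols): an IOMMU model calls
     * pci_setup_iommu(bus, &my_iommu_ops, s) at realize time, where
     * my_iommu_ops.get_address_space() returns the DMA AddressSpace to use
     * for a given devfn on that bus.
     */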
    assert(ops);
    assert(ops->get_address_space);

    bus->iommu_ops = ops;
    bus->iommu_opaque = opaque;
}

static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
{
    Range *range = opaque;
    uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
    int i;

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return;
    }

    if (IS_PCI_BRIDGE(dev)) {
        pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
        pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

        base = MAX(base, 0x1ULL << 32);

        if (limit >= base) {
            Range pref_range;
            range_set_bounds(&pref_range, base, limit);
            range_extend(range, &pref_range);
        }
    }
    for (i = 0; i < PCI_NUM_REGIONS; ++i) {
        PCIIORegion *r = &dev->io_regions[i];
        pcibus_t lob, upb;
        Range region_range;

        if (!r->size ||
            (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
            !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
            continue;
        }

        lob = pci_bar_address(dev, i, r->type, r->size);
        upb = lob + r->size - 1;
        if (lob == PCI_BAR_UNMAPPED) {
            continue;
        }

        lob = MAX(lob, 0x1ULL << 32);

        if (upb >= lob) {
            range_set_bounds(&region_range, lob, upb);
            range_extend(range, &region_range);
        }
    }
}

void pci_bus_get_w64_range(PCIBus *bus, Range *range)
{
    range_make_empty(range);
    pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
}

static bool pcie_has_upstream_port(PCIDevice *dev)
{
    PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));

    /* Device associated with an upstream port.
     * As there are several types of these, it's easier to check the
     * parent device: upstream ports are always connected to
     * root or downstream ports.
     */
    return parent_dev &&
           pci_is_express(parent_dev) &&
           parent_dev->exp.exp_cap &&
           (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
            pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
}

PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (pcie_has_upstream_port(pci_dev)) {
        /* With an upstream PCIe port, we only support 1 device at slot 0 */
        return bus->devices[0];
    } else {
        /* Other bus types might support multiple devices at slots 0-31 */
        return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
    }
}

MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
{
    MSIMessage msg;
    if (msix_enabled(dev)) {
        msg = msix_get_message(dev, vector);
    } else if (msi_enabled(dev)) {
        msg = msi_get_message(dev, vector);
    } else {
        /* Should never happen */
        error_report("%s: unknown interrupt type", __func__);
        abort();
    }
    return msg;
}

void pci_set_power(PCIDevice *d, bool state)
{
    /*
     * Don't change the enabled state of VFs when powering on/off the device.
     *
     * When powering on, VFs must not be enabled immediately but must wait
     * until the guest configures SR-IOV.
     * When powering off, their corresponding PFs will be reset, which
     * disables the VFs.
     */
    if (!pci_is_vf(d)) {
        pci_set_enabled(d, state);
    }
}

void pci_set_enabled(PCIDevice *d, bool state)
{
    if (d->enabled == state) {
        return;
    }

    d->enabled = state;
    pci_update_mappings(d);
    memory_region_set_enabled(&d->bus_master_enable_region,
                              (pci_get_word(d->config + PCI_COMMAND)
                               & PCI_COMMAND_MASTER) && d->enabled);
    if (qdev_is_realized(&d->qdev)) {
        pci_device_reset(d);
    }
}

static const TypeInfo pci_device_type_info = {
    .name = TYPE_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .abstract = true,
    .class_size = sizeof(PCIDeviceClass),
    .class_init = pci_device_class_init,
    .class_base_init = pci_device_class_base_init,
};

static void pci_register_types(void)
{
    type_register_static(&pci_bus_info);
    type_register_static(&pcie_bus_info);
    type_register_static(&cxl_bus_info);
    type_register_static(&conventional_pci_interface_info);
    type_register_static(&cxl_interface_info);
    type_register_static(&pcie_interface_info);
    type_register_static(&pci_device_type_info);
}

type_init(pci_register_types)