/*
 * QEMU PCI bus manager
 *
 * Copyright (c) 2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "net/net.h"
#include "sysemu/numa.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "trace.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/hotplug.h"
#include "hw/boards.h"
#include "hw/nvram/fw_cfg.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "pci-internal.h"

#include "hw/xen/xen.h"
#include "hw/i386/kvm/xen_evtchn.h"

//#define DEBUG_PCI
#ifdef DEBUG_PCI
# define PCI_DPRINTF(format, ...)       printf(format, ## __VA_ARGS__)
#else
# define PCI_DPRINTF(format, ...)       do { } while (0)
#endif

bool pci_available = true;

static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset_hold(Object *obj, ResetType type);
static bool pcie_has_upstream_port(PCIDevice *dev);

static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));

    visit_type_uint8(v, name, &busnr, errp);
}

static const PropertyInfo prop_pci_busnr = {
    .name = "busnr",
    .get = prop_pci_busnr_get,
};

static const Property pci_props[] = {
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
    DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                    QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
    DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
                       max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
    DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
                    QEMU_PCIE_EXT_TAG_BITNR, true),
    { .name = "busnr", .info = &prop_pci_busnr },
    DEFINE_PROP_END_OF_LIST()
};

static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};

static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
{
    return a - b;
}

static GSequence *pci_acpi_index_list(void)
{
    static GSequence *used_acpi_index_list;

    if (!used_acpi_index_list) {
        used_acpi_index_list = g_sequence_new(NULL);
    }
    return used_acpi_index_list;
}

static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}

static void pcibus_machine_done(Notifier *notifier, void *data)
{
    PCIBus *bus = container_of(notifier, PCIBus, machine_done);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_init_bus_master(bus->devices[i]);
        }
    }
}
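/*
 * Usage sketch (illustrative, assuming a hypothetical device model): the
 * alias set up by pci_init_bus_master() above is what backs a device's DMA.
 * A device model typically reads guest memory roughly like this, and the
 * access only reaches guest RAM once the guest has set PCI_COMMAND_MASTER,
 * which enables the alias in pci_default_write_config():
 *
 *     uint8_t buf[64];
 *     pci_dma_read(pci_dev, desc_addr, buf, sizeof(buf));
 *
 * (desc_addr is a hypothetical guest-physical address.)
 */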
static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);

    bus->machine_done.notify = pcibus_machine_done;
    qemu_add_machine_init_done_notifier(&bus->machine_done);

    vmstate_register_any(NULL, &vmstate_pcibus, bus);
}

static void pcie_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    Error *local_err = NULL;

    pci_bus_realize(qbus, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * A PCI-E bus can support extended config space if it's the root
     * bus, or if the bus/bridge above it does as well
     */
    if (pci_bus_is_root(bus)) {
        bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
    } else {
        PCIBus *parent_bus = pci_get_bus(bus->parent_dev);

        if (pci_bus_allows_extended_config_space(parent_bus)) {
            bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
        }
    }
}

static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);

    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}

static int pcibus_num(PCIBus *bus)
{
    if (pci_bus_is_root(bus)) {
        return 0; /* pci host bridge */
    }
    return bus->parent_dev->config[PCI_SECONDARY_BUS];
}

static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}

bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
                                        PCIBus *bus,
                                        Error **errp)
{
    Object *obj;

    if (!bus) {
        return true;
    }
    obj = OBJECT(bus);

    return fw_cfg_add_file_from_generator(fw_cfg, obj->parent,
                                          object_get_canonical_path_component(obj),
                                          "etc/extra-pci-roots", errp);
}

static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp)
{
    PCIBus *bus = PCI_BUS(obj);
    GByteArray *byte_array;
    uint64_t extra_hosts = 0;

    if (!bus) {
        return NULL;
    }

    QLIST_FOREACH(bus, &bus->child, sibling) {
        /* look for expander root buses */
        if (pci_bus_is_root(bus)) {
            extra_hosts++;
        }
    }

    if (!extra_hosts) {
        return NULL;
    }
    extra_hosts = cpu_to_le64(extra_hosts);

    byte_array = g_byte_array_new();
    g_byte_array_append(byte_array,
                        (const void *)&extra_hosts, sizeof(extra_hosts));

    return byte_array;
}

static void pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    PCIBusClass *pbc = PCI_BUS_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass);

    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;

    rc->phases.hold = pcibus_reset_hold;

    pbc->bus_num = pcibus_num;
    pbc->numa_node = pcibus_numa_node;

    fwgc->get_data = pci_bus_fw_cfg_gen_data;
}

static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
        { }
    }
};

static const TypeInfo cxl_interface_info = {
    .name = INTERFACE_CXL_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name = INTERFACE_PCIE_DEVICE,
    .parent = TYPE_INTERFACE,
};
static const TypeInfo conventional_pci_interface_info = {
    .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent = TYPE_INTERFACE,
};

static void pcie_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}

static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};

static void pci_update_mappings(PCIDevice *d);
static void pci_irq_handler(void *opaque, int irq_num, int level);
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
static void pci_del_option_rom(PCIDevice *pdev);

static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;

PCIHostStateList pci_host_bridges;

int pci_bar(PCIDevice *d, int reg)
{
    uint8_t type;

    /* PCIe virtual functions do not have their own BARs */
    assert(!pci_is_vf(d));

    if (reg != PCI_ROM_SLOT)
        return PCI_BASE_ADDRESS_0 + reg * 4;

    type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
}

static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}

static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}

static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        int dev_irq = irq_num;
        bus = pci_get_bus(pci_dev);
        assert(bus->map_irq);
        irq_num = bus->map_irq(pci_dev, irq_num);
        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        if (bus->set_irq)
            break;
        pci_dev = bus->parent_dev;
    }
    pci_bus_change_irq_level(bus, irq_num, change);
}

int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}

/* Update interrupt status bit in config space on interrupt
 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
{
    if (dev->irq_state) {
        dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
    } else {
        dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    }
}

void pci_device_deassert_intx(PCIDevice *dev)
{
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_irq_handler(dev, i, 0);
    }
}
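/*
 * Usage sketch (illustrative): a device model asserts and deasserts its INTx
 * pin through pci_set_irq(), defined later in this file.  The level change is
 * folded into irq_state above and then walked bridge by bridge through
 * pci_change_irq_level() until a bus with a set_irq callback (normally the
 * root bus) is reached:
 *
 *     pci_set_irq(pci_dev, 1);   // raise the pin named by Interrupt Pin
 *     ...
 *     pci_set_irq(pci_dev, 0);   // lower it again
 */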
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}

static void pci_reset_regions(PCIDevice *dev)
{
    int r;
    if (pci_is_vf(dev)) {
        return;
    }

    for (r = 0; r < PCI_NUM_REGIONS; ++r) {
        PCIIORegion *region = &dev->io_regions[r];
        if (!region->size) {
            continue;
        }

        if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(dev->config + pci_bar(dev, r), region->type);
        } else {
            pci_set_long(dev->config + pci_bar(dev, r), region->type);
        }
    }
}

static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                                 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                                 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
    pcie_sriov_pf_reset(dev);
}

/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}

/*
 * Trigger pci bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have already been device_cold_reset()-ed.
 */
static void pcibus_reset_hold(Object *obj, ResetType type)
{
    PCIBus *bus = PCI_BUS(obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }

    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}

static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}

PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = pci_get_bus(d);

    while (!pci_bus_is_root(bus)) {
        d = bus->parent_dev;
        assert(d != NULL);

        bus = pci_get_bus(d);
    }

    return bus;
}

const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);

    assert(host_bridge->bus == rootbus);

    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }

    return rootbus->qbus.name;
}

bool pci_bus_bypass_iommu(PCIBus *bus)
{
    PCIBus *rootbus = bus;
    PCIHostState *host_bridge;

    if (!pci_bus_is_root(bus)) {
        rootbus = pci_device_root_bus(bus->parent_dev);
    }

    host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);

    assert(host_bridge->bus == rootbus);

    return host_bridge->bypass_iommu;
}

static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *mem, MemoryRegion *io,
                                       uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = mem;
    bus->address_space_io = io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}

static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}

bool pci_bus_is_express(const PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}

void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *mem, MemoryRegion *io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}

PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *mem, MemoryRegion *io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
    return bus;
}

void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}

void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    g_free(bus->irq_count);
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}
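/*
 * Usage sketch (illustrative; the callback names are hypothetical): a host
 * bridge provides both callbacks when it creates its root bus, either with
 * pci_bus_irqs()/pci_bus_map_irqs() or in one call through
 * pci_register_root_bus() below.  map_irq translates a device's INTx pin to
 * one of the bus's nirq inputs - often a slot-based swizzle such as
 * (PCI_SLOT(devfn) + pin) % 4, which is what pci_swizzle_map_irq_fn()
 * implements later in this file - and set_irq drives the resulting line:
 *
 *     static int my_map_irq(PCIDevice *pci_dev, int pin)
 *     {
 *         return (PCI_SLOT(pci_dev->devfn) + pin) % 4;
 *     }
 *
 *     static void my_set_irq(void *opaque, int irq_num, int level)
 *     {
 *         qemu_set_irq(((qemu_irq *)opaque)[irq_num], level);
 *     }
 *
 *     pci_bus_irqs(bus, my_set_irq, irq_array, 4);
 *     pci_bus_map_irqs(bus, my_map_irq);
 */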
void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}

void pci_bus_irqs_cleanup(PCIBus *bus)
{
    bus->set_irq = NULL;
    bus->map_irq = NULL;
    bus->irq_opaque = NULL;
    bus->nirq = 0;
    g_free(bus->irq_count);
    bus->irq_count = NULL;
}

PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                              pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                              void *irq_opaque,
                              MemoryRegion *mem, MemoryRegion *io,
                              uint8_t devfn_min, int nirq,
                              const char *typename)
{
    PCIBus *bus;

    bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
    pci_bus_map_irqs(bus, map_irq);
    return bus;
}

void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}

int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}

/* Returns the min and max bus numbers of a PCI bus hierarchy */
void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
{
    int i;
    *min_bus = *max_bus = pci_bus_num(bus);

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        PCIDevice *dev = bus->devices[i];

        if (dev && IS_PCI_BRIDGE(dev)) {
            *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
            *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
        }
    }
}

int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}

static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}

/* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field, JSONWriter *vmdesc)
{
    const uint8_t **v = pv;
    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);

    return 0;
}

static const VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get = get_pci_config_device,
    .put = put_pci_config_device,
};
static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);
    uint32_t irq_state[PCI_NUM_PINS];
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        irq_state[i] = qemu_get_be32(f);
        if (irq_state[i] != 0x1 && irq_state[i] != 0) {
            fprintf(stderr, "irq state %d: must be 0 or 1.\n",
                    irq_state[i]);
            return -EINVAL;
        }
    }

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_set_irq_state(s, i, irq_state[i]);
    }

    return 0;
}

static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    int i;
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        qemu_put_be32(f, pci_irq_state(s, i));
    }

    return 0;
}

static const VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};

static bool migrate_is_pcie(void *opaque, int version_id)
{
    return pci_is_express((PCIDevice *)opaque);
}

static bool migrate_is_not_pcie(void *opaque, int version_id)
{
    return !pci_is_express((PCIDevice *)opaque);
}

const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};


void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}
int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;
    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
    return ret;
}

static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
{
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_default_sub_vendor_id);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 pci_default_sub_device_id);
}

/*
 * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL
 *       [[<domain>:]<bus>:]<slot>.<func>, return -1 on error
 */
static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
                             unsigned int *slotp, unsigned int *funcp)
{
    const char *p;
    char *e;
    unsigned long val;
    unsigned long dom = 0, bus = 0;
    unsigned int slot = 0;
    unsigned int func = 0;

    p = addr;
    val = strtoul(p, &e, 16);
    if (e == p)
        return -1;
    if (*e == ':') {
        bus = val;
        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;
        if (*e == ':') {
            dom = bus;
            bus = val;
            p = e + 1;
            val = strtoul(p, &e, 16);
            if (e == p)
                return -1;
        }
    }

    slot = val;

    if (funcp != NULL) {
        if (*e != '.')
            return -1;

        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;

        func = val;
    }

    /* if funcp == NULL func is 0 */
    if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
        return -1;

    if (*e)
        return -1;

    *domp = dom;
    *busp = bus;
    *slotp = slot;
    if (funcp != NULL)
        *funcp = func;
    return 0;
}
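/*
 * Worked examples (illustrative): every field is parsed as hexadecimal, so
 * with funcp == NULL "12" means slot 0x12, "1:12" means bus 1 slot 0x12, and
 * "0:1:12" adds domain 0.  When funcp is non-NULL a function suffix is
 * required, e.g. "1:12.3" for bus 1, slot 0x12, function 3.  Only domain 0
 * is accepted by the caller below (pci_init_nic_in_slot).
 */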
static void pci_init_cmask(PCIDevice *dev)
{
    pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
    pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
    dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    dev->cmask[PCI_REVISION_ID] = 0xff;
    dev->cmask[PCI_CLASS_PROG] = 0xff;
    pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
    dev->cmask[PCI_HEADER_TYPE] = 0xff;
    dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
}

static void pci_init_wmask(PCIDevice *dev)
{
    int config_size = pci_config_size(dev);

    dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
    dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
    pci_set_word(dev->wmask + PCI_COMMAND,
                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                 PCI_COMMAND_INTX_DISABLE);
    pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);

    memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
           config_size - PCI_CONFIG_HEADER_SIZE);
}

static void pci_init_w1cmask(PCIDevice *dev)
{
    /*
     * Note: It's okay to set w1cmask even for readonly bits as
     * long as their value is hardwired to 0.
     */
    pci_set_word(dev->w1cmask + PCI_STATUS,
                 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
                 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
                 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
}

static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LATENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);

    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);

    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);

    /* Supported memory and i/o types */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);

    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}

static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    /*
     * The multifunction bit is interpreted in two ways:
     * - all functions must set the bit to 1.
     *   Example: Intel X53
     * - function 0 must set the bit, but the other functions (> 0)
     *   are allowed to leave it at 0.
     *   Example: PIIX3 (also in qemu), PIIX4 (also in qemu), ICH10
     *
     * So the OS (at least Linux) checks the bit only on function 0,
     * and never looks at the bit of any function > 0.
     *
     * The check below allows both interpretations.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_setg(errp, "PCI: single function device can't be populated "
                       "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return;
        }
        return;
    }

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
            return;
        }
    }
}
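/*
 * Worked example (illustrative): to populate several functions of one slot,
 * function 0 must carry the multifunction property or the checks above fail,
 * e.g. on the command line:
 *
 *     -device e1000,addr=05.0,multifunction=on
 *     -device e1000,addr=05.1
 *
 * Plugging 05.1 while a non-multifunction 05.0 is present is rejected, as is
 * plugging a single-function 05.0 while 05.1 is already populated.
 */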
static void pci_config_alloc(PCIDevice *pci_dev)
{
    int config_size = pci_config_size(pci_dev);

    pci_dev->config = g_malloc0(config_size);
    pci_dev->cmask = g_malloc0(config_size);
    pci_dev->wmask = g_malloc0(config_size);
    pci_dev->w1cmask = g_malloc0(config_size);
    pci_dev->used = g_malloc0(config_size);
}

static void pci_config_free(PCIDevice *pci_dev)
{
    g_free(pci_dev->config);
    g_free(pci_dev->cmask);
    g_free(pci_dev->wmask);
    g_free(pci_dev->w1cmask);
    g_free(pci_dev->used);
}

static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);

    if (xen_mode == XEN_EMULATE) {
        xen_evtchn_remove_pci_device(pci_dev);
    }
    if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
        memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                    &pci_dev->bus_master_enable_region);
    }
    address_space_destroy(&pci_dev->bus_master_as);
}

/* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
{
    uint8_t bus_n;
    uint16_t result;

    switch (cache->type) {
    case PCI_REQ_ID_BDF:
        result = pci_get_bdf(cache->dev);
        break;
    case PCI_REQ_ID_SECONDARY_BUS:
        bus_n = pci_dev_bus_num(cache->dev);
        result = PCI_BUILD_BDF(bus_n, 0);
        break;
    default:
        error_report("Invalid PCI requester ID cache type: %d",
                     cache->type);
        exit(1);
        break;
    }

    return result;
}

/* Parse bridges up to the root complex and return the requester ID
 * cache for a specific device.  For a pure PCIe topology, the cached
 * result is exactly the same as taking the BDF of the device.
 * However, several tricks are required when the system mixes in
 * legacy PCI devices and PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) rather than the requester
 * ID itself, since the bus number might change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using the secondary bus
                 * number of the parent bridge with a zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3). */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream.  When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices.  If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}

uint16_t pci_requester_id(PCIDevice *dev)
{
    return pci_req_id_cache_extract(&dev->requester_id_cache);
}
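/*
 * Worked example (illustrative): a conventional PCI device at 07:02.1 sitting
 * directly behind a PCIe-to-PCI bridge whose secondary bus is 7 gets its
 * requester ID reported as 07:00.0 (the secondary bus number with devfn 0),
 * whereas a device in a pure PCIe topology simply reports its own BDF.
 */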
static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
{
    return !(bus->devices[devfn]);
}

static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
{
    return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
}

uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
{
    return bus->slot_reserved_mask;
}

void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask |= mask;
}

void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask &= ~mask;
}

/* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                   bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        for (devfn = bus->devfn_min; devfn < ARRAY_SIZE(bus->devices);
             devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    }

    /*
     * Populating function 0 triggers a scan from the guest that
     * exposes other non-zero functions. Hence we need to ensure that
     * function 0 wasn't added yet.
     */
    if (dev->hotplugged && !pci_is_vf(pci_dev) &&
        pci_get_function_0(pci_dev)) {
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);

        return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);
    pci_dev->bus_master_as.max_bounce_buffer_size =
        pci_dev->max_bounce_buffer_size;

    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}

static void pci_unregister_io_regions(PCIDevice *pci_dev)
{
    PCIIORegion *r;
    int i;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &pci_dev->io_regions[i];
        if (!r->size || r->addr == PCI_BAR_UNMAPPED)
            continue;
        memory_region_del_subregion(r->address_space, r->memory);
    }

    pci_unregister_vga(pci_dev);
}

static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);

    if (pc->exit) {
        pc->exit(pci_dev);
    }

    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * clean up acpi-index so it can be reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}
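/*
 * Usage sketch (illustrative, names are hypothetical): a device model
 * normally creates a MemoryRegion in its realize callback and hands it to
 * pci_register_bar().  The wmask computed above is what implements BAR
 * sizing: for a 4 KiB region, wmask is 0xFFFFF000, so a guest writing
 * all-ones to the BAR reads back the size mask as required by the PCI spec.
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_mmio_ops, s,
 *                           "mydev-mmio", 4 * KiB);
 *     pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);
 */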
static void pci_update_vga(PCIDevice *pci_dev)
{
    uint16_t cmd;

    if (!pci_dev->has_vga) {
        return;
    }

    cmd = pci_get_word(pci_dev->config + PCI_COMMAND);

    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
                              cmd & PCI_COMMAND_MEMORY);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
                              cmd & PCI_COMMAND_IO);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
                              cmd & PCI_COMMAND_IO);
}

void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    pci_update_vga(pci_dev);
}

void pci_unregister_vga(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (!pci_dev->has_vga) {
        return;
    }

    memory_region_del_subregion(bus->address_space_mem,
                                pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
    pci_dev->has_vga = false;
}

pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
{
    return pci_dev->io_regions[region_num].addr;
}

static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}
pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}
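/*
 * Worked example (illustrative): for a 64 KiB 32-bit memory BAR, a guest that
 * writes 0xFEBF0000 to the BAR and sets PCI_COMMAND_MEMORY gets the region
 * decoded at 0xFEBF0000.  With memory decoding still disabled, or with the
 * BAR left at 0 (unless the machine sets pci_allow_0_address), the function
 * above returns PCI_BAR_UNMAPPED and pci_update_mappings() below keeps the
 * region unmapped.
 */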
static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        if (!d->has_power) {
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}

static inline int pci_irq_disabled(PCIDevice *d)
{
    return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}

/* Called after interrupt disabled field update in config space,
 * assert/deassert interrupts if necessary.
 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
{
    int i, disabled = pci_irq_disabled(d);
    if (disabled == was_irq_disabled)
        return;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        int state = pci_irq_state(d, i);
        pci_change_irq_level(d, i, disabled ? -state : state);
    }
}

uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;

    assert(address + len <= pci_config_size(d));

    if (pci_is_express_downstream_port(d) &&
        ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
        pcie_sync_bridge_lnk(d);
    }
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}

void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    assert(addr + l <= pci_config_size(d));

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }
    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND))
        pci_update_mappings(d);

    if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->has_power);
    }

    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
    pcie_sriov_config_write(d, addr, val_in, l);
}

/***********************************************************/
/* generic PCI irq support */

/* 0 <= irq_num <= 3. level must be 0 or 1 */
static void pci_irq_handler(void *opaque, int irq_num, int level)
{
    PCIDevice *pci_dev = opaque;
    int change;

    assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
    assert(level == 0 || level == 1);
    change = level - pci_irq_state(pci_dev, irq_num);
    if (!change)
        return;

    pci_set_irq_state(pci_dev, irq_num, level);
    pci_update_irq_status(pci_dev);
    if (pci_irq_disabled(pci_dev))
        return;
    pci_change_irq_level(pci_dev, irq_num, change);
}

qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
    int intx = pci_intx(pci_dev);
    assert(0 <= intx && intx < PCI_NUM_PINS);

    return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
}

void pci_set_irq(PCIDevice *pci_dev, int level)
{
    int intx = pci_intx(pci_dev);
    pci_irq_handler(pci_dev, intx, level);
}

/* Special hooks used by device assignment */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
{
    assert(pci_bus_is_root(bus));
    bus->route_intx_to_irq = route_intx_to_irq;
}

PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    do {
        int dev_irq = pin;
        bus = pci_get_bus(dev);
        pin = bus->map_irq(dev, pin);
        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        dev = bus->parent_dev;
    } while (dev);

    if (!bus->route_intx_to_irq) {
        error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
                     object_get_typename(OBJECT(bus->qbus.parent)));
        return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
    }

    return bus->route_intx_to_irq(bus->irq_opaque, pin);
}

bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
{
    return old->mode != new->mode || old->irq != new->irq;
}

void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
{
    PCIDevice *dev;
    PCIBus *sec;
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        dev = bus->devices[i];
        if (dev && dev->intx_routing_notifier) {
            dev->intx_routing_notifier(dev);
        }
    }

    QLIST_FOREACH(sec, &bus->child, sibling) {
        pci_bus_fire_intx_routing_notifier(sec);
    }
}

void pci_device_set_intx_routing_notifier(PCIDevice *dev,
                                          PCIINTxRoutingNotifier notifier)
{
    dev->intx_routing_notifier = notifier;
}

/*
 * PCI-to-PCI bridge specification
 * 9.1: Interrupt routing. Table 9-1
 *
 * the PCI Express Base Specification, Revision 2.1
 * 2.2.8.1: INTx interrupt signaling - Rules
 *          the Implementation Note
 *          Table 2-20
 */
/*
 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
 * 0-origin unlike PCI interrupt pin register.
 */
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
    return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
}

/***********************************************************/
/* monitor info on PCI */

static const pci_class_desc pci_class_descriptions[] =
{
    { 0x0001, "VGA controller", "display"},
    { 0x0100, "SCSI controller", "scsi"},
    { 0x0101, "IDE controller", "ide"},
    { 0x0102, "Floppy controller", "fdc"},
    { 0x0103, "IPI controller", "ipi"},
    { 0x0104, "RAID controller", "raid"},
    { 0x0106, "SATA controller"},
    { 0x0107, "SAS controller"},
    { 0x0180, "Storage controller"},
    { 0x0200, "Ethernet controller", "ethernet"},
    { 0x0201, "Token Ring controller", "token-ring"},
    { 0x0202, "FDDI controller", "fddi"},
    { 0x0203, "ATM controller", "atm"},
    { 0x0280, "Network controller"},
    { 0x0300, "VGA controller", "display", 0x00ff},
    { 0x0301, "XGA controller"},
    { 0x0302, "3D controller"},
    { 0x0380, "Display controller"},
    { 0x0400, "Video controller", "video"},
    { 0x0401, "Audio controller", "sound"},
    { 0x0402, "Phone"},
    { 0x0403, "Audio controller", "sound"},
    { 0x0480, "Multimedia controller"},
    { 0x0500, "RAM controller", "memory"},
    { 0x0501, "Flash controller", "flash"},
    { 0x0580, "Memory controller"},
    { 0x0600, "Host bridge", "host"},
    { 0x0601, "ISA bridge", "isa"},
    { 0x0602, "EISA bridge", "eisa"},
    { 0x0603, "MC bridge", "mca"},
    { 0x0604, "PCI bridge", "pci-bridge"},
    { 0x0605, "PCMCIA bridge", "pcmcia"},
    { 0x0606, "NUBUS bridge", "nubus"},
    { 0x0607, "CARDBUS bridge", "cardbus"},
    { 0x0608, "RACEWAY bridge"},
    { 0x0680, "Bridge"},
    { 0x0700, "Serial port", "serial"},
    { 0x0701, "Parallel port", "parallel"},
    { 0x0800, "Interrupt controller", "interrupt-controller"},
    { 0x0801, "DMA controller", "dma-controller"},
    { 0x0802, "Timer", "timer"},
    { 0x0803, "RTC", "rtc"},
    { 0x0900, "Keyboard", "keyboard"},
    { 0x0901, "Pen", "pen"},
    { 0x0902, "Mouse", "mouse"},
    { 0x0A00, "Dock station", "dock", 0x00ff},
    { 0x0B00, "i386 cpu", "cpu", 0x00ff},
    { 0x0c00, "Firewire controller", "firewire"},
    { 0x0c01, "Access bus controller", "access-bus"},
    { 0x0c02, "SSA controller", "ssa"},
    { 0x0c03, "USB controller", "usb"},
    { 0x0c04, "Fibre channel controller", "fibre-channel"},
    { 0x0c05, "SMBus"},
    { 0, NULL}
};

void pci_for_each_device_under_bus_reverse(PCIBus *bus,
                                           pci_bus_dev_fn fn,
                                           void *opaque)
{
    PCIDevice *d;
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
        if (d) {
            fn(bus, d, opaque);
        }
    }
}

void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
                                 pci_bus_dev_fn fn, void *opaque)
{
    bus = pci_find_bus_nr(bus, bus_num);

    if (bus) {
        pci_for_each_device_under_bus_reverse(bus, fn, opaque);
    }
}

void pci_for_each_device_under_bus(PCIBus *bus,
                                   pci_bus_dev_fn fn, void *opaque)
{
    PCIDevice *d;
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        d = bus->devices[devfn];
        if (d) {
            fn(bus, d, opaque);
        }
    }
}

void pci_for_each_device(PCIBus *bus, int bus_num,
                         pci_bus_dev_fn fn, void *opaque)
{
    bus = pci_find_bus_nr(bus, bus_num);

    if (bus) {
        pci_for_each_device_under_bus(bus, fn, opaque);
    }
}

const pci_class_desc *get_class_desc(int class)
{
    const pci_class_desc *desc;

    desc = pci_class_descriptions;
    while (desc->desc && class != desc->class) {
        desc++;
    }

    return desc;
}

void pci_init_nic_devices(PCIBus *bus, const char *default_model)
{
    qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
                                "virtio", "virtio-net-pci");
}

bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
                          const char *alias, const char *devaddr)
{
    NICInfo *nd = qemu_find_nic_info(model, true, alias);
    int dom, busnr, devfn;
    PCIDevice *pci_dev;
    unsigned slot;
    PCIBus *bus;

    if (!nd) {
        return false;
    }

    if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, model);
        exit(1);
    }

    if (dom != 0) {
        error_report("No support for non-zero PCI domains");
        exit(1);
    }

    devfn = PCI_DEVFN(slot, 0);

    bus = pci_find_bus_nr(rootbus, busnr);
    if (!bus) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, model);
        exit(1);
    }

    pci_dev = pci_new(devfn, model);
    qdev_set_nic_properties(&pci_dev->qdev, nd);
    pci_realize_and_unref(pci_dev, bus, &error_fatal);
    return true;
}

PCIDevice *pci_vga_init(PCIBus *bus)
{
    vga_interface_created = true;
    switch (vga_interface_type) {
    case VGA_CIRRUS:
        return pci_create_simple(bus, -1, "cirrus-vga");
    case VGA_QXL:
        return pci_create_simple(bus, -1, "qxl-vga");
    case VGA_STD:
        return pci_create_simple(bus, -1, "VGA");
    case VGA_VMWARE:
        return pci_create_simple(bus, -1, "vmware-svga");
    case VGA_VIRTIO:
        return pci_create_simple(bus, -1, "virtio-vga");
    case VGA_NONE:
    default: /* Other non-PCI types. Checking for unsupported types is already
                done in vl.c. */
        return NULL;
    }
}

/* Whether a given bus number is in range of the secondary
 * bus of the given bridge device. */
static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
{
    return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
             PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
           dev->config[PCI_SECONDARY_BUS] <= bus_num &&
           bus_num <= dev->config[PCI_SUBORDINATE_BUS];
}

/* Whether a given bus number is in a range of a root bus */
static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        PCIDevice *dev = bus->devices[i];

        if (dev && IS_PCI_BRIDGE(dev)) {
            if (pci_secondary_bus_in_range(dev, bus_num)) {
                return true;
            }
        }
    }

    return false;
}

PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
{
    PCIBus *sec;

    if (!bus) {
        return NULL;
    }

    if (pci_bus_num(bus) == bus_num) {
        return bus;
    }

    /* Consider all bus numbers in range for the host pci bridge. */
    if (!pci_bus_is_root(bus) &&
        !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
        return NULL;
    }

    /* try child bus */
    for (; bus; bus = sec) {
        QLIST_FOREACH(sec, &bus->child, sibling) {
            if (pci_bus_num(sec) == bus_num) {
                return sec;
            }
            /* PXB buses assumed to be children of bus 0 */
            if (pci_bus_is_root(sec)) {
                if (pci_root_bus_in_range(sec, bus_num)) {
                    break;
                }
            } else {
                if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
                    break;
                }
            }
        }
    }

    return NULL;
}

void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
                                  pci_bus_fn end, void *parent_state)
{
    PCIBus *sec;
    void *state;

    if (!bus) {
        return;
    }

    if (begin) {
        state = begin(bus, parent_state);
    } else {
        state = parent_state;
    }

    QLIST_FOREACH(sec, &bus->child, sibling) {
        pci_for_each_bus_depth_first(sec, begin, end, state);
    }

    if (end) {
        end(bus, state);
    }
}


PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
{
    bus = pci_find_bus_nr(bus, bus_num);

    if (!bus)
        return NULL;

    return bus->devices[devfn];
}

#define ONBOARD_INDEX_MAX (16 * 1024 - 1)

static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    ObjectClass *klass = OBJECT_CLASS(pc);
    Error *local_err = NULL;
    bool is_default_rom;
    uint16_t class_id;

    /*
     * Capped by systemd (see: udev-builtin-net_id.c); as it's the only known
     * user, honor its limit to avoid users misconfiguring QEMU and then
     * wondering why acpi-index doesn't work.
     */
    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
        error_setg(errp, "acpi-index should be less or equal to %u",
                   ONBOARD_INDEX_MAX);
        return;
    }

    /*
     * make sure that acpi-index is unique across all present PCI devices
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        if (g_sequence_lookup(used_indexes,

#define ONBOARD_INDEX_MAX (16 * 1024 - 1)

static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    ObjectClass *klass = OBJECT_CLASS(pc);
    Error *local_err = NULL;
    bool is_default_rom;
    uint16_t class_id;

    /*
     * capped by systemd (see: udev-builtin-net_id.c); as it's the only
     * known user, honor it to avoid users misconfiguring QEMU and then
     * wondering why acpi-index doesn't work
     */
    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
        error_setg(errp, "acpi-index should be less than or equal to %u",
                   ONBOARD_INDEX_MAX);
        return;
    }

    /*
     * make sure that acpi-index is unique across all present PCI devices
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        if (g_sequence_lookup(used_indexes,
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exists", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /* initialize cap_present for pci_is_express() and pci_config_size().
     * Note that hybrid PCIs are not set automatically and need to manage
     * QEMU_PCI_CAP_EXPRESS manually */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL)
        return;

    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    /*
     * A PCIe Downstream Port that does not have ARI Forwarding enabled must
     * associate only Device 0 with the device attached to the bus
     * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
     * sec 7.3.1).
     * With ARI, PCI_SLOT() can return a non-zero value as the traditional
     * 5-bit Device Number and 3-bit Function Number fields in its associated
     * Routing IDs, Requester IDs and Completer IDs are interpreted as a
     * single 8-bit Function Number. Hence, ignore ARI capable devices.
     */
    if (pci_is_express(pci_dev) &&
        !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
        pcie_has_upstream_port(pci_dev) &&
        PCI_SLOT(pci_dev->devfn)) {
        warn_report("PCI: slot %d is not valid for %s,"
                    " parent device only allows plugging into slot 0.",
                    PCI_SLOT(pci_dev->devfn), pci_dev->name);
    }

    if (pci_dev->failover_pair_id) {
        if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
            error_setg(errp, "failover primary device must be on "
                       "PCIExpress bus");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
        if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
            error_setg(errp, "failover primary device is not an "
                       "Ethernet device");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
            || (PCI_FUNC(pci_dev->devfn) != 0)) {
            error_setg(errp, "failover: primary device must be in its own "
                       "PCI slot");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        qdev->allow_unplug_during_migration = true;
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }

    pci_add_option_rom(pci_dev, is_default_rom, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    pci_set_power(pci_dev, true);

    pci_dev->msi_trigger = pci_msi_trigger;
}

static PCIDevice *pci_new_internal(int devfn, bool multifunction,
                                   const char *name)
{
    DeviceState *dev;

    dev = qdev_new(name);
    qdev_prop_set_int32(dev, "addr", devfn);
    qdev_prop_set_bit(dev, "multifunction", multifunction);
    return PCI_DEVICE(dev);
}

PCIDevice *pci_new_multifunction(int devfn, const char *name)
{
    return pci_new_internal(devfn, true, name);
}

PCIDevice *pci_new(int devfn, const char *name)
{
    return pci_new_internal(devfn, false, name);
}

bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
{
    return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
}

PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
                                           const char *name)
{
    PCIDevice *dev = pci_new_multifunction(devfn, name);
    pci_realize_and_unref(dev, bus, &error_fatal);
    return dev;
}

PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
    PCIDevice *dev = pci_new(devfn, name);
    pci_realize_and_unref(dev, bus, &error_fatal);
    return dev;
}
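
/*
 * Typical instantiation from board code, sketched (the device name,
 * property and variables are illustrative, not taken from this file):
 *
 *     PCIDevice *dev = pci_new(PCI_DEVFN(3, 0), "e1000");
 *     qdev_prop_set_uint32(&dev->qdev, "rombar", 0);   // optional properties
 *     pci_realize_and_unref(dev, bus, &error_fatal);
 *
 * or, when no properties need to be set before realize:
 *
 *     pci_create_simple(bus, -1, "e1000");             // -1: auto-assign devfn
 *
 * Realization goes through pci_qdev_realize() above, which registers the
 * device on the bus, loads its option ROM and applies the ARI/slot and
 * failover checks.
 */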

static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
{
    int offset = PCI_CONFIG_HEADER_SIZE;
    int i;
    for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
        if (pdev->used[i])
            offset = i + 1;
        else if (i - offset + 1 == size)
            return offset;
    }
    return 0;
}

static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
                                        uint8_t *prev_p)
{
    uint8_t next, prev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
        return 0;

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT)
        if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
            break;

    if (prev_p)
        *prev_p = prev;
    return next;
}

static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
{
    uint8_t next, prev, found = 0;

    if (!(pdev->used[offset])) {
        return 0;
    }

    assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT) {
        if (next <= offset && next > found) {
            found = next;
        }
    }
    return found;
}

/* Patch the PCI vendor and device ids in a PCI rom image if necessary.
   This is needed for an option rom which is used for more than one device. */
static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
{
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t rom_vendor_id;
    uint16_t rom_device_id;
    uint16_t rom_magic;
    uint16_t pcir_offset;
    uint8_t checksum;

    /* Words in rom data are little endian (like in PCI configuration),
       so they can be read / written with pci_get_word / pci_set_word. */

    /* Only a valid rom will be patched. */
    rom_magic = pci_get_word(ptr);
    if (rom_magic != 0xaa55) {
        PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
        return;
    }
    pcir_offset = pci_get_word(ptr + 0x18);
    if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
        PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
        return;
    }

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
    rom_device_id = pci_get_word(ptr + pcir_offset + 6);

    PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
                vendor_id, device_id, rom_vendor_id, rom_device_id);

    checksum = ptr[6];

    if (vendor_id != rom_vendor_id) {
        /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
        checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 4, vendor_id);
    }

    if (device_id != rom_device_id) {
        /* Patch device id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
        checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 6, device_id);
    }
}

/* Add an option rom for the device */
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
                               Error **errp)
{
    int64_t size = 0;
    g_autofree char *path = NULL;
    char name[32];
    const VMStateDescription *vmsd;

    /*
     * In case of incoming migration, the ROM will come with the migration
     * stream, so there is no reason to load the file. Nor do we want to
     * fail if the local ROM file mismatches the specified romsize.
     */
    bool load_file = !runstate_check(RUN_STATE_INMIGRATE);

    if (!pdev->romfile || !strlen(pdev->romfile)) {
        return;
    }

    if (!pdev->rom_bar) {
        /*
         * Load rom via fw_cfg instead of creating a rom bar,
         * for 0.11 compatibility.
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

        /*
         * Hot-plugged devices can't use the option ROM
         * if the rom bar is disabled.
         */
        if (DEVICE(pdev)->hotplugged) {
            error_setg(errp, "Hot-plugged device without ROM bar"
                       " can't have an option ROM");
            return;
        }

        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return;
    }

    if (load_file || pdev->romsize == UINT32_MAX) {
        path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
        if (path == NULL) {
            path = g_strdup(pdev->romfile);
        }

        size = get_image_size(path);
        if (size < 0) {
            error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
            return;
        } else if (size == 0) {
            error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
            return;
        } else if (size > 2 * GiB) {
            error_setg(errp,
                       "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                       pdev->romfile);
            return;
        }
        if (pdev->romsize != UINT32_MAX) {
            if (size > pdev->romsize) {
                error_setg(errp, "romfile \"%s\" (%u bytes) "
                           "is too large for ROM size %u",
                           pdev->romfile, (uint32_t)size, pdev->romsize);
                return;
            }
        } else {
            pdev->romsize = pow2ceil(size);
        }
    }

    vmsd = qdev_get_vmsd(DEVICE(pdev));
    snprintf(name, sizeof(name), "%s.rom",
             vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));

    pdev->has_rom = true;
    memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
                           &error_fatal);

    if (load_file) {
        void *ptr = memory_region_get_ram_ptr(&pdev->rom);

        if (load_image_size(path, ptr, size) < 0) {
            error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
            return;
        }

        if (is_default_rom) {
            /* Only the default rom images will be patched (if needed). */
            pci_patch_ids(pdev, ptr, size);
        }
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
}

static void pci_del_option_rom(PCIDevice *pdev)
{
    if (!pdev->has_rom)
        return;

    vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
    pdev->has_rom = false;
}

/*
 * On success, pci_add_capability() returns a positive value,
 * which is the offset of the pci capability.
 * On failure, it sets an error and returns a negative error
 * code.
 */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size,
                       Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        /* out of PCI config space is programming error */
        assert(offset);
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these. */
        for (i = offset; i < offset + size; i++) {
            overlapping_cap = pci_find_capability_at_offset(pdev, i);
            if (overlapping_cap) {
                error_setg(errp, "%s:%02x:%02x.%x "
                           "Attempt to add PCI capability %x at offset "
                           "%x overlaps existing capability %x at offset %x",
                           pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                           cap_id, offset, overlapping_cap, i);
                return -EINVAL;
            }
        }
    }

    config = pdev->config + offset;
    config[PCI_CAP_LIST_ID] = cap_id;
    config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
    pdev->config[PCI_CAPABILITY_LIST] = offset;
    pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
    /* Make capability read-only by default */
    memset(pdev->wmask + offset, 0, size);
    /* Check capability by default */
    memset(pdev->cmask + offset, 0xFF, size);
    return offset;
}
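
/*
 * Illustrative call, sketched (the capability id and size are examples
 * only; passing offset 0 lets pci_find_space() pick a free spot):
 *
 *     int offset = pci_add_capability(pdev, PCI_CAP_ID_VNDR, 0, 8, errp);
 *     if (offset < 0) {
 *         return;    // errp has been set
 *     }
 *
 * The returned offset points at the capability header; the body is guest
 * read-only (wmask cleared above) until the caller opens it up, and cmask
 * is set so the bytes are compared on incoming migration.
 */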

/* Unlink capability from the pci config space. */
void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
{
    uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);
    if (!offset)
        return;
    pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
    /* Make capability writable again */
    memset(pdev->wmask + offset, 0xff, size);
    memset(pdev->w1cmask + offset, 0, size);
    /* Clear cmask as device-specific registers can't be checked */
    memset(pdev->cmask + offset, 0, size);
    memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));

    if (!pdev->config[PCI_CAPABILITY_LIST])
        pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
}

uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
{
    return pci_find_capability_list(pdev, cap_id, NULL);
}

static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
{
    PCIDevice *d = (PCIDevice *)dev;
    const char *name = NULL;
    const pci_class_desc *desc = pci_class_descriptions;
    int class = pci_get_word(d->config + PCI_CLASS_DEVICE);

    while (desc->desc &&
           (class & ~desc->fw_ign_bits) !=
           (desc->class & ~desc->fw_ign_bits)) {
        desc++;
    }

    if (desc->desc) {
        name = desc->fw_name;
    }

    if (name) {
        pstrcpy(buf, len, name);
    } else {
        snprintf(buf, len, "pci%04x,%04x",
                 pci_get_word(d->config + PCI_VENDOR_ID),
                 pci_get_word(d->config + PCI_DEVICE_ID));
    }

    return buf;
}

static char *pcibus_get_fw_dev_path(DeviceState *dev)
{
    PCIDevice *d = (PCIDevice *)dev;
    char name[33];
    int has_func = !!PCI_FUNC(d->devfn);

    return g_strdup_printf("%s@%x%s%.*x",
                           pci_dev_fw_name(dev, name, sizeof(name)),
                           PCI_SLOT(d->devfn),
                           has_func ? "," : "",
                           has_func,
                           PCI_FUNC(d->devfn));
}

static char *pcibus_get_dev_path(DeviceState *dev)
{
    PCIDevice *d = container_of(dev, PCIDevice, qdev);
    PCIDevice *t;
    int slot_depth;
    /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
     * 00 is added here to make this format compatible with
     * domain:Bus:Slot.Func for systems without nested PCI bridges.
     * Slot.Function list specifies the slot and function numbers for all
     * devices on the path from root to the specific device. */
    const char *root_bus_path;
    int root_bus_len;
    char slot[] = ":SS.F";
    int slot_len = sizeof slot - 1 /* For '\0' */;
    int path_len;
    char *path, *p;
    int s;

    root_bus_path = pci_root_bus_path(d);
    root_bus_len = strlen(root_bus_path);

    /* Calculate # of slots on path between device and root. */
    slot_depth = 0;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        ++slot_depth;
    }

    path_len = root_bus_len + slot_len * slot_depth;

    /* Allocate memory, fill in the terminating null byte. */
    path = g_malloc(path_len + 1 /* For '\0' */);
    path[path_len] = '\0';

    memcpy(path, root_bus_path, root_bus_len);

    /* Fill in slot numbers. We walk up from device to root, so need to print
     * them in the reverse order, last to first. */
    p = path + path_len;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        p -= slot_len;
        s = snprintf(slot, sizeof slot, ":%02x.%x",
                     PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
        assert(s == slot_len);
        memcpy(p, slot, slot_len);
    }

    return path;
}
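
/*
 * Example of the path format produced above (illustration only, assuming
 * the host bridge reports a root bus path of "0000:00"): a device at
 * slot 5, function 0 behind a bridge at slot 3, function 0 yields
 *
 *     "0000:00:03.0:05.0"
 *
 * while a device plugged directly into the root bus yields "0000:00:02.0",
 * i.e. the familiar domain:Bus:Slot.Func form when no bridges are nested.
 */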

static int pci_qdev_find_recursive(PCIBus *bus,
                                   const char *id, PCIDevice **pdev)
{
    DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
    if (!qdev) {
        return -ENODEV;
    }

    /* roughly check if given qdev is pci device */
    if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
        *pdev = PCI_DEVICE(qdev);
        return 0;
    }
    return -EINVAL;
}

int pci_qdev_find_device(const char *id, PCIDevice **pdev)
{
    PCIHostState *host_bridge;
    int rc = -ENODEV;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
        if (!tmp) {
            rc = 0;
            break;
        }
        if (tmp != -ENODEV) {
            rc = tmp;
        }
    }

    return rc;
}

MemoryRegion *pci_address_space(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_mem;
}

MemoryRegion *pci_address_space_io(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_io;
}

static void pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);

    k->realize = pci_qdev_realize;
    k->unrealize = pci_qdev_unrealize;
    k->bus_type = TYPE_PCI_BUS;
    device_class_set_props(k, pci_props);
    object_class_property_set_description(
        klass, "x-max-bounce-buffer-size",
        "Maximum buffer size allocated for bounce buffers used for mapped "
        "access to indirect DMA memory");
}

static void pci_device_class_base_init(ObjectClass *klass, void *data)
{
    if (!object_class_is_abstract(klass)) {
        ObjectClass *conventional =
            object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
        ObjectClass *pcie =
            object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
        ObjectClass *cxl =
            object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
        assert(conventional || pcie || cxl);
    }
}

/*
 * Get IOMMU root bus, aliased bus and devfn of a PCI device
 *
 * IOMMU root bus is needed by all call sites to call into iommu_ops.
 * For call sites which don't need aliased BDF, passing NULL to
 * aliased_[bus|devfn] is allowed.
 *
 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
 *
 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
 *
 * @aliased_devfn: return aliased devfn of the PCI device, optional.
 */
static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
                                           PCIBus **piommu_bus,
                                           PCIBus **aliased_bus,
                                           int *aliased_devfn)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    int devfn = dev->devfn;

    while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen from
         * the IOMMU, due to topology limitations. The IOMMU relies on a
         * requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts. Instead, the PCIe-
         * to-PCI bridge creates and accepts transactions on behalf of down-
         * stream devices. When doing so, all downstream devices are masked
         * (aliased) behind a single requester ID. The requester ID used
         * depends on the format of the bridge devices. Proper PCIe-to-PCI
         * bridges, with a PCIe capability indicating such, follow the
         * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
         * where the bridge uses the secondary bus as the bridge portion of the
         * requester ID and devfn of 00.0. For other bridges, typically those
         * found on the root complex such as the dmi-to-pci-bridge, we follow
         * the convention of typical bare-metal hardware, which uses the
         * requester ID of the bridge itself. There are device specific
         * exceptions to these rules, but these are the defaults that the
         * Linux kernel uses when determining DMA aliases itself and are
         * believed to be true for the bare metal equivalents of the devices
         * emulated in QEMU.
         */
        if (!pci_bus_is_express(iommu_bus)) {
            PCIDevice *parent = iommu_bus->parent_dev;

            if (pci_is_express(parent) &&
                pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                devfn = PCI_DEVFN(0, 0);
                bus = iommu_bus;
            } else {
                devfn = parent->devfn;
                bus = parent_bus;
            }
        }

        iommu_bus = parent_bus;
    }

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
    assert(iommu_bus);

    if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
        iommu_bus = NULL;
    }

    *piommu_bus = iommu_bus;

    if (aliased_bus) {
        *aliased_bus = bus;
    }

    if (aliased_devfn) {
        *aliased_devfn = devfn;
    }
}
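
/*
 * Concrete example of the aliasing rules above (illustration only): a
 * conventional PCI device at 04.0 behind a PCIe-to-PCI bridge whose
 * secondary bus is 5 is reported to the IOMMU as bus 5, devfn 00.0,
 * whereas behind a legacy bridge such as dmi-to-pci-bridge the alias is
 * the bridge's own bus and devfn.  Callers that only need the IOMMU root
 * bus may pass NULL for aliased_bus/aliased_devfn.
 */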

AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
    PCIBus *bus;
    PCIBus *iommu_bus;
    int devfn;

    pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
    if (iommu_bus) {
        return iommu_bus->iommu_ops->get_address_space(bus,
                                                       iommu_bus->iommu_opaque,
                                                       devfn);
    }
    return &address_space_memory;
}

bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
                                 Error **errp)
{
    PCIBus *iommu_bus, *aliased_bus;
    int aliased_devfn;

    /* set_iommu_device requires device's direct BDF instead of aliased BDF */
    pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
                                   &aliased_bus, &aliased_devfn);
    if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
        hiod->aliased_bus = aliased_bus;
        hiod->aliased_devfn = aliased_devfn;
        return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
                                                      iommu_bus->iommu_opaque,
                                                      dev->devfn, hiod, errp);
    }
    return true;
}

void pci_device_unset_iommu_device(PCIDevice *dev)
{
    PCIBus *iommu_bus;

    pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
    if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
        return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
                                                        iommu_bus->iommu_opaque,
                                                        dev->devfn);
    }
}

void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
{
    /*
     * If called, pci_setup_iommu() should provide a minimum set of
     * useful callbacks for the bus.
     */
    assert(ops);
    assert(ops->get_address_space);

    bus->iommu_ops = ops;
    bus->iommu_opaque = opaque;
}
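
/*
 * Sketch of how a machine or IOMMU model wires this up (the names are
 * hypothetical, not a real implementation):
 *
 *     static AddressSpace *my_iommu_get_as(PCIBus *bus, void *opaque,
 *                                          int devfn)
 *     {
 *         MyIOMMUState *s = opaque;
 *         return &s->as;    // or a per-devfn address space
 *     }
 *
 *     static const PCIIOMMUOps my_iommu_ops = {
 *         .get_address_space = my_iommu_get_as,
 *     };
 *
 *     pci_setup_iommu(pci_bus, &my_iommu_ops, iommu_state);
 *
 * After this, pci_device_iommu_address_space() above returns whatever the
 * callback provides for devices under that bus, unless the bus is
 * configured to bypass the IOMMU.
 */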

static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
{
    Range *range = opaque;
    uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
    int i;

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return;
    }

    if (IS_PCI_BRIDGE(dev)) {
        pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
        pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

        base = MAX(base, 0x1ULL << 32);

        if (limit >= base) {
            Range pref_range;
            range_set_bounds(&pref_range, base, limit);
            range_extend(range, &pref_range);
        }
    }
    for (i = 0; i < PCI_NUM_REGIONS; ++i) {
        PCIIORegion *r = &dev->io_regions[i];
        pcibus_t lob, upb;
        Range region_range;

        if (!r->size ||
            (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
            !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
            continue;
        }

        lob = pci_bar_address(dev, i, r->type, r->size);
        upb = lob + r->size - 1;
        if (lob == PCI_BAR_UNMAPPED) {
            continue;
        }

        lob = MAX(lob, 0x1ULL << 32);

        if (upb >= lob) {
            range_set_bounds(&region_range, lob, upb);
            range_extend(range, &region_range);
        }
    }
}

void pci_bus_get_w64_range(PCIBus *bus, Range *range)
{
    range_make_empty(range);
    pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
}

static bool pcie_has_upstream_port(PCIDevice *dev)
{
    PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));

    /* Device associated with an upstream port.
     * As there are several types of these, it's easier to check the
     * parent device: upstream ports are always connected to
     * root or downstream ports. */
    return parent_dev &&
           pci_is_express(parent_dev) &&
           parent_dev->exp.exp_cap &&
           (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
            pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
}

PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (pcie_has_upstream_port(pci_dev)) {
        /* With an upstream PCIe port, we only support 1 device at slot 0 */
        return bus->devices[0];
    } else {
        /* Other bus types might support multiple devices at slots 0-31 */
        return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
    }
}

MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
{
    MSIMessage msg;
    if (msix_enabled(dev)) {
        msg = msix_get_message(dev, vector);
    } else if (msi_enabled(dev)) {
        msg = msi_get_message(dev, vector);
    } else {
        /* Should never happen */
        error_report("%s: unknown interrupt type", __func__);
        abort();
    }
    return msg;
}

void pci_set_power(PCIDevice *d, bool state)
{
    if (d->has_power == state) {
        return;
    }

    d->has_power = state;
    pci_update_mappings(d);
    memory_region_set_enabled(&d->bus_master_enable_region,
                              (pci_get_word(d->config + PCI_COMMAND)
                               & PCI_COMMAND_MASTER) && d->has_power);
    if (!d->has_power) {
        pci_device_reset(d);
    }
}

static const TypeInfo pci_device_type_info = {
    .name = TYPE_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .abstract = true,
    .class_size = sizeof(PCIDeviceClass),
    .class_init = pci_device_class_init,
    .class_base_init = pci_device_class_base_init,
};

static void pci_register_types(void)
{
    type_register_static(&pci_bus_info);
    type_register_static(&pcie_bus_info);
    type_register_static(&cxl_bus_info);
    type_register_static(&conventional_pci_interface_info);
    type_register_static(&cxl_interface_info);
    type_register_static(&pcie_interface_info);
    type_register_static(&pci_device_type_info);
}

type_init(pci_register_types)