Lines Matching +full:check +full:- +full:patch
33 #include "hw/qdev-properties.h"
34 #include "hw/qdev-properties-system.h"
35 #include "migration/qemu-file-types.h"
42 #include "qemu/error-report.h"
52 #include "pci-internal.h"
78 DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
81 DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1),
84 DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
86 DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
90 DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
91 DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
93 DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
95 DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
97 DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf),
98 DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
118 return a - b; in g_cmp_uint32()
133 memory_region_set_enabled(&d->bus_master_enable_region, enable); in pci_set_master()
134 d->is_master = enable; /* cache the status */ in pci_set_master()
141 memory_region_init_alias(&pci_dev->bus_master_enable_region, in pci_init_bus_master()
143 dma_as->root, 0, memory_region_size(dma_as->root)); in pci_init_bus_master()
145 memory_region_add_subregion(&pci_dev->bus_master_container_region, 0, in pci_init_bus_master()
146 &pci_dev->bus_master_enable_region); in pci_init_bus_master()
154 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { in pcibus_machine_done()
155 if (bus->devices[i]) { in pcibus_machine_done()
156 pci_init_bus_master(bus->devices[i]); in pcibus_machine_done()
165 bus->machine_done.notify = pcibus_machine_done; in pci_bus_realize()
166 qemu_add_machine_init_done_notifier(&bus->machine_done); in pci_bus_realize()
183 * A PCI-E bus can support extended config space if it's the root in pcie_bus_realize()
187 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; in pcie_bus_realize()
189 PCIBus *parent_bus = pci_get_bus(bus->parent_dev); in pcie_bus_realize()
192 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; in pcie_bus_realize()
201 qemu_remove_machine_init_done_notifier(&bus->machine_done); in pci_bus_unrealize()
211 return bus->parent_dev->config[PCI_SECONDARY_BUS]; in pcibus_num()
230 return fw_cfg_add_file_from_generator(fw_cfg, obj->parent, in pci_bus_add_fw_cfg_extra_pci_roots()
232 "etc/extra-pci-roots", errp); in pci_bus_add_fw_cfg_extra_pci_roots()
245 QLIST_FOREACH(bus, &bus->child, sibling) { in pci_bus_fw_cfg_gen_data()
271 k->print_dev = pcibus_dev_print; in pci_bus_class_init()
272 k->get_dev_path = pcibus_get_dev_path; in pci_bus_class_init()
273 k->get_fw_dev_path = pcibus_get_fw_dev_path; in pci_bus_class_init()
274 k->realize = pci_bus_realize; in pci_bus_class_init()
275 k->unrealize = pci_bus_unrealize; in pci_bus_class_init()
277 rc->phases.hold = pcibus_reset_hold; in pci_bus_class_init()
279 pbc->bus_num = pcibus_num; in pci_bus_class_init()
280 pbc->numa_node = pcibus_numa_node; in pci_bus_class_init()
282 fwgc->get_data = pci_bus_fw_cfg_gen_data; in pci_bus_class_init()
316 k->realize = pcie_bus_realize; in pcie_bus_class_init()
351 type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; in pci_bar()
357 return (d->irq_state >> irq_num) & 0x1; in pci_irq_state()
362 d->irq_state &= ~(0x1 << irq_num); in pci_set_irq_state()
363 d->irq_state |= level << irq_num; in pci_set_irq_state()
369 assert(irq_num < bus->nirq); in pci_bus_change_irq_level()
370 bus->irq_count[irq_num] += change; in pci_bus_change_irq_level()
371 bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0); in pci_bus_change_irq_level()
380 assert(bus->map_irq); in pci_change_irq_level()
381 irq_num = bus->map_irq(pci_dev, irq_num); in pci_change_irq_level()
382 trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num, in pci_change_irq_level()
383 pci_bus_is_root(bus) ? "root-complex" in pci_change_irq_level()
384 : DEVICE(bus->parent_dev)->canonical_path); in pci_change_irq_level()
385 if (bus->set_irq) in pci_change_irq_level()
387 pci_dev = bus->parent_dev; in pci_change_irq_level()
395 assert(irq_num < bus->nirq); in pci_bus_get_irq_level()
396 return !!bus->irq_count[irq_num]; in pci_bus_get_irq_level()
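The pci_bus_change_irq_level()/pci_bus_get_irq_level() lines above show that each shared INTx line is reference-counted: irq_count[] records how many devices currently assert the line, and the level reported upstream is simply "count != 0". A minimal standalone sketch of that wired-OR bookkeeping (illustrative names, not the QEMU API):

    #include <assert.h>
    #include <stdbool.h>

    #define NIRQ 4

    static int irq_count[NIRQ];          /* how many devices drive each line */

    static bool intx_line_asserted(int irq_num)
    {
        return irq_count[irq_num] != 0;  /* wired-OR of all contributors */
    }

    static void intx_change_level(int irq_num, int change)  /* change: +1/-1 */
    {
        assert(irq_num >= 0 && irq_num < NIRQ);
        irq_count[irq_num] += change;
        /* a real bus would now invoke its set_irq hook with the new level */
        (void)intx_line_asserted(irq_num);
    }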
403 if (dev->irq_state) { in pci_update_irq_status()
404 dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT; in pci_update_irq_status()
406 dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; in pci_update_irq_status()
434 address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, in pci_msi_trigger()
452 d->pm_cap = cap; in pci_pm_init()
453 d->cap_present |= QEMU_PCI_CAP_PM; in pci_pm_init()
462 if (!(d->cap_present & QEMU_PCI_CAP_PM)) { in pci_pm_state()
466 pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL); in pci_pm_state()
473  * space relative to the old, pre-write state provided. If the new value
482 if (!(d->cap_present & QEMU_PCI_CAP_PM) || in pci_pm_update()
483 !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) { in pci_pm_update()
492 pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC); in pci_pm_update()
496 * only transition to higher D-states or to D0. in pci_pm_update()
501 pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL, in pci_pm_update()
503 pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL, in pci_pm_update()
505 trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d), in pci_pm_update()
506 PCI_SLOT(d->devfn), PCI_FUNC(d->devfn), in pci_pm_update()
511 trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn), in pci_pm_update()
512 PCI_FUNC(d->devfn), old, new); in pci_pm_update()
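The pci_pm_update() fragment above enforces the rule stated in its comment: once a function has left D0, a config write may only move it to D0 or to a deeper D-state. A hedged sketch of that rule alone, assuming old/new are the two-bit PCI_PM_CTRL power-state field values (0 = D0 ... 3 = D3hot):

    #include <stdbool.h>

    /* Sketch of the stated transition rule only, not the exact QEMU
     * control flow around it. */
    static bool pm_transition_allowed(unsigned old_state, unsigned new_state)
    {
        return old_state == 0          /* from D0 any state is reachable  */
            || new_state == 0          /* returning to D0 is always legal */
            || new_state >= old_state; /* otherwise only deeper D-states  */
    }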
524 PCIIORegion *region = &dev->io_regions[r]; in pci_reset_regions()
525 if (!region->size) { in pci_reset_regions()
529 if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && in pci_reset_regions()
530 region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { in pci_reset_regions()
531 pci_set_quad(dev->config + pci_bar(dev, r), region->type); in pci_reset_regions()
533 pci_set_long(dev->config + pci_bar(dev, r), region->type); in pci_reset_regions()
541 assert(dev->irq_state == 0); in pci_do_device_reset()
544 pci_word_test_and_clear_mask(dev->config + PCI_COMMAND, in pci_do_device_reset()
545 pci_get_word(dev->wmask + PCI_COMMAND) | in pci_do_device_reset()
546 pci_get_word(dev->w1cmask + PCI_COMMAND)); in pci_do_device_reset()
547 pci_word_test_and_clear_mask(dev->config + PCI_STATUS, in pci_do_device_reset()
548 pci_get_word(dev->wmask + PCI_STATUS) | in pci_do_device_reset()
549 pci_get_word(dev->w1cmask + PCI_STATUS)); in pci_do_device_reset()
551 pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE, in pci_do_device_reset()
552 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) | in pci_do_device_reset()
553 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE)); in pci_do_device_reset()
554 dev->config[PCI_CACHE_LINE_SIZE] = 0x0; in pci_do_device_reset()
556 if (dev->cap_present & QEMU_PCI_CAP_PM) { in pci_do_device_reset()
557 pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL, in pci_do_device_reset()
574 device_cold_reset(&dev->qdev); in pci_device_reset()
581  * have already been reset via device_cold_reset().
588 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { in pcibus_reset_hold()
589 if (bus->devices[i]) { in pcibus_reset_hold()
590 pci_do_device_reset(bus->devices[i]); in pcibus_reset_hold()
594 for (i = 0; i < bus->nirq; i++) { in pcibus_reset_hold()
595 assert(bus->irq_count[i] == 0); in pcibus_reset_hold()
618 d = bus->parent_dev; in pci_device_root_bus()
630 PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); in pci_root_bus_path()
633 assert(host_bridge->bus == rootbus); in pci_root_bus_path()
635 if (hc->root_bus_path) { in pci_root_bus_path()
636 return (*hc->root_bus_path)(host_bridge, rootbus); in pci_root_bus_path()
639 return rootbus->qbus.name; in pci_root_bus_path()
648 rootbus = pci_device_root_bus(bus->parent_dev); in pci_bus_bypass_iommu()
651 host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); in pci_bus_bypass_iommu()
653 assert(host_bridge->bus == rootbus); in pci_bus_bypass_iommu()
655 return host_bridge->bypass_iommu; in pci_bus_bypass_iommu()
663 bus->devfn_min = devfn_min; in pci_root_bus_internal_init()
664 bus->slot_reserved_mask = 0x0; in pci_root_bus_internal_init()
665 bus->address_space_mem = mem; in pci_root_bus_internal_init()
666 bus->address_space_io = io; in pci_root_bus_internal_init()
667 bus->flags |= PCI_BUS_IS_ROOT; in pci_root_bus_internal_init()
670 QLIST_INIT(&bus->child); in pci_root_bus_internal_init()
677 pci_host_bus_unregister(BUS(bus)->parent); in pci_bus_uninit()
715 bus->set_irq = set_irq; in pci_bus_irqs()
716 bus->irq_opaque = irq_opaque; in pci_bus_irqs()
717 bus->nirq = nirq; in pci_bus_irqs()
718 g_free(bus->irq_count); in pci_bus_irqs()
719 bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0])); in pci_bus_irqs()
724 bus->map_irq = map_irq; in pci_bus_map_irqs()
729 bus->set_irq = NULL; in pci_bus_irqs_cleanup()
730 bus->map_irq = NULL; in pci_bus_irqs_cleanup()
731 bus->irq_opaque = NULL; in pci_bus_irqs_cleanup()
732 bus->nirq = 0; in pci_bus_irqs_cleanup()
733 g_free(bus->irq_count); in pci_bus_irqs_cleanup()
734 bus->irq_count = NULL; in pci_bus_irqs_cleanup()
760 return PCI_BUS_GET_CLASS(s)->bus_num(s); in pci_bus_num()
769 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { in pci_bus_range()
770 PCIDevice *dev = bus->devices[i]; in pci_bus_range()
773 *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]); in pci_bus_range()
774 *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]); in pci_bus_range()
781 return PCI_BUS_GET_CLASS(bus)->numa_node(bus); in pci_bus_numa_node()
796 if ((config[i] ^ s->config[i]) & in get_pci_config_device()
797 s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) { in get_pci_config_device()
800 i, config[i], s->config[i], in get_pci_config_device()
801 s->cmask[i], s->wmask[i], s->w1cmask[i]); in get_pci_config_device()
803 return -EINVAL; in get_pci_config_device()
806 memcpy(s->config, config, size); in get_pci_config_device()
813 pci_set_master(s, pci_get_word(s->config + PCI_COMMAND) in get_pci_config_device()
848 return -EINVAL; in get_pci_irq_state()
923 s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; in pci_device_save()
932 ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id); in pci_device_load()
940 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, in pci_set_default_subsystem_id()
942 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, in pci_set_default_subsystem_id()
947  * Parse [[<domain>:]<bus>:]<slot> if funcp == NULL, or
948  * [[<domain>:]<bus>:]<slot>.<func> otherwise; return -1 on error.
963 return -1; in pci_parse_devaddr()
969 return -1; in pci_parse_devaddr()
976 return -1; in pci_parse_devaddr()
984 return -1; in pci_parse_devaddr()
989 return -1; in pci_parse_devaddr()
996 return -1; in pci_parse_devaddr()
999 return -1; in pci_parse_devaddr()
1011 pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff); in pci_init_cmask()
1012 pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff); in pci_init_cmask()
1013 dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST; in pci_init_cmask()
1014 dev->cmask[PCI_REVISION_ID] = 0xff; in pci_init_cmask()
1015 dev->cmask[PCI_CLASS_PROG] = 0xff; in pci_init_cmask()
1016 pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff); in pci_init_cmask()
1017 dev->cmask[PCI_HEADER_TYPE] = 0xff; in pci_init_cmask()
1018 dev->cmask[PCI_CAPABILITY_LIST] = 0xff; in pci_init_cmask()
1025 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff; in pci_init_wmask()
1026 dev->wmask[PCI_INTERRUPT_LINE] = 0xff; in pci_init_wmask()
1027 pci_set_word(dev->wmask + PCI_COMMAND, in pci_init_wmask()
1030 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR); in pci_init_wmask()
1032 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff, in pci_init_wmask()
1033 config_size - PCI_CONFIG_HEADER_SIZE); in pci_init_wmask()
1042 pci_set_word(dev->w1cmask + PCI_STATUS, in pci_init_w1cmask()
1052 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); in pci_init_mask_bridge()
1055 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; in pci_init_mask_bridge()
1056 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; in pci_init_mask_bridge()
1057 pci_set_word(d->wmask + PCI_MEMORY_BASE, in pci_init_mask_bridge()
1059 pci_set_word(d->wmask + PCI_MEMORY_LIMIT, in pci_init_mask_bridge()
1061 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, in pci_init_mask_bridge()
1063 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, in pci_init_mask_bridge()
1067 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); in pci_init_mask_bridge()
1070 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; in pci_init_mask_bridge()
1071 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; in pci_init_mask_bridge()
1072 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, in pci_init_mask_bridge()
1074 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, in pci_init_mask_bridge()
1078 * TODO: Bridges default to 10-bit VGA decoding but we currently only in pci_init_mask_bridge()
1079 * implement 16-bit decoding (no alias support). in pci_init_mask_bridge()
1081 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, in pci_init_mask_bridge()
1095 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, in pci_init_mask_bridge()
1097 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; in pci_init_mask_bridge()
1098 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; in pci_init_mask_bridge()
1099 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, in pci_init_mask_bridge()
1101 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, in pci_init_mask_bridge()
1107 uint8_t slot = PCI_SLOT(dev->devfn); in pci_init_multifunction()
1110 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { in pci_init_multifunction()
1111 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; in pci_init_multifunction()
1121 * - all functions must set the bit to 1. in pci_init_multifunction()
1123  * - function 0 must set the bit, but the remaining functions (> 0) in pci_init_multifunction()
1130  * The check below allows both interpretations. in pci_init_multifunction()
1132 if (PCI_FUNC(dev->devfn)) { in pci_init_multifunction()
1133 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)]; in pci_init_multifunction()
1134 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { in pci_init_multifunction()
1137 "in function %x.%x", slot, PCI_FUNC(dev->devfn)); in pci_init_multifunction()
1143 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { in pci_init_multifunction()
1148 PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)]; in pci_init_multifunction()
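pci_init_multifunction() above accepts both readings of the multifunction bit described in the comment, but it always requires that function 0 advertise the bit when any other function of the slot is populated. A simplified standalone check expressing that invariant (illustrative, not the QEMU data structures):

    #include <stdbool.h>
    #include <stdint.h>

    #define HDR_TYPE_MULTI_FUNCTION 0x80

    /* Function 0's header type gates whether functions 1..7 may exist. */
    static bool slot_multifunction_ok(const bool populated[8],
                                      uint8_t f0_header_type)
    {
        for (int func = 1; func < 8; func++) {
            if (populated[func] && !(f0_header_type & HDR_TYPE_MULTI_FUNCTION)) {
                return false;
            }
        }
        return true;
    }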
1162 pci_dev->config = g_malloc0(config_size); in pci_config_alloc()
1163 pci_dev->cmask = g_malloc0(config_size); in pci_config_alloc()
1164 pci_dev->wmask = g_malloc0(config_size); in pci_config_alloc()
1165 pci_dev->w1cmask = g_malloc0(config_size); in pci_config_alloc()
1166 pci_dev->used = g_malloc0(config_size); in pci_config_alloc()
1171 g_free(pci_dev->config); in pci_config_free()
1172 g_free(pci_dev->cmask); in pci_config_free()
1173 g_free(pci_dev->wmask); in pci_config_free()
1174 g_free(pci_dev->w1cmask); in pci_config_free()
1175 g_free(pci_dev->used); in pci_config_free()
1180 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL; in do_pci_unregister_device()
1186 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) { in do_pci_unregister_device()
1187 memory_region_del_subregion(&pci_dev->bus_master_container_region, in do_pci_unregister_device()
1188 &pci_dev->bus_master_enable_region); in do_pci_unregister_device()
1190 address_space_destroy(&pci_dev->bus_master_as); in do_pci_unregister_device()
1199 switch (cache->type) { in pci_req_id_cache_extract()
1201 result = pci_get_bdf(cache->dev); in pci_req_id_cache_extract()
1204 bus_n = pci_dev_bus_num(cache->dev); in pci_req_id_cache_extract()
1209 cache->type); in pci_req_id_cache_extract()
1221 * legacy PCI devices and PCIe-to-PCI bridges.
1236 parent = pci_get_bus(dev)->parent_dev; in pci_req_id_cache_get()
1239 /* When we pass through PCIe-to-PCI/PCIX bridges, we in pci_req_id_cache_get()
1242 * (pcie-to-pci bridge spec chap 2.3). */ in pci_req_id_cache_get()
1265 return pci_req_id_cache_extract(&dev->requester_id_cache); in pci_requester_id()
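pci_requester_id() and its cache above implement the aliasing the comment describes: a conventional PCI device behind a PCIe-to-PCI(-X) bridge is seen upstream under the bridge's ID, built from the bridge's secondary bus number with devfn 0. For reference, the 16-bit BDF packing involved (bus in bits 15:8, device in 7:3, function in 2:0), with a hypothetical example:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t bdf(uint8_t bus, uint8_t devfn)
    {
        return (uint16_t)((bus << 8) | devfn);   /* bus:dev.fn in 16 bits */
    }

    int main(void)
    {
        /* Hypothetical endpoint 05:02.1 behind a PCIe-to-PCI bridge whose
         * secondary bus is 5: upstream it is reported as 05:00.0. */
        printf("own id  = %#06x\n", bdf(5, (2 << 3) | 1));
        printf("aliased = %#06x\n", bdf(5, 0));
        return 0;
    }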
1270 return !(bus->devices[devfn]); in pci_bus_devfn_available()
1275 return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn)); in pci_bus_devfn_reserved()
1280 return bus->slot_reserved_mask; in pci_bus_get_slot_reserved_mask()
1285 bus->slot_reserved_mask |= mask; in pci_bus_set_slot_reserved_mask()
1290 bus->slot_reserved_mask &= ~mask; in pci_bus_clear_slot_reserved_mask()
1293 /* -1 for devfn means auto assign */
1299 PCIConfigReadFunc *config_read = pc->config_read; in do_pci_register_device()
1300 PCIConfigWriteFunc *config_write = pc->config_write; in do_pci_register_device()
1307 if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) { in do_pci_register_device()
1310 bus->parent_dev->name); in do_pci_register_device()
1315 for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices); in do_pci_register_device()
1335 bus->devices[devfn]->name, bus->devices[devfn]->qdev.id); in do_pci_register_device()
1341 * exposes other non-zero functions. Hence we need to ensure that in do_pci_register_device()
1344 if (dev->hotplugged && !pci_is_vf(pci_dev) && in do_pci_register_device()
1348 PCI_SLOT(pci_get_function_0(pci_dev)->devfn), in do_pci_register_device()
1349 pci_get_function_0(pci_dev)->name, in do_pci_register_device()
1355 pci_dev->devfn = devfn; in do_pci_register_device()
1356 pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev); in do_pci_register_device()
1357 pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); in do_pci_register_device()
1359 memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev), in do_pci_register_device()
1361 address_space_init(&pci_dev->bus_master_as, in do_pci_register_device()
1362 &pci_dev->bus_master_container_region, pci_dev->name); in do_pci_register_device()
1363 pci_dev->bus_master_as.max_bounce_buffer_size = in do_pci_register_device()
1364 pci_dev->max_bounce_buffer_size; in do_pci_register_device()
1369 pci_dev->irq_state = 0; in do_pci_register_device()
1372 pci_config_set_vendor_id(pci_dev->config, pc->vendor_id); in do_pci_register_device()
1373 pci_config_set_device_id(pci_dev->config, pc->device_id); in do_pci_register_device()
1374 pci_config_set_revision(pci_dev->config, pc->revision); in do_pci_register_device()
1375 pci_config_set_class(pci_dev->config, pc->class_id); in do_pci_register_device()
1378 if (pc->subsystem_vendor_id || pc->subsystem_id) { in do_pci_register_device()
1379 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, in do_pci_register_device()
1380 pc->subsystem_vendor_id); in do_pci_register_device()
1381 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, in do_pci_register_device()
1382 pc->subsystem_id); in do_pci_register_device()
1388 assert(!pc->subsystem_vendor_id); in do_pci_register_device()
1389 assert(!pc->subsystem_id); in do_pci_register_device()
1408 pci_dev->config_read = config_read; in do_pci_register_device()
1409 pci_dev->config_write = config_write; in do_pci_register_device()
1410 bus->devices[devfn] = pci_dev; in do_pci_register_device()
1411 pci_dev->version_id = 2; /* Current pci device vmstate version */ in do_pci_register_device()
1421 r = &pci_dev->io_regions[i]; in pci_unregister_io_regions()
1422 if (!r->size || r->addr == PCI_BAR_UNMAPPED) in pci_unregister_io_regions()
1424 memory_region_del_subregion(r->address_space, r->memory); in pci_unregister_io_regions()
1439 if (pc->exit) { in pci_qdev_unrealize()
1440 pc->exit(pci_dev); in pci_qdev_unrealize()
1446 pci_dev->msi_trigger = NULL; in pci_qdev_unrealize()
1449  * clean up acpi-index so it can be reused by another device in pci_qdev_unrealize()
1451 if (pci_dev->acpi_index) { in pci_qdev_unrealize()
1455 GINT_TO_POINTER(pci_dev->acpi_index), in pci_qdev_unrealize()
1475 pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; in pci_register_bar()
1478 r = &pci_dev->io_regions[region_num]; in pci_register_bar()
1479 assert(!r->size); in pci_register_bar()
1480 r->size = size; in pci_register_bar()
1481 r->type = type; in pci_register_bar()
1482 r->memory = memory; in pci_register_bar()
1483 r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO in pci_register_bar()
1484 ? pci_get_bus(pci_dev)->address_space_io in pci_register_bar()
1485 : pci_get_bus(pci_dev)->address_space_mem; in pci_register_bar()
1488 PCIDevice *pf = pci_dev->exp.sriov_vf.pf; in pci_register_bar()
1489 assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]); in pci_register_bar()
1491 r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size); in pci_register_bar()
1492 if (r->addr != PCI_BAR_UNMAPPED) { in pci_register_bar()
1493 memory_region_add_subregion_overlap(r->address_space, in pci_register_bar()
1494 r->addr, r->memory, 1); in pci_register_bar()
1497 r->addr = PCI_BAR_UNMAPPED; in pci_register_bar()
1499 wmask = ~(size - 1); in pci_register_bar()
1506 pci_set_long(pci_dev->config + addr, type); in pci_register_bar()
1508 if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && in pci_register_bar()
1509 r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { in pci_register_bar()
1510 pci_set_quad(pci_dev->wmask + addr, wmask); in pci_register_bar()
1511 pci_set_quad(pci_dev->cmask + addr, ~0ULL); in pci_register_bar()
1513 pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); in pci_register_bar()
1514 pci_set_long(pci_dev->cmask + addr, 0xffffffff); in pci_register_bar()
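The write mask computed in pci_register_bar() above, ~(size - 1), is exactly what makes the classic BAR sizing handshake work: bits below the BAR's natural alignment stay read-only zero, so a guest that writes all-ones and reads the register back can recover the size. A worked standalone example of the arithmetic (the 0xF mask for the low type bits matches the usual memory-BAR layout):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size  = 0x1000;                   /* 4 KiB BAR          */
        uint32_t wmask = ~(size - 1);              /* 0xFFFFF000         */
        uint32_t readback = 0xFFFFFFFFu & wmask;   /* type bits left out */
        uint32_t decoded  = ~(readback & ~0xFu) + 1;

        printf("wmask=%#x decoded size=%#x\n", wmask, decoded);
        return 0;
    }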
1523 if (!pci_dev->has_vga) { in pci_update_vga()
1527 cmd = pci_get_word(pci_dev->config + PCI_COMMAND); in pci_update_vga()
1529 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM], in pci_update_vga()
1531 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO], in pci_update_vga()
1533 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI], in pci_update_vga()
1542 assert(!pci_dev->has_vga); in pci_register_vga()
1545 pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem; in pci_register_vga()
1546 memory_region_add_subregion_overlap(bus->address_space_mem, in pci_register_vga()
1550 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo; in pci_register_vga()
1551 memory_region_add_subregion_overlap(bus->address_space_io, in pci_register_vga()
1555 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi; in pci_register_vga()
1556 memory_region_add_subregion_overlap(bus->address_space_io, in pci_register_vga()
1558 pci_dev->has_vga = true; in pci_register_vga()
1567 if (!pci_dev->has_vga) { in pci_unregister_vga()
1571 memory_region_del_subregion(bus->address_space_mem, in pci_unregister_vga()
1572 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]); in pci_unregister_vga()
1573 memory_region_del_subregion(bus->address_space_io, in pci_unregister_vga()
1574 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]); in pci_unregister_vga()
1575 memory_region_del_subregion(bus->address_space_io, in pci_unregister_vga()
1576 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]); in pci_unregister_vga()
1577 pci_dev->has_vga = false; in pci_unregister_vga()
1582 return pci_dev->io_regions[region_num].addr; in pci_get_bar_addr()
1592 new_addr = pci_get_quad(d->config + bar); in pci_config_get_bar_addr()
1594 new_addr = pci_get_long(d->config + bar); in pci_config_get_bar_addr()
1597 PCIDevice *pf = d->exp.sriov_vf.pf; in pci_config_get_bar_addr()
1598 uint16_t sriov_cap = pf->exp.sriov_cap; in pci_config_get_bar_addr()
1601 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET); in pci_config_get_bar_addr()
1603 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE); in pci_config_get_bar_addr()
1604 uint32_t vf_num = d->devfn - (pf->devfn + vf_offset); in pci_config_get_bar_addr()
1611 new_addr = pci_get_quad(pf->config + bar); in pci_config_get_bar_addr()
1613 new_addr = pci_get_long(pf->config + bar); in pci_config_get_bar_addr()
1619 new_addr &= ~(size - 1); in pci_config_get_bar_addr()
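For an SR-IOV VF, pci_config_get_bar_addr() above does not decode the VF's own BAR registers; the base comes from the PF's VF BAR, and the VF selects its slice via the VF number derived from the PF's VF offset and stride. A hedged sketch of that derivation under the usual SR-IOV aperture layout (helper name and parameters are illustrative):

    #include <stdint.h>

    static uint64_t vf_bar_addr(uint64_t pf_vf_bar, uint64_t vf_bar_size,
                                uint16_t vf_devfn, uint16_t pf_devfn,
                                uint16_t vf_offset, uint16_t vf_stride)
    {
        uint32_t vf_num = vf_devfn - (pf_devfn + vf_offset);

        if (vf_stride) {
            vf_num /= vf_stride;        /* devfn distance -> VF index */
        }
        /* each VF owns one size-aligned slice of the PF's VF BAR aperture */
        return (pf_vf_bar & ~(vf_bar_size - 1)) + (uint64_t)vf_num * vf_bar_size;
    }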
1628 uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); in pci_bar_address()
1630 bool allow_0_address = mc->pci_allow_0_address; in pci_bar_address()
1637 last_addr = new_addr + size - 1; in pci_bar_address()
1638 /* Check if 32 bit BAR wraps around explicitly. in pci_bar_address()
1656 new_addr &= ~(size - 1); in pci_bar_address()
1657 last_addr = new_addr + size - 1; in pci_bar_address()
1668 * Check if 32 bit BAR wraps around explicitly. in pci_bar_address()
1679 * to >4G. Check it. TODO: we might need to support in pci_bar_address()
1696 r = &d->io_regions[i]; in pci_update_mappings()
1699 if (!r->size) in pci_update_mappings()
1702 new_addr = pci_bar_address(d, i, r->type, r->size); in pci_update_mappings()
1703 if (!d->enabled || pci_pm_state(d)) { in pci_update_mappings()
1708 if (new_addr == r->addr) in pci_update_mappings()
1712 if (r->addr != PCI_BAR_UNMAPPED) { in pci_update_mappings()
1713 trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d), in pci_update_mappings()
1714 PCI_SLOT(d->devfn), in pci_update_mappings()
1715 PCI_FUNC(d->devfn), in pci_update_mappings()
1716 i, r->addr, r->size); in pci_update_mappings()
1717 memory_region_del_subregion(r->address_space, r->memory); in pci_update_mappings()
1719 r->addr = new_addr; in pci_update_mappings()
1720 if (r->addr != PCI_BAR_UNMAPPED) { in pci_update_mappings()
1721 trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d), in pci_update_mappings()
1722 PCI_SLOT(d->devfn), in pci_update_mappings()
1723 PCI_FUNC(d->devfn), in pci_update_mappings()
1724 i, r->addr, r->size); in pci_update_mappings()
1725 memory_region_add_subregion_overlap(r->address_space, in pci_update_mappings()
1726 r->addr, r->memory, 1); in pci_update_mappings()
1735 return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE; in pci_irq_disabled()
1748 pci_change_irq_level(d, i, disabled ? -state : state); in pci_update_irq_disabled()
1760 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { in pci_default_read_config()
1763 memcpy(&val, d->config + address, len); in pci_default_read_config()
1776 uint8_t wmask = d->wmask[addr + i]; in pci_default_write_config()
1777 uint8_t w1cmask = d->w1cmask[addr + i]; in pci_default_write_config()
1779 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); in pci_default_write_config()
1780 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ in pci_default_write_config()
1795 pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) & in pci_default_write_config()
1796 PCI_COMMAND_MASTER) && d->enabled); in pci_default_write_config()
1815 change = level - pci_irq_state(pci_dev, irq_num); in pci_irq_handler()
1844 bus->route_intx_to_irq = route_intx_to_irq; in pci_bus_set_route_irq_fn()
1854 pin = bus->map_irq(dev, pin); in pci_device_route_intx_to_irq()
1855 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, in pci_device_route_intx_to_irq()
1856 pci_bus_is_root(bus) ? "root-complex" in pci_device_route_intx_to_irq()
1857 : DEVICE(bus->parent_dev)->canonical_path); in pci_device_route_intx_to_irq()
1858 dev = bus->parent_dev; in pci_device_route_intx_to_irq()
1861 if (!bus->route_intx_to_irq) { in pci_device_route_intx_to_irq()
1862 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", in pci_device_route_intx_to_irq()
1863 object_get_typename(OBJECT(bus->qbus.parent))); in pci_device_route_intx_to_irq()
1864 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; in pci_device_route_intx_to_irq()
1867 return bus->route_intx_to_irq(bus->irq_opaque, pin); in pci_device_route_intx_to_irq()
1872 return old->mode != new->mode || old->irq != new->irq; in pci_intx_route_changed()
1881 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { in pci_bus_fire_intx_routing_notifier()
1882 dev = bus->devices[i]; in pci_bus_fire_intx_routing_notifier()
1883 if (dev && dev->intx_routing_notifier) { in pci_bus_fire_intx_routing_notifier()
1884 dev->intx_routing_notifier(dev); in pci_bus_fire_intx_routing_notifier()
1888 QLIST_FOREACH(sec, &bus->child, sibling) { in pci_bus_fire_intx_routing_notifier()
1896 dev->intx_routing_notifier = notifier; in pci_device_set_intx_routing_notifier()
1900 * PCI-to-PCI bridge specification
1901 * 9.1: Interrupt routing. Table 9-1
1904 * 2.2.8.1: INTx interrupt signaling - Rules
1906 * Table 2-20
1910 * 0-origin unlike PCI interrupt pin register.
1914 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); in pci_swizzle_map_irq_fn()
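pci_swizzle_map_irq_fn() above applies the standard bridge swizzle from the referenced specs: the pin seen on the parent bus is (slot + pin) modulo 4, with INTA..INTD numbered 0..3. A quick worked example:

    #include <stdio.h>

    #define PCI_NUM_PINS 4

    static int swizzle(int slot, int pin)
    {
        return (slot + pin) % PCI_NUM_PINS;
    }

    int main(void)
    {
        /* A device in slot 2 raising INTA (pin 0) reaches the parent as
         * pin 2 (INTC); slot 3 raising INTB (pin 1) wraps back to INTA. */
        printf("slot 2, INTA -> parent pin %d\n", swizzle(2, 0));
        printf("slot 3, INTB -> parent pin %d\n", swizzle(3, 1));
        return 0;
    }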
1932 { 0x0201, "Token Ring controller", "token-ring"},
1952 { 0x0604, "PCI bridge", "pci-bridge"},
1960 { 0x0800, "Interrupt controller", "interrupt-controller"},
1961 { 0x0801, "DMA controller", "dma-controller"},
1970 { 0x0c01, "Access bus controller", "access-bus"},
1973 { 0x0c04, "Fibre channel controller", "fibre-channel"},
1985 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { in pci_for_each_device_under_bus_reverse()
1986 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; in pci_for_each_device_under_bus_reverse()
2009 for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { in pci_for_each_device_under_bus()
2010 d = bus->devices[devfn]; in pci_for_each_device_under_bus()
2032 while (desc->desc && class != desc->class) { in get_class_desc()
2041 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, in pci_init_nic_devices()
2042 "virtio", "virtio-net-pci"); in pci_init_nic_devices()
2065 error_report("No support for non-zero PCI domains"); in pci_init_nic_in_slot()
2079 qdev_set_nic_properties(&pci_dev->qdev, nd); in pci_init_nic_in_slot()
2089 return pci_create_simple(bus, -1, "cirrus-vga"); in pci_vga_init()
2091 return pci_create_simple(bus, -1, "qxl-vga"); in pci_vga_init()
2093 return pci_create_simple(bus, -1, "VGA"); in pci_vga_init()
2095 return pci_create_simple(bus, -1, "vmware-svga"); in pci_vga_init()
2097 return pci_create_simple(bus, -1, "virtio-vga"); in pci_vga_init()
2099 default: /* Other non-PCI types. Checking for unsupported types is already in pci_vga_init()
2109 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & in pci_secondary_bus_in_range()
2111 dev->config[PCI_SECONDARY_BUS] <= bus_num && in pci_secondary_bus_in_range()
2112 bus_num <= dev->config[PCI_SUBORDINATE_BUS]; in pci_secondary_bus_in_range()
2120 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { in pci_root_bus_in_range()
2121 PCIDevice *dev = bus->devices[i]; in pci_root_bus_in_range()
2147 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { in pci_find_bus_nr()
2153 QLIST_FOREACH(sec, &bus->child, sibling) { in pci_find_bus_nr()
2163 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { in pci_find_bus_nr()
2189 QLIST_FOREACH(sec, &bus->child, sibling) { in pci_for_each_bus_depth_first()
2206 return bus->devices[devfn]; in pci_find_device()
2209 #define ONBOARD_INDEX_MAX (16 * 1024 - 1)
2221 * capped by systemd (see: udev-builtin-net_id.c) in pci_qdev_realize()
2223 * misconfigure QEMU and then wonder why acpi-index doesn't work in pci_qdev_realize()
2225 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) { in pci_qdev_realize()
2226 error_setg(errp, "acpi-index should be less or equal to %u", in pci_qdev_realize()
2232 * make sure that acpi-index is unique across all present PCI devices in pci_qdev_realize()
2234 if (pci_dev->acpi_index) { in pci_qdev_realize()
2238 GINT_TO_POINTER(pci_dev->acpi_index), in pci_qdev_realize()
2240 error_setg(errp, "a PCI device with acpi-index = %" PRIu32 in pci_qdev_realize()
2241 " already exist", pci_dev->acpi_index); in pci_qdev_realize()
2245 GINT_TO_POINTER(pci_dev->acpi_index), in pci_qdev_realize()
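The acpi-index handling above uses a GLib hash table as a set of indices already claimed by realized devices, so a duplicate can be rejected at realize time and the slot is released again on unrealize. A minimal sketch of the same pattern (standalone, not the QEMU helpers):

    #include <glib.h>

    static GHashTable *used_indices;   /* set of in-use acpi-index values */

    static gboolean claim_acpi_index(guint32 idx)
    {
        if (!used_indices) {
            used_indices = g_hash_table_new(NULL, NULL);  /* direct hashing */
        }
        if (g_hash_table_contains(used_indices, GINT_TO_POINTER(idx))) {
            return FALSE;              /* already taken by another device */
        }
        g_hash_table_add(used_indices, GINT_TO_POINTER(idx));
        return TRUE;
    }

    static void release_acpi_index(guint32 idx)
    {
        g_hash_table_remove(used_indices, GINT_TO_POINTER(idx));
    }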
2249 if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) { in pci_qdev_realize()
2250 error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize); in pci_qdev_realize()
2259 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; in pci_qdev_realize()
2263 pci_dev->cap_present |= QEMU_PCIE_CAP_CXL; in pci_qdev_realize()
2268 pci_dev->devfn, errp); in pci_qdev_realize()
2272 if (pc->realize) { in pci_qdev_realize()
2273 pc->realize(pci_dev, &local_err); in pci_qdev_realize()
2291 * With ARI, PCI_SLOT() can return non-zero value as the traditional in pci_qdev_realize()
2292 * 5-bit Device Number and 3-bit Function Number fields in its associated in pci_qdev_realize()
2294 * single 8-bit Function Number. Hence, ignore ARI capable devices. in pci_qdev_realize()
2299 PCI_SLOT(pci_dev->devfn)) { in pci_qdev_realize()
2302 PCI_SLOT(pci_dev->devfn), pci_dev->name); in pci_qdev_realize()
2305 if (pci_dev->failover_pair_id) { in pci_qdev_realize()
2312 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE); in pci_qdev_realize()
2319 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) in pci_qdev_realize()
2320 || (PCI_FUNC(pci_dev->devfn) != 0)) { in pci_qdev_realize()
2326 qdev->allow_unplug_during_migration = true; in pci_qdev_realize()
2331 if (pci_dev->romfile == NULL && pc->romfile != NULL) { in pci_qdev_realize()
2332 pci_dev->romfile = g_strdup(pc->romfile); in pci_qdev_realize()
2345 pci_dev->msi_trigger = pci_msi_trigger; in pci_qdev_realize()
2371 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); in pci_realize_and_unref()
2394 if (pdev->used[i]) in pci_find_space()
2396 else if (i - offset + 1 == size) in pci_find_space()
2407 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) in pci_find_capability_list()
2410 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); in pci_find_capability_list()
2412 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) in pci_find_capability_list()
2424 if (!(pdev->used[offset])) { in pci_find_capability_at_offset()
2428 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); in pci_find_capability_at_offset()
2430 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); in pci_find_capability_at_offset()
2439 /* Patch the PCI vendor and device ids in a PCI rom image if necessary.
2466 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); in pci_patch_ids()
2467 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); in pci_patch_ids()
2471 trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id, in pci_patch_ids()
2477 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */ in pci_patch_ids()
2479 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); in pci_patch_ids()
2486 /* Patch device id and checksum (at offset 6 for etherboot roms). */ in pci_patch_ids()
2488 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); in pci_patch_ids()
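The ID patching in pci_patch_ids() above must keep the option ROM's simple checksum valid: all bytes of the image sum to zero modulo 256, so rewriting a 16-bit field means adjusting the checksum byte by the old bytes minus the new ones. A standalone helper showing just that adjustment:

    #include <stdint.h>

    /* Keep a byte-sum-to-zero checksum valid after replacing one 16-bit
     * word in the image. */
    static uint8_t adjust_checksum(uint8_t checksum, uint16_t old_word,
                                   uint16_t new_word)
    {
        checksum += (uint8_t)old_word + (uint8_t)(old_word >> 8);
        checksum -= (uint8_t)new_word + (uint8_t)(new_word >> 8);
        return checksum;
    }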
2511 if (!pdev->romfile || !strlen(pdev->romfile)) { in pci_add_option_rom()
2515 if (!pdev->rom_bar) { in pci_add_option_rom()
2520 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); in pci_add_option_rom()
2523 * Hot-plugged devices can't use the option ROM in pci_add_option_rom()
2526 if (DEVICE(pdev)->hotplugged) { in pci_add_option_rom()
2527 error_setg(errp, "Hot-plugged device without ROM bar" in pci_add_option_rom()
2533 rom_add_vga(pdev->romfile); in pci_add_option_rom()
2535 rom_add_option(pdev->romfile, -1); in pci_add_option_rom()
2541 if (pdev->rom_bar > 0) { in pci_add_option_rom()
2542 error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF"); in pci_add_option_rom()
2548 if (load_file || pdev->romsize == UINT32_MAX) { in pci_add_option_rom()
2549 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); in pci_add_option_rom()
2551 path = g_strdup(pdev->romfile); in pci_add_option_rom()
2556 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); in pci_add_option_rom()
2559 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile); in pci_add_option_rom()
2564 pdev->romfile); in pci_add_option_rom()
2567 if (pdev->romsize != UINT32_MAX) { in pci_add_option_rom()
2568 if (size > pdev->romsize) { in pci_add_option_rom()
2571 pdev->romfile, (uint32_t)size, pdev->romsize); in pci_add_option_rom()
2575 pdev->romsize = pow2ceil(size); in pci_add_option_rom()
2581 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev))); in pci_add_option_rom()
2583 pdev->has_rom = true; in pci_add_option_rom()
2584 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, in pci_add_option_rom()
2588 void *ptr = memory_region_get_ram_ptr(&pdev->rom); in pci_add_option_rom()
2591 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); in pci_add_option_rom()
2601 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); in pci_add_option_rom()
2606 if (!pdev->has_rom) in pci_del_option_rom()
2609 vmstate_unregister_ram(&pdev->rom, &pdev->qdev); in pci_del_option_rom()
2610 pdev->has_rom = false; in pci_del_option_rom()
2632 * depends on this check to verify that the device is not broken. in pci_add_capability()
2642 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), in pci_add_capability()
2644 return -EINVAL; in pci_add_capability()
2649 config = pdev->config + offset; in pci_add_capability()
2651 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; in pci_add_capability()
2652 pdev->config[PCI_CAPABILITY_LIST] = offset; in pci_add_capability()
2653 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; in pci_add_capability()
2654 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); in pci_add_capability()
2655 /* Make capability read-only by default */ in pci_add_capability()
2656 memset(pdev->wmask + offset, 0, size); in pci_add_capability()
2657 /* Check capability by default */ in pci_add_capability()
2658 memset(pdev->cmask + offset, 0xFF, size); in pci_add_capability()
2668 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; in pci_del_capability()
2670 memset(pdev->wmask + offset, 0xff, size); in pci_del_capability()
2671 memset(pdev->w1cmask + offset, 0, size); in pci_del_capability()
2672 /* Clear cmask as device-specific registers can't be checked */ in pci_del_capability()
2673 memset(pdev->cmask + offset, 0, size); in pci_del_capability()
2674 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); in pci_del_capability()
2676 if (!pdev->config[PCI_CAPABILITY_LIST]) in pci_del_capability()
2677 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; in pci_del_capability()
2690 int class = pci_get_word(d->config + PCI_CLASS_DEVICE); in pci_dev_fw_name()
2692 while (desc->desc && in pci_dev_fw_name()
2693 (class & ~desc->fw_ign_bits) != in pci_dev_fw_name()
2694 (desc->class & ~desc->fw_ign_bits)) { in pci_dev_fw_name()
2698 if (desc->desc) { in pci_dev_fw_name()
2699 name = desc->fw_name; in pci_dev_fw_name()
2706 pci_get_word(d->config + PCI_VENDOR_ID), in pci_dev_fw_name()
2707 pci_get_word(d->config + PCI_DEVICE_ID)); in pci_dev_fw_name()
2717 int has_func = !!PCI_FUNC(d->devfn); in pcibus_get_fw_dev_path()
2721 PCI_SLOT(d->devfn), in pcibus_get_fw_dev_path()
2724 PCI_FUNC(d->devfn)); in pcibus_get_fw_dev_path()
2740 int slot_len = sizeof slot - 1 /* For '\0' */; in pcibus_get_dev_path()
2750 for (t = d; t; t = pci_get_bus(t)->parent_dev) { in pcibus_get_dev_path()
2765 for (t = d; t; t = pci_get_bus(t)->parent_dev) { in pcibus_get_dev_path()
2766 p -= slot_len; in pcibus_get_dev_path()
2768 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); in pcibus_get_dev_path()
2779 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); in pci_qdev_find_recursive()
2781 return -ENODEV; in pci_qdev_find_recursive()
2784 /* roughly check whether the given qdev is a PCI device */ in pci_qdev_find_recursive()
2789 return -EINVAL; in pci_qdev_find_recursive()
2795 int rc = -ENODEV; in pci_qdev_find_device()
2798 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); in pci_qdev_find_device()
2803 if (tmp != -ENODEV) { in pci_qdev_find_device()
2813 return pci_get_bus(dev)->address_space_mem; in pci_address_space()
2818 return pci_get_bus(dev)->address_space_io; in pci_address_space_io()
2825 k->realize = pci_qdev_realize; in pci_device_class_init()
2826 k->unrealize = pci_qdev_unrealize; in pci_device_class_init()
2827 k->bus_type = TYPE_PCI_BUS; in pci_device_class_init()
2830 klass, "x-max-bounce-buffer-size", in pci_device_class_init()
2868 int devfn = dev->devfn; in pci_device_get_iommu_bus_devfn()
2870 while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) { in pci_device_get_iommu_bus_devfn()
2871 PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev); in pci_device_get_iommu_bus_devfn()
2877 * conventional PCI buses pre-date such concepts. Instead, the PCIe- in pci_device_get_iommu_bus_devfn()
2878 * to-PCI bridge creates and accepts transactions on behalf of down- in pci_device_get_iommu_bus_devfn()
2881 * depends on the format of the bridge devices. Proper PCIe-to-PCI in pci_device_get_iommu_bus_devfn()
2883 * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification, in pci_device_get_iommu_bus_devfn()
2886 * found on the root complex such as the dmi-to-pci-bridge, we follow in pci_device_get_iommu_bus_devfn()
2887 * the convention of typical bare-metal hardware, which uses the in pci_device_get_iommu_bus_devfn()
2895 PCIDevice *parent = iommu_bus->parent_dev; in pci_device_get_iommu_bus_devfn()
2902 devfn = parent->devfn; in pci_device_get_iommu_bus_devfn()
2913 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) { in pci_device_get_iommu_bus_devfn()
2936 return iommu_bus->iommu_ops->get_address_space(bus, in pci_device_iommu_address_space()
2937 iommu_bus->iommu_opaque, devfn); in pci_device_iommu_address_space()
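pci_device_get_iommu_bus_devfn() above walks toward the root until it finds a bus with IOMMU ops and, as the comments explain, lets a PCIe-to-PCI bridge stand in for the conventional PCI devices behind it. A toy sketch of that aliasing walk (illustrative structs, not QEMU types):

    struct toy_bus {
        struct toy_bus *parent;        /* NULL at the root                */
        int has_iommu;
        int bridge_devfn;              /* devfn of the bridge to this bus */
        int bridge_is_pcie_to_pci;     /* crosses into conventional PCI   */
    };

    static int iommu_visible_devfn(struct toy_bus *bus, int devfn)
    {
        while (bus->parent && !bus->has_iommu) {
            if (bus->bridge_is_pcie_to_pci) {
                devfn = bus->bridge_devfn;   /* alias to the owning bridge */
            }
            bus = bus->parent;
        }
        return devfn;
    }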
2950 if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) { in pci_iommu_init_iotlb_notifier()
2951 iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque, in pci_iommu_init_iotlb_notifier()
2956 return -ENODEV; in pci_iommu_init_iotlb_notifier()
2968 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) { in pci_device_set_iommu_device()
2969 hiod->aliased_bus = aliased_bus; in pci_device_set_iommu_device()
2970 hiod->aliased_devfn = aliased_devfn; in pci_device_set_iommu_device()
2971 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev), in pci_device_set_iommu_device()
2972 iommu_bus->iommu_opaque, in pci_device_set_iommu_device()
2973 dev->devfn, hiod, errp); in pci_device_set_iommu_device()
2983 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) { in pci_device_unset_iommu_device()
2984 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev), in pci_device_unset_iommu_device()
2985 iommu_bus->iommu_opaque, in pci_device_unset_iommu_device()
2986 dev->devfn); in pci_device_unset_iommu_device()
2998 if (!dev->is_master || in pci_pri_request_page()
3000 return -EPERM; in pci_pri_request_page()
3004 return -EPERM; in pci_pri_request_page()
3008 if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) { in pci_pri_request_page()
3009 return iommu_bus->iommu_ops->pri_request_page(bus, in pci_pri_request_page()
3010 iommu_bus->iommu_opaque, in pci_pri_request_page()
3016 return -ENODEV; in pci_pri_request_page()
3026 if (!dev->is_master || in pci_pri_register_notifier()
3028 return -EPERM; in pci_pri_register_notifier()
3032 if (iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) { in pci_pri_register_notifier()
3033 iommu_bus->iommu_ops->pri_register_notifier(bus, in pci_pri_register_notifier()
3034 iommu_bus->iommu_opaque, in pci_pri_register_notifier()
3039 return -ENODEV; in pci_pri_register_notifier()
3049 if (iommu_bus && iommu_bus->iommu_ops->pri_unregister_notifier) { in pci_pri_unregister_notifier()
3050 iommu_bus->iommu_ops->pri_unregister_notifier(bus, in pci_pri_unregister_notifier()
3051 iommu_bus->iommu_opaque, in pci_pri_unregister_notifier()
3067 if (!dev->is_master || in pci_ats_request_translation()
3069 return -EPERM; in pci_ats_request_translation()
3073 return -ENOSPC; in pci_ats_request_translation()
3077 return -EPERM; in pci_ats_request_translation()
3081 if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) { in pci_ats_request_translation()
3082 return iommu_bus->iommu_ops->ats_request_translation(bus, in pci_ats_request_translation()
3083 iommu_bus->iommu_opaque, in pci_ats_request_translation()
3090 return -ENODEV; in pci_ats_request_translation()
3101 return -EPERM; in pci_iommu_register_iotlb_notifier()
3105 if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) { in pci_iommu_register_iotlb_notifier()
3106 iommu_bus->iommu_ops->register_iotlb_notifier(bus, in pci_iommu_register_iotlb_notifier()
3107 iommu_bus->iommu_opaque, devfn, in pci_iommu_register_iotlb_notifier()
3112 return -ENODEV; in pci_iommu_register_iotlb_notifier()
3123 return -EPERM; in pci_iommu_unregister_iotlb_notifier()
3127 if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) { in pci_iommu_unregister_iotlb_notifier()
3128 iommu_bus->iommu_ops->unregister_iotlb_notifier(bus, in pci_iommu_unregister_iotlb_notifier()
3129 iommu_bus->iommu_opaque, in pci_iommu_unregister_iotlb_notifier()
3134 return -ENODEV; in pci_iommu_unregister_iotlb_notifier()
3145 if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) { in pci_iommu_get_iotlb_info()
3146 iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque, in pci_iommu_get_iotlb_info()
3151 return -ENODEV; in pci_iommu_get_iotlb_info()
3161 assert(ops->get_address_space); in pci_setup_iommu()
3163 bus->iommu_ops = ops; in pci_setup_iommu()
3164 bus->iommu_opaque = opaque; in pci_setup_iommu()
3170 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND); in pci_dev_get_w64()
3190 PCIIORegion *r = &dev->io_regions[i]; in pci_dev_get_w64()
3194 if (!r->size || in pci_dev_get_w64()
3195 (r->type & PCI_BASE_ADDRESS_SPACE_IO) || in pci_dev_get_w64()
3196 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) { in pci_dev_get_w64()
3200 lob = pci_bar_address(dev, i, r->type, r->size); in pci_dev_get_w64()
3201 upb = lob + r->size - 1; in pci_dev_get_w64()
3226 * As there are several types of these, it's easier to check the in pcie_has_upstream_port()
3232 parent_dev->exp.exp_cap && in pcie_has_upstream_port()
3243 return bus->devices[0]; in pci_get_function_0()
3245 /* Other bus types might support multiple devices at slots 0-31 */ in pci_get_function_0()
3246 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)]; in pci_get_function_0()
3271 * wait until the guest configures SR-IOV. in pci_set_power()
3282 if (d->enabled == state) { in pci_set_enabled()
3286 d->enabled = state; in pci_set_enabled()
3288 pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) in pci_set_enabled()
3289 & PCI_COMMAND_MASTER) && d->enabled); in pci_set_enabled()
3290 if (qdev_is_realized(&d->qdev)) { in pci_set_enabled()