xref: /qemu/hw/pci/pci.c (revision 6322b753f798337835e205b6d805356bea582c86)
1 /*
2  * QEMU PCI bus manager
3  *
4  * Copyright (c) 2004 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "qemu/units.h"
28 #include "hw/irq.h"
29 #include "hw/pci/pci.h"
30 #include "hw/pci/pci_bridge.h"
31 #include "hw/pci/pci_bus.h"
32 #include "hw/pci/pci_host.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/qdev-properties-system.h"
35 #include "migration/qemu-file-types.h"
36 #include "migration/vmstate.h"
37 #include "net/net.h"
38 #include "system/numa.h"
39 #include "system/runstate.h"
40 #include "system/system.h"
41 #include "hw/loader.h"
42 #include "qemu/error-report.h"
43 #include "qemu/range.h"
44 #include "trace.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "hw/hotplug.h"
48 #include "hw/boards.h"
49 #include "hw/nvram/fw_cfg.h"
50 #include "qapi/error.h"
51 #include "qemu/cutils.h"
52 #include "pci-internal.h"
53 
54 #include "hw/xen/xen.h"
55 #include "hw/i386/kvm/xen_evtchn.h"
56 
57 bool pci_available = true;
58 
59 static char *pcibus_get_dev_path(DeviceState *dev);
60 static char *pcibus_get_fw_dev_path(DeviceState *dev);
61 static void pcibus_reset_hold(Object *obj, ResetType type);
62 static bool pcie_has_upstream_port(PCIDevice *dev);
63 
64 static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
65                              void *opaque, Error **errp)
66 {
67     uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));
68 
69     visit_type_uint8(v, name, &busnr, errp);
70 }
71 
72 static const PropertyInfo prop_pci_busnr = {
73     .type = "busnr",
74     .get = prop_pci_busnr_get,
75 };
76 
77 static const Property pci_props[] = {
78     DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
79     DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
80     DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
81     DEFINE_PROP_INT32("rombar",  PCIDevice, rom_bar, -1),
82     DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
83                     QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
84     DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
85                     QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
86     DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
87                     QEMU_PCIE_EXTCAP_INIT_BITNR, true),
88     DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
89                        failover_pair_id),
90     DEFINE_PROP_UINT32("acpi-index",  PCIDevice, acpi_index, 0),
91     DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
92                     QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
93     DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
94                     QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
95     DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
96                      max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
97     DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf),
98     DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
99                     QEMU_PCIE_EXT_TAG_BITNR, true),
100     { .name = "busnr", .info = &prop_pci_busnr },
101 };
102 
103 static const VMStateDescription vmstate_pcibus = {
104     .name = "PCIBUS",
105     .version_id = 1,
106     .minimum_version_id = 1,
107     .fields = (const VMStateField[]) {
108         VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
109         VMSTATE_VARRAY_INT32(irq_count, PCIBus,
110                              nirq, 0, vmstate_info_int32,
111                              int32_t),
112         VMSTATE_END_OF_LIST()
113     }
114 };
115 
116 static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
117 {
118     return a - b;
119 }
120 
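/*
 * Lazily allocated global list of acpi-index values currently in use,
 * so an index can be released when its device is unrealized (see
 * pci_qdev_unrealize() below).
 */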
121 static GSequence *pci_acpi_index_list(void)
122 {
123     static GSequence *used_acpi_index_list;
124 
125     if (!used_acpi_index_list) {
126         used_acpi_index_list = g_sequence_new(NULL);
127     }
128     return used_acpi_index_list;
129 }
130 
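/* Enable or disable the bus-master DMA alias region and cache the state. */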
131 static void pci_set_master(PCIDevice *d, bool enable)
132 {
133     memory_region_set_enabled(&d->bus_master_enable_region, enable);
134     d->is_master = enable; /* cache the status */
135 }
136 
137 static void pci_init_bus_master(PCIDevice *pci_dev)
138 {
139     AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
140 
141     memory_region_init_alias(&pci_dev->bus_master_enable_region,
142                              OBJECT(pci_dev), "bus master",
143                              dma_as->root, 0, memory_region_size(dma_as->root));
144     pci_set_master(pci_dev, false);
145     memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
146                                 &pci_dev->bus_master_enable_region);
147 }
148 
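/*
 * Machine-init-done notifier: finish bus-master DMA setup for every
 * device already present on the bus once the machine is fully built.
 */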
149 static void pcibus_machine_done(Notifier *notifier, void *data)
150 {
151     PCIBus *bus = container_of(notifier, PCIBus, machine_done);
152     int i;
153 
154     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
155         if (bus->devices[i]) {
156             pci_init_bus_master(bus->devices[i]);
157         }
158     }
159 }
160 
161 static void pci_bus_realize(BusState *qbus, Error **errp)
162 {
163     PCIBus *bus = PCI_BUS(qbus);
164 
165     bus->machine_done.notify = pcibus_machine_done;
166     qemu_add_machine_init_done_notifier(&bus->machine_done);
167 
168     vmstate_register_any(NULL, &vmstate_pcibus, bus);
169 }
170 
171 static void pcie_bus_realize(BusState *qbus, Error **errp)
172 {
173     PCIBus *bus = PCI_BUS(qbus);
174     Error *local_err = NULL;
175 
176     pci_bus_realize(qbus, &local_err);
177     if (local_err) {
178         error_propagate(errp, local_err);
179         return;
180     }
181 
182     /*
183      * A PCI-E bus can support extended config space if it's the root
184      * bus, or if the bus/bridge above it does as well
185      */
186     if (pci_bus_is_root(bus)) {
187         bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
188     } else {
189         PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
190 
191         if (pci_bus_allows_extended_config_space(parent_bus)) {
192             bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
193         }
194     }
195 }
196 
197 static void pci_bus_unrealize(BusState *qbus)
198 {
199     PCIBus *bus = PCI_BUS(qbus);
200 
201     qemu_remove_machine_init_done_notifier(&bus->machine_done);
202 
203     vmstate_unregister(NULL, &vmstate_pcibus, bus);
204 }
205 
206 static int pcibus_num(PCIBus *bus)
207 {
208     if (pci_bus_is_root(bus)) {
209         return 0; /* pci host bridge */
210     }
211     return bus->parent_dev->config[PCI_SECONDARY_BUS];
212 }
213 
214 static uint16_t pcibus_numa_node(PCIBus *bus)
215 {
216     return NUMA_NODE_UNASSIGNED;
217 }
218 
219 bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
220                                         PCIBus *bus,
221                                         Error **errp)
222 {
223     Object *obj;
224 
225     if (!bus) {
226         return true;
227     }
228     obj = OBJECT(bus);
229 
230     return fw_cfg_add_file_from_generator(fw_cfg, obj->parent,
231                                           object_get_canonical_path_component(obj),
232                                           "etc/extra-pci-roots", errp);
233 }
234 
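/*
 * fw_cfg data generator: count the expander root buses hanging off this
 * bus and return the count as a little-endian 64-bit value, or NULL if
 * there are none.
 */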
235 static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp)
236 {
237     PCIBus *bus = PCI_BUS(obj);
238     GByteArray *byte_array;
239     uint64_t extra_hosts = 0;
240 
241     if (!bus) {
242         return NULL;
243     }
244 
245     QLIST_FOREACH(bus, &bus->child, sibling) {
246         /* look for expander root buses */
247         if (pci_bus_is_root(bus)) {
248             extra_hosts++;
249         }
250     }
251 
252     if (!extra_hosts) {
253         return NULL;
254     }
255     extra_hosts = cpu_to_le64(extra_hosts);
256 
257     byte_array = g_byte_array_new();
258     g_byte_array_append(byte_array,
259                         (const void *)&extra_hosts, sizeof(extra_hosts));
260 
261     return byte_array;
262 }
263 
264 static void pci_bus_class_init(ObjectClass *klass, const void *data)
265 {
266     BusClass *k = BUS_CLASS(klass);
267     PCIBusClass *pbc = PCI_BUS_CLASS(klass);
268     ResettableClass *rc = RESETTABLE_CLASS(klass);
269     FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass);
270 
271     k->print_dev = pcibus_dev_print;
272     k->get_dev_path = pcibus_get_dev_path;
273     k->get_fw_dev_path = pcibus_get_fw_dev_path;
274     k->realize = pci_bus_realize;
275     k->unrealize = pci_bus_unrealize;
276 
277     rc->phases.hold = pcibus_reset_hold;
278 
279     pbc->bus_num = pcibus_num;
280     pbc->numa_node = pcibus_numa_node;
281 
282     fwgc->get_data = pci_bus_fw_cfg_gen_data;
283 }
284 
285 static const TypeInfo pci_bus_info = {
286     .name = TYPE_PCI_BUS,
287     .parent = TYPE_BUS,
288     .instance_size = sizeof(PCIBus),
289     .class_size = sizeof(PCIBusClass),
290     .class_init = pci_bus_class_init,
291     .interfaces = (const InterfaceInfo[]) {
292         { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
293         { }
294     }
295 };
296 
297 static const TypeInfo cxl_interface_info = {
298     .name          = INTERFACE_CXL_DEVICE,
299     .parent        = TYPE_INTERFACE,
300 };
301 
302 static const TypeInfo pcie_interface_info = {
303     .name          = INTERFACE_PCIE_DEVICE,
304     .parent        = TYPE_INTERFACE,
305 };
306 
307 static const TypeInfo conventional_pci_interface_info = {
308     .name          = INTERFACE_CONVENTIONAL_PCI_DEVICE,
309     .parent        = TYPE_INTERFACE,
310 };
311 
312 static void pcie_bus_class_init(ObjectClass *klass, const void *data)
313 {
314     BusClass *k = BUS_CLASS(klass);
315 
316     k->realize = pcie_bus_realize;
317 }
318 
319 static const TypeInfo pcie_bus_info = {
320     .name = TYPE_PCIE_BUS,
321     .parent = TYPE_PCI_BUS,
322     .class_init = pcie_bus_class_init,
323 };
324 
325 static const TypeInfo cxl_bus_info = {
326     .name       = TYPE_CXL_BUS,
327     .parent     = TYPE_PCIE_BUS,
328     .class_init = pcie_bus_class_init,
329 };
330 
331 static void pci_update_mappings(PCIDevice *d);
332 static void pci_irq_handler(void *opaque, int irq_num, int level);
333 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
334 static void pci_del_option_rom(PCIDevice *pdev);
335 
336 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
337 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
338 
339 PCIHostStateList pci_host_bridges;
340 
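/*
 * Return the config space offset of BAR @reg.  The expansion ROM BAR
 * sits at a different offset in type 1 (bridge) headers.
 */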
341 int pci_bar(PCIDevice *d, int reg)
342 {
343     uint8_t type;
344 
345     /* PCIe virtual functions do not have their own BARs */
346     assert(!pci_is_vf(d));
347 
348     if (reg != PCI_ROM_SLOT)
349         return PCI_BASE_ADDRESS_0 + reg * 4;
350 
351     type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
352     return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
353 }
354 
355 static inline int pci_irq_state(PCIDevice *d, int irq_num)
356 {
357         return (d->irq_state >> irq_num) & 0x1;
358 }
359 
360 static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
361 {
362         d->irq_state &= ~(0x1 << irq_num);
363         d->irq_state |= level << irq_num;
364 }
365 
366 static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
367 {
368     assert(irq_num >= 0);
369     assert(irq_num < bus->nirq);
370     bus->irq_count[irq_num] += change;
371     bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
372 }
373 
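/*
 * Propagate an INTx level change: remap the pin at each bridge on the
 * way up until a bus with a set_irq hook is reached, then adjust the
 * shared line's assertion count there.
 */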
374 static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
375 {
376     PCIBus *bus;
377     for (;;) {
378         int dev_irq = irq_num;
379         bus = pci_get_bus(pci_dev);
380         assert(bus->map_irq);
381         irq_num = bus->map_irq(pci_dev, irq_num);
382         trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
383                             pci_bus_is_root(bus) ? "root-complex"
384                                     : DEVICE(bus->parent_dev)->canonical_path);
385         if (bus->set_irq)
386             break;
387         pci_dev = bus->parent_dev;
388     }
389     pci_bus_change_irq_level(bus, irq_num, change);
390 }
391 
392 int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
393 {
394     assert(irq_num >= 0);
395     assert(irq_num < bus->nirq);
396     return !!bus->irq_count[irq_num];
397 }
398 
399 /* Update interrupt status bit in config space on interrupt
400  * state change. */
401 static void pci_update_irq_status(PCIDevice *dev)
402 {
403     if (dev->irq_state) {
404         dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
405     } else {
406         dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
407     }
408 }
409 
410 void pci_device_deassert_intx(PCIDevice *dev)
411 {
412     int i;
413     for (i = 0; i < PCI_NUM_PINS; ++i) {
414         pci_irq_handler(dev, i, 0);
415     }
416 }
417 
418 static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
419 {
420     MemTxAttrs attrs = {};
421 
422     /*
423      * Xen uses the high bits of the address to contain some of the bits
424      * of the PIRQ#. Therefore we can't just send the write cycle and
425      * trust that it's caught by the APIC at 0xfee00000 because the
426      * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
427      * So we intercept the delivery here instead of in kvm_send_msi().
428      */
429     if (xen_mode == XEN_EMULATE &&
430         xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
431         return;
432     }
433     attrs.requester_id = pci_requester_id(dev);
434     address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
435                          attrs, NULL);
436 }
437 
438 /*
439  * Register and track a PM capability.  If wmask is also enabled for the power
440  * state field of the pmcsr register, guest writes may change the device PM
441  * state.  BAR access is only enabled while the device is in the D0 state.
442  * Return the capability offset or negative error code.
443  */
444 int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp)
445 {
446     int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp);
447 
448     if (cap < 0) {
449         return cap;
450     }
451 
452     d->pm_cap = cap;
453     d->cap_present |= QEMU_PCI_CAP_PM;
454 
455     return cap;
456 }
457 
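/* Return the device's current D-state from PMCSR, or D0 when there is no PM capability. */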
458 static uint8_t pci_pm_state(PCIDevice *d)
459 {
460     uint16_t pmcsr;
461 
462     if (!(d->cap_present & QEMU_PCI_CAP_PM)) {
463         return 0;
464     }
465 
466     pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL);
467 
468     return pmcsr & PCI_PM_CTRL_STATE_MASK;
469 }
470 
471 /*
472  * Update the PM capability state based on the new value stored in config
473  * space respective to the old, pre-write state provided.  If the new value
474  * is rejected (unsupported or invalid transition) restore the old value.
475  * Return the resulting PM state.
476  */
477 static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old)
478 {
479     uint16_t pmc;
480     uint8_t new;
481 
482     if (!(d->cap_present & QEMU_PCI_CAP_PM) ||
483         !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) {
484         return old;
485     }
486 
487     new = pci_pm_state(d);
488     if (new == old) {
489         return old;
490     }
491 
492     pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC);
493 
494     /*
495      * Transitions to D1 & D2 are only allowed if supported.  Devices may
496      * only transition to higher D-states or to D0.
497      */
498     if ((!(pmc & PCI_PM_CAP_D1) && new == 1) ||
499         (!(pmc & PCI_PM_CAP_D2) && new == 2) ||
500         (old && new && new < old)) {
501         pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL,
502                                      PCI_PM_CTRL_STATE_MASK);
503         pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL,
504                                    old);
505         trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d),
506                                     PCI_SLOT(d->devfn), PCI_FUNC(d->devfn),
507                                     old, new);
508         return old;
509     }
510 
511     trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
512                             PCI_FUNC(d->devfn), old, new);
513     return new;
514 }
515 
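/*
 * Reset every BAR register to its type bits (i.e. unmapped).  VFs are
 * skipped because their BARs are defined by the PF's SR-IOV capability.
 */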
516 static void pci_reset_regions(PCIDevice *dev)
517 {
518     int r;
519     if (pci_is_vf(dev)) {
520         return;
521     }
522 
523     for (r = 0; r < PCI_NUM_REGIONS; ++r) {
524         PCIIORegion *region = &dev->io_regions[r];
525         if (!region->size) {
526             continue;
527         }
528 
529         if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
530             region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
531             pci_set_quad(dev->config + pci_bar(dev, r), region->type);
532         } else {
533             pci_set_long(dev->config + pci_bar(dev, r), region->type);
534         }
535     }
536 }
537 
538 static void pci_do_device_reset(PCIDevice *dev)
539 {
540     pci_device_deassert_intx(dev);
541     assert(dev->irq_state == 0);
542 
543     /* Clear all writable bits */
544     pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
545                                  pci_get_word(dev->wmask + PCI_COMMAND) |
546                                  pci_get_word(dev->w1cmask + PCI_COMMAND));
547     pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
548                                  pci_get_word(dev->wmask + PCI_STATUS) |
549                                  pci_get_word(dev->w1cmask + PCI_STATUS));
550     /* Some devices make bits of PCI_INTERRUPT_LINE read only */
551     pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
552                               pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
553                               pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
554     dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
555     /* Default PM state is D0 */
556     if (dev->cap_present & QEMU_PCI_CAP_PM) {
557         pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL,
558                                      PCI_PM_CTRL_STATE_MASK);
559     }
560     pci_reset_regions(dev);
561     pci_update_mappings(dev);
562 
563     msi_reset(dev);
564     msix_reset(dev);
565     pcie_sriov_pf_reset(dev);
566 }
567 
568 /*
569  * This function is called on #RST and FLR.
570  * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
571  */
572 void pci_device_reset(PCIDevice *dev)
573 {
574     device_cold_reset(&dev->qdev);
575     pci_do_device_reset(dev);
576 }
577 
578 /*
579  * Trigger pci bus reset under a given bus.
580  * Called via bus_cold_reset on RST# assert, after the devices
581  * have already been device_cold_reset-ed.
582  */
583 static void pcibus_reset_hold(Object *obj, ResetType type)
584 {
585     PCIBus *bus = PCI_BUS(obj);
586     int i;
587 
588     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
589         if (bus->devices[i]) {
590             pci_do_device_reset(bus->devices[i]);
591         }
592     }
593 
594     for (i = 0; i < bus->nirq; i++) {
595         assert(bus->irq_count[i] == 0);
596     }
597 }
598 
599 static void pci_host_bus_register(DeviceState *host)
600 {
601     PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
602 
603     QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
604 }
605 
606 static void pci_host_bus_unregister(DeviceState *host)
607 {
608     PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
609 
610     QLIST_REMOVE(host_bridge, next);
611 }
612 
613 PCIBus *pci_device_root_bus(const PCIDevice *d)
614 {
615     PCIBus *bus = pci_get_bus(d);
616 
617     while (!pci_bus_is_root(bus)) {
618         d = bus->parent_dev;
619         assert(d != NULL);
620 
621         bus = pci_get_bus(d);
622     }
623 
624     return bus;
625 }
626 
627 const char *pci_root_bus_path(PCIDevice *dev)
628 {
629     PCIBus *rootbus = pci_device_root_bus(dev);
630     PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
631     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
632 
633     assert(host_bridge->bus == rootbus);
634 
635     if (hc->root_bus_path) {
636         return (*hc->root_bus_path)(host_bridge, rootbus);
637     }
638 
639     return rootbus->qbus.name;
640 }
641 
642 bool pci_bus_bypass_iommu(PCIBus *bus)
643 {
644     PCIBus *rootbus = bus;
645     PCIHostState *host_bridge;
646 
647     if (!pci_bus_is_root(bus)) {
648         rootbus = pci_device_root_bus(bus->parent_dev);
649     }
650 
651     host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
652 
653     assert(host_bridge->bus == rootbus);
654 
655     return host_bridge->bypass_iommu;
656 }
657 
658 static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
659                                        MemoryRegion *mem, MemoryRegion *io,
660                                        uint8_t devfn_min)
661 {
662     assert(PCI_FUNC(devfn_min) == 0);
663     bus->devfn_min = devfn_min;
664     bus->slot_reserved_mask = 0x0;
665     bus->address_space_mem = mem;
666     bus->address_space_io = io;
667     bus->flags |= PCI_BUS_IS_ROOT;
668 
669     /* host bridge */
670     QLIST_INIT(&bus->child);
671 
672     pci_host_bus_register(parent);
673 }
674 
675 static void pci_bus_uninit(PCIBus *bus)
676 {
677     pci_host_bus_unregister(BUS(bus)->parent);
678 }
679 
680 bool pci_bus_is_express(const PCIBus *bus)
681 {
682     return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
683 }
684 
685 void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
686                        const char *name,
687                        MemoryRegion *mem, MemoryRegion *io,
688                        uint8_t devfn_min, const char *typename)
689 {
690     qbus_init(bus, bus_size, typename, parent, name);
691     pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
692 }
693 
694 PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
695                          MemoryRegion *mem, MemoryRegion *io,
696                          uint8_t devfn_min, const char *typename)
697 {
698     PCIBus *bus;
699 
700     bus = PCI_BUS(qbus_new(typename, parent, name));
701     pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
702     return bus;
703 }
704 
705 void pci_root_bus_cleanup(PCIBus *bus)
706 {
707     pci_bus_uninit(bus);
708     /* the caller of the unplug hotplug handler will delete this device */
709     qbus_unrealize(BUS(bus));
710 }
711 
712 void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
713                   void *irq_opaque, int nirq)
714 {
715     bus->set_irq = set_irq;
716     bus->irq_opaque = irq_opaque;
717     bus->nirq = nirq;
718     g_free(bus->irq_count);
719     bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
720 }
721 
722 void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
723 {
724     bus->map_irq = map_irq;
725 }
726 
727 void pci_bus_irqs_cleanup(PCIBus *bus)
728 {
729     bus->set_irq = NULL;
730     bus->map_irq = NULL;
731     bus->irq_opaque = NULL;
732     bus->nirq = 0;
733     g_free(bus->irq_count);
734     bus->irq_count = NULL;
735 }
736 
737 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
738                               pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
739                               void *irq_opaque,
740                               MemoryRegion *mem, MemoryRegion *io,
741                               uint8_t devfn_min, int nirq,
742                               const char *typename)
743 {
744     PCIBus *bus;
745 
746     bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
747     pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
748     pci_bus_map_irqs(bus, map_irq);
749     return bus;
750 }
751 
752 void pci_unregister_root_bus(PCIBus *bus)
753 {
754     pci_bus_irqs_cleanup(bus);
755     pci_root_bus_cleanup(bus);
756 }
757 
758 int pci_bus_num(PCIBus *s)
759 {
760     return PCI_BUS_GET_CLASS(s)->bus_num(s);
761 }
762 
763 /* Returns the min and max bus numbers of a PCI bus hierarchy */
764 void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
765 {
766     int i;
767     *min_bus = *max_bus = pci_bus_num(bus);
768 
769     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
770         PCIDevice *dev = bus->devices[i];
771 
772         if (dev && IS_PCI_BRIDGE(dev)) {
773             *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
774             *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
775         }
776     }
777 }
778 
779 int pci_bus_numa_node(PCIBus *bus)
780 {
781     return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
782 }
783 
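/*
 * Incoming migration: accept the saved config space only if every
 * non-writable bit covered by cmask matches the current device, then
 * refresh the mappings and bus-master state derived from it.
 */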
784 static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
785                                  const VMStateField *field)
786 {
787     PCIDevice *s = container_of(pv, PCIDevice, config);
788     uint8_t *config;
789     int i;
790 
791     assert(size == pci_config_size(s));
792     config = g_malloc(size);
793 
794     qemu_get_buffer(f, config, size);
795     for (i = 0; i < size; ++i) {
796         if ((config[i] ^ s->config[i]) &
797             s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
798             error_report("%s: Bad config data: i=0x%x read: %x device: %x "
799                          "cmask: %x wmask: %x w1cmask:%x", __func__,
800                          i, config[i], s->config[i],
801                          s->cmask[i], s->wmask[i], s->w1cmask[i]);
802             g_free(config);
803             return -EINVAL;
804         }
805     }
806     memcpy(s->config, config, size);
807 
808     pci_update_mappings(s);
809     if (IS_PCI_BRIDGE(s)) {
810         pci_bridge_update_mappings(PCI_BRIDGE(s));
811     }
812 
813     pci_set_master(s, pci_get_word(s->config + PCI_COMMAND)
814                       & PCI_COMMAND_MASTER);
815 
816     g_free(config);
817     return 0;
818 }
819 
820 /* just put buffer */
821 static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
822                                  const VMStateField *field, JSONWriter *vmdesc)
823 {
824     const uint8_t **v = pv;
825     assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
826     qemu_put_buffer(f, *v, size);
827 
828     return 0;
829 }
830 
831 static const VMStateInfo vmstate_info_pci_config = {
832     .name = "pci config",
833     .get  = get_pci_config_device,
834     .put  = put_pci_config_device,
835 };
836 
837 static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
838                              const VMStateField *field)
839 {
840     PCIDevice *s = container_of(pv, PCIDevice, irq_state);
841     uint32_t irq_state[PCI_NUM_PINS];
842     int i;
843     for (i = 0; i < PCI_NUM_PINS; ++i) {
844         irq_state[i] = qemu_get_be32(f);
845         if (irq_state[i] != 0x1 && irq_state[i] != 0) {
846             fprintf(stderr, "irq state %d: must be 0 or 1.\n",
847                     irq_state[i]);
848             return -EINVAL;
849         }
850     }
851 
852     for (i = 0; i < PCI_NUM_PINS; ++i) {
853         pci_set_irq_state(s, i, irq_state[i]);
854     }
855 
856     return 0;
857 }
858 
859 static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
860                              const VMStateField *field, JSONWriter *vmdesc)
861 {
862     int i;
863     PCIDevice *s = container_of(pv, PCIDevice, irq_state);
864 
865     for (i = 0; i < PCI_NUM_PINS; ++i) {
866         qemu_put_be32(f, pci_irq_state(s, i));
867     }
868 
869     return 0;
870 }
871 
872 static const VMStateInfo vmstate_info_pci_irq_state = {
873     .name = "pci irq state",
874     .get  = get_pci_irq_state,
875     .put  = put_pci_irq_state,
876 };
877 
878 static bool migrate_is_pcie(void *opaque, int version_id)
879 {
880     return pci_is_express((PCIDevice *)opaque);
881 }
882 
883 static bool migrate_is_not_pcie(void *opaque, int version_id)
884 {
885     return !pci_is_express((PCIDevice *)opaque);
886 }
887 
888 static int pci_post_load(void *opaque, int version_id)
889 {
890     pcie_sriov_pf_post_load(opaque);
891     return 0;
892 }
893 
894 const VMStateDescription vmstate_pci_device = {
895     .name = "PCIDevice",
896     .version_id = 2,
897     .minimum_version_id = 1,
898     .post_load = pci_post_load,
899     .fields = (const VMStateField[]) {
900         VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
901         VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
902                                    migrate_is_not_pcie,
903                                    0, vmstate_info_pci_config,
904                                    PCI_CONFIG_SPACE_SIZE),
905         VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
906                                    migrate_is_pcie,
907                                    0, vmstate_info_pci_config,
908                                    PCIE_CONFIG_SPACE_SIZE),
909         VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
910                                    vmstate_info_pci_irq_state,
911                                    PCI_NUM_PINS * sizeof(int32_t)),
912         VMSTATE_END_OF_LIST()
913     }
914 };
915 
916 
917 void pci_device_save(PCIDevice *s, QEMUFile *f)
918 {
919     /* Clear interrupt status bit: it is implicit
920      * in irq_state which we are saving.
921      * This makes us compatible with old devices
922      * which never set or clear this bit. */
923     s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
924     vmstate_save_state(f, &vmstate_pci_device, s, NULL);
925     /* Restore the interrupt status bit. */
926     pci_update_irq_status(s);
927 }
928 
929 int pci_device_load(PCIDevice *s, QEMUFile *f)
930 {
931     int ret;
932     ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
933     /* Restore the interrupt status bit. */
934     pci_update_irq_status(s);
935     return ret;
936 }
937 
938 static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
939 {
940     pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
941                  pci_default_sub_vendor_id);
942     pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
943                  pci_default_sub_device_id);
944 }
945 
946 /*
947  * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL
948  *       [[<domain>:]<bus>:]<slot>.<func>, return -1 on error
949  */
950 static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
951                              unsigned int *slotp, unsigned int *funcp)
952 {
953     const char *p;
954     char *e;
955     unsigned long val;
956     unsigned long dom = 0, bus = 0;
957     unsigned int slot = 0;
958     unsigned int func = 0;
959 
960     p = addr;
961     val = strtoul(p, &e, 16);
962     if (e == p)
963         return -1;
964     if (*e == ':') {
965         bus = val;
966         p = e + 1;
967         val = strtoul(p, &e, 16);
968         if (e == p)
969             return -1;
970         if (*e == ':') {
971             dom = bus;
972             bus = val;
973             p = e + 1;
974             val = strtoul(p, &e, 16);
975             if (e == p)
976                 return -1;
977         }
978     }
979 
980     slot = val;
981 
982     if (funcp != NULL) {
983         if (*e != '.')
984             return -1;
985 
986         p = e + 1;
987         val = strtoul(p, &e, 16);
988         if (e == p)
989             return -1;
990 
991         func = val;
992     }
993 
994     /* if funcp == NULL func is 0 */
995     if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
996         return -1;
997 
998     if (*e)
999         return -1;
1000 
1001     *domp = dom;
1002     *busp = bus;
1003     *slotp = slot;
1004     if (funcp != NULL)
1005         *funcp = func;
1006     return 0;
1007 }
1008 
1009 static void pci_init_cmask(PCIDevice *dev)
1010 {
1011     pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
1012     pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
1013     dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
1014     dev->cmask[PCI_REVISION_ID] = 0xff;
1015     dev->cmask[PCI_CLASS_PROG] = 0xff;
1016     pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
1017     dev->cmask[PCI_HEADER_TYPE] = 0xff;
1018     dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
1019 }
1020 
1021 static void pci_init_wmask(PCIDevice *dev)
1022 {
1023     int config_size = pci_config_size(dev);
1024 
1025     dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
1026     dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
1027     pci_set_word(dev->wmask + PCI_COMMAND,
1028                  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1029                  PCI_COMMAND_INTX_DISABLE);
1030     pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
1031 
1032     memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
1033            config_size - PCI_CONFIG_HEADER_SIZE);
1034 }
1035 
1036 static void pci_init_w1cmask(PCIDevice *dev)
1037 {
1038     /*
1039      * Note: It's okay to set w1cmask even for readonly bits as
1040      * long as their value is hardwired to 0.
1041      */
1042     pci_set_word(dev->w1cmask + PCI_STATUS,
1043                  PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
1044                  PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
1045                  PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
1046 }
1047 
1048 static void pci_init_mask_bridge(PCIDevice *d)
1049 {
1050     /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
1051        PCI_SEC_LATENCY_TIMER */
1052     memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);
1053 
1054     /* base and limit */
1055     d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
1056     d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
1057     pci_set_word(d->wmask + PCI_MEMORY_BASE,
1058                  PCI_MEMORY_RANGE_MASK & 0xffff);
1059     pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
1060                  PCI_MEMORY_RANGE_MASK & 0xffff);
1061     pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
1062                  PCI_PREF_RANGE_MASK & 0xffff);
1063     pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
1064                  PCI_PREF_RANGE_MASK & 0xffff);
1065 
1066     /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
1067     memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);
1068 
1069     /* Supported memory and i/o types */
1070     d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
1071     d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
1072     pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
1073                                PCI_PREF_RANGE_TYPE_64);
1074     pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
1075                                PCI_PREF_RANGE_TYPE_64);
1076 
1077     /*
1078      * TODO: Bridges default to 10-bit VGA decoding but we currently only
1079      * implement 16-bit decoding (no alias support).
1080      */
1081     pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
1082                  PCI_BRIDGE_CTL_PARITY |
1083                  PCI_BRIDGE_CTL_SERR |
1084                  PCI_BRIDGE_CTL_ISA |
1085                  PCI_BRIDGE_CTL_VGA |
1086                  PCI_BRIDGE_CTL_VGA_16BIT |
1087                  PCI_BRIDGE_CTL_MASTER_ABORT |
1088                  PCI_BRIDGE_CTL_BUS_RESET |
1089                  PCI_BRIDGE_CTL_FAST_BACK |
1090                  PCI_BRIDGE_CTL_DISCARD |
1091                  PCI_BRIDGE_CTL_SEC_DISCARD |
1092                  PCI_BRIDGE_CTL_DISCARD_SERR);
1093     /* The below does nothing as we never set this bit; it is here for
1094      * completeness. */
1095     pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
1096                  PCI_BRIDGE_CTL_DISCARD_STATUS);
1097     d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
1098     d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
1099     pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
1100                                PCI_PREF_RANGE_TYPE_MASK);
1101     pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
1102                                PCI_PREF_RANGE_TYPE_MASK);
1103 }
1104 
1105 static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
1106 {
1107     uint8_t slot = PCI_SLOT(dev->devfn);
1108     uint8_t func;
1109 
1110     if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1111         dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
1112     }
1113 
1114     /* SR/IOV is not handled here. */
1115     if (pci_is_vf(dev)) {
1116         return;
1117     }
1118 
1119     /*
1120      * multifunction bit is interpreted in two ways as follows.
1121      *   - all functions must set the bit to 1.
1122      *     Example: Intel X53
1123      *   - function 0 must set the bit, but the remaining functions (> 0)
1124      *     are allowed to leave the bit clear.
1125      *     Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10,
1126      *
1127      * So the OS (at least Linux) checks the bit only on function 0
1128      * and ignores the bit on functions > 0.
1129      *
1130      * The check below allows both interpretations.
1131      */
1132     if (PCI_FUNC(dev->devfn)) {
1133         PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
1134         if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
1135             /* function 0 should set multifunction bit */
1136             error_setg(errp, "PCI: single function device can't be populated "
1137                        "in function %x.%x", slot, PCI_FUNC(dev->devfn));
1138             return;
1139         }
1140         return;
1141     }
1142 
1143     if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1144         return;
1145     }
1146     /* function 0 indicates single function, so function > 0 must be NULL */
1147     for (func = 1; func < PCI_FUNC_MAX; ++func) {
1148         PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)];
1149         if (device && !pci_is_vf(device)) {
1150             error_setg(errp, "PCI: %x.0 indicates single function, "
1151                        "but %x.%x is already populated.",
1152                        slot, slot, func);
1153             return;
1154         }
1155     }
1156 }
1157 
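/*
 * Allocate the config space image together with its cmask/wmask/w1cmask
 * and "used" shadow arrays, sized for conventional PCI or PCIe.
 */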
1158 static void pci_config_alloc(PCIDevice *pci_dev)
1159 {
1160     int config_size = pci_config_size(pci_dev);
1161 
1162     pci_dev->config = g_malloc0(config_size);
1163     pci_dev->cmask = g_malloc0(config_size);
1164     pci_dev->wmask = g_malloc0(config_size);
1165     pci_dev->w1cmask = g_malloc0(config_size);
1166     pci_dev->used = g_malloc0(config_size);
1167 }
1168 
1169 static void pci_config_free(PCIDevice *pci_dev)
1170 {
1171     g_free(pci_dev->config);
1172     g_free(pci_dev->cmask);
1173     g_free(pci_dev->wmask);
1174     g_free(pci_dev->w1cmask);
1175     g_free(pci_dev->used);
1176 }
1177 
1178 static void do_pci_unregister_device(PCIDevice *pci_dev)
1179 {
1180     pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
1181     pci_config_free(pci_dev);
1182 
1183     if (xen_mode == XEN_EMULATE) {
1184         xen_evtchn_remove_pci_device(pci_dev);
1185     }
1186     if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
1187         memory_region_del_subregion(&pci_dev->bus_master_container_region,
1188                                     &pci_dev->bus_master_enable_region);
1189     }
1190     address_space_destroy(&pci_dev->bus_master_as);
1191 }
1192 
1193 /* Extract PCIReqIDCache into BDF format */
1194 static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
1195 {
1196     uint8_t bus_n;
1197     uint16_t result;
1198 
1199     switch (cache->type) {
1200     case PCI_REQ_ID_BDF:
1201         result = pci_get_bdf(cache->dev);
1202         break;
1203     case PCI_REQ_ID_SECONDARY_BUS:
1204         bus_n = pci_dev_bus_num(cache->dev);
1205         result = PCI_BUILD_BDF(bus_n, 0);
1206         break;
1207     default:
1208         error_report("Invalid PCI requester ID cache type: %d",
1209                      cache->type);
1210         exit(1);
1211         break;
1212     }
1213 
1214     return result;
1215 }
1216 
1217 /* Parse bridges up to the root complex and return requester ID
1218  * cache for specific device.  For full PCIe topology, the cache
1219  * result would be exactly the same as getting BDF of the device.
1220  * However, several tricks are required when the topology mixes in
1221  * legacy PCI devices and PCIe-to-PCI bridges.
1222  *
1223  * Here we cache the proxy device (and type) not requester ID since
1224  * bus number might change from time to time.
1225  */
1226 static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
1227 {
1228     PCIDevice *parent;
1229     PCIReqIDCache cache = {
1230         .dev = dev,
1231         .type = PCI_REQ_ID_BDF,
1232     };
1233 
1234     while (!pci_bus_is_root(pci_get_bus(dev))) {
1235         /* We are under PCI/PCIe bridges */
1236         parent = pci_get_bus(dev)->parent_dev;
1237         if (pci_is_express(parent)) {
1238             if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
1239                 /* When we pass through PCIe-to-PCI/PCIX bridges, we
1240                  * override the requester ID using secondary bus
1241                  * number of parent bridge with zeroed devfn
1242                  * (pcie-to-pci bridge spec chap 2.3). */
1243                 cache.type = PCI_REQ_ID_SECONDARY_BUS;
1244                 cache.dev = dev;
1245             }
1246         } else {
1247             /* Legacy PCI, override requester ID with the bridge's
1248              * BDF upstream.  When the root complex connects to
1249              * legacy PCI devices (including buses), it can only
1250              * obtain requester ID info from directly attached
1251              * devices.  If devices are attached under bridges, only
1252              * the requester ID of the bridge that is directly
1253              * attached to the root complex can be recognized. */
1254             cache.type = PCI_REQ_ID_BDF;
1255             cache.dev = parent;
1256         }
1257         dev = parent;
1258     }
1259 
1260     return cache;
1261 }
1262 
1263 uint16_t pci_requester_id(PCIDevice *dev)
1264 {
1265     return pci_req_id_cache_extract(&dev->requester_id_cache);
1266 }
1267 
1268 static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
1269 {
1270     return !(bus->devices[devfn]);
1271 }
1272 
1273 static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
1274 {
1275     return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
1276 }
1277 
1278 uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
1279 {
1280     return bus->slot_reserved_mask;
1281 }
1282 
1283 void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1284 {
1285     bus->slot_reserved_mask |= mask;
1286 }
1287 
1288 void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1289 {
1290     bus->slot_reserved_mask &= ~mask;
1291 }
1292 
1293 /* -1 for devfn means auto assign */
1294 static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
1295                                          const char *name, int devfn,
1296                                          Error **errp)
1297 {
1298     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1299     PCIConfigReadFunc *config_read = pc->config_read;
1300     PCIConfigWriteFunc *config_write = pc->config_write;
1301     Error *local_err = NULL;
1302     DeviceState *dev = DEVICE(pci_dev);
1303     PCIBus *bus = pci_get_bus(pci_dev);
1304     bool is_bridge = IS_PCI_BRIDGE(pci_dev);
1305 
1306     /* Only pci bridges can be attached to extra PCI root buses */
1307     if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
1308         error_setg(errp,
1309                    "PCI: Only PCI/PCIe bridges can be plugged into %s",
1310                     bus->parent_dev->name);
1311         return NULL;
1312     }
1313 
1314     if (devfn < 0) {
1315         for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
1316             devfn += PCI_FUNC_MAX) {
1317             if (pci_bus_devfn_available(bus, devfn) &&
1318                    !pci_bus_devfn_reserved(bus, devfn)) {
1319                 goto found;
1320             }
1321         }
1322         error_setg(errp, "PCI: no slot/function available for %s, all in use "
1323                    "or reserved", name);
1324         return NULL;
1325     found: ;
1326     } else if (pci_bus_devfn_reserved(bus, devfn)) {
1327         error_setg(errp, "PCI: slot %d function %d not available for %s,"
1328                    " reserved",
1329                    PCI_SLOT(devfn), PCI_FUNC(devfn), name);
1330         return NULL;
1331     } else if (!pci_bus_devfn_available(bus, devfn)) {
1332         error_setg(errp, "PCI: slot %d function %d not available for %s,"
1333                    " in use by %s,id=%s",
1334                    PCI_SLOT(devfn), PCI_FUNC(devfn), name,
1335                    bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
1336         return NULL;
1337     }
1338 
1339     /*
1340      * Populating function 0 triggers a scan from the guest that
1341      * exposes other non-zero functions. Hence we need to ensure that
1342      * function 0 wasn't added yet.
1343      */
1344     if (dev->hotplugged && !pci_is_vf(pci_dev) &&
1345         pci_get_function_0(pci_dev)) {
1346         error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1347                    " new func %s cannot be exposed to guest.",
1348                    PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
1349                    pci_get_function_0(pci_dev)->name,
1350                    name);
1351 
1352        return NULL;
1353     }
1354 
1355     pci_dev->devfn = devfn;
1356     pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
1357     pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
1358 
1359     memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
1360                        "bus master container", UINT64_MAX);
1361     address_space_init(&pci_dev->bus_master_as,
1362                        &pci_dev->bus_master_container_region, pci_dev->name);
1363     pci_dev->bus_master_as.max_bounce_buffer_size =
1364         pci_dev->max_bounce_buffer_size;
1365 
1366     if (phase_check(PHASE_MACHINE_READY)) {
1367         pci_init_bus_master(pci_dev);
1368     }
1369     pci_dev->irq_state = 0;
1370     pci_config_alloc(pci_dev);
1371 
1372     pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
1373     pci_config_set_device_id(pci_dev->config, pc->device_id);
1374     pci_config_set_revision(pci_dev->config, pc->revision);
1375     pci_config_set_class(pci_dev->config, pc->class_id);
1376 
1377     if (!is_bridge) {
1378         if (pc->subsystem_vendor_id || pc->subsystem_id) {
1379             pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
1380                          pc->subsystem_vendor_id);
1381             pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
1382                          pc->subsystem_id);
1383         } else {
1384             pci_set_default_subsystem_id(pci_dev);
1385         }
1386     } else {
1387         /* subsystem_vendor_id/subsystem_id are only for header type 0 */
1388         assert(!pc->subsystem_vendor_id);
1389         assert(!pc->subsystem_id);
1390     }
1391     pci_init_cmask(pci_dev);
1392     pci_init_wmask(pci_dev);
1393     pci_init_w1cmask(pci_dev);
1394     if (is_bridge) {
1395         pci_init_mask_bridge(pci_dev);
1396     }
1397     pci_init_multifunction(bus, pci_dev, &local_err);
1398     if (local_err) {
1399         error_propagate(errp, local_err);
1400         do_pci_unregister_device(pci_dev);
1401         return NULL;
1402     }
1403 
1404     if (!config_read)
1405         config_read = pci_default_read_config;
1406     if (!config_write)
1407         config_write = pci_default_write_config;
1408     pci_dev->config_read = config_read;
1409     pci_dev->config_write = config_write;
1410     bus->devices[devfn] = pci_dev;
1411     pci_dev->version_id = 2; /* Current pci device vmstate version */
1412     return pci_dev;
1413 }
1414 
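/* Unmap every BAR that is currently mapped and drop the legacy VGA aliases. */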
1415 static void pci_unregister_io_regions(PCIDevice *pci_dev)
1416 {
1417     PCIIORegion *r;
1418     int i;
1419 
1420     for(i = 0; i < PCI_NUM_REGIONS; i++) {
1421         r = &pci_dev->io_regions[i];
1422         if (!r->size || r->addr == PCI_BAR_UNMAPPED)
1423             continue;
1424         memory_region_del_subregion(r->address_space, r->memory);
1425     }
1426 
1427     pci_unregister_vga(pci_dev);
1428 }
1429 
1430 static void pci_qdev_unrealize(DeviceState *dev)
1431 {
1432     PCIDevice *pci_dev = PCI_DEVICE(dev);
1433     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1434 
1435     pci_unregister_io_regions(pci_dev);
1436     pci_del_option_rom(pci_dev);
1437     pcie_sriov_unregister_device(pci_dev);
1438 
1439     if (pc->exit) {
1440         pc->exit(pci_dev);
1441     }
1442 
1443     pci_device_deassert_intx(pci_dev);
1444     do_pci_unregister_device(pci_dev);
1445 
1446     pci_dev->msi_trigger = NULL;
1447 
1448     /*
1449      * clean up acpi-index so it can be reused by another device
1450      */
1451     if (pci_dev->acpi_index) {
1452         GSequence *used_indexes = pci_acpi_index_list();
1453 
1454         g_sequence_remove(g_sequence_lookup(used_indexes,
1455                           GINT_TO_POINTER(pci_dev->acpi_index),
1456                           g_cmp_uint32, NULL));
1457     }
1458 }
1459 
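/*
 * Record BAR @region_num (size, type, backing memory region) and program
 * the config space template plus its write/check masks.  VF BARs are
 * mapped right away at the address derived from the PF's SR-IOV registers.
 */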
1460 void pci_register_bar(PCIDevice *pci_dev, int region_num,
1461                       uint8_t type, MemoryRegion *memory)
1462 {
1463     PCIIORegion *r;
1464     uint32_t addr; /* offset in pci config space */
1465     uint64_t wmask;
1466     pcibus_t size = memory_region_size(memory);
1467     uint8_t hdr_type;
1468 
1469     assert(region_num >= 0);
1470     assert(region_num < PCI_NUM_REGIONS);
1471     assert(is_power_of_2(size));
1472 
1473     /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
1474     hdr_type =
1475         pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
1476     assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);
1477 
1478     r = &pci_dev->io_regions[region_num];
1479     assert(!r->size);
1480     r->size = size;
1481     r->type = type;
1482     r->memory = memory;
1483     r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
1484                         ? pci_get_bus(pci_dev)->address_space_io
1485                         : pci_get_bus(pci_dev)->address_space_mem;
1486 
1487     if (pci_is_vf(pci_dev)) {
1488         PCIDevice *pf = pci_dev->exp.sriov_vf.pf;
1489         assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]);
1490 
1491         r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size);
1492         if (r->addr != PCI_BAR_UNMAPPED) {
1493             memory_region_add_subregion_overlap(r->address_space,
1494                                                 r->addr, r->memory, 1);
1495         }
1496     } else {
1497         r->addr = PCI_BAR_UNMAPPED;
1498 
1499         wmask = ~(size - 1);
1500         if (region_num == PCI_ROM_SLOT) {
1501             /* ROM enable bit is writable */
1502             wmask |= PCI_ROM_ADDRESS_ENABLE;
1503         }
1504 
1505         addr = pci_bar(pci_dev, region_num);
1506         pci_set_long(pci_dev->config + addr, type);
1507 
1508         if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
1509             r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1510             pci_set_quad(pci_dev->wmask + addr, wmask);
1511             pci_set_quad(pci_dev->cmask + addr, ~0ULL);
1512         } else {
1513             pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
1514             pci_set_long(pci_dev->cmask + addr, 0xffffffff);
1515         }
1516     }
1517 }
1518 
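/*
 * For illustration, a minimal sketch of how a device model typically pairs a
 * MemoryRegion with pci_register_bar() in its realize function; the "mydev"
 * names are placeholders, and the region size must be a power of two as
 * asserted above:
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mydev_mmio_ops, s,
 *                           "mydev-mmio", 4 * KiB);
 *     pci_register_bar(PCI_DEVICE(s), 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
 *                      &s->mmio);
 */
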
pci_update_vga(PCIDevice * pci_dev)1519 static void pci_update_vga(PCIDevice *pci_dev)
1520 {
1521     uint16_t cmd;
1522 
1523     if (!pci_dev->has_vga) {
1524         return;
1525     }
1526 
1527     cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
1528 
1529     memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
1530                               cmd & PCI_COMMAND_MEMORY);
1531     memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
1532                               cmd & PCI_COMMAND_IO);
1533     memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
1534                               cmd & PCI_COMMAND_IO);
1535 }
1536 
pci_register_vga(PCIDevice * pci_dev,MemoryRegion * mem,MemoryRegion * io_lo,MemoryRegion * io_hi)1537 void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
1538                       MemoryRegion *io_lo, MemoryRegion *io_hi)
1539 {
1540     PCIBus *bus = pci_get_bus(pci_dev);
1541 
1542     assert(!pci_dev->has_vga);
1543 
1544     assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
1545     pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
1546     memory_region_add_subregion_overlap(bus->address_space_mem,
1547                                         QEMU_PCI_VGA_MEM_BASE, mem, 1);
1548 
1549     assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
1550     pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
1551     memory_region_add_subregion_overlap(bus->address_space_io,
1552                                         QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);
1553 
1554     assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
1555     pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
1556     memory_region_add_subregion_overlap(bus->address_space_io,
1557                                         QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
1558     pci_dev->has_vga = true;
1559 
1560     pci_update_vga(pci_dev);
1561 }
1562 
pci_unregister_vga(PCIDevice * pci_dev)1563 void pci_unregister_vga(PCIDevice *pci_dev)
1564 {
1565     PCIBus *bus = pci_get_bus(pci_dev);
1566 
1567     if (!pci_dev->has_vga) {
1568         return;
1569     }
1570 
1571     memory_region_del_subregion(bus->address_space_mem,
1572                                 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
1573     memory_region_del_subregion(bus->address_space_io,
1574                                 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
1575     memory_region_del_subregion(bus->address_space_io,
1576                                 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
1577     pci_dev->has_vga = false;
1578 }
1579 
pci_get_bar_addr(PCIDevice * pci_dev,int region_num)1580 pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
1581 {
1582     return pci_dev->io_regions[region_num].addr;
1583 }
1584 
pci_config_get_bar_addr(PCIDevice * d,int reg,uint8_t type,pcibus_t size)1585 static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
1586                                         uint8_t type, pcibus_t size)
1587 {
1588     pcibus_t new_addr;
1589     if (!pci_is_vf(d)) {
1590         int bar = pci_bar(d, reg);
1591         if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1592             new_addr = pci_get_quad(d->config + bar);
1593         } else {
1594             new_addr = pci_get_long(d->config + bar);
1595         }
1596     } else {
1597         PCIDevice *pf = d->exp.sriov_vf.pf;
1598         uint16_t sriov_cap = pf->exp.sriov_cap;
1599         int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
1600         uint16_t vf_offset =
1601             pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
1602         uint16_t vf_stride =
1603             pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
1604         uint32_t vf_num = d->devfn - (pf->devfn + vf_offset);
1605 
1606         if (vf_num) {
1607             vf_num /= vf_stride;
1608         }
1609 
1610         if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1611             new_addr = pci_get_quad(pf->config + bar);
1612         } else {
1613             new_addr = pci_get_long(pf->config + bar);
1614         }
1615         new_addr += vf_num * size;
1616     }
1617     /* The ROM slot has a specific enable bit, keep it intact */
1618     if (reg != PCI_ROM_SLOT) {
1619         new_addr &= ~(size - 1);
1620     }
1621     return new_addr;
1622 }
1623 
pci_bar_address(PCIDevice * d,int reg,uint8_t type,pcibus_t size)1624 pcibus_t pci_bar_address(PCIDevice *d,
1625                          int reg, uint8_t type, pcibus_t size)
1626 {
1627     pcibus_t new_addr, last_addr;
1628     uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
1629     MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
1630     bool allow_0_address = mc->pci_allow_0_address;
1631 
1632     if (type & PCI_BASE_ADDRESS_SPACE_IO) {
1633         if (!(cmd & PCI_COMMAND_IO)) {
1634             return PCI_BAR_UNMAPPED;
1635         }
1636         new_addr = pci_config_get_bar_addr(d, reg, type, size);
1637         last_addr = new_addr + size - 1;
1638         /* Explicitly check whether a 32-bit BAR wraps around.
1639          * TODO: make priorities correct and remove this workaround.
1640          */
1641         if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
1642             (!allow_0_address && new_addr == 0)) {
1643             return PCI_BAR_UNMAPPED;
1644         }
1645         return new_addr;
1646     }
1647 
1648     if (!(cmd & PCI_COMMAND_MEMORY)) {
1649         return PCI_BAR_UNMAPPED;
1650     }
1651     new_addr = pci_config_get_bar_addr(d, reg, type, size);
1652     /* the ROM slot has a specific enable bit */
1653     if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
1654         return PCI_BAR_UNMAPPED;
1655     }
1656     new_addr &= ~(size - 1);
1657     last_addr = new_addr + size - 1;
1658     /* NOTE: we do not support wrapping */
1659     /* XXX: as we cannot support really dynamic
1660        mappings, we handle specific values as invalid
1661        mappings. */
1662     if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
1663         (!allow_0_address && new_addr == 0)) {
1664         return PCI_BAR_UNMAPPED;
1665     }
1666 
1667     /* pcibus_t is now 64 bit.
1668      * Explicitly check whether a 32-bit BAR wraps around.
1669      * Without this, PC IDE doesn't work well.
1670      * TODO: remove this workaround.
1671      */
1672     if  (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
1673         return PCI_BAR_UNMAPPED;
1674     }
1675 
1676     /*
1677      * The OS is allowed to set a BAR beyond its addressable
1678      * bits. For example, a 32 bit OS can set a 64 bit BAR
1679      * to >4G. Check for that. TODO: we might need to support
1680      * it in the future, e.g. for PAE.
1681      */
1682     if (last_addr >= HWADDR_MAX) {
1683         return PCI_BAR_UNMAPPED;
1684     }
1685 
1686     return new_addr;
1687 }
1688 
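/*
 * Illustrative decode under the rules above: for a 4 KiB memory BAR whose
 * config register currently reads 0xfebf100c, the low type/flag bits fall
 * inside (size - 1) and are masked off, giving a base of 0xfebf1000.  The
 * address is only honored while PCI_COMMAND_MEMORY (or PCI_COMMAND_IO for
 * I/O BARs) is set; otherwise PCI_BAR_UNMAPPED is returned.
 */
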
pci_update_mappings(PCIDevice * d)1689 static void pci_update_mappings(PCIDevice *d)
1690 {
1691     PCIIORegion *r;
1692     int i;
1693     pcibus_t new_addr;
1694 
1695     for(i = 0; i < PCI_NUM_REGIONS; i++) {
1696         r = &d->io_regions[i];
1697 
1698         /* this region isn't registered */
1699         if (!r->size)
1700             continue;
1701 
1702         new_addr = pci_bar_address(d, i, r->type, r->size);
1703         if (!d->enabled || pci_pm_state(d)) {
1704             new_addr = PCI_BAR_UNMAPPED;
1705         }
1706 
1707         /* This bar isn't changed */
1708         if (new_addr == r->addr)
1709             continue;
1710 
1711         /* now do the real mapping */
1712         if (r->addr != PCI_BAR_UNMAPPED) {
1713             trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
1714                                           PCI_SLOT(d->devfn),
1715                                           PCI_FUNC(d->devfn),
1716                                           i, r->addr, r->size);
1717             memory_region_del_subregion(r->address_space, r->memory);
1718         }
1719         r->addr = new_addr;
1720         if (r->addr != PCI_BAR_UNMAPPED) {
1721             trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
1722                                           PCI_SLOT(d->devfn),
1723                                           PCI_FUNC(d->devfn),
1724                                           i, r->addr, r->size);
1725             memory_region_add_subregion_overlap(r->address_space,
1726                                                 r->addr, r->memory, 1);
1727         }
1728     }
1729 
1730     pci_update_vga(d);
1731 }
1732 
pci_irq_disabled(PCIDevice * d)1733 int pci_irq_disabled(PCIDevice *d)
1734 {
1735     return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
1736 }
1737 
1738 /* Called after the interrupt disable field in config space is updated;
1739  * assert/deassert interrupts if necessary.
1740  * Takes the original interrupt disable bit value (before the update). */
pci_update_irq_disabled(PCIDevice * d,int was_irq_disabled)1741 static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
1742 {
1743     int i, disabled = pci_irq_disabled(d);
1744     if (disabled == was_irq_disabled)
1745         return;
1746     for (i = 0; i < PCI_NUM_PINS; ++i) {
1747         int state = pci_irq_state(d, i);
1748         pci_change_irq_level(d, i, disabled ? -state : state);
1749     }
1750 }
1751 
pci_default_read_config(PCIDevice * d,uint32_t address,int len)1752 uint32_t pci_default_read_config(PCIDevice *d,
1753                                  uint32_t address, int len)
1754 {
1755     uint32_t val = 0;
1756 
1757     assert(address + len <= pci_config_size(d));
1758 
1759     if (pci_is_express_downstream_port(d) &&
1760         ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
1761         pcie_sync_bridge_lnk(d);
1762     }
1763     memcpy(&val, d->config + address, len);
1764     return le32_to_cpu(val);
1765 }
1766 
pci_default_write_config(PCIDevice * d,uint32_t addr,uint32_t val_in,int l)1767 void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
1768 {
1769     uint8_t new_pm_state, old_pm_state = pci_pm_state(d);
1770     int i, was_irq_disabled = pci_irq_disabled(d);
1771     uint32_t val = val_in;
1772 
1773     assert(addr + l <= pci_config_size(d));
1774 
1775     for (i = 0; i < l; val >>= 8, ++i) {
1776         uint8_t wmask = d->wmask[addr + i];
1777         uint8_t w1cmask = d->w1cmask[addr + i];
1778         assert(!(wmask & w1cmask));
1779         d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
1780         d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
1781     }
1782 
1783     new_pm_state = pci_pm_update(d, addr, l, old_pm_state);
1784 
1785     if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
1786         ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
1787         ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
1788         range_covers_byte(addr, l, PCI_COMMAND) ||
1789         !!new_pm_state != !!old_pm_state) {
1790         pci_update_mappings(d);
1791     }
1792 
1793     if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
1794         pci_update_irq_disabled(d, was_irq_disabled);
1795         pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) &
1796                           PCI_COMMAND_MASTER) && d->enabled);
1797     }
1798 
1799     msi_write_config(d, addr, val_in, l);
1800     msix_write_config(d, addr, val_in, l);
1801     pcie_sriov_config_write(d, addr, val_in, l);
1802 }
1803 
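/*
 * Worked example of the per-byte masking above: take a config byte that
 * currently reads 0x90, with wmask 0x00 (read-only) and w1cmask 0x80
 * (write-1-to-clear, as used for PCI_STATUS error bits).  A guest write of
 * 0x80 leaves the read-only bits untouched and then clears bit 7:
 *
 *     (0x90 & ~0x00) | (0x80 & 0x00) = 0x90, then 0x90 & ~(0x80 & 0x80) = 0x10
 */
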
1804 /***********************************************************/
1805 /* generic PCI irq support */
1806 
1807 /* 0 <= irq_num <= 3. level must be 0 or 1 */
pci_irq_handler(void * opaque,int irq_num,int level)1808 static void pci_irq_handler(void *opaque, int irq_num, int level)
1809 {
1810     PCIDevice *pci_dev = opaque;
1811     int change;
1812 
1813     assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
1814     assert(level == 0 || level == 1);
1815     change = level - pci_irq_state(pci_dev, irq_num);
1816     if (!change)
1817         return;
1818 
1819     pci_set_irq_state(pci_dev, irq_num, level);
1820     pci_update_irq_status(pci_dev);
1821     if (pci_irq_disabled(pci_dev))
1822         return;
1823     pci_change_irq_level(pci_dev, irq_num, change);
1824 }
1825 
pci_allocate_irq(PCIDevice * pci_dev)1826 qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
1827 {
1828     int intx = pci_intx(pci_dev);
1829     assert(0 <= intx && intx < PCI_NUM_PINS);
1830 
1831     return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
1832 }
1833 
pci_set_irq(PCIDevice * pci_dev,int level)1834 void pci_set_irq(PCIDevice *pci_dev, int level)
1835 {
1836     int intx = pci_intx(pci_dev);
1837     pci_irq_handler(pci_dev, intx, level);
1838 }
1839 
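/*
 * For illustration, a sketch (not from this file) of how a device model
 * using legacy INTx typically declares its pin at realize time and then
 * drives the line:
 *
 *     pci_config_set_interrupt_pin(pci_dev->config, 1);   (pin 1 == INTA)
 *     pci_set_irq(pci_dev, 1);                             (assert)
 *     pci_set_irq(pci_dev, 0);                             (deassert)
 *
 * The pci_irq_assert()/pci_irq_deassert() helpers in hw/pci/pci.h are thin
 * wrappers around pci_set_irq().
 */
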
1840 /* Special hooks used by device assignment */
pci_bus_set_route_irq_fn(PCIBus * bus,pci_route_irq_fn route_intx_to_irq)1841 void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
1842 {
1843     assert(pci_bus_is_root(bus));
1844     bus->route_intx_to_irq = route_intx_to_irq;
1845 }
1846 
pci_device_route_intx_to_irq(PCIDevice * dev,int pin)1847 PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
1848 {
1849     PCIBus *bus;
1850 
1851     do {
1852         int dev_irq = pin;
1853         bus = pci_get_bus(dev);
1854         pin = bus->map_irq(dev, pin);
1855         trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
1856                             pci_bus_is_root(bus) ? "root-complex"
1857                                     : DEVICE(bus->parent_dev)->canonical_path);
1858         dev = bus->parent_dev;
1859     } while (dev);
1860 
1861     if (!bus->route_intx_to_irq) {
1862         error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
1863                      object_get_typename(OBJECT(bus->qbus.parent)));
1864         return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
1865     }
1866 
1867     return bus->route_intx_to_irq(bus->irq_opaque, pin);
1868 }
1869 
pci_intx_route_changed(PCIINTxRoute * old,PCIINTxRoute * new)1870 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
1871 {
1872     return old->mode != new->mode || old->irq != new->irq;
1873 }
1874 
pci_bus_fire_intx_routing_notifier(PCIBus * bus)1875 void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
1876 {
1877     PCIDevice *dev;
1878     PCIBus *sec;
1879     int i;
1880 
1881     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
1882         dev = bus->devices[i];
1883         if (dev && dev->intx_routing_notifier) {
1884             dev->intx_routing_notifier(dev);
1885         }
1886     }
1887 
1888     QLIST_FOREACH(sec, &bus->child, sibling) {
1889         pci_bus_fire_intx_routing_notifier(sec);
1890     }
1891 }
1892 
pci_device_set_intx_routing_notifier(PCIDevice * dev,PCIINTxRoutingNotifier notifier)1893 void pci_device_set_intx_routing_notifier(PCIDevice *dev,
1894                                           PCIINTxRoutingNotifier notifier)
1895 {
1896     dev->intx_routing_notifier = notifier;
1897 }
1898 
1899 /*
1900  * PCI-to-PCI bridge specification
1901  * 9.1: Interrupt routing. Table 9-1
1902  *
1903  * the PCI Express Base Specification, Revision 2.1
1904  * 2.2.8.1: INTx interrupt signaling - Rules
1905  *          the Implementation Note
1906  *          Table 2-20
1907  */
1908 /*
1909  * 0 <= pin <= 3; 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
1910  * 0-origin unlike PCI interrupt pin register.
1911  */
pci_swizzle_map_irq_fn(PCIDevice * pci_dev,int pin)1912 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
1913 {
1914     return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
1915 }
1916 
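/*
 * Worked example, assuming pci_swizzle() is the usual (slot + pin) %
 * PCI_NUM_PINS helper from hw/pci/pci.h: a device in slot 2 asserting INTB
 * (pin 1) is seen by the parent bridge as pin (2 + 1) % 4 = 3, i.e. INTD.
 * Repeating the swizzle at every bridge level spreads interrupt load across
 * the four host pins.
 */
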
1917 /***********************************************************/
1918 /* monitor info on PCI */
1919 
1920 static const pci_class_desc pci_class_descriptions[] =
1921 {
1922     { 0x0001, "VGA controller", "display"},
1923     { 0x0100, "SCSI controller", "scsi"},
1924     { 0x0101, "IDE controller", "ide"},
1925     { 0x0102, "Floppy controller", "fdc"},
1926     { 0x0103, "IPI controller", "ipi"},
1927     { 0x0104, "RAID controller", "raid"},
1928     { 0x0106, "SATA controller"},
1929     { 0x0107, "SAS controller"},
1930     { 0x0180, "Storage controller"},
1931     { 0x0200, "Ethernet controller", "ethernet"},
1932     { 0x0201, "Token Ring controller", "token-ring"},
1933     { 0x0202, "FDDI controller", "fddi"},
1934     { 0x0203, "ATM controller", "atm"},
1935     { 0x0280, "Network controller"},
1936     { 0x0300, "VGA controller", "display", 0x00ff},
1937     { 0x0301, "XGA controller"},
1938     { 0x0302, "3D controller"},
1939     { 0x0380, "Display controller"},
1940     { 0x0400, "Video controller", "video"},
1941     { 0x0401, "Audio controller", "sound"},
1942     { 0x0402, "Phone"},
1943     { 0x0403, "Audio controller", "sound"},
1944     { 0x0480, "Multimedia controller"},
1945     { 0x0500, "RAM controller", "memory"},
1946     { 0x0501, "Flash controller", "flash"},
1947     { 0x0580, "Memory controller"},
1948     { 0x0600, "Host bridge", "host"},
1949     { 0x0601, "ISA bridge", "isa"},
1950     { 0x0602, "EISA bridge", "eisa"},
1951     { 0x0603, "MC bridge", "mca"},
1952     { 0x0604, "PCI bridge", "pci-bridge"},
1953     { 0x0605, "PCMCIA bridge", "pcmcia"},
1954     { 0x0606, "NUBUS bridge", "nubus"},
1955     { 0x0607, "CARDBUS bridge", "cardbus"},
1956     { 0x0608, "RACEWAY bridge"},
1957     { 0x0680, "Bridge"},
1958     { 0x0700, "Serial port", "serial"},
1959     { 0x0701, "Parallel port", "parallel"},
1960     { 0x0800, "Interrupt controller", "interrupt-controller"},
1961     { 0x0801, "DMA controller", "dma-controller"},
1962     { 0x0802, "Timer", "timer"},
1963     { 0x0803, "RTC", "rtc"},
1964     { 0x0900, "Keyboard", "keyboard"},
1965     { 0x0901, "Pen", "pen"},
1966     { 0x0902, "Mouse", "mouse"},
1967     { 0x0A00, "Dock station", "dock", 0x00ff},
1968     { 0x0B00, "i386 cpu", "cpu", 0x00ff},
1969     { 0x0c00, "Firewire controller", "firewire"},
1970     { 0x0c01, "Access bus controller", "access-bus"},
1971     { 0x0c02, "SSA controller", "ssa"},
1972     { 0x0c03, "USB controller", "usb"},
1973     { 0x0c04, "Fibre channel controller", "fibre-channel"},
1974     { 0x0c05, "SMBus"},
1975     { 0, NULL}
1976 };
1977 
pci_for_each_device_under_bus_reverse(PCIBus * bus,pci_bus_dev_fn fn,void * opaque)1978 void pci_for_each_device_under_bus_reverse(PCIBus *bus,
1979                                            pci_bus_dev_fn fn,
1980                                            void *opaque)
1981 {
1982     PCIDevice *d;
1983     int devfn;
1984 
1985     for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
1986         d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
1987         if (d) {
1988             fn(bus, d, opaque);
1989         }
1990     }
1991 }
1992 
pci_for_each_device_reverse(PCIBus * bus,int bus_num,pci_bus_dev_fn fn,void * opaque)1993 void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
1994                                  pci_bus_dev_fn fn, void *opaque)
1995 {
1996     bus = pci_find_bus_nr(bus, bus_num);
1997 
1998     if (bus) {
1999         pci_for_each_device_under_bus_reverse(bus, fn, opaque);
2000     }
2001 }
2002 
pci_for_each_device_under_bus(PCIBus * bus,pci_bus_dev_fn fn,void * opaque)2003 void pci_for_each_device_under_bus(PCIBus *bus,
2004                                    pci_bus_dev_fn fn, void *opaque)
2005 {
2006     PCIDevice *d;
2007     int devfn;
2008 
2009     for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
2010         d = bus->devices[devfn];
2011         if (d) {
2012             fn(bus, d, opaque);
2013         }
2014     }
2015 }
2016 
pci_for_each_device(PCIBus * bus,int bus_num,pci_bus_dev_fn fn,void * opaque)2017 void pci_for_each_device(PCIBus *bus, int bus_num,
2018                          pci_bus_dev_fn fn, void *opaque)
2019 {
2020     bus = pci_find_bus_nr(bus, bus_num);
2021 
2022     if (bus) {
2023         pci_for_each_device_under_bus(bus, fn, opaque);
2024     }
2025 }
2026 
get_class_desc(int class)2027 const pci_class_desc *get_class_desc(int class)
2028 {
2029     const pci_class_desc *desc;
2030 
2031     desc = pci_class_descriptions;
2032     while (desc->desc && class != desc->class) {
2033         desc++;
2034     }
2035 
2036     return desc;
2037 }
2038 
pci_init_nic_devices(PCIBus * bus,const char * default_model)2039 void pci_init_nic_devices(PCIBus *bus, const char *default_model)
2040 {
2041     qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
2042                                 "virtio", "virtio-net-pci");
2043 }
2044 
pci_init_nic_in_slot(PCIBus * rootbus,const char * model,const char * alias,const char * devaddr)2045 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
2046                           const char *alias, const char *devaddr)
2047 {
2048     NICInfo *nd = qemu_find_nic_info(model, true, alias);
2049     int dom, busnr, devfn;
2050     PCIDevice *pci_dev;
2051     unsigned slot;
2052     PCIBus *bus;
2053 
2054     if (!nd) {
2055         return false;
2056     }
2057 
2058     if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
2059         error_report("Invalid PCI device address %s for device %s",
2060                      devaddr, model);
2061         exit(1);
2062     }
2063 
2064     if (dom != 0) {
2065         error_report("No support for non-zero PCI domains");
2066         exit(1);
2067     }
2068 
2069     devfn = PCI_DEVFN(slot, 0);
2070 
2071     bus = pci_find_bus_nr(rootbus, busnr);
2072     if (!bus) {
2073         error_report("Invalid PCI device address %s for device %s",
2074                      devaddr, model);
2075         exit(1);
2076     }
2077 
2078     pci_dev = pci_new(devfn, model);
2079     qdev_set_nic_properties(&pci_dev->qdev, nd);
2080     pci_realize_and_unref(pci_dev, bus, &error_fatal);
2081     return true;
2082 }
2083 
pci_vga_init(PCIBus * bus)2084 PCIDevice *pci_vga_init(PCIBus *bus)
2085 {
2086     vga_interface_created = true;
2087     switch (vga_interface_type) {
2088     case VGA_CIRRUS:
2089         return pci_create_simple(bus, -1, "cirrus-vga");
2090     case VGA_QXL:
2091         return pci_create_simple(bus, -1, "qxl-vga");
2092     case VGA_STD:
2093         return pci_create_simple(bus, -1, "VGA");
2094     case VGA_VMWARE:
2095         return pci_create_simple(bus, -1, "vmware-svga");
2096     case VGA_VIRTIO:
2097         return pci_create_simple(bus, -1, "virtio-vga");
2098     case VGA_NONE:
2099     default: /* Other non-PCI types. Checking for unsupported types is already
2100                 done in vl.c. */
2101         return NULL;
2102     }
2103 }
2104 
2105 /* Whether a given bus number is in range of the secondary
2106  * bus of the given bridge device. */
pci_secondary_bus_in_range(PCIDevice * dev,int bus_num)2107 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
2108 {
2109     return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
2110              PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
2111         dev->config[PCI_SECONDARY_BUS] <= bus_num &&
2112         bus_num <= dev->config[PCI_SUBORDINATE_BUS];
2113 }
2114 
2115 /* Whether a given bus number is in range of a root bus */
pci_root_bus_in_range(PCIBus * bus,int bus_num)2116 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
2117 {
2118     int i;
2119 
2120     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
2121         PCIDevice *dev = bus->devices[i];
2122 
2123         if (dev && IS_PCI_BRIDGE(dev)) {
2124             if (pci_secondary_bus_in_range(dev, bus_num)) {
2125                 return true;
2126             }
2127         }
2128     }
2129 
2130     return false;
2131 }
2132 
pci_find_bus_nr(PCIBus * bus,int bus_num)2133 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
2134 {
2135     PCIBus *sec;
2136 
2137     if (!bus) {
2138         return NULL;
2139     }
2140 
2141     if (pci_bus_num(bus) == bus_num) {
2142         return bus;
2143     }
2144 
2145     /* Consider all bus numbers in range for the host pci bridge. */
2146     if (!pci_bus_is_root(bus) &&
2147         !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
2148         return NULL;
2149     }
2150 
2151     /* try child bus */
2152     for (; bus; bus = sec) {
2153         QLIST_FOREACH(sec, &bus->child, sibling) {
2154             if (pci_bus_num(sec) == bus_num) {
2155                 return sec;
2156             }
2157             /* PXB buses assumed to be children of bus 0 */
2158             if (pci_bus_is_root(sec)) {
2159                 if (pci_root_bus_in_range(sec, bus_num)) {
2160                     break;
2161                 }
2162             } else {
2163                 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
2164                     break;
2165                 }
2166             }
2167         }
2168     }
2169 
2170     return NULL;
2171 }
2172 
pci_for_each_bus_depth_first(PCIBus * bus,pci_bus_ret_fn begin,pci_bus_fn end,void * parent_state)2173 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
2174                                   pci_bus_fn end, void *parent_state)
2175 {
2176     PCIBus *sec;
2177     void *state;
2178 
2179     if (!bus) {
2180         return;
2181     }
2182 
2183     if (begin) {
2184         state = begin(bus, parent_state);
2185     } else {
2186         state = parent_state;
2187     }
2188 
2189     QLIST_FOREACH(sec, &bus->child, sibling) {
2190         pci_for_each_bus_depth_first(sec, begin, end, state);
2191     }
2192 
2193     if (end) {
2194         end(bus, state);
2195     }
2196 }
2197 
2198 
pci_find_device(PCIBus * bus,int bus_num,uint8_t devfn)2199 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
2200 {
2201     bus = pci_find_bus_nr(bus, bus_num);
2202 
2203     if (!bus)
2204         return NULL;
2205 
2206     return bus->devices[devfn];
2207 }
2208 
2209 #define ONBOARD_INDEX_MAX (16 * 1024 - 1)
2210 
pci_qdev_realize(DeviceState * qdev,Error ** errp)2211 static void pci_qdev_realize(DeviceState *qdev, Error **errp)
2212 {
2213     PCIDevice *pci_dev = (PCIDevice *)qdev;
2214     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
2215     ObjectClass *klass = OBJECT_CLASS(pc);
2216     Error *local_err = NULL;
2217     bool is_default_rom;
2218     uint16_t class_id;
2219 
2220     /*
2221      * capped by systemd (see: udev-builtin-net_id.c)
2222      * as it's the only known user, honor its limit to keep users from
2223      * misconfiguring QEMU and then wondering why acpi-index doesn't work
2224      */
2225     if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
2226         error_setg(errp, "acpi-index should be less than or equal to %u",
2227                    ONBOARD_INDEX_MAX);
2228         return;
2229     }
2230 
2231     /*
2232      * make sure that acpi-index is unique across all present PCI devices
2233      */
2234     if (pci_dev->acpi_index) {
2235         GSequence *used_indexes = pci_acpi_index_list();
2236 
2237         if (g_sequence_lookup(used_indexes,
2238                               GINT_TO_POINTER(pci_dev->acpi_index),
2239                               g_cmp_uint32, NULL)) {
2240             error_setg(errp, "a PCI device with acpi-index = %" PRIu32
2241                        " already exists", pci_dev->acpi_index);
2242             return;
2243         }
2244         g_sequence_insert_sorted(used_indexes,
2245                                  GINT_TO_POINTER(pci_dev->acpi_index),
2246                                  g_cmp_uint32, NULL);
2247     }
2248 
2249     if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
2250         error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
2251         return;
2252     }
2253 
2254     /* Initialize cap_present for pci_is_express() and pci_config_size().
2255      * Note that the bit is not set automatically for hybrid PCI/PCIe devices;
2256      * they need to manage QEMU_PCI_CAP_EXPRESS manually */
2257     if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
2258        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
2259         pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
2260     }
2261 
2262     if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
2263         pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
2264     }
2265 
2266     pci_dev = do_pci_register_device(pci_dev,
2267                                      object_get_typename(OBJECT(qdev)),
2268                                      pci_dev->devfn, errp);
2269     if (pci_dev == NULL)
2270         return;
2271 
2272     if (pc->realize) {
2273         pc->realize(pci_dev, &local_err);
2274         if (local_err) {
2275             error_propagate(errp, local_err);
2276             do_pci_unregister_device(pci_dev);
2277             return;
2278         }
2279     }
2280 
2281     if (!pcie_sriov_register_device(pci_dev, errp)) {
2282         pci_qdev_unrealize(DEVICE(pci_dev));
2283         return;
2284     }
2285 
2286     /*
2287      * A PCIe Downstream Port that does not have ARI Forwarding enabled must
2288      * associate only Device 0 with the device attached to the bus
2289      * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
2290      * sec 7.3.1).
2291      * With ARI, PCI_SLOT() can return a non-zero value because the traditional
2292      * 5-bit Device Number and 3-bit Function Number fields in its associated
2293      * Routing IDs, Requester IDs and Completer IDs are interpreted as a
2294      * single 8-bit Function Number. Hence, ignore ARI capable devices.
2295      */
2296     if (pci_is_express(pci_dev) &&
2297         !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
2298         pcie_has_upstream_port(pci_dev) &&
2299         PCI_SLOT(pci_dev->devfn)) {
2300         warn_report("PCI: slot %d is not valid for %s,"
2301                     " parent device only allows plugging into slot 0.",
2302                     PCI_SLOT(pci_dev->devfn), pci_dev->name);
2303     }
2304 
2305     if (pci_dev->failover_pair_id) {
2306         if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
2307             error_setg(errp, "failover primary device must be on "
2308                              "PCIExpress bus");
2309             pci_qdev_unrealize(DEVICE(pci_dev));
2310             return;
2311         }
2312         class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
2313         if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
2314             error_setg(errp, "failover primary device is not an "
2315                              "Ethernet device");
2316             pci_qdev_unrealize(DEVICE(pci_dev));
2317             return;
2318         }
2319         if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
2320             || (PCI_FUNC(pci_dev->devfn) != 0)) {
2321             error_setg(errp, "failover: primary device must be in its own "
2322                               "PCI slot");
2323             pci_qdev_unrealize(DEVICE(pci_dev));
2324             return;
2325         }
2326         qdev->allow_unplug_during_migration = true;
2327     }
2328 
2329     /* rom loading */
2330     is_default_rom = false;
2331     if (pci_dev->romfile == NULL && pc->romfile != NULL) {
2332         pci_dev->romfile = g_strdup(pc->romfile);
2333         is_default_rom = true;
2334     }
2335 
2336     pci_add_option_rom(pci_dev, is_default_rom, &local_err);
2337     if (local_err) {
2338         error_propagate(errp, local_err);
2339         pci_qdev_unrealize(DEVICE(pci_dev));
2340         return;
2341     }
2342 
2343     pci_set_power(pci_dev, true);
2344 
2345     pci_dev->msi_trigger = pci_msi_trigger;
2346 }
2347 
pci_new_internal(int devfn,bool multifunction,const char * name)2348 static PCIDevice *pci_new_internal(int devfn, bool multifunction,
2349                                    const char *name)
2350 {
2351     DeviceState *dev;
2352 
2353     dev = qdev_new(name);
2354     qdev_prop_set_int32(dev, "addr", devfn);
2355     qdev_prop_set_bit(dev, "multifunction", multifunction);
2356     return PCI_DEVICE(dev);
2357 }
2358 
pci_new_multifunction(int devfn,const char * name)2359 PCIDevice *pci_new_multifunction(int devfn, const char *name)
2360 {
2361     return pci_new_internal(devfn, true, name);
2362 }
2363 
pci_new(int devfn,const char * name)2364 PCIDevice *pci_new(int devfn, const char *name)
2365 {
2366     return pci_new_internal(devfn, false, name);
2367 }
2368 
pci_realize_and_unref(PCIDevice * dev,PCIBus * bus,Error ** errp)2369 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
2370 {
2371     return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
2372 }
2373 
pci_create_simple_multifunction(PCIBus * bus,int devfn,const char * name)2374 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
2375                                            const char *name)
2376 {
2377     PCIDevice *dev = pci_new_multifunction(devfn, name);
2378     pci_realize_and_unref(dev, bus, &error_fatal);
2379     return dev;
2380 }
2381 
pci_create_simple(PCIBus * bus,int devfn,const char * name)2382 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
2383 {
2384     PCIDevice *dev = pci_new(devfn, name);
2385     pci_realize_and_unref(dev, bus, &error_fatal);
2386     return dev;
2387 }
2388 
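/*
 * For illustration, typical board-code usage (not from this file): machine
 * init code creates simple on-board functions with either a fixed devfn or
 * -1 for the next free slot, e.g.
 *
 *     pci_create_simple(pci_bus, PCI_DEVFN(3, 0), "e1000");
 *     pci_create_simple(pci_bus, -1, "piix3-usb-uhci");
 *
 * Both calls exit via &error_fatal if realize fails.
 */
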
pci_find_space(PCIDevice * pdev,uint8_t size)2389 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
2390 {
2391     int offset = PCI_CONFIG_HEADER_SIZE;
2392     int i;
2393     for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
2394         if (pdev->used[i])
2395             offset = i + 1;
2396         else if (i - offset + 1 == size)
2397             return offset;
2398     }
2399     return 0;
2400 }
2401 
pci_find_capability_list(PCIDevice * pdev,uint8_t cap_id,uint8_t * prev_p)2402 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
2403                                         uint8_t *prev_p)
2404 {
2405     uint8_t next, prev;
2406 
2407     if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
2408         return 0;
2409 
2410     for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2411          prev = next + PCI_CAP_LIST_NEXT)
2412         if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
2413             break;
2414 
2415     if (prev_p)
2416         *prev_p = prev;
2417     return next;
2418 }
2419 
pci_find_capability_at_offset(PCIDevice * pdev,uint8_t offset)2420 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
2421 {
2422     uint8_t next, prev, found = 0;
2423 
2424     if (!(pdev->used[offset])) {
2425         return 0;
2426     }
2427 
2428     assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);
2429 
2430     for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2431          prev = next + PCI_CAP_LIST_NEXT) {
2432         if (next <= offset && next > found) {
2433             found = next;
2434         }
2435     }
2436     return found;
2437 }
2438 
2439 /* Patch the PCI vendor and device ids in a PCI rom image if necessary.
2440    This is needed for an option rom which is used for more than one device. */
pci_patch_ids(PCIDevice * pdev,uint8_t * ptr,uint32_t size)2441 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
2442 {
2443     uint16_t vendor_id;
2444     uint16_t device_id;
2445     uint16_t rom_vendor_id;
2446     uint16_t rom_device_id;
2447     uint16_t rom_magic;
2448     uint16_t pcir_offset;
2449     uint8_t checksum;
2450 
2451     /* Words in rom data are little endian (like in PCI configuration),
2452        so they can be read / written with pci_get_word / pci_set_word. */
2453 
2454     /* Only a valid rom will be patched. */
2455     rom_magic = pci_get_word(ptr);
2456     if (rom_magic != 0xaa55) {
2457         trace_pci_bad_rom_magic(rom_magic, 0xaa55);
2458         return;
2459     }
2460     pcir_offset = pci_get_word(ptr + 0x18);
2461     if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
2462         trace_pci_bad_pcir_offset(pcir_offset);
2463         return;
2464     }
2465 
2466     vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2467     device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2468     rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
2469     rom_device_id = pci_get_word(ptr + pcir_offset + 6);
2470 
2471     trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id,
2472                               rom_vendor_id, rom_device_id);
2473 
2474     checksum = ptr[6];
2475 
2476     if (vendor_id != rom_vendor_id) {
2477         /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
2478         checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
2479         checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
2480         trace_pci_rom_checksum_change(ptr[6], checksum);
2481         ptr[6] = checksum;
2482         pci_set_word(ptr + pcir_offset + 4, vendor_id);
2483     }
2484 
2485     if (device_id != rom_device_id) {
2486         /* Patch device id and checksum (at offset 6 for etherboot roms). */
2487         checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
2488         checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
2489         trace_pci_rom_checksum_change(ptr[6], checksum);
2490         ptr[6] = checksum;
2491         pci_set_word(ptr + pcir_offset + 6, device_id);
2492     }
2493 }
2494 
2495 /* Add an option rom for the device */
pci_add_option_rom(PCIDevice * pdev,bool is_default_rom,Error ** errp)2496 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
2497                                Error **errp)
2498 {
2499     int64_t size = 0;
2500     g_autofree char *path = NULL;
2501     char name[32];
2502     const VMStateDescription *vmsd;
2503 
2504     /*
2505      * In case of incoming migration the ROM will come with the migration
2506      * stream, so there is no reason to load the file.  Nor do we want to fail
2507      * if the local ROM file mismatches the specified romsize.
2508      */
2509     bool load_file = !runstate_check(RUN_STATE_INMIGRATE);
2510 
2511     if (!pdev->romfile || !strlen(pdev->romfile)) {
2512         return;
2513     }
2514 
2515     if (!pdev->rom_bar) {
2516         /*
2517          * Load rom via fw_cfg instead of creating a rom bar,
2518          * for 0.11 compatibility.
2519          */
2520         int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
2521 
2522         /*
2523          * Hot-plugged devices can't use the option ROM
2524          * if the rom bar is disabled.
2525          */
2526         if (DEVICE(pdev)->hotplugged) {
2527             error_setg(errp, "Hot-plugged device without ROM bar"
2528                        " can't have an option ROM");
2529             return;
2530         }
2531 
2532         if (class == 0x0300) {
2533             rom_add_vga(pdev->romfile);
2534         } else {
2535             rom_add_option(pdev->romfile, -1);
2536         }
2537         return;
2538     }
2539 
2540     if (pci_is_vf(pdev)) {
2541         if (pdev->rom_bar > 0) {
2542             error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF");
2543         }
2544 
2545         return;
2546     }
2547 
2548     if (load_file || pdev->romsize == UINT32_MAX) {
2549         path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
2550         if (path == NULL) {
2551             path = g_strdup(pdev->romfile);
2552         }
2553 
2554         size = get_image_size(path);
2555         if (size < 0) {
2556             error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
2557             return;
2558         } else if (size == 0) {
2559             error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
2560             return;
2561         } else if (size > 2 * GiB) {
2562             error_setg(errp,
2563                        "romfile \"%s\" too large (size cannot exceed 2 GiB)",
2564                        pdev->romfile);
2565             return;
2566         }
2567         if (pdev->romsize != UINT32_MAX) {
2568             if (size > pdev->romsize) {
2569                 error_setg(errp, "romfile \"%s\" (%u bytes) "
2570                            "is too large for ROM size %u",
2571                            pdev->romfile, (uint32_t)size, pdev->romsize);
2572                 return;
2573             }
2574         } else {
2575             pdev->romsize = pow2ceil(size);
2576         }
2577     }
2578 
2579     vmsd = qdev_get_vmsd(DEVICE(pdev));
2580     snprintf(name, sizeof(name), "%s.rom",
2581              vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));
2582 
2583     pdev->has_rom = true;
2584     memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
2585                            &error_fatal);
2586 
2587     if (load_file) {
2588         void *ptr = memory_region_get_ram_ptr(&pdev->rom);
2589 
2590         if (load_image_size(path, ptr, size) < 0) {
2591             error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
2592             return;
2593         }
2594 
2595         if (is_default_rom) {
2596             /* Only the default rom images will be patched (if needed). */
2597             pci_patch_ids(pdev, ptr, size);
2598         }
2599     }
2600 
2601     pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
2602 }
2603 
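/*
 * For illustration, how the romfile/romsize/rombar properties handled above
 * are typically used on the command line: "-device e1000,romfile=efi-e1000.rom"
 * gives a ROM BAR sized to the next power of two of the file, an explicit
 * "romsize=0x40000" forces a larger power-of-two BAR, and "rombar=0" loads
 * the image via fw_cfg instead (which, as checked above, is not allowed for
 * hot-plugged devices).
 */
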
pci_del_option_rom(PCIDevice * pdev)2604 static void pci_del_option_rom(PCIDevice *pdev)
2605 {
2606     if (!pdev->has_rom)
2607         return;
2608 
2609     vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
2610     pdev->has_rom = false;
2611 }
2612 
2613 /*
2614  * On success, pci_add_capability() returns a positive value
2615  * that is the offset of the pci capability.
2616  * On failure, it sets an error and returns a negative error
2617  * code.
2618  */
pci_add_capability(PCIDevice * pdev,uint8_t cap_id,uint8_t offset,uint8_t size,Error ** errp)2619 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
2620                        uint8_t offset, uint8_t size,
2621                        Error **errp)
2622 {
2623     uint8_t *config;
2624     int i, overlapping_cap;
2625 
2626     if (!offset) {
2627         offset = pci_find_space(pdev, size);
2628         /* out of PCI config space is programming error */
2629         assert(offset);
2630     } else {
2631         /* Verify that capabilities don't overlap.  Note: device assignment
2632          * depends on this check to verify that the device is not broken.
2633          * Should never trigger for emulated devices, but it's helpful
2634          * for debugging these. */
2635         for (i = offset; i < offset + size; i++) {
2636             overlapping_cap = pci_find_capability_at_offset(pdev, i);
2637             if (overlapping_cap) {
2638                 error_setg(errp, "%s:%02x:%02x.%x "
2639                            "Attempt to add PCI capability %x at offset "
2640                            "%x overlaps existing capability %x at offset %x",
2641                            pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
2642                            PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2643                            cap_id, offset, overlapping_cap, i);
2644                 return -EINVAL;
2645             }
2646         }
2647     }
2648 
2649     config = pdev->config + offset;
2650     config[PCI_CAP_LIST_ID] = cap_id;
2651     config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
2652     pdev->config[PCI_CAPABILITY_LIST] = offset;
2653     pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2654     memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
2655     /* Make capability read-only by default */
2656     memset(pdev->wmask + offset, 0, size);
2657     /* Check capability by default */
2658     memset(pdev->cmask + offset, 0xFF, size);
2659     return offset;
2660 }
2661 
2662 /* Unlink capability from the pci config space. */
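/*
 * For illustration, a sketch (not from this file) of adding a 16-byte
 * vendor-specific capability at the first free offset and making part of its
 * body guest-writable; error handling follows the convention above:
 *
 *     int cap = pci_add_capability(pdev, PCI_CAP_ID_VNDR, 0, 0x10, errp);
 *     if (cap < 0) {
 *         return;
 *     }
 *     memset(pdev->wmask + cap + 4, 0xff, 4);
 */
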
pci_del_capability(PCIDevice * pdev,uint8_t cap_id,uint8_t size)2663 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
2664 {
2665     uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);
2666     if (!offset)
2667         return;
2668     pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
2669     /* Make capability writable again */
2670     memset(pdev->wmask + offset, 0xff, size);
2671     memset(pdev->w1cmask + offset, 0, size);
2672     /* Clear cmask as device-specific registers can't be checked */
2673     memset(pdev->cmask + offset, 0, size);
2674     memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));
2675 
2676     if (!pdev->config[PCI_CAPABILITY_LIST])
2677         pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
2678 }
2679 
pci_find_capability(PCIDevice * pdev,uint8_t cap_id)2680 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
2681 {
2682     return pci_find_capability_list(pdev, cap_id, NULL);
2683 }
2684 
pci_dev_fw_name(DeviceState * dev,char * buf,int len)2685 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
2686 {
2687     PCIDevice *d = (PCIDevice *)dev;
2688     const char *name = NULL;
2689     const pci_class_desc *desc =  pci_class_descriptions;
2690     int class = pci_get_word(d->config + PCI_CLASS_DEVICE);
2691 
2692     while (desc->desc &&
2693           (class & ~desc->fw_ign_bits) !=
2694           (desc->class & ~desc->fw_ign_bits)) {
2695         desc++;
2696     }
2697 
2698     if (desc->desc) {
2699         name = desc->fw_name;
2700     }
2701 
2702     if (name) {
2703         pstrcpy(buf, len, name);
2704     } else {
2705         snprintf(buf, len, "pci%04x,%04x",
2706                  pci_get_word(d->config + PCI_VENDOR_ID),
2707                  pci_get_word(d->config + PCI_DEVICE_ID));
2708     }
2709 
2710     return buf;
2711 }
2712 
pcibus_get_fw_dev_path(DeviceState * dev)2713 static char *pcibus_get_fw_dev_path(DeviceState *dev)
2714 {
2715     PCIDevice *d = (PCIDevice *)dev;
2716     char name[33];
2717     int has_func = !!PCI_FUNC(d->devfn);
2718 
2719     return g_strdup_printf("%s@%x%s%.*x",
2720                            pci_dev_fw_name(dev, name, sizeof(name)),
2721                            PCI_SLOT(d->devfn),
2722                            has_func ? "," : "",
2723                            has_func,
2724                            PCI_FUNC(d->devfn));
2725 }
2726 
pcibus_get_dev_path(DeviceState * dev)2727 static char *pcibus_get_dev_path(DeviceState *dev)
2728 {
2729     PCIDevice *d = container_of(dev, PCIDevice, qdev);
2730     PCIDevice *t;
2731     int slot_depth;
2732     /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
2733      * 00 is added here to make this format compatible with
2734      * domain:Bus:Slot.Func for systems without nested PCI bridges.
2735      * Slot.Function list specifies the slot and function numbers for all
2736      * devices on the path from root to the specific device. */
2737     const char *root_bus_path;
2738     int root_bus_len;
2739     char slot[] = ":SS.F";
2740     int slot_len = sizeof slot - 1 /* For '\0' */;
2741     int path_len;
2742     char *path, *p;
2743     int s;
2744 
2745     root_bus_path = pci_root_bus_path(d);
2746     root_bus_len = strlen(root_bus_path);
2747 
2748     /* Calculate # of slots on path between device and root. */
2749     slot_depth = 0;
2750     for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2751         ++slot_depth;
2752     }
2753 
2754     path_len = root_bus_len + slot_len * slot_depth;
2755 
2756     /* Allocate memory, fill in the terminating null byte. */
2757     path = g_malloc(path_len + 1 /* For '\0' */);
2758     path[path_len] = '\0';
2759 
2760     memcpy(path, root_bus_path, root_bus_len);
2761 
2762     /* Fill in slot numbers. We walk up from device to root, so need to print
2763      * them in the reverse order, last to first. */
2764     p = path + path_len;
2765     for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2766         p -= slot_len;
2767         s = snprintf(slot, sizeof slot, ":%02x.%x",
2768                      PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
2769         assert(s == slot_len);
2770         memcpy(p, slot, slot_len);
2771     }
2772 
2773     return path;
2774 }
2775 
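/*
 * Example outputs of the format described above, assuming the usual
 * "0000:00" root bus path: a device at 00:03.0 on the root bus yields
 * "0000:00:03.0", while a function at slot 5 behind a bridge in slot 0x1e
 * yields "0000:00:1e.0:05.0".
 */
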
pci_qdev_find_recursive(PCIBus * bus,const char * id,PCIDevice ** pdev)2776 static int pci_qdev_find_recursive(PCIBus *bus,
2777                                    const char *id, PCIDevice **pdev)
2778 {
2779     DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
2780     if (!qdev) {
2781         return -ENODEV;
2782     }
2783 
2784     /* roughly check if given qdev is pci device */
2785     if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
2786         *pdev = PCI_DEVICE(qdev);
2787         return 0;
2788     }
2789     return -EINVAL;
2790 }
2791 
pci_qdev_find_device(const char * id,PCIDevice ** pdev)2792 int pci_qdev_find_device(const char *id, PCIDevice **pdev)
2793 {
2794     PCIHostState *host_bridge;
2795     int rc = -ENODEV;
2796 
2797     QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
2798         int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
2799         if (!tmp) {
2800             rc = 0;
2801             break;
2802         }
2803         if (tmp != -ENODEV) {
2804             rc = tmp;
2805         }
2806     }
2807 
2808     return rc;
2809 }
2810 
pci_address_space(PCIDevice * dev)2811 MemoryRegion *pci_address_space(PCIDevice *dev)
2812 {
2813     return pci_get_bus(dev)->address_space_mem;
2814 }
2815 
pci_address_space_io(PCIDevice * dev)2816 MemoryRegion *pci_address_space_io(PCIDevice *dev)
2817 {
2818     return pci_get_bus(dev)->address_space_io;
2819 }
2820 
pci_device_class_init(ObjectClass * klass,const void * data)2821 static void pci_device_class_init(ObjectClass *klass, const void *data)
2822 {
2823     DeviceClass *k = DEVICE_CLASS(klass);
2824 
2825     k->realize = pci_qdev_realize;
2826     k->unrealize = pci_qdev_unrealize;
2827     k->bus_type = TYPE_PCI_BUS;
2828     device_class_set_props(k, pci_props);
2829     object_class_property_set_description(
2830         klass, "x-max-bounce-buffer-size",
2831         "Maximum buffer size allocated for bounce buffers used for mapped "
2832         "access to indirect DMA memory");
2833 }
2834 
pci_device_class_base_init(ObjectClass * klass,const void * data)2835 static void pci_device_class_base_init(ObjectClass *klass, const void *data)
2836 {
2837     if (!object_class_is_abstract(klass)) {
2838         ObjectClass *conventional =
2839             object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
2840         ObjectClass *pcie =
2841             object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
2842         ObjectClass *cxl =
2843             object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
2844         assert(conventional || pcie || cxl);
2845     }
2846 }
2847 
2848 /*
2849  * Get IOMMU root bus, aliased bus and devfn of a PCI device
2850  *
2851  * IOMMU root bus is needed by all call sites to call into iommu_ops.
2852  * For call sites which don't need aliased BDF, passing NULL to
2853  * aliased_[bus|devfn] is allowed.
2854  *
2855  * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
2856  *
2857  * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
2858  *
2859  * @aliased_devfn: return aliased devfn of the PCI device, optional.
2860  */
pci_device_get_iommu_bus_devfn(PCIDevice * dev,PCIBus ** piommu_bus,PCIBus ** aliased_bus,int * aliased_devfn)2861 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
2862                                            PCIBus **piommu_bus,
2863                                            PCIBus **aliased_bus,
2864                                            int *aliased_devfn)
2865 {
2866     PCIBus *bus = pci_get_bus(dev);
2867     PCIBus *iommu_bus = bus;
2868     int devfn = dev->devfn;
2869 
2870     while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
2871         PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);
2872 
2873         /*
2874          * The requester ID of the provided device may be aliased, as seen from
2875          * the IOMMU, due to topology limitations.  The IOMMU relies on a
2876          * requester ID to provide a unique AddressSpace for devices, but
2877          * conventional PCI buses pre-date such concepts.  Instead, the PCIe-
2878          * to-PCI bridge creates and accepts transactions on behalf of down-
2879          * stream devices.  When doing so, all downstream devices are masked
2880          * (aliased) behind a single requester ID.  The requester ID used
2881          * depends on the format of the bridge devices.  Proper PCIe-to-PCI
2882          * bridges, with a PCIe capability indicating such, follow the
2883          * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
2884          * where the bridge uses the secondary bus as the bridge portion of the
2885          * requester ID and devfn of 00.0.  For other bridges, typically those
2886          * found on the root complex such as the dmi-to-pci-bridge, we follow
2887          * the convention of typical bare-metal hardware, which uses the
2888          * requester ID of the bridge itself.  There are device specific
2889          * exceptions to these rules, but these are the defaults that the
2890          * Linux kernel uses when determining DMA aliases itself and believed
2891          * to be true for the bare metal equivalents of the devices emulated
2892          * in QEMU.
2893          */
2894         if (!pci_bus_is_express(iommu_bus)) {
2895             PCIDevice *parent = iommu_bus->parent_dev;
2896 
2897             if (pci_is_express(parent) &&
2898                 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
2899                 devfn = PCI_DEVFN(0, 0);
2900                 bus = iommu_bus;
2901             } else {
2902                 devfn = parent->devfn;
2903                 bus = parent_bus;
2904             }
2905         }
2906 
2907         iommu_bus = parent_bus;
2908     }
2909 
2910     assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
2911     assert(iommu_bus);
2912 
2913     if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
2914         iommu_bus = NULL;
2915     }
2916 
2917     *piommu_bus = iommu_bus;
2918 
2919     if (aliased_bus) {
2920         *aliased_bus = bus;
2921     }
2922 
2923     if (aliased_devfn) {
2924         *aliased_devfn = devfn;
2925     }
2926 }
2927 
pci_device_iommu_address_space(PCIDevice * dev)2928 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
2929 {
2930     PCIBus *bus;
2931     PCIBus *iommu_bus;
2932     int devfn;
2933 
2934     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
2935     if (iommu_bus) {
2936         return iommu_bus->iommu_ops->get_address_space(bus,
2937                                  iommu_bus->iommu_opaque, devfn);
2938     }
2939     return &address_space_memory;
2940 }
2941 
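/*
 * Note that device models normally do not call this directly: DMA goes
 * through helpers such as pci_dma_read()/pci_dma_write(), which use the
 * per-device bus-master address space built on top of the address space
 * returned here, e.g.
 *
 *     pci_dma_read(dev, desc_addr, &desc, sizeof(desc));
 *
 * where desc_addr and desc are placeholders for a device-specific descriptor.
 */
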
pci_iommu_init_iotlb_notifier(PCIDevice * dev,IOMMUNotifier * n,IOMMUNotify fn,void * opaque)2942 int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n,
2943                                   IOMMUNotify fn, void *opaque)
2944 {
2945     PCIBus *bus;
2946     PCIBus *iommu_bus;
2947     int devfn;
2948 
2949     pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
2950     if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) {
2951         iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque,
2952                                                   devfn, n, fn, opaque);
2953         return 0;
2954     }
2955 
2956     return -ENODEV;
2957 }
2958 
pci_device_set_iommu_device(PCIDevice * dev,HostIOMMUDevice * hiod,Error ** errp)2959 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
2960                                  Error **errp)
2961 {
2962     PCIBus *iommu_bus, *aliased_bus;
2963     int aliased_devfn;
2964 
2965     /* set_iommu_device requires device's direct BDF instead of aliased BDF */
2966     pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
2967                                    &aliased_bus, &aliased_devfn);
2968     if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
2969         hiod->aliased_bus = aliased_bus;
2970         hiod->aliased_devfn = aliased_devfn;
2971         return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
2972                                                       iommu_bus->iommu_opaque,
2973                                                       dev->devfn, hiod, errp);
2974     }
2975     return true;
2976 }
2977 
pci_device_unset_iommu_device(PCIDevice * dev)2978 void pci_device_unset_iommu_device(PCIDevice *dev)
2979 {
2980     PCIBus *iommu_bus;
2981 
2982     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
2983     if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
2984         return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
2985                                                         iommu_bus->iommu_opaque,
2986                                                         dev->devfn);
2987     }
2988 }
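
/*
 * Pairing sketch (illustrative only; my_create_host_iommu_device() is a
 * hypothetical helper, not a QEMU API): a pass-through backend such as VFIO
 * attaches a HostIOMMUDevice while realizing the device and detaches it
 * again on unrealize:
 *
 *     HostIOMMUDevice *hiod = my_create_host_iommu_device(backend);
 *
 *     if (!pci_device_set_iommu_device(pdev, hiod, errp)) {
 *         return;    (the vIOMMU rejected the attachment)
 *     }
 *
 *     pci_device_unset_iommu_device(pdev);    (on unrealize/teardown)
 */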
2989 
pci_pri_request_page(PCIDevice * dev,uint32_t pasid,bool priv_req,bool exec_req,hwaddr addr,bool lpig,uint16_t prgi,bool is_read,bool is_write)2990 int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req,
2991                          bool exec_req, hwaddr addr, bool lpig,
2992                          uint16_t prgi, bool is_read, bool is_write)
2993 {
2994     PCIBus *bus;
2995     PCIBus *iommu_bus;
2996     int devfn;
2997 
2998     if (!dev->is_master ||
2999             ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
3000         return -EPERM;
3001     }
3002 
3003     if (!pcie_pri_enabled(dev)) {
3004         return -EPERM;
3005     }
3006 
3007     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3008     if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) {
3009         return iommu_bus->iommu_ops->pri_request_page(bus,
3010                                                      iommu_bus->iommu_opaque,
3011                                                      devfn, pasid, priv_req,
3012                                                      exec_req, addr, lpig, prgi,
3013                                                      is_read, is_write);
3014     }
3015 
3016     return -ENODEV;
3017 }
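
/*
 * Usage sketch (illustrative only): a PRI-capable device model that took a
 * translation fault can ask the vIOMMU to make the page resident.  The
 * pasid, fault_addr and prgi values are made up for the example; the
 * response comes back through the notifier registered below.
 *
 *     // priv_req=false, exec_req=false, lpig=true, read+write access
 *     ret = pci_pri_request_page(pdev, pasid, false, false,
 *                                fault_addr, true, prgi, true, true);
 *     if (ret < 0) {
 *         ... PRI not enabled, or no vIOMMU support ...
 *     }
 */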
3018 
pci_pri_register_notifier(PCIDevice * dev,uint32_t pasid,IOMMUPRINotifier * notifier)3019 int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid,
3020                               IOMMUPRINotifier *notifier)
3021 {
3022     PCIBus *bus;
3023     PCIBus *iommu_bus;
3024     int devfn;
3025 
3026     if (!dev->is_master ||
3027             ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
3028         return -EPERM;
3029     }
3030 
3031     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3032     if (iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) {
3033         iommu_bus->iommu_ops->pri_register_notifier(bus,
3034                                                     iommu_bus->iommu_opaque,
3035                                                     devfn, pasid, notifier);
3036         return 0;
3037     }
3038 
3039     return -ENODEV;
3040 }
3041 
pci_pri_unregister_notifier(PCIDevice * dev,uint32_t pasid)3042 void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid)
3043 {
3044     PCIBus *bus;
3045     PCIBus *iommu_bus;
3046     int devfn;
3047 
3048     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3049     if (iommu_bus && iommu_bus->iommu_ops->pri_unregister_notifier) {
3050         iommu_bus->iommu_ops->pri_unregister_notifier(bus,
3051                                                       iommu_bus->iommu_opaque,
3052                                                       devfn, pasid);
3053     }
3054 }
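
/*
 * Sketch of the PRI notifier lifecycle (illustrative only; the exact layout
 * of IOMMUPRINotifier/IOMMUPRIResponse is an assumption here, check the
 * definitions in the headers):
 *
 *     static void my_pri_notify(IOMMUPRINotifier *n, IOMMUPRIResponse *resp)
 *     {
 *         // match resp against the outstanding PRG index and resume or
 *         // fail the stalled request
 *     }
 *
 *     static IOMMUPRINotifier pri_notifier = { .notify = my_pri_notify };
 *
 *     pci_pri_register_notifier(pdev, pasid, &pri_notifier);
 *     ...
 *     pci_pri_unregister_notifier(pdev, pasid);
 */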
3055 
pci_ats_request_translation(PCIDevice * dev,uint32_t pasid,bool priv_req,bool exec_req,hwaddr addr,size_t length,bool no_write,IOMMUTLBEntry * result,size_t result_length,uint32_t * err_count)3056 ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid,
3057                                     bool priv_req, bool exec_req,
3058                                     hwaddr addr, size_t length,
3059                                     bool no_write, IOMMUTLBEntry *result,
3060                                     size_t result_length,
3061                                     uint32_t *err_count)
3062 {
3063     PCIBus *bus;
3064     PCIBus *iommu_bus;
3065     int devfn;
3066 
3067     if (!dev->is_master ||
3068             ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
3069         return -EPERM;
3070     }
3071 
3072     if (result_length == 0) {
3073         return -ENOSPC;
3074     }
3075 
3076     if (!pcie_ats_enabled(dev)) {
3077         return -EPERM;
3078     }
3079 
3080     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3081     if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) {
3082         return iommu_bus->iommu_ops->ats_request_translation(bus,
3083                                                      iommu_bus->iommu_opaque,
3084                                                      devfn, pasid, priv_req,
3085                                                      exec_req, addr, length,
3086                                                      no_write, result,
3087                                                      result_length, err_count);
3088     }
3089 
3090     return -ENODEV;
3091 }
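
/*
 * Usage sketch (illustrative only): an ATS-capable device model filling its
 * device-TLB for a guest IOVA range.  The exact meaning of the return value
 * and of err_count is defined by the vIOMMU implementation; this only shows
 * the calling convention.
 *
 *     IOMMUTLBEntry results[8];
 *     uint32_t err_count = 0;
 *     ssize_t n;
 *
 *     n = pci_ats_request_translation(pdev, PCI_NO_PASID, false, false,
 *                                     iova, size, false,
 *                                     results, ARRAY_SIZE(results),
 *                                     &err_count);
 *     for (ssize_t i = 0; i < n; i++) {
 *         ... cache iova/translated_addr/addr_mask/perm of results[i] ...
 *     }
 */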
3092 
pci_iommu_register_iotlb_notifier(PCIDevice * dev,uint32_t pasid,IOMMUNotifier * n)3093 int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
3094                                       IOMMUNotifier *n)
3095 {
3096     PCIBus *bus;
3097     PCIBus *iommu_bus;
3098     int devfn;
3099 
3100     if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) {
3101         return -EPERM;
3102     }
3103 
3104     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3105     if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) {
3106         iommu_bus->iommu_ops->register_iotlb_notifier(bus,
3107                                            iommu_bus->iommu_opaque, devfn,
3108                                            pasid, n);
3109         return 0;
3110     }
3111 
3112     return -ENODEV;
3113 }
3114 
pci_iommu_unregister_iotlb_notifier(PCIDevice * dev,uint32_t pasid,IOMMUNotifier * n)3115 int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
3116                                         IOMMUNotifier *n)
3117 {
3118     PCIBus *bus;
3119     PCIBus *iommu_bus;
3120     int devfn;
3121 
3122     if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) {
3123         return -EPERM;
3124     }
3125 
3126     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3127     if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) {
3128         iommu_bus->iommu_ops->unregister_iotlb_notifier(bus,
3129                                                         iommu_bus->iommu_opaque,
3130                                                         devfn, pasid, n);
3131         return 0;
3132     }
3133 
3134     return -ENODEV;
3135 }
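
/*
 * Sketch of the per-PASID IOTLB notifier flow (illustrative only), combining
 * pci_iommu_init_iotlb_notifier() with the register/unregister helpers above:
 *
 *     static void my_iotlb_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // invalidate the device-TLB range starting at entry->iova
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     if (pci_iommu_init_iotlb_notifier(pdev, &n, my_iotlb_notify, opaque) ||
 *         pci_iommu_register_iotlb_notifier(pdev, pasid, &n)) {
 *         ... no vIOMMU, or it does not implement these hooks ...
 *     }
 *
 *     pci_iommu_unregister_iotlb_notifier(pdev, pasid, &n);
 */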
3136 
pci_iommu_get_iotlb_info(PCIDevice * dev,uint8_t * addr_width,uint32_t * min_page_size)3137 int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width,
3138                              uint32_t *min_page_size)
3139 {
3140     PCIBus *bus;
3141     PCIBus *iommu_bus;
3142     int devfn;
3143 
3144     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3145     if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) {
3146         iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque,
3147                                              addr_width, min_page_size);
3148         return 0;
3149     }
3150 
3151     return -ENODEV;
3152 }
3153 
pci_setup_iommu(PCIBus * bus,const PCIIOMMUOps * ops,void * opaque)3154 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
3155 {
3156     /*
3157      * A caller of pci_setup_iommu() must provide at least the
3158      * get_address_space() callback for the bus.
3159      */
3160     assert(ops);
3161     assert(ops->get_address_space);
3162 
3163     bus->iommu_ops = ops;
3164     bus->iommu_opaque = opaque;
3165 }
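
/*
 * Minimal sketch of wiring a vIOMMU into a root bus (illustrative only;
 * MyIOMMUState and its address-space lookup are hypothetical, a real
 * implementation such as intel-iommu keeps per-BDF AddressSpaces):
 *
 *     static AddressSpace *my_iommu_find_as(PCIBus *bus, void *opaque,
 *                                           int devfn)
 *     {
 *         MyIOMMUState *s = opaque;
 *
 *         return my_iommu_get_as(s, pci_bus_num(bus), devfn);
 *     }
 *
 *     static const PCIIOMMUOps my_iommu_ops = {
 *         .get_address_space = my_iommu_find_as,
 *     };
 *
 *     pci_setup_iommu(root_bus, &my_iommu_ops, s);
 */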
3166 
pci_dev_get_w64(PCIBus * b,PCIDevice * dev,void * opaque)3167 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
3168 {
3169     Range *range = opaque;
3170     uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
3171     int i;
3172 
3173     if (!(cmd & PCI_COMMAND_MEMORY)) {
3174         return;
3175     }
3176 
3177     if (IS_PCI_BRIDGE(dev)) {
3178         pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
3179         pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
3180 
3181         base = MAX(base, 0x1ULL << 32);
3182 
3183         if (limit >= base) {
3184             Range pref_range;
3185             range_set_bounds(&pref_range, base, limit);
3186             range_extend(range, &pref_range);
3187         }
3188     }
3189     for (i = 0; i < PCI_NUM_REGIONS; ++i) {
3190         PCIIORegion *r = &dev->io_regions[i];
3191         pcibus_t lob, upb;
3192         Range region_range;
3193 
3194         if (!r->size ||
3195             (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
3196             !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
3197             continue;
3198         }
3199 
3200         lob = pci_bar_address(dev, i, r->type, r->size);
3201         upb = lob + r->size - 1;
3202         if (lob == PCI_BAR_UNMAPPED) {
3203             continue;
3204         }
3205 
3206         lob = MAX(lob, 0x1ULL << 32);
3207 
3208         if (upb >= lob) {
3209             range_set_bounds(&region_range, lob, upb);
3210             range_extend(range, &region_range);
3211         }
3212     }
3213 }
3214 
pci_bus_get_w64_range(PCIBus * bus,Range * range)3215 void pci_bus_get_w64_range(PCIBus *bus, Range *range)
3216 {
3217     range_make_empty(range);
3218     pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
3219 }
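
/*
 * Usage sketch (illustrative only): board or ACPI code can use this to
 * discover how much 64-bit MMIO the guest/firmware actually programmed
 * above 4G, e.g. when building the _CRS for the root bridge:
 *
 *     Range w64;
 *
 *     pci_bus_get_w64_range(root_bus, &w64);
 *     if (!range_is_empty(&w64)) {
 *         uint64_t base = range_lob(&w64);
 *         uint64_t limit = range_upb(&w64);
 *         ... describe [base, limit] as a 64-bit window ...
 *     }
 */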
3220 
pcie_has_upstream_port(PCIDevice * dev)3221 static bool pcie_has_upstream_port(PCIDevice *dev)
3222 {
3223     PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
3224 
3225     /* Device associated with an upstream port.
3226      * As there are several types of these, it's easier to check the
3227      * parent device: upstream ports are always connected to
3228      * root or downstream ports.
3229      */
3230     return parent_dev &&
3231         pci_is_express(parent_dev) &&
3232         parent_dev->exp.exp_cap &&
3233         (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
3234          pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
3235 }
3236 
pci_get_function_0(PCIDevice * pci_dev)3237 PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
3238 {
3239     PCIBus *bus = pci_get_bus(pci_dev);
3240 
3241     if (pcie_has_upstream_port(pci_dev)) {
3242         /* With an upstream PCIe port, we only support 1 device at slot 0 */
3243         return bus->devices[0];
3244     } else {
3245         /* Other bus types might support multiple devices at slots 0-31 */
3246         return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
3247     }
3248 }
3249 
pci_get_msi_message(PCIDevice * dev,int vector)3250 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
3251 {
3252     MSIMessage msg;
3253     if (msix_enabled(dev)) {
3254         msg = msix_get_message(dev, vector);
3255     } else if (msi_enabled(dev)) {
3256         msg = msi_get_message(dev, vector);
3257     } else {
3258         /* Should never happen */
3259         error_report("%s: unknown interrupt type", __func__);
3260         abort();
3261     }
3262     return msg;
3263 }
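
/*
 * Usage sketch (illustrative only): callers that need the raw MSI/MSI-X
 * programming of a vector, e.g. to set up an interrupt-routing entry, can do
 * the following; note the abort() above means the caller must already know
 * that MSI or MSI-X is enabled for the device.
 *
 *     MSIMessage msg = pci_get_msi_message(pdev, vector);
 *
 *     ... route the interrupt using msg.address and msg.data ...
 */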
3264 
pci_set_power(PCIDevice * d,bool state)3265 void pci_set_power(PCIDevice *d, bool state)
3266 {
3267     /*
3268      * Don't change the enabled state of VFs when powering on/off the device.
3269      *
3270      * When powering on, VFs must not be enabled immediately but they must
3271      * wait until the guest configures SR-IOV.
3272      * When powering off, the corresponding PFs will be reset, which in
3273      * turn disables their VFs.
3274      */
3275     if (!pci_is_vf(d)) {
3276         pci_set_enabled(d, state);
3277     }
3278 }
3279 
pci_set_enabled(PCIDevice * d,bool state)3280 void pci_set_enabled(PCIDevice *d, bool state)
3281 {
3282     if (d->enabled == state) {
3283         return;
3284     }
3285 
3286     d->enabled = state;
3287     pci_update_mappings(d);
3288     pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND)
3289                       & PCI_COMMAND_MASTER) && d->enabled);
3290     if (qdev_is_realized(&d->qdev)) {
3291         pci_device_reset(d);
3292     }
3293 }
3294 
3295 static const TypeInfo pci_device_type_info = {
3296     .name = TYPE_PCI_DEVICE,
3297     .parent = TYPE_DEVICE,
3298     .instance_size = sizeof(PCIDevice),
3299     .abstract = true,
3300     .class_size = sizeof(PCIDeviceClass),
3301     .class_init = pci_device_class_init,
3302     .class_base_init = pci_device_class_base_init,
3303 };
3304 
pci_register_types(void)3305 static void pci_register_types(void)
3306 {
3307     type_register_static(&pci_bus_info);
3308     type_register_static(&pcie_bus_info);
3309     type_register_static(&cxl_bus_info);
3310     type_register_static(&conventional_pci_interface_info);
3311     type_register_static(&cxl_interface_info);
3312     type_register_static(&pcie_interface_info);
3313     type_register_static(&pci_device_type_info);
3314 }
3315 
3316 type_init(pci_register_types)
3317