xref: /qemu/hw/ppc/spapr_pci.c (revision 466e88318596a2b9e4df60ad00357f6a4dc648b0)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "qapi/error.h"
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "hw/hw.h"
30 #include "hw/sysbus.h"
31 #include "hw/pci/pci.h"
32 #include "hw/pci/msi.h"
33 #include "hw/pci/msix.h"
34 #include "hw/pci/pci_host.h"
35 #include "hw/ppc/spapr.h"
36 #include "hw/pci-host/spapr.h"
37 #include "exec/address-spaces.h"
38 #include "exec/ram_addr.h"
39 #include <libfdt.h>
40 #include "trace.h"
41 #include "qemu/error-report.h"
42 #include "qapi/qmp/qerror.h"
43 #include "hw/ppc/fdt.h"
44 #include "hw/pci/pci_bridge.h"
45 #include "hw/pci/pci_bus.h"
46 #include "hw/pci/pci_ids.h"
47 #include "hw/ppc/spapr_drc.h"
48 #include "sysemu/device_tree.h"
49 #include "sysemu/kvm.h"
50 #include "sysemu/hostmem.h"
51 #include "sysemu/numa.h"
52 
53 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
54 #define RTAS_QUERY_FN           0
55 #define RTAS_CHANGE_FN          1
56 #define RTAS_RESET_FN           2
57 #define RTAS_CHANGE_MSI_FN      3
58 #define RTAS_CHANGE_MSIX_FN     4
59 
60 /* Interrupt types to return on RTAS_CHANGE_* */
61 #define RTAS_TYPE_MSI           1
62 #define RTAS_TYPE_MSIX          2
63 
64 SpaprPhbState *spapr_pci_find_phb(SpaprMachineState *spapr, uint64_t buid)
65 {
66     SpaprPhbState *sphb;
67 
68     QLIST_FOREACH(sphb, &spapr->phbs, list) {
69         if (sphb->buid != buid) {
70             continue;
71         }
72         return sphb;
73     }
74 
75     return NULL;
76 }
77 
78 PCIDevice *spapr_pci_find_dev(SpaprMachineState *spapr, uint64_t buid,
79                               uint32_t config_addr)
80 {
81     SpaprPhbState *sphb = spapr_pci_find_phb(spapr, buid);
82     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
83     int bus_num = (config_addr >> 16) & 0xFF;
84     int devfn = (config_addr >> 8) & 0xFF;
85 
86     if (!phb) {
87         return NULL;
88     }
89 
90     return pci_find_device(phb->bus, bus_num, devfn);
91 }
92 
93 static uint32_t rtas_pci_cfgaddr(uint32_t arg)
94 {
95     /* This handles the encoding of extended config space addresses */
96     return ((arg >> 20) & 0xf00) | (arg & 0xff);
97 }
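/*
 * Worked example (illustrative value): the low byte of arg supplies bits
 * 0-7 of the register offset and bits 28-31 of arg supply bits 8-11, so
 * arg = 0x50000023 yields ((0x50000023 >> 20) & 0xf00) | 0x23 = 0x523,
 * i.e. offset 0x523 in extended config space.
 */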
98 
99 static void finish_read_pci_config(SpaprMachineState *spapr, uint64_t buid,
100                                    uint32_t addr, uint32_t size,
101                                    target_ulong rets)
102 {
103     PCIDevice *pci_dev;
104     uint32_t val;
105 
106     if ((size != 1) && (size != 2) && (size != 4)) {
107         /* access must be 1, 2 or 4 bytes */
108         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
109         return;
110     }
111 
112     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
113     addr = rtas_pci_cfgaddr(addr);
114 
115     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
116         /* Access must be to a valid device, within bounds and
117          * naturally aligned */
118         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
119         return;
120     }
121 
122     val = pci_host_config_read_common(pci_dev, addr,
123                                       pci_config_size(pci_dev), size);
124 
125     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
126     rtas_st(rets, 1, val);
127 }
128 
129 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
130                                      uint32_t token, uint32_t nargs,
131                                      target_ulong args,
132                                      uint32_t nret, target_ulong rets)
133 {
134     uint64_t buid;
135     uint32_t size, addr;
136 
137     if ((nargs != 4) || (nret != 2)) {
138         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
139         return;
140     }
141 
142     buid = rtas_ldq(args, 1);
143     size = rtas_ld(args, 3);
144     addr = rtas_ld(args, 0);
145 
146     finish_read_pci_config(spapr, buid, addr, size, rets);
147 }
148 
149 static void rtas_read_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
150                                  uint32_t token, uint32_t nargs,
151                                  target_ulong args,
152                                  uint32_t nret, target_ulong rets)
153 {
154     uint32_t size, addr;
155 
156     if ((nargs != 2) || (nret != 2)) {
157         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
158         return;
159     }
160 
161     size = rtas_ld(args, 1);
162     addr = rtas_ld(args, 0);
163 
164     finish_read_pci_config(spapr, 0, addr, size, rets);
165 }
166 
167 static void finish_write_pci_config(SpaprMachineState *spapr, uint64_t buid,
168                                     uint32_t addr, uint32_t size,
169                                     uint32_t val, target_ulong rets)
170 {
171     PCIDevice *pci_dev;
172 
173     if ((size != 1) && (size != 2) && (size != 4)) {
174         /* access must be 1, 2 or 4 bytes */
175         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
176         return;
177     }
178 
179     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
180     addr = rtas_pci_cfgaddr(addr);
181 
182     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
183         /* Access must be to a valid device, within bounds and
184          * naturally aligned */
185         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
186         return;
187     }
188 
189     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
190                                  val, size);
191 
192     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
193 }
194 
195 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
196                                       uint32_t token, uint32_t nargs,
197                                       target_ulong args,
198                                       uint32_t nret, target_ulong rets)
199 {
200     uint64_t buid;
201     uint32_t val, size, addr;
202 
203     if ((nargs != 5) || (nret != 1)) {
204         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
205         return;
206     }
207 
208     buid = rtas_ldq(args, 1);
209     val = rtas_ld(args, 4);
210     size = rtas_ld(args, 3);
211     addr = rtas_ld(args, 0);
212 
213     finish_write_pci_config(spapr, buid, addr, size, val, rets);
214 }
215 
216 static void rtas_write_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
217                                   uint32_t token, uint32_t nargs,
218                                   target_ulong args,
219                                   uint32_t nret, target_ulong rets)
220 {
221     uint32_t val, size, addr;
222 
223     if ((nargs != 3) || (nret != 1)) {
224         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
225         return;
226     }
227 
228 
229     val = rtas_ld(args, 2);
230     size = rtas_ld(args, 1);
231     addr = rtas_ld(args, 0);
232 
233     finish_write_pci_config(spapr, 0, addr, size, val, rets);
234 }
235 
236 /*
237  * Set MSI/MSIX message data.
238  * This is required for msi_notify()/msix_notify() which
239  * will write to the addresses via spapr_msi_write().
240  *
241  * If addr == 0, all entries will have .data == first_irq, i.e. the
242  * table will be reset.
243  */
244 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
245                              unsigned first_irq, unsigned req_num)
246 {
247     unsigned i;
248     MSIMessage msg = { .address = addr, .data = first_irq };
249 
250     if (!msix) {
251         msi_set_message(pdev, msg);
252         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
253         return;
254     }
255 
256     for (i = 0; i < req_num; ++i) {
257         msix_set_message(pdev, i, msg);
258         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
259         if (addr) {
260             ++msg.data;
261         }
262     }
263 }
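/*
 * Worked example (illustrative values): for an MSI-X capable device with
 * addr = SPAPR_PCI_MSI_WINDOW, first_irq = 0x1200 and req_num = 3, entries
 * 0..2 are programmed with .address = SPAPR_PCI_MSI_WINDOW and .data =
 * 0x1200, 0x1201, 0x1202 respectively; with addr = 0 every entry keeps
 * .data = first_irq, which is the reset case noted above.
 */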
264 
265 static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
266                                 uint32_t token, uint32_t nargs,
267                                 target_ulong args, uint32_t nret,
268                                 target_ulong rets)
269 {
270     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
271     uint32_t config_addr = rtas_ld(args, 0);
272     uint64_t buid = rtas_ldq(args, 1);
273     unsigned int func = rtas_ld(args, 3);
274     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
275     unsigned int seq_num = rtas_ld(args, 5);
276     unsigned int ret_intr_type;
277     unsigned int irq, max_irqs = 0;
278     SpaprPhbState *phb = NULL;
279     PCIDevice *pdev = NULL;
280     spapr_pci_msi *msi;
281     int *config_addr_key;
282     Error *err = NULL;
283     int i;
284 
285     /* Find SpaprPhbState */
286     phb = spapr_pci_find_phb(spapr, buid);
287     if (phb) {
288         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
289     }
290     if (!phb || !pdev) {
291         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
292         return;
293     }
294 
295     switch (func) {
296     case RTAS_CHANGE_FN:
297         if (msi_present(pdev)) {
298             ret_intr_type = RTAS_TYPE_MSI;
299         } else if (msix_present(pdev)) {
300             ret_intr_type = RTAS_TYPE_MSIX;
301         } else {
302             rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
303             return;
304         }
305         break;
306     case RTAS_CHANGE_MSI_FN:
307         if (msi_present(pdev)) {
308             ret_intr_type = RTAS_TYPE_MSI;
309         } else {
310             rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
311             return;
312         }
313         break;
314     case RTAS_CHANGE_MSIX_FN:
315         if (msix_present(pdev)) {
316             ret_intr_type = RTAS_TYPE_MSIX;
317         } else {
318             rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
319             return;
320         }
321         break;
322     default:
323         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
324         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
325         return;
326     }
327 
328     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
329 
330     /* Releasing MSIs */
331     if (!req_num) {
332         if (!msi) {
333             trace_spapr_pci_msi("Releasing wrong config", config_addr);
334             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
335             return;
336         }
337 
338         if (!smc->legacy_irq_allocation) {
339             spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
340         }
341         spapr_irq_free(spapr, msi->first_irq, msi->num);
342         if (msi_present(pdev)) {
343             spapr_msi_setmsg(pdev, 0, false, 0, 0);
344         }
345         if (msix_present(pdev)) {
346             spapr_msi_setmsg(pdev, 0, true, 0, 0);
347         }
348         g_hash_table_remove(phb->msi, &config_addr);
349 
350         trace_spapr_pci_msi("Released MSIs", config_addr);
351         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
352         rtas_st(rets, 1, 0);
353         return;
354     }
355 
356     /* Enabling MSI */
357 
358     /* Check if the device supports as many IRQs as requested */
359     if (ret_intr_type == RTAS_TYPE_MSI) {
360         max_irqs = msi_nr_vectors_allocated(pdev);
361     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
362         max_irqs = pdev->msix_entries_nr;
363     }
364     if (!max_irqs) {
365         error_report("Requested interrupt type %d is not enabled for device %x",
366                      ret_intr_type, config_addr);
367         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
368         return;
369     }
370     /* Correct the number if the guest asked for too many */
371     if (req_num > max_irqs) {
372         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
373         req_num = max_irqs;
374         irq = 0; /* to avoid misleading trace */
375         goto out;
376     }
377 
378     /* Allocate MSIs */
379     if (smc->legacy_irq_allocation) {
380         irq = spapr_irq_find(spapr, req_num, ret_intr_type == RTAS_TYPE_MSI,
381                              &err);
382     } else {
383         irq = spapr_irq_msi_alloc(spapr, req_num,
384                                   ret_intr_type == RTAS_TYPE_MSI, &err);
385     }
386     if (err) {
387         error_reportf_err(err, "Can't allocate MSIs for device %x: ",
388                           config_addr);
389         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
390         return;
391     }
392 
393     for (i = 0; i < req_num; i++) {
394         spapr_irq_claim(spapr, irq + i, false, &err);
395         if (err) {
396             if (i) {
397                 spapr_irq_free(spapr, irq, i);
398             }
399             if (!smc->legacy_irq_allocation) {
400                 spapr_irq_msi_free(spapr, irq, req_num);
401             }
402             error_reportf_err(err, "Can't allocate MSIs for device %x: ",
403                               config_addr);
404             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
405             return;
406         }
407     }
408 
409     /* Release previous MSIs */
410     if (msi) {
411         if (!smc->legacy_irq_allocation) {
412             spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
413         }
414         spapr_irq_free(spapr, msi->first_irq, msi->num);
415         g_hash_table_remove(phb->msi, &config_addr);
416     }
417 
418     /* Set up MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
419     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
420                      irq, req_num);
421 
422     /* Add MSI device to cache */
423     msi = g_new(spapr_pci_msi, 1);
424     msi->first_irq = irq;
425     msi->num = req_num;
426     config_addr_key = g_new(int, 1);
427     *config_addr_key = config_addr;
428     g_hash_table_insert(phb->msi, config_addr_key, msi);
429 
430 out:
431     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
432     rtas_st(rets, 1, req_num);
433     rtas_st(rets, 2, ++seq_num);
434     if (nret > 3) {
435         rtas_st(rets, 3, ret_intr_type);
436     }
437 
438     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
439 }
440 
441 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
442                                                    SpaprMachineState *spapr,
443                                                    uint32_t token,
444                                                    uint32_t nargs,
445                                                    target_ulong args,
446                                                    uint32_t nret,
447                                                    target_ulong rets)
448 {
449     uint32_t config_addr = rtas_ld(args, 0);
450     uint64_t buid = rtas_ldq(args, 1);
451     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
452     SpaprPhbState *phb = NULL;
453     PCIDevice *pdev = NULL;
454     spapr_pci_msi *msi;
455 
456     /* Find SpaprPhbState */
457     phb = spapr_pci_find_phb(spapr, buid);
458     if (phb) {
459         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
460     }
461     if (!phb || !pdev) {
462         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
463         return;
464     }
465 
466     /* Find device descriptor and start IRQ */
467     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
468     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
469         trace_spapr_pci_msi("Failed to return vector", config_addr);
470         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
471         return;
472     }
473     intr_src_num = msi->first_irq + ioa_intr_num;
474     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
475                                                            intr_src_num);
476 
477     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
478     rtas_st(rets, 1, intr_src_num);
479     rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
480 }
481 
482 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
483                                     SpaprMachineState *spapr,
484                                     uint32_t token, uint32_t nargs,
485                                     target_ulong args, uint32_t nret,
486                                     target_ulong rets)
487 {
488     SpaprPhbState *sphb;
489     uint32_t addr, option;
490     uint64_t buid;
491     int ret;
492 
493     if ((nargs != 4) || (nret != 1)) {
494         goto param_error_exit;
495     }
496 
497     buid = rtas_ldq(args, 1);
498     addr = rtas_ld(args, 0);
499     option = rtas_ld(args, 3);
500 
501     sphb = spapr_pci_find_phb(spapr, buid);
502     if (!sphb) {
503         goto param_error_exit;
504     }
505 
506     if (!spapr_phb_eeh_available(sphb)) {
507         goto param_error_exit;
508     }
509 
510     ret = spapr_phb_vfio_eeh_set_option(sphb, addr, option);
511     rtas_st(rets, 0, ret);
512     return;
513 
514 param_error_exit:
515     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
516 }
517 
518 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
519                                            SpaprMachineState *spapr,
520                                            uint32_t token, uint32_t nargs,
521                                            target_ulong args, uint32_t nret,
522                                            target_ulong rets)
523 {
524     SpaprPhbState *sphb;
525     PCIDevice *pdev;
526     uint32_t addr, option;
527     uint64_t buid;
528 
529     if ((nargs != 4) || (nret != 2)) {
530         goto param_error_exit;
531     }
532 
533     buid = rtas_ldq(args, 1);
534     sphb = spapr_pci_find_phb(spapr, buid);
535     if (!sphb) {
536         goto param_error_exit;
537     }
538 
539     if (!spapr_phb_eeh_available(sphb)) {
540         goto param_error_exit;
541     }
542 
543     /*
544      * We always have a PE address of the form "00BB0001", where "BB"
545      * is the bus number of the PE's primary bus.
546      */
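    /*
     * For illustration: a device whose primary bus is 0x05 gets PE address
     * 0x00050001 from the (bus << 16) + 1 computation below.
     */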
547     option = rtas_ld(args, 3);
548     switch (option) {
549     case RTAS_GET_PE_ADDR:
550         addr = rtas_ld(args, 0);
551         pdev = spapr_pci_find_dev(spapr, buid, addr);
552         if (!pdev) {
553             goto param_error_exit;
554         }
555 
556         rtas_st(rets, 1, (pci_bus_num(pci_get_bus(pdev)) << 16) + 1);
557         break;
558     case RTAS_GET_PE_MODE:
559         rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
560         break;
561     default:
562         goto param_error_exit;
563     }
564 
565     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
566     return;
567 
568 param_error_exit:
569     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
570 }
571 
572 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
573                                             SpaprMachineState *spapr,
574                                             uint32_t token, uint32_t nargs,
575                                             target_ulong args, uint32_t nret,
576                                             target_ulong rets)
577 {
578     SpaprPhbState *sphb;
579     uint64_t buid;
580     int state, ret;
581 
582     if ((nargs != 3) || (nret != 4 && nret != 5)) {
583         goto param_error_exit;
584     }
585 
586     buid = rtas_ldq(args, 1);
587     sphb = spapr_pci_find_phb(spapr, buid);
588     if (!sphb) {
589         goto param_error_exit;
590     }
591 
592     if (!spapr_phb_eeh_available(sphb)) {
593         goto param_error_exit;
594     }
595 
596     ret = spapr_phb_vfio_eeh_get_state(sphb, &state);
597     rtas_st(rets, 0, ret);
598     if (ret != RTAS_OUT_SUCCESS) {
599         return;
600     }
601 
602     rtas_st(rets, 1, state);
603     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
604     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
605     if (nret >= 5) {
606         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
607     }
608     return;
609 
610 param_error_exit:
611     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
612 }
613 
614 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
615                                     SpaprMachineState *spapr,
616                                     uint32_t token, uint32_t nargs,
617                                     target_ulong args, uint32_t nret,
618                                     target_ulong rets)
619 {
620     SpaprPhbState *sphb;
621     uint32_t option;
622     uint64_t buid;
623     int ret;
624 
625     if ((nargs != 4) || (nret != 1)) {
626         goto param_error_exit;
627     }
628 
629     buid = rtas_ldq(args, 1);
630     option = rtas_ld(args, 3);
631     sphb = spapr_pci_find_phb(spapr, buid);
632     if (!sphb) {
633         goto param_error_exit;
634     }
635 
636     if (!spapr_phb_eeh_available(sphb)) {
637         goto param_error_exit;
638     }
639 
640     ret = spapr_phb_vfio_eeh_reset(sphb, option);
641     rtas_st(rets, 0, ret);
642     return;
643 
644 param_error_exit:
645     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
646 }
647 
648 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
649                                   SpaprMachineState *spapr,
650                                   uint32_t token, uint32_t nargs,
651                                   target_ulong args, uint32_t nret,
652                                   target_ulong rets)
653 {
654     SpaprPhbState *sphb;
655     uint64_t buid;
656     int ret;
657 
658     if ((nargs != 3) || (nret != 1)) {
659         goto param_error_exit;
660     }
661 
662     buid = rtas_ldq(args, 1);
663     sphb = spapr_pci_find_phb(spapr, buid);
664     if (!sphb) {
665         goto param_error_exit;
666     }
667 
668     if (!spapr_phb_eeh_available(sphb)) {
669         goto param_error_exit;
670     }
671 
672     ret = spapr_phb_vfio_eeh_configure(sphb);
673     rtas_st(rets, 0, ret);
674     return;
675 
676 param_error_exit:
677     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
678 }
679 
680 /* To be supported later */
681 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
682                                        SpaprMachineState *spapr,
683                                        uint32_t token, uint32_t nargs,
684                                        target_ulong args, uint32_t nret,
685                                        target_ulong rets)
686 {
687     SpaprPhbState *sphb;
688     int option;
689     uint64_t buid;
690 
691     if ((nargs != 8) || (nret != 1)) {
692         goto param_error_exit;
693     }
694 
695     buid = rtas_ldq(args, 1);
696     sphb = spapr_pci_find_phb(spapr, buid);
697     if (!sphb) {
698         goto param_error_exit;
699     }
700 
701     if (!spapr_phb_eeh_available(sphb)) {
702         goto param_error_exit;
703     }
704 
705     option = rtas_ld(args, 7);
706     switch (option) {
707     case RTAS_SLOT_TEMP_ERR_LOG:
708     case RTAS_SLOT_PERM_ERR_LOG:
709         break;
710     default:
711         goto param_error_exit;
712     }
713 
714     /* We don't have an error log yet */
715     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
716     return;
717 
718 param_error_exit:
719     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
720 }
721 
722 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
723 {
724     /*
725      * Here we use the number returned by pci_swizzle_map_irq_fn to find a
726      * corresponding qemu_irq.
727      */
728     SpaprPhbState *phb = opaque;
729 
730     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
731     qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
732 }
733 
734 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
735 {
736     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
737     PCIINTxRoute route;
738 
739     route.mode = PCI_INTX_ENABLED;
740     route.irq = sphb->lsi_table[pin].irq;
741 
742     return route;
743 }
744 
745 /*
746  * MSI/MSIX memory region implementation.
747  * The handler handles both MSI and MSIX.
748  * The vector number is encoded in the least significant bits of data.
749  */
750 static void spapr_msi_write(void *opaque, hwaddr addr,
751                             uint64_t data, unsigned size)
752 {
753     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
754     uint32_t irq = data;
755 
756     trace_spapr_pci_msi_write(addr, data, irq);
757 
758     qemu_irq_pulse(spapr_qirq(spapr, irq));
759 }
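/*
 * Illustrative flow: msi_notify()/msix_notify() perform a DMA write of the
 * vector's .data to SPAPR_PCI_MSI_WINDOW; because spapr_msi_setmsg() stored
 * the global IRQ number in .data, the 'data' argument above is directly the
 * qirq to pulse (e.g. data = 0x1201 pulses IRQ 0x1201).
 */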
760 
761 static const MemoryRegionOps spapr_msi_ops = {
762     /* There is no .read as the read result is undefined by the PCI spec */
763     .read = NULL,
764     .write = spapr_msi_write,
765     .endianness = DEVICE_LITTLE_ENDIAN
766 };
767 
768 /*
769  * PHB PCI device
770  */
771 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
772 {
773     SpaprPhbState *phb = opaque;
774 
775     return &phb->iommu_as;
776 }
777 
778 static char *spapr_phb_vfio_get_loc_code(SpaprPhbState *sphb,  PCIDevice *pdev)
779 {
780     char *path = NULL, *buf = NULL, *host = NULL;
781 
782     /* Get the PCI VFIO host id */
783     host = object_property_get_str(OBJECT(pdev), "host", NULL);
784     if (!host) {
785         goto err_out;
786     }
787 
788     /* Construct the path of the file that will give us the DT location */
789     path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host);
790     g_free(host);
791     if (!g_file_get_contents(path, &buf, NULL, NULL)) {
792         goto err_out;
793     }
794     g_free(path);
795 
796     /* Construct the path and read the loc-code from the host device tree */
797     path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", buf);
798     g_free(buf);
799     if (!g_file_get_contents(path, &buf, NULL, NULL)) {
800         goto err_out;
801     }
802     return buf;
803 
804 err_out:
805     g_free(path);
806     return NULL;
807 }
808 
809 static char *spapr_phb_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev)
810 {
811     char *buf;
812     const char *devtype = "qemu";
813     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
814 
815     if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
816         buf = spapr_phb_vfio_get_loc_code(sphb, pdev);
817         if (buf) {
818             return buf;
819         }
820         devtype = "vfio";
821     }
822     /*
823      * For emulated devices and the VFIO-failure case, make up
824      * the loc-code.
825      */
826     buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x",
827                           devtype, pdev->name, sphb->index, busnr,
828                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
829     return buf;
830 }
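/*
 * For example (hypothetical device), an emulated "virtio-net-pci" function
 * at 00:05.0 behind the PHB with index 0 gets the made-up loc-code
 * "qemu_virtio-net-pci:0000:00:05.0" from the format string above.
 */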
831 
832 /* Macros to operate on addresses in the OF binding to PCI */
833 #define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
834 #define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
835 #define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
836 #define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
837 #define b_ss(x)         b_x((x), 24, 2) /* the space code */
838 #define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
839 #define b_ddddd(x)      b_x((x), 11, 5) /* device number */
840 #define b_fff(x)        b_x((x), 8, 3)  /* function number */
841 #define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
842 
843 /* for 'reg'/'assigned-addresses' OF properties */
844 #define RESOURCE_CELLS_SIZE 2
845 #define RESOURCE_CELLS_ADDRESS 3
846 
847 typedef struct ResourceFields {
848     uint32_t phys_hi;
849     uint32_t phys_mid;
850     uint32_t phys_lo;
851     uint32_t size_hi;
852     uint32_t size_lo;
853 } QEMU_PACKED ResourceFields;
854 
855 typedef struct ResourceProps {
856     ResourceFields reg[8];
857     ResourceFields assigned[7];
858     uint32_t reg_len;
859     uint32_t assigned_len;
860 } ResourceProps;
861 
862 /* fill in the 'reg'/'assigned-addresses' OF properties for
863  * a PCI device. 'reg' describes resource requirements for a
864  * device's IO/MEM regions, 'assigned-addresses' describes the
865  * actual resource assignments.
866  *
867  * the properties are arrays of ('phys-addr', 'size') pairs describing
868  * the addressable regions of the PCI device, where 'phys-addr' is a
869  * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
870  * (phys.hi, phys.mid, phys.lo), and 'size' is a
871  * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
872  *
873  * phys.hi = 0xYYXXXXZZ, where:
874  *   0xYY = npt000ss
875  *          |||   |
876  *          |||   +-- space code
877  *          |||               |
878  *          |||               +  00 if configuration space
879  *          |||               +  01 if IO region,
880  *          |||               +  10 if 32-bit MEM region
881  *          |||               +  11 if 64-bit MEM region
882  *          |||
883  *          ||+------ for non-relocatable IO: 1 if aliased
884  *          ||        for relocatable IO: 1 if below 64KB
885  *          ||        for MEM: 1 if below 1MB
886  *          |+------- 1 if region is prefetchable
887  *          +-------- 1 if region is non-relocatable
888  *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
889  *            bits respectively
890  *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
891  *          to the region
892  *
893  * phys.mid and phys.lo correspond respectively to the hi/lo portions
894  * of the actual address of the region.
895  *
896  * how the phys-addr/size values are used differs slightly between
897  * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
898  * an additional description for the config space region of the
899  * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
900  * to describe the region as relocatable, with an address-mapping
901  * that corresponds directly to the PHB's address space for the
902  * resource. 'assigned-addresses' always has n=1 set with an absolute
903  * address assigned for the resource. in general, 'assigned-addresses'
904  * won't be populated, since addresses for PCI devices are generally
905  * unmapped initially and left to the guest to assign.
906  *
907  * note also that addresses defined in these properties are, at least
908  * for PAPR guests, relative to the PHB's IO/MEM windows, and
909  * correspond directly to the addresses in the BARs.
910  *
911  * in accordance with PCI Bus Binding to Open Firmware,
912  * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
913  * Appendix C.
914  */
915 static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
916 {
917     int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
918     uint32_t dev_id = (b_bbbbbbbb(bus_num) |
919                        b_ddddd(PCI_SLOT(d->devfn)) |
920                        b_fff(PCI_FUNC(d->devfn)));
921     ResourceFields *reg, *assigned;
922     int i, reg_idx = 0, assigned_idx = 0;
923 
924     /* config space region */
925     reg = &rp->reg[reg_idx++];
926     reg->phys_hi = cpu_to_be32(dev_id);
927     reg->phys_mid = 0;
928     reg->phys_lo = 0;
929     reg->size_hi = 0;
930     reg->size_lo = 0;
931 
932     for (i = 0; i < PCI_NUM_REGIONS; i++) {
933         if (!d->io_regions[i].size) {
934             continue;
935         }
936 
937         reg = &rp->reg[reg_idx++];
938 
939         reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
940         if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
941             reg->phys_hi |= cpu_to_be32(b_ss(1));
942         } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
943             reg->phys_hi |= cpu_to_be32(b_ss(3));
944         } else {
945             reg->phys_hi |= cpu_to_be32(b_ss(2));
946         }
947         reg->phys_mid = 0;
948         reg->phys_lo = 0;
949         reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
950         reg->size_lo = cpu_to_be32(d->io_regions[i].size);
951 
952         if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
953             continue;
954         }
955 
956         assigned = &rp->assigned[assigned_idx++];
957         assigned->phys_hi = cpu_to_be32(be32_to_cpu(reg->phys_hi) | b_n(1));
958         assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
959         assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
960         assigned->size_hi = reg->size_hi;
961         assigned->size_lo = reg->size_lo;
962     }
963 
964     rp->reg_len = reg_idx * sizeof(ResourceFields);
965     rp->assigned_len = assigned_idx * sizeof(ResourceFields);
966 }
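/*
 * Worked example (illustrative values): a 64-bit MEM BAR at config offset
 * 0x18 (BAR 2) on bus 0, device 5, function 0 produces a 'reg' entry with
 * phys.hi = b_ss(3) | b_ddddd(5) | b_rrrrrrrr(0x18) = 0x03002818 and
 * phys.mid = phys.lo = 0 (relocatable). Once the BAR is mapped, the matching
 * 'assigned-addresses' entry additionally sets b_n(1), giving
 * phys.hi = 0x83002818, with phys.mid/phys.lo holding the assigned address.
 */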
967 
968 typedef struct PCIClass PCIClass;
969 typedef struct PCISubClass PCISubClass;
970 typedef struct PCIIFace PCIIFace;
971 
972 struct PCIIFace {
973     int iface;
974     const char *name;
975 };
976 
977 struct PCISubClass {
978     int subclass;
979     const char *name;
980     const PCIIFace *iface;
981 };
982 
983 struct PCIClass {
984     const char *name;
985     const PCISubClass *subc;
986 };
987 
988 static const PCISubClass undef_subclass[] = {
989     { PCI_CLASS_NOT_DEFINED_VGA, "display", NULL },
990     { 0xFF, NULL, NULL },
991 };
992 
993 static const PCISubClass mass_subclass[] = {
994     { PCI_CLASS_STORAGE_SCSI, "scsi", NULL },
995     { PCI_CLASS_STORAGE_IDE, "ide", NULL },
996     { PCI_CLASS_STORAGE_FLOPPY, "fdc", NULL },
997     { PCI_CLASS_STORAGE_IPI, "ipi", NULL },
998     { PCI_CLASS_STORAGE_RAID, "raid", NULL },
999     { PCI_CLASS_STORAGE_ATA, "ata", NULL },
1000     { PCI_CLASS_STORAGE_SATA, "sata", NULL },
1001     { PCI_CLASS_STORAGE_SAS, "sas", NULL },
1002     { 0xFF, NULL, NULL },
1003 };
1004 
1005 static const PCISubClass net_subclass[] = {
1006     { PCI_CLASS_NETWORK_ETHERNET, "ethernet", NULL },
1007     { PCI_CLASS_NETWORK_TOKEN_RING, "token-ring", NULL },
1008     { PCI_CLASS_NETWORK_FDDI, "fddi", NULL },
1009     { PCI_CLASS_NETWORK_ATM, "atm", NULL },
1010     { PCI_CLASS_NETWORK_ISDN, "isdn", NULL },
1011     { PCI_CLASS_NETWORK_WORLDFIP, "worldfip", NULL },
1012     { PCI_CLASS_NETWORK_PICMG214, "picmg", NULL },
1013     { 0xFF, NULL, NULL },
1014 };
1015 
1016 static const PCISubClass displ_subclass[] = {
1017     { PCI_CLASS_DISPLAY_VGA, "vga", NULL },
1018     { PCI_CLASS_DISPLAY_XGA, "xga", NULL },
1019     { PCI_CLASS_DISPLAY_3D, "3d-controller", NULL },
1020     { 0xFF, NULL, NULL },
1021 };
1022 
1023 static const PCISubClass media_subclass[] = {
1024     { PCI_CLASS_MULTIMEDIA_VIDEO, "video", NULL },
1025     { PCI_CLASS_MULTIMEDIA_AUDIO, "sound", NULL },
1026     { PCI_CLASS_MULTIMEDIA_PHONE, "telephony", NULL },
1027     { 0xFF, NULL, NULL },
1028 };
1029 
1030 static const PCISubClass mem_subclass[] = {
1031     { PCI_CLASS_MEMORY_RAM, "memory", NULL },
1032     { PCI_CLASS_MEMORY_FLASH, "flash", NULL },
1033     { 0xFF, NULL, NULL },
1034 };
1035 
1036 static const PCISubClass bridg_subclass[] = {
1037     { PCI_CLASS_BRIDGE_HOST, "host", NULL },
1038     { PCI_CLASS_BRIDGE_ISA, "isa", NULL },
1039     { PCI_CLASS_BRIDGE_EISA, "eisa", NULL },
1040     { PCI_CLASS_BRIDGE_MC, "mca", NULL },
1041     { PCI_CLASS_BRIDGE_PCI, "pci", NULL },
1042     { PCI_CLASS_BRIDGE_PCMCIA, "pcmcia", NULL },
1043     { PCI_CLASS_BRIDGE_NUBUS, "nubus", NULL },
1044     { PCI_CLASS_BRIDGE_CARDBUS, "cardbus", NULL },
1045     { PCI_CLASS_BRIDGE_RACEWAY, "raceway", NULL },
1046     { PCI_CLASS_BRIDGE_PCI_SEMITP, "semi-transparent-pci", NULL },
1047     { PCI_CLASS_BRIDGE_IB_PCI, "infiniband", NULL },
1048     { 0xFF, NULL, NULL },
1049 };
1050 
1051 static const PCISubClass comm_subclass[] = {
1052     { PCI_CLASS_COMMUNICATION_SERIAL, "serial", NULL },
1053     { PCI_CLASS_COMMUNICATION_PARALLEL, "parallel", NULL },
1054     { PCI_CLASS_COMMUNICATION_MULTISERIAL, "multiport-serial", NULL },
1055     { PCI_CLASS_COMMUNICATION_MODEM, "modem", NULL },
1056     { PCI_CLASS_COMMUNICATION_GPIB, "gpib", NULL },
1057     { PCI_CLASS_COMMUNICATION_SC, "smart-card", NULL },
1058     { 0xFF, NULL, NULL, },
1059 };
1060 
1061 static const PCIIFace pic_iface[] = {
1062     { PCI_CLASS_SYSTEM_PIC_IOAPIC, "io-apic" },
1063     { PCI_CLASS_SYSTEM_PIC_IOXAPIC, "io-xapic" },
1064     { 0xFF, NULL },
1065 };
1066 
1067 static const PCISubClass sys_subclass[] = {
1068     { PCI_CLASS_SYSTEM_PIC, "interrupt-controller", pic_iface },
1069     { PCI_CLASS_SYSTEM_DMA, "dma-controller", NULL },
1070     { PCI_CLASS_SYSTEM_TIMER, "timer", NULL },
1071     { PCI_CLASS_SYSTEM_RTC, "rtc", NULL },
1072     { PCI_CLASS_SYSTEM_PCI_HOTPLUG, "hot-plug-controller", NULL },
1073     { PCI_CLASS_SYSTEM_SDHCI, "sd-host-controller", NULL },
1074     { 0xFF, NULL, NULL },
1075 };
1076 
1077 static const PCISubClass inp_subclass[] = {
1078     { PCI_CLASS_INPUT_KEYBOARD, "keyboard", NULL },
1079     { PCI_CLASS_INPUT_PEN, "pen", NULL },
1080     { PCI_CLASS_INPUT_MOUSE, "mouse", NULL },
1081     { PCI_CLASS_INPUT_SCANNER, "scanner", NULL },
1082     { PCI_CLASS_INPUT_GAMEPORT, "gameport", NULL },
1083     { 0xFF, NULL, NULL },
1084 };
1085 
1086 static const PCISubClass dock_subclass[] = {
1087     { PCI_CLASS_DOCKING_GENERIC, "dock", NULL },
1088     { 0xFF, NULL, NULL },
1089 };
1090 
1091 static const PCISubClass cpu_subclass[] = {
1092     { PCI_CLASS_PROCESSOR_PENTIUM, "pentium", NULL },
1093     { PCI_CLASS_PROCESSOR_POWERPC, "powerpc", NULL },
1094     { PCI_CLASS_PROCESSOR_MIPS, "mips", NULL },
1095     { PCI_CLASS_PROCESSOR_CO, "co-processor", NULL },
1096     { 0xFF, NULL, NULL },
1097 };
1098 
1099 static const PCIIFace usb_iface[] = {
1100     { PCI_CLASS_SERIAL_USB_UHCI, "usb-uhci" },
1101     { PCI_CLASS_SERIAL_USB_OHCI, "usb-ohci", },
1102     { PCI_CLASS_SERIAL_USB_EHCI, "usb-ehci" },
1103     { PCI_CLASS_SERIAL_USB_XHCI, "usb-xhci" },
1104     { PCI_CLASS_SERIAL_USB_UNKNOWN, "usb-unknown" },
1105     { PCI_CLASS_SERIAL_USB_DEVICE, "usb-device" },
1106     { 0xFF, NULL },
1107 };
1108 
1109 static const PCISubClass ser_subclass[] = {
1110     { PCI_CLASS_SERIAL_FIREWIRE, "firewire", NULL },
1111     { PCI_CLASS_SERIAL_ACCESS, "access-bus", NULL },
1112     { PCI_CLASS_SERIAL_SSA, "ssa", NULL },
1113     { PCI_CLASS_SERIAL_USB, "usb", usb_iface },
1114     { PCI_CLASS_SERIAL_FIBER, "fibre-channel", NULL },
1115     { PCI_CLASS_SERIAL_SMBUS, "smb", NULL },
1116     { PCI_CLASS_SERIAL_IB, "infiniband", NULL },
1117     { PCI_CLASS_SERIAL_IPMI, "ipmi", NULL },
1118     { PCI_CLASS_SERIAL_SERCOS, "sercos", NULL },
1119     { PCI_CLASS_SERIAL_CANBUS, "canbus", NULL },
1120     { 0xFF, NULL, NULL },
1121 };
1122 
1123 static const PCISubClass wrl_subclass[] = {
1124     { PCI_CLASS_WIRELESS_IRDA, "irda", NULL },
1125     { PCI_CLASS_WIRELESS_CIR, "consumer-ir", NULL },
1126     { PCI_CLASS_WIRELESS_RF_CONTROLLER, "rf-controller", NULL },
1127     { PCI_CLASS_WIRELESS_BLUETOOTH, "bluetooth", NULL },
1128     { PCI_CLASS_WIRELESS_BROADBAND, "broadband", NULL },
1129     { 0xFF, NULL, NULL },
1130 };
1131 
1132 static const PCISubClass sat_subclass[] = {
1133     { PCI_CLASS_SATELLITE_TV, "satellite-tv", NULL },
1134     { PCI_CLASS_SATELLITE_AUDIO, "satellite-audio", NULL },
1135     { PCI_CLASS_SATELLITE_VOICE, "satellite-voice", NULL },
1136     { PCI_CLASS_SATELLITE_DATA, "satellite-data", NULL },
1137     { 0xFF, NULL, NULL },
1138 };
1139 
1140 static const PCISubClass crypt_subclass[] = {
1141     { PCI_CLASS_CRYPT_NETWORK, "network-encryption", NULL },
1142     { PCI_CLASS_CRYPT_ENTERTAINMENT,
1143       "entertainment-encryption", NULL },
1144     { 0xFF, NULL, NULL },
1145 };
1146 
1147 static const PCISubClass spc_subclass[] = {
1148     { PCI_CLASS_SP_DPIO, "dpio", NULL },
1149     { PCI_CLASS_SP_PERF, "counter", NULL },
1150     { PCI_CLASS_SP_SYNCH, "measurement", NULL },
1151     { PCI_CLASS_SP_MANAGEMENT, "management-card", NULL },
1152     { 0xFF, NULL, NULL },
1153 };
1154 
1155 static const PCIClass pci_classes[] = {
1156     { "legacy-device", undef_subclass },
1157     { "mass-storage",  mass_subclass },
1158     { "network", net_subclass },
1159     { "display", displ_subclass, },
1160     { "multimedia-device", media_subclass },
1161     { "memory-controller", mem_subclass },
1162     { "unknown-bridge", bridg_subclass },
1163     { "communication-controller", comm_subclass},
1164     { "system-peripheral", sys_subclass },
1165     { "input-controller", inp_subclass },
1166     { "docking-station", dock_subclass },
1167     { "cpu", cpu_subclass },
1168     { "serial-bus", ser_subclass },
1169     { "wireless-controller", wrl_subclass },
1170     { "intelligent-io", NULL },
1171     { "satellite-device", sat_subclass },
1172     { "encryption", crypt_subclass },
1173     { "data-processing-controller", spc_subclass },
1174 };
1175 
1176 static const char *dt_name_from_class(uint8_t class, uint8_t subclass,
1177                                       uint8_t iface)
1178 {
1179     const PCIClass *pclass;
1180     const PCISubClass *psubclass;
1181     const PCIIFace *piface;
1182     const char *name;
1183 
1184     if (class >= ARRAY_SIZE(pci_classes)) {
1185         return "pci";
1186     }
1187 
1188     pclass = pci_classes + class;
1189     name = pclass->name;
1190 
1191     if (pclass->subc == NULL) {
1192         return name;
1193     }
1194 
1195     psubclass = pclass->subc;
1196     while ((psubclass->subclass & 0xff) != 0xff) {
1197         if ((psubclass->subclass & 0xff) == subclass) {
1198             name = psubclass->name;
1199             break;
1200         }
1201         psubclass++;
1202     }
1203 
1204     piface = psubclass->iface;
1205     if (piface == NULL) {
1206         return name;
1207     }
1208     while ((piface->iface & 0xff) != 0xff) {
1209         if ((piface->iface & 0xff) == iface) {
1210             name = piface->name;
1211             break;
1212         }
1213         piface++;
1214     }
1215 
1216     return name;
1217 }
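/*
 * For illustration: class 0x02 / subclass 0x00 maps to "ethernet",
 * class 0x0c / subclass 0x03 / iface 0x30 maps to "usb-xhci", and any
 * class beyond the table above falls back to the generic "pci" name.
 */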
1218 
1219 static uint32_t spapr_phb_get_pci_drc_index(SpaprPhbState *phb,
1220                                             PCIDevice *pdev);
1221 
1222 typedef struct PciWalkFdt {
1223     void *fdt;
1224     int offset;
1225     SpaprPhbState *sphb;
1226     int err;
1227 } PciWalkFdt;
1228 
1229 static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
1230                                void *fdt, int parent_offset);
1231 
1232 static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev,
1233                                    void *opaque)
1234 {
1235     PciWalkFdt *p = opaque;
1236     int err;
1237 
1238     if (p->err) {
1239         /* Something's already broken, don't keep going */
1240         return;
1241     }
1242 
1243     err = spapr_dt_pci_device(p->sphb, pdev, p->fdt, p->offset);
1244     if (err < 0) {
1245         p->err = err;
1246     }
1247 }
1248 
1249 /* Augment PCI device node with bridge specific information */
1250 static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus,
1251                                void *fdt, int offset)
1252 {
1253     PciWalkFdt cbinfo = {
1254         .fdt = fdt,
1255         .offset = offset,
1256         .sphb = sphb,
1257         .err = 0,
1258     };
1259 
1260     _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
1261                           RESOURCE_CELLS_ADDRESS));
1262     _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
1263                           RESOURCE_CELLS_SIZE));
1264 
1265     if (bus) {
1266         pci_for_each_device_reverse(bus, pci_bus_num(bus),
1267                                     spapr_dt_pci_device_cb, &cbinfo);
1268         if (cbinfo.err) {
1269             return cbinfo.err;
1270         }
1271     }
1272 
1273     return offset;
1274 }
1275 
1276 /* create OF node for pci device and required OF DT properties */
1277 static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
1278                                void *fdt, int parent_offset)
1279 {
1280     int offset;
1281     const gchar *basename;
1282     gchar *nodename;
1283     int slot = PCI_SLOT(dev->devfn);
1284     int func = PCI_FUNC(dev->devfn);
1285     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
1286     ResourceProps rp;
1287     uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev);
1288     uint32_t vendor_id = pci_default_read_config(dev, PCI_VENDOR_ID, 2);
1289     uint32_t device_id = pci_default_read_config(dev, PCI_DEVICE_ID, 2);
1290     uint32_t revision_id = pci_default_read_config(dev, PCI_REVISION_ID, 1);
1291     uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3);
1292     uint32_t irq_pin = pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1);
1293     uint32_t subsystem_id = pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2);
1294     uint32_t subsystem_vendor_id =
1295         pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2);
1296     uint32_t cache_line_size =
1297         pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1);
1298     uint32_t pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
1299     gchar *loc_code;
1300 
1301     basename = dt_name_from_class((ccode >> 16) & 0xff, (ccode >> 8) & 0xff,
1302                                   ccode & 0xff);
1303 
1304     if (func != 0) {
1305         nodename = g_strdup_printf("%s@%x,%x", basename, slot, func);
1306     } else {
1307         nodename = g_strdup_printf("%s@%x", basename, slot);
1308     }
1309 
1310     _FDT(offset = fdt_add_subnode(fdt, parent_offset, nodename));
1311 
1312     g_free(nodename);
1313 
1314     /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
1315     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id", vendor_id));
1316     _FDT(fdt_setprop_cell(fdt, offset, "device-id", device_id));
1317     _FDT(fdt_setprop_cell(fdt, offset, "revision-id", revision_id));
1318 
1319     _FDT(fdt_setprop_cell(fdt, offset, "class-code", ccode));
1320     if (irq_pin) {
1321         _FDT(fdt_setprop_cell(fdt, offset, "interrupts", irq_pin));
1322     }
1323 
1324     if (subsystem_id) {
1325         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id", subsystem_id));
1326     }
1327 
1328     if (subsystem_vendor_id) {
1329         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
1330                               subsystem_vendor_id));
1331     }
1332 
1333     _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size", cache_line_size));
1334 
1335 
1336     /* the following fdt cells are masked off the pci status register */
1337     _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
1338                           PCI_STATUS_DEVSEL_MASK & pci_status));
1339 
1340     if (pci_status & PCI_STATUS_FAST_BACK) {
1341         _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
1342     }
1343     if (pci_status & PCI_STATUS_66MHZ) {
1344         _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
1345     }
1346     if (pci_status & PCI_STATUS_UDF) {
1347         _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
1348     }
1349 
1350     loc_code = spapr_phb_get_loc_code(sphb, dev);
1351     _FDT(fdt_setprop_string(fdt, offset, "ibm,loc-code", loc_code));
1352     g_free(loc_code);
1353 
1354     if (drc_index) {
1355         _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
1356     }
1357 
1358     if (msi_present(dev)) {
1359         uint32_t max_msi = msi_nr_vectors_allocated(dev);
1360         if (max_msi) {
1361             _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
1362         }
1363     }
1364     if (msix_present(dev)) {
1365         uint32_t max_msix = dev->msix_entries_nr;
1366         if (max_msix) {
1367             _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
1368         }
1369     }
1370 
1371     populate_resource_props(dev, &rp);
1372     _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
1373     _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
1374                      (uint8_t *)rp.assigned, rp.assigned_len));
1375 
1376     if (sphb->pcie_ecs && pci_is_express(dev)) {
1377         _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
1378     }
1379 
1380     spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb);
1381 
1382     if (!pc->is_bridge) {
1383         /* Properties only for non-bridges */
1384         uint32_t min_grant = pci_default_read_config(dev, PCI_MIN_GNT, 1);
1385         uint32_t max_latency = pci_default_read_config(dev, PCI_MAX_LAT, 1);
1386         _FDT(fdt_setprop_cell(fdt, offset, "min-grant", min_grant));
1387         _FDT(fdt_setprop_cell(fdt, offset, "max-latency", max_latency));
1388         return offset;
1389     } else {
1390         PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
1391 
1392         return spapr_dt_pci_bus(sphb, sec_bus, fdt, offset);
1393     }
1394 }
1395 
1396 /* Callback to be called during DRC release. */
1397 void spapr_phb_remove_pci_device_cb(DeviceState *dev)
1398 {
1399     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
1400 
1401     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
1402     object_unparent(OBJECT(dev));
1403 }
1404 
1405 static SpaprDrc *spapr_phb_get_pci_func_drc(SpaprPhbState *phb,
1406                                                     uint32_t busnr,
1407                                                     int32_t devfn)
1408 {
1409     return spapr_drc_by_id(TYPE_SPAPR_DRC_PCI,
1410                            (phb->index << 16) | (busnr << 8) | devfn);
1411 }
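/*
 * For illustration: with PHB index 1, bus 0 and devfn 0x28 (slot 5,
 * function 0), the DRC id computed above is (1 << 16) | (0 << 8) | 0x28
 * = 0x10028.
 */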
1412 
1413 static SpaprDrc *spapr_phb_get_pci_drc(SpaprPhbState *phb,
1414                                                PCIDevice *pdev)
1415 {
1416     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
1417     return spapr_phb_get_pci_func_drc(phb, busnr, pdev->devfn);
1418 }
1419 
1420 static uint32_t spapr_phb_get_pci_drc_index(SpaprPhbState *phb,
1421                                             PCIDevice *pdev)
1422 {
1423     SpaprDrc *drc = spapr_phb_get_pci_drc(phb, pdev);
1424 
1425     if (!drc) {
1426         return 0;
1427     }
1428 
1429     return spapr_drc_index(drc);
1430 }
1431 
1432 int spapr_pci_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
1433                           void *fdt, int *fdt_start_offset, Error **errp)
1434 {
1435     HotplugHandler *plug_handler = qdev_get_hotplug_handler(drc->dev);
1436     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(plug_handler);
1437     PCIDevice *pdev = PCI_DEVICE(drc->dev);
1438 
1439     *fdt_start_offset = spapr_dt_pci_device(sphb, pdev, fdt, 0);
1440     return 0;
1441 }
1442 
1443 static void spapr_pci_plug(HotplugHandler *plug_handler,
1444                            DeviceState *plugged_dev, Error **errp)
1445 {
1446     SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1447     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1448     SpaprDrc *drc = spapr_phb_get_pci_drc(phb, pdev);
1449     Error *local_err = NULL;
1450     PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1451     uint32_t slotnr = PCI_SLOT(pdev->devfn);
1452 
1453     /* if DR is disabled we don't need to do anything in the case of
1454      * hotplug or coldplug callbacks
1455      */
1456     if (!phb->dr_enabled) {
1457         /* if this is a hotplug operation initiated by the user
1458          * we need to let them know it's not enabled
1459          */
1460         if (plugged_dev->hotplugged) {
1461             error_setg(&local_err, QERR_BUS_NO_HOTPLUG,
1462                        object_get_typename(OBJECT(phb)));
1463         }
1464         goto out;
1465     }
1466 
1467     g_assert(drc);
1468 
1469     /* Following the QEMU convention used for PCIe multifunction
1470      * hotplug, we do not allow functions to be hotplugged to a
1471      * slot that already has function 0 present
1472      */
1473     if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] &&
1474         PCI_FUNC(pdev->devfn) != 0) {
1475         error_setg(&local_err, "PCI: slot %d function 0 already occupied by %s,"
1476                    " additional functions can no longer be exposed to the guest.",
1477                    slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name);
1478         goto out;
1479     }
1480 
1481     spapr_drc_attach(drc, DEVICE(pdev), &local_err);
1482     if (local_err) {
1483         goto out;
1484     }
1485 
1486     /* If this is function 0, signal hotplug for all the device functions.
1487      * Otherwise defer sending the hotplug event.
1488      */
1489     if (!spapr_drc_hotplugged(plugged_dev)) {
1490         spapr_drc_reset(drc);
1491     } else if (PCI_FUNC(pdev->devfn) == 0) {
1492         int i;
1493 
1494         for (i = 0; i < 8; i++) {
1495             SpaprDrc *func_drc;
1496             SpaprDrcClass *func_drck;
1497             SpaprDREntitySense state;
1498 
1499             func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1500                                                   PCI_DEVFN(slotnr, i));
1501             func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1502             state = func_drck->dr_entity_sense(func_drc);
1503 
1504             if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1505                 spapr_hotplug_req_add_by_index(func_drc);
1506             }
1507         }
1508     }
1509 
1510 out:
1511     error_propagate(errp, local_err);
1512 }
1513 
1514 static void spapr_pci_unplug(HotplugHandler *plug_handler,
1515                              DeviceState *plugged_dev, Error **errp)
1516 {
1517     /* some guest versions do not wait for completion of a device
1518      * cleanup (generally done asynchronously by the kernel) before
1519      * signaling to QEMU that the device is safe, but instead sleep
1520      * for some 'safe' period of time. unfortunately on a busy host
1521      * this sleep isn't guaranteed to be long enough, resulting in
1522      * bad things like IRQ lines being left asserted during final
1523      * device removal. to deal with this we call reset just prior
1524      * to finalizing the device, which will put the device back into
1525      * an 'idle' state, as the device cleanup code expects.
1526      */
1527     pci_device_reset(PCI_DEVICE(plugged_dev));
1528     object_property_set_bool(OBJECT(plugged_dev), false, "realized", NULL);
1529 }
1530 
1531 static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
1532                                      DeviceState *plugged_dev, Error **errp)
1533 {
1534     SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1535     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1536     SpaprDrc *drc = spapr_phb_get_pci_drc(phb, pdev);
1537 
1538     if (!phb->dr_enabled) {
1539         error_setg(errp, QERR_BUS_NO_HOTPLUG,
1540                    object_get_typename(OBJECT(phb)));
1541         return;
1542     }
1543 
1544     g_assert(drc);
1545     g_assert(drc->dev == plugged_dev);
1546 
1547     if (!spapr_drc_unplug_requested(drc)) {
1548         PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1549         uint32_t slotnr = PCI_SLOT(pdev->devfn);
1550         SpaprDrc *func_drc;
1551         SpaprDrcClass *func_drck;
1552         SpaprDREntitySense state;
1553         int i;
1554 
1555         /* ensure any other present functions are pending unplug */
1556         if (PCI_FUNC(pdev->devfn) == 0) {
1557             for (i = 1; i < 8; i++) {
1558                 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1559                                                       PCI_DEVFN(slotnr, i));
1560                 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1561                 state = func_drck->dr_entity_sense(func_drc);
1562                 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT
1563                     && !spapr_drc_unplug_requested(func_drc)) {
1564                     error_setg(errp,
1565                                "PCI: slot %d, function %d still present. "
1566                                "Must unplug all non-0 functions first.",
1567                                slotnr, i);
1568                     return;
1569                 }
1570             }
1571         }
1572 
1573         spapr_drc_detach(drc);
1574 
1575         /* If this is function 0, signal removal for all present
1576          * functions; otherwise defer the unplug event.
1577          */
1578         if (PCI_FUNC(pdev->devfn) == 0) {
1579             for (i = 7; i >= 0; i--) {
1580                 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1581                                                       PCI_DEVFN(slotnr, i));
1582                 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1583                 state = func_drck->dr_entity_sense(func_drc);
1584                 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1585                     spapr_hotplug_req_remove_by_index(func_drc);
1586                 }
1587             }
1588         }
1589     }
1590 }
1591 
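/* Instance finalizer: free the device tree bus name set up at realize time */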
1592 static void spapr_phb_finalizefn(Object *obj)
1593 {
1594     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(obj);
1595 
1596     g_free(sphb->dtbusname);
1597     sphb->dtbusname = NULL;
1598 }
1599 
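/*
 * Undo spapr_phb_realize() in roughly reverse order: drop the NVLink2
 * state, the MSI bookkeeping, the DMA windows, the DR connectors and the
 * LSIs, then tear down the IOMMU address space, the root bus and the
 * MMIO/IO windows.
 */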
1600 static void spapr_phb_unrealize(DeviceState *dev, Error **errp)
1601 {
1602     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1603     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1604     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1605     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(phb);
1606     SpaprTceTable *tcet;
1607     int i;
1608     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
1609 
1610     spapr_phb_nvgpu_free(sphb);
1611 
1612     if (sphb->msi) {
1613         g_hash_table_unref(sphb->msi);
1614         sphb->msi = NULL;
1615     }
1616 
1617     /*
1618      * Remove IO/MMIO subregions and aliases; the rest gets cleaned up
1619      * via the PHB's unrealize->object_finalize chain
1620      */
1621     for (i = windows_supported - 1; i >= 0; i--) {
1622         tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]);
1623         if (tcet) {
1624             memory_region_del_subregion(&sphb->iommu_root,
1625                                         spapr_tce_get_iommu(tcet));
1626         }
1627     }
1628 
1629     if (sphb->dr_enabled) {
1630         for (i = PCI_SLOT_MAX * 8 - 1; i >= 0; i--) {
1631             SpaprDrc *drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PCI,
1632                                                     (sphb->index << 16) | i);
1633 
1634             if (drc) {
1635                 object_unparent(OBJECT(drc));
1636             }
1637         }
1638     }
1639 
1640     for (i = PCI_NUM_PINS - 1; i >= 0; i--) {
1641         if (sphb->lsi_table[i].irq) {
1642             spapr_irq_free(spapr, sphb->lsi_table[i].irq, 1);
1643             sphb->lsi_table[i].irq = 0;
1644         }
1645     }
1646 
1647     QLIST_REMOVE(sphb, list);
1648 
1649     memory_region_del_subregion(&sphb->iommu_root, &sphb->msiwindow);
1650 
1651     address_space_destroy(&sphb->iommu_as);
1652 
1653     qbus_set_hotplug_handler(BUS(phb->bus), NULL, &error_abort);
1654     pci_unregister_root_bus(phb->bus);
1655 
1656     memory_region_del_subregion(get_system_memory(), &sphb->iowindow);
1657     if (sphb->mem64_win_pciaddr != (hwaddr)-1) {
1658         memory_region_del_subregion(get_system_memory(), &sphb->mem64window);
1659     }
1660     memory_region_del_subregion(get_system_memory(), &sphb->mem32window);
1661 }
1662 
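/*
 * Realize the PHB: set up the MMIO and IO windows, register the root PCI
 * bus, create the IOMMU (TCE) address space and the MSI window, claim the
 * LSIs and, if dynamic reconfiguration is enabled, allocate a DR connector
 * per possible PCI function.
 */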
1663 static void spapr_phb_realize(DeviceState *dev, Error **errp)
1664 {
1665     /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user
1666      * tries to add a sPAPR PHB to a non-pseries machine.
1667      */
1668     SpaprMachineState *spapr =
1669         (SpaprMachineState *) object_dynamic_cast(qdev_get_machine(),
1670                                                   TYPE_SPAPR_MACHINE);
1671     SpaprMachineClass *smc = spapr ? SPAPR_MACHINE_GET_CLASS(spapr) : NULL;
1672     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1673     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
1674     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1675     char *namebuf;
1676     int i;
1677     PCIBus *bus;
1678     uint64_t msi_window_size = 4096;
1679     SpaprTceTable *tcet;
1680     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
1681 
1682     if (!spapr) {
1683         error_setg(errp, TYPE_SPAPR_PCI_HOST_BRIDGE " needs a pseries machine");
1684         return;
1685     }
1686 
1687     assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */
1688 
1689     if (sphb->mem64_win_size != 0) {
1690         if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
1691             error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
1692                        " is too big (max 2 GiB)", sphb->mem_win_size);
1693             return;
1694         }
1695 
1696         /* 64-bit window defaults to identity mapping */
1697         sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
1698     } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
1699         /*
1700          * For compatibility with old configuration, if no 64-bit MMIO
1701          * window is specified, but the ordinary (32-bit) memory
1702          * window is specified as > 2GiB, we treat it as a 2GiB 32-bit
1703          * window, with a 64-bit MMIO window following on immediately
1704          * afterwards
1705          */
1706         sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE;
1707         sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE;
1708         sphb->mem64_win_pciaddr =
1709             SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE;
1710         sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE;
1711     }
1712 
1713     if (spapr_pci_find_phb(spapr, sphb->buid)) {
1714         SpaprPhbState *s;
1715 
1716         error_setg(errp, "PCI host bridges must have unique indexes");
1717         error_append_hint(errp, "The following indexes are already in use:");
1718         QLIST_FOREACH(s, &spapr->phbs, list) {
1719             error_append_hint(errp, " %d", s->index);
1720         }
1721         error_append_hint(errp, "\nTry another value for the index property\n");
1722         return;
1723     }
1724 
1725     if (sphb->numa_node != -1 &&
1726         (sphb->numa_node >= MAX_NODES || !numa_info[sphb->numa_node].present)) {
1727         error_setg(errp, "Invalid NUMA node ID for PCI host bridge");
1728         return;
1729     }
1730 
1731     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1732 
1733     /* Initialize memory regions */
1734     namebuf = g_strdup_printf("%s.mmio", sphb->dtbusname);
1735     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1736     g_free(namebuf);
1737 
1738     namebuf = g_strdup_printf("%s.mmio32-alias", sphb->dtbusname);
1739     memory_region_init_alias(&sphb->mem32window, OBJECT(sphb),
1740                              namebuf, &sphb->memspace,
1741                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1742     g_free(namebuf);
1743     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1744                                 &sphb->mem32window);
1745 
1746     if (sphb->mem64_win_size != 0) {
1747         namebuf = g_strdup_printf("%s.mmio64-alias", sphb->dtbusname);
1748         memory_region_init_alias(&sphb->mem64window, OBJECT(sphb),
1749                                  namebuf, &sphb->memspace,
1750                                  sphb->mem64_win_pciaddr, sphb->mem64_win_size);
1751         g_free(namebuf);
1752 
1753         memory_region_add_subregion(get_system_memory(),
1754                                     sphb->mem64_win_addr,
1755                                     &sphb->mem64window);
1756     }
1757 
1758     /* Initialize IO regions */
1759     namebuf = g_strdup_printf("%s.io", sphb->dtbusname);
1760     memory_region_init(&sphb->iospace, OBJECT(sphb),
1761                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
1762     g_free(namebuf);
1763 
1764     namebuf = g_strdup_printf("%s.io-alias", sphb->dtbusname);
1765     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1766                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1767     g_free(namebuf);
1768     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1769                                 &sphb->iowindow);
1770 
1771     bus = pci_register_root_bus(dev, NULL,
1772                                 pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb,
1773                                 &sphb->memspace, &sphb->iospace,
1774                                 PCI_DEVFN(0, 0), PCI_NUM_PINS,
1775                                 TYPE_PCI_BUS);
1776 
1777     /*
1778      * Despite resembling a vanilla PCI bus in most ways, the PAPR
1779      * para-virtualized PCI bus *does* permit PCI-E extended config
1780      * space access
1781      */
1782     if (sphb->pcie_ecs) {
1783         bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
1784     }
1785     phb->bus = bus;
1786     qbus_set_hotplug_handler(BUS(phb->bus), OBJECT(sphb), NULL);
1787 
1788     /*
1789      * Initialize the PHB IOMMU address space.
1790      * By default there will be at least one subregion for the default
1791      * 32-bit DMA window.
1792      * Later the guest might want to create another DMA window, which
1793      * will become another memory subregion.
1794      */
1795     namebuf = g_strdup_printf("%s.iommu-root", sphb->dtbusname);
1796     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1797                        namebuf, UINT64_MAX);
1798     g_free(namebuf);
1799     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1800                        sphb->dtbusname);
1801 
1802     /*
1803      * MSI/MSI-X interrupts are triggered by writes to the MSI/MSI-X
1804      * vectors, so we need to allocate some memory to catch those
1805      * writes coming from msi_notify()/msix_notify().
1806      * As MSIMessage::addr is always the same and MSIMessage::data is
1807      * a VIRQ number, only 4 bytes of the MSI MR will actually be
1808      * used.
1809      *
1810      * For KVM we want this memory to be a full page so that our
1811      * memory slot has page size granularity.
1812      */
1813 #ifdef CONFIG_KVM
1814     if (kvm_enabled()) {
1815         msi_window_size = getpagesize();
1816     }
1817 #endif
1818 
1819     memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,
1820                           "msi", msi_window_size);
1821     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1822                                 &sphb->msiwindow);
1823 
1824     pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
1825 
1826     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1827 
1828     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1829 
1830     /* Initialize the LSI table */
1831     for (i = 0; i < PCI_NUM_PINS; i++) {
1832         uint32_t irq = SPAPR_IRQ_PCI_LSI + sphb->index * PCI_NUM_PINS + i;
1833         Error *local_err = NULL;
1834 
1835         if (smc->legacy_irq_allocation) {
1836             irq = spapr_irq_findone(spapr, &local_err);
1837             if (local_err) {
1838                 error_propagate_prepend(errp, local_err,
1839                                         "can't allocate LSIs: ");
1840                 /*
1841                  * Older machine types will never support PHB hotplug, i.e. this
1842                  * is an init-only path and QEMU will terminate. No need to roll back.
1843                  */
1844                 return;
1845             }
1846         }
1847 
1848         spapr_irq_claim(spapr, irq, true, &local_err);
1849         if (local_err) {
1850             error_propagate_prepend(errp, local_err, "can't allocate LSIs: ");
1851             goto unrealize;
1852         }
1853 
1854         sphb->lsi_table[i].irq = irq;
1855     }
1856 
1857     /* allocate connectors for child PCI devices */
1858     if (sphb->dr_enabled) {
1859         for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
1860             spapr_dr_connector_new(OBJECT(phb), TYPE_SPAPR_DRC_PCI,
1861                                    (sphb->index << 16) | i);
1862         }
1863     }
1864 
1865     /* DMA setup */
1866     for (i = 0; i < windows_supported; ++i) {
1867         tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]);
1868         if (!tcet) {
1869             error_setg(errp, "Creating window#%d failed for %s",
1870                        i, sphb->dtbusname);
1871             goto unrealize;
1872         }
1873         memory_region_add_subregion(&sphb->iommu_root, 0,
1874                                     spapr_tce_get_iommu(tcet));
1875     }
1876 
1877     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
1878     return;
1879 
1880 unrealize:
1881     spapr_phb_unrealize(dev, NULL);
1882 }
1883 
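/* object_child_foreach() callback: reset any qdev child of the PHB */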
1884 static int spapr_phb_children_reset(Object *child, void *opaque)
1885 {
1886     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
1887 
1888     if (dev) {
1889         device_reset(dev);
1890     }
1891 
1892     return 0;
1893 }
1894 
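/*
 * Disable any enabled DMA windows and re-enable the default 32-bit window,
 * restoring the PHB's initial DMA configuration.
 */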
1895 void spapr_phb_dma_reset(SpaprPhbState *sphb)
1896 {
1897     int i;
1898     SpaprTceTable *tcet;
1899 
1900     for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) {
1901         tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]);
1902 
1903         if (tcet && tcet->nb_table) {
1904             spapr_tce_table_disable(tcet);
1905         }
1906     }
1907 
1908     /* Register the default 32-bit DMA window */
1909     tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]);
1910     spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr,
1911                            sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT);
1912 }
1913 
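/*
 * DeviceClass::reset handler: restore the default DMA setup, rebuild the
 * NVLink2 state, reset all child devices and, where available, the VFIO
 * EEH state.
 */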
1914 static void spapr_phb_reset(DeviceState *qdev)
1915 {
1916     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
1917     Error *errp = NULL;
1918 
1919     spapr_phb_dma_reset(sphb);
1920     spapr_phb_nvgpu_free(sphb);
1921     spapr_phb_nvgpu_setup(sphb, &errp);
1922     if (errp) {
1923         error_report_err(errp);
1924     }
1925 
1926     /* Reset the IOMMU state */
1927     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
1928 
1929     if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) {
1930         spapr_phb_vfio_reset(qdev);
1931     }
1932 }
1933 
1934 static Property spapr_phb_properties[] = {
1935     DEFINE_PROP_UINT32("index", SpaprPhbState, index, -1),
1936     DEFINE_PROP_UINT64("mem_win_size", SpaprPhbState, mem_win_size,
1937                        SPAPR_PCI_MEM32_WIN_SIZE),
1938     DEFINE_PROP_UINT64("mem64_win_size", SpaprPhbState, mem64_win_size,
1939                        SPAPR_PCI_MEM64_WIN_SIZE),
1940     DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size,
1941                        SPAPR_PCI_IO_WIN_SIZE),
1942     DEFINE_PROP_BOOL("dynamic-reconfiguration", SpaprPhbState, dr_enabled,
1943                      true),
1944     /* Default DMA window is 0..1GB */
1945     DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0),
1946     DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000),
1947     DEFINE_PROP_UINT64("dma64_win_addr", SpaprPhbState, dma64_win_addr,
1948                        0x800000000000000ULL),
1949     DEFINE_PROP_BOOL("ddw", SpaprPhbState, ddw_enabled, true),
1950     DEFINE_PROP_UINT64("pgsz", SpaprPhbState, page_size_mask,
1951                        (1ULL << 12) | (1ULL << 16)),
1952     DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1),
1953     DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState,
1954                      pre_2_8_migration, false),
1955     DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
1956                      pcie_ecs, true),
1957     DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0),
1958     DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0),
1959     DEFINE_PROP_END_OF_LIST(),
1960 };
1961 
1962 static const VMStateDescription vmstate_spapr_pci_lsi = {
1963     .name = "spapr_pci/lsi",
1964     .version_id = 1,
1965     .minimum_version_id = 1,
1966     .fields = (VMStateField[]) {
1967         VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi, NULL),
1968 
1969         VMSTATE_END_OF_LIST()
1970     },
1971 };
1972 
1973 static const VMStateDescription vmstate_spapr_pci_msi = {
1974     .name = "spapr_pci/msi",
1975     .version_id = 1,
1976     .minimum_version_id = 1,
1977     .fields = (VMStateField []) {
1978         VMSTATE_UINT32(key, spapr_pci_msi_mig),
1979         VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
1980         VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
1981         VMSTATE_END_OF_LIST()
1982     },
1983 };
1984 
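/*
 * Migration pre_save hook: flatten the MSI hash table into the msi_devs
 * array and fill in the legacy mig_* fields when migrating in pre-2.8
 * format.
 */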
1985 static int spapr_pci_pre_save(void *opaque)
1986 {
1987     SpaprPhbState *sphb = opaque;
1988     GHashTableIter iter;
1989     gpointer key, value;
1990     int i;
1991 
1992     if (sphb->pre_2_8_migration) {
1993         sphb->mig_liobn = sphb->dma_liobn[0];
1994         sphb->mig_mem_win_addr = sphb->mem_win_addr;
1995         sphb->mig_mem_win_size = sphb->mem_win_size;
1996         sphb->mig_io_win_addr = sphb->io_win_addr;
1997         sphb->mig_io_win_size = sphb->io_win_size;
1998 
1999         if ((sphb->mem64_win_size != 0)
2000             && (sphb->mem64_win_addr
2001                 == (sphb->mem_win_addr + sphb->mem_win_size))) {
2002             sphb->mig_mem_win_size += sphb->mem64_win_size;
2003         }
2004     }
2005 
2006     g_free(sphb->msi_devs);
2007     sphb->msi_devs = NULL;
2008     sphb->msi_devs_num = g_hash_table_size(sphb->msi);
2009     if (!sphb->msi_devs_num) {
2010         return 0;
2011     }
2012     sphb->msi_devs = g_new(spapr_pci_msi_mig, sphb->msi_devs_num);
2013 
2014     g_hash_table_iter_init(&iter, sphb->msi);
2015     for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
2016         sphb->msi_devs[i].key = *(uint32_t *) key;
2017         sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
2018     }
2019 
2020     return 0;
2021 }
2022 
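/*
 * Migration post_load hook: rebuild the MSI hash table from the migrated
 * msi_devs array.
 */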
2023 static int spapr_pci_post_load(void *opaque, int version_id)
2024 {
2025     SpaprPhbState *sphb = opaque;
2026     gpointer key, value;
2027     int i;
2028 
2029     for (i = 0; i < sphb->msi_devs_num; ++i) {
2030         key = g_memdup(&sphb->msi_devs[i].key,
2031                        sizeof(sphb->msi_devs[i].key));
2032         value = g_memdup(&sphb->msi_devs[i].value,
2033                          sizeof(sphb->msi_devs[i].value));
2034         g_hash_table_insert(sphb->msi, key, value);
2035     }
2036     g_free(sphb->msi_devs);
2037     sphb->msi_devs = NULL;
2038     sphb->msi_devs_num = 0;
2039 
2040     return 0;
2041 }
2042 
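/* VMState field test: the mig_* fields are only sent in pre-2.8 format */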
2043 static bool pre_2_8_migration(void *opaque, int version_id)
2044 {
2045     SpaprPhbState *sphb = opaque;
2046 
2047     return sphb->pre_2_8_migration;
2048 }
2049 
2050 static const VMStateDescription vmstate_spapr_pci = {
2051     .name = "spapr_pci",
2052     .version_id = 2,
2053     .minimum_version_id = 2,
2054     .pre_save = spapr_pci_pre_save,
2055     .post_load = spapr_pci_post_load,
2056     .fields = (VMStateField[]) {
2057         VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL),
2058         VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration),
2059         VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration),
2060         VMSTATE_UINT64_TEST(mig_mem_win_size, SpaprPhbState, pre_2_8_migration),
2061         VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration),
2062         VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration),
2063         VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0,
2064                              vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
2065         VMSTATE_INT32(msi_devs_num, SpaprPhbState),
2066         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, SpaprPhbState, msi_devs_num, 0,
2067                                     vmstate_spapr_pci_msi, spapr_pci_msi_mig),
2068         VMSTATE_END_OF_LIST()
2069     },
2070 };
2071 
2072 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
2073                                            PCIBus *rootbus)
2074 {
2075     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
2076 
2077     return sphb->dtbusname;
2078 }
2079 
2080 static void spapr_phb_class_init(ObjectClass *klass, void *data)
2081 {
2082     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
2083     DeviceClass *dc = DEVICE_CLASS(klass);
2084     HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
2085 
2086     hc->root_bus_path = spapr_phb_root_bus_path;
2087     dc->realize = spapr_phb_realize;
2088     dc->unrealize = spapr_phb_unrealize;
2089     dc->props = spapr_phb_properties;
2090     dc->reset = spapr_phb_reset;
2091     dc->vmsd = &vmstate_spapr_pci;
2092     /* Supported by TYPE_SPAPR_MACHINE */
2093     dc->user_creatable = true;
2094     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
2095     hp->plug = spapr_pci_plug;
2096     hp->unplug = spapr_pci_unplug;
2097     hp->unplug_request = spapr_pci_unplug_request;
2098 }
2099 
2100 static const TypeInfo spapr_phb_info = {
2101     .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
2102     .parent        = TYPE_PCI_HOST_BRIDGE,
2103     .instance_size = sizeof(SpaprPhbState),
2104     .instance_finalize = spapr_phb_finalizefn,
2105     .class_init    = spapr_phb_class_init,
2106     .interfaces    = (InterfaceInfo[]) {
2107         { TYPE_HOTPLUG_HANDLER },
2108         { }
2109     }
2110 };
2111 
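/*
 * pci_for_each_device() callback: assign primary/secondary/subordinate bus
 * numbers to each PCI bridge found, recursing into secondary buses.
 */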
2112 static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
2113                                            void *opaque)
2114 {
2115     unsigned int *bus_no = opaque;
2116     PCIBus *sec_bus = NULL;
2117 
2118     if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
2119          PCI_HEADER_TYPE_BRIDGE)) {
2120         return;
2121     }
2122 
2123     (*bus_no)++;
2124     pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
2125     pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1);
2126     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
2127 
2128     sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
2129     if (!sec_bus) {
2130         return;
2131     }
2132 
2133     pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
2134                         spapr_phb_pci_enumerate_bridge, bus_no);
2135     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
2136 }
2137 
2138 static void spapr_phb_pci_enumerate(SpaprPhbState *phb)
2139 {
2140     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
2141     unsigned int bus_no = 0;
2142 
2143     pci_for_each_device(bus, pci_bus_num(bus),
2144                         spapr_phb_pci_enumerate_bridge,
2145                         &bus_no);
2146 
2147 }
2148 
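/*
 * Create the device tree node for the PHB: bus range, MMIO/IO ranges,
 * interrupt map, default DMA window, DRC index and the nodes for any
 * devices already present on the bus.
 */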
2149 int spapr_dt_phb(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
2150                  uint32_t nr_msis, int *node_offset)
2151 {
2152     int bus_off, i, j, ret;
2153     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
2154     struct {
2155         uint32_t hi;
2156         uint64_t child;
2157         uint64_t parent;
2158         uint64_t size;
2159     } QEMU_PACKED ranges[] = {
2160         {
2161             cpu_to_be32(b_ss(1)), cpu_to_be64(0),
2162             cpu_to_be64(phb->io_win_addr),
2163             cpu_to_be64(memory_region_size(&phb->iospace)),
2164         },
2165         {
2166             cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
2167             cpu_to_be64(phb->mem_win_addr),
2168             cpu_to_be64(phb->mem_win_size),
2169         },
2170         {
2171             cpu_to_be32(b_ss(3)), cpu_to_be64(phb->mem64_win_pciaddr),
2172             cpu_to_be64(phb->mem64_win_addr),
2173             cpu_to_be64(phb->mem64_win_size),
2174         },
2175     };
2176     const unsigned sizeof_ranges =
2177         (phb->mem64_win_size ? 3 : 2) * sizeof(ranges[0]);
2178     uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
2179     uint32_t interrupt_map_mask[] = {
2180         cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
2181     uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
2182     uint32_t ddw_applicable[] = {
2183         cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW),
2184         cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW),
2185         cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW)
2186     };
2187     uint32_t ddw_extensions[] = {
2188         cpu_to_be32(1),
2189         cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW)
2190     };
2191     uint32_t associativity[] = {cpu_to_be32(0x4),
2192                                 cpu_to_be32(0x0),
2193                                 cpu_to_be32(0x0),
2194                                 cpu_to_be32(0x0),
2195                                 cpu_to_be32(phb->numa_node)};
2196     SpaprTceTable *tcet;
2197     SpaprDrc *drc;
2198     Error *errp = NULL;
2199 
2200     /* Start populating the FDT */
2201     _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname));
2202     if (node_offset) {
2203         *node_offset = bus_off;
2204     }
2205 
2206     /* Write PHB properties */
2207     _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
2208     _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
2209     _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
2210     _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
2211     _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
2212     _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
2213     _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
2214     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
2215     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", nr_msis));
2216 
2217     /* Dynamic DMA window */
2218     if (phb->ddw_enabled) {
2219         _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable,
2220                          sizeof(ddw_applicable)));
2221         _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions",
2222                          &ddw_extensions, sizeof(ddw_extensions)));
2223     }
2224 
2225     /* Advertise NUMA via ibm,associativity */
2226     if (phb->numa_node != -1) {
2227         _FDT(fdt_setprop(fdt, bus_off, "ibm,associativity", associativity,
2228                          sizeof(associativity)));
2229     }
2230 
2231     /* Build the interrupt-map; this must match what is done
2232      * in pci_swizzle_map_irq_fn
2233      */
2234     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
2235                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
2236     for (i = 0; i < PCI_SLOT_MAX; i++) {
2237         for (j = 0; j < PCI_NUM_PINS; j++) {
2238             uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
2239             int lsi_num = pci_swizzle(i, j);
2240 
2241             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
2242             irqmap[1] = 0;
2243             irqmap[2] = 0;
2244             irqmap[3] = cpu_to_be32(j+1);
2245             irqmap[4] = cpu_to_be32(intc_phandle);
2246             spapr_dt_irq(&irqmap[5], phb->lsi_table[lsi_num].irq, true);
2247         }
2248     }
2249     /* Write interrupt map */
2250     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
2251                      sizeof(interrupt_map)));
2252 
2253     tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]);
2254     if (!tcet) {
2255         return -1;
2256     }
2257     spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
2258                  tcet->liobn, tcet->bus_offset,
2259                  tcet->nb_table << tcet->page_shift);
2260 
2261     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, phb->index);
2262     if (drc) {
2263         uint32_t drc_index = cpu_to_be32(spapr_drc_index(drc));
2264 
2265         _FDT(fdt_setprop(fdt, bus_off, "ibm,my-drc-index", &drc_index,
2266                          sizeof(drc_index)));
2267     }
2268 
2269     /* Walk the bridges and program the bus numbers */
2270     spapr_phb_pci_enumerate(phb);
2271     _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
2272 
2273     /* Walk the bridge and subordinate buses */
2274     ret = spapr_dt_pci_bus(phb, PCI_HOST_BRIDGE(phb)->bus, fdt, bus_off);
2275     if (ret < 0) {
2276         return ret;
2277     }
2278 
2279     ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
2280                                 SPAPR_DR_CONNECTOR_TYPE_PCI);
2281     if (ret) {
2282         return ret;
2283     }
2284 
2285     spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &errp);
2286     if (errp) {
2287         error_report_err(errp);
2288     }
2289     spapr_phb_nvgpu_ram_populate_dt(phb, fdt);
2290 
2291     return 0;
2292 }
2293 
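/*
 * Register the PCI-related RTAS calls: config space accessors, MSI
 * management (when MSI is available) and the EEH/error-recovery calls.
 */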
2294 void spapr_pci_rtas_init(void)
2295 {
2296     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
2297                         rtas_read_pci_config);
2298     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
2299                         rtas_write_pci_config);
2300     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
2301                         rtas_ibm_read_pci_config);
2302     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
2303                         rtas_ibm_write_pci_config);
2304     if (msi_nonbroken) {
2305         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
2306                             "ibm,query-interrupt-source-number",
2307                             rtas_ibm_query_interrupt_source_number);
2308         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
2309                             rtas_ibm_change_msi);
2310     }
2311 
2312     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
2313                         "ibm,set-eeh-option",
2314                         rtas_ibm_set_eeh_option);
2315     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
2316                         "ibm,get-config-addr-info2",
2317                         rtas_ibm_get_config_addr_info2);
2318     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
2319                         "ibm,read-slot-reset-state2",
2320                         rtas_ibm_read_slot_reset_state2);
2321     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
2322                         "ibm,set-slot-reset",
2323                         rtas_ibm_set_slot_reset);
2324     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
2325                         "ibm,configure-pe",
2326                         rtas_ibm_configure_pe);
2327     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
2328                         "ibm,slot-error-detail",
2329                         rtas_ibm_slot_error_detail);
2330 }
2331 
2332 static void spapr_pci_register_types(void)
2333 {
2334     type_register_static(&spapr_phb_info);
2335 }
2336 
2337 type_init(spapr_pci_register_types)
2338 
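/*
 * Device-walk callback used by spapr_pci_switch_vga(): flip the
 * "big-endian-framebuffer" property on any VGA device found.
 */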
2339 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
2340 {
2341     bool be = *(bool *)opaque;
2342 
2343     if (object_dynamic_cast(OBJECT(dev), "VGA")
2344         || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
2345         object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
2346                                  &error_abort);
2347     }
2348     return 0;
2349 }
2350 
2351 void spapr_pci_switch_vga(bool big_endian)
2352 {
2353     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
2354     SpaprPhbState *sphb;
2355 
2356     /*
2357      * For backward compatibility with existing guests, we switch
2358      * the endianness of the VGA controller when changing the guest
2359      * interrupt mode
2360      */
2361     QLIST_FOREACH(sphb, &spapr->phbs, list) {
2362         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
2363         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
2364                            &big_endian);
2365     }
2366 }
2367