xref: /qemu/hw/ppc/spapr_pci.c (revision 1d2d974244c6f1629ca83f1de293eaa557634627)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "hw/hw.h"
26 #include "hw/sysbus.h"
27 #include "hw/pci/pci.h"
28 #include "hw/pci/msi.h"
29 #include "hw/pci/msix.h"
30 #include "hw/pci/pci_host.h"
31 #include "hw/ppc/spapr.h"
32 #include "hw/pci-host/spapr.h"
33 #include "exec/address-spaces.h"
34 #include <libfdt.h>
35 #include "trace.h"
36 #include "qemu/error-report.h"
37 #include "qapi/qmp/qerror.h"
38 
39 #include "hw/pci/pci_bridge.h"
40 #include "hw/pci/pci_bus.h"
41 #include "hw/ppc/spapr_drc.h"
42 #include "sysemu/device_tree.h"
43 
44 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
45 #define RTAS_QUERY_FN           0
46 #define RTAS_CHANGE_FN          1
47 #define RTAS_RESET_FN           2
48 #define RTAS_CHANGE_MSI_FN      3
49 #define RTAS_CHANGE_MSIX_FN     4
50 
51 /* Interrupt types to return on RTAS_CHANGE_* */
52 #define RTAS_TYPE_MSI           1
53 #define RTAS_TYPE_MSIX          2
54 
55 #define FDT_NAME_MAX          128
56 
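/*
 * Helper for device tree construction: evaluate a libfdt call and, on a
 * negative (error) return code, propagate it out of the enclosing function.
 */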
57 #define _FDT(exp) \
58     do { \
59         int ret = (exp);                                           \
60         if (ret < 0) {                                             \
61             return ret;                                            \
62         }                                                          \
63     } while (0)
64 
65 sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid)
66 {
67     sPAPRPHBState *sphb;
68 
69     QLIST_FOREACH(sphb, &spapr->phbs, list) {
70         if (sphb->buid != buid) {
71             continue;
72         }
73         return sphb;
74     }
75 
76     return NULL;
77 }
78 
79 PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
80                               uint32_t config_addr)
81 {
82     sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
83     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
84     int bus_num = (config_addr >> 16) & 0xFF;
85     int devfn = (config_addr >> 8) & 0xFF;
86 
87     if (!phb) {
88         return NULL;
89     }
90 
91     return pci_find_device(phb->bus, bus_num, devfn);
92 }
93 
94 static uint32_t rtas_pci_cfgaddr(uint32_t arg)
95 {
96     /* This handles the encoding of extended config space addresses */
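    /*
     * The low byte of the RTAS config address is the standard register
     * number; the top nibble carries the extended register bits, which end
     * up as bits 8-11 of the offset, e.g. arg 0xF0000008 yields 0xF08.
     */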
97     return ((arg >> 20) & 0xf00) | (arg & 0xff);
98 }
99 
100 static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid,
101                                    uint32_t addr, uint32_t size,
102                                    target_ulong rets)
103 {
104     PCIDevice *pci_dev;
105     uint32_t val;
106 
107     if ((size != 1) && (size != 2) && (size != 4)) {
108         /* access must be 1, 2 or 4 bytes */
109         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
110         return;
111     }
112 
113     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
114     addr = rtas_pci_cfgaddr(addr);
115 
116     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
117         /* Access must be to a valid device, within bounds and
118          * naturally aligned */
119         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
120         return;
121     }
122 
123     val = pci_host_config_read_common(pci_dev, addr,
124                                       pci_config_size(pci_dev), size);
125 
126     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
127     rtas_st(rets, 1, val);
128 }
129 
130 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
131                                      uint32_t token, uint32_t nargs,
132                                      target_ulong args,
133                                      uint32_t nret, target_ulong rets)
134 {
135     uint64_t buid;
136     uint32_t size, addr;
137 
138     if ((nargs != 4) || (nret != 2)) {
139         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
140         return;
141     }
142 
143     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
144     size = rtas_ld(args, 3);
145     addr = rtas_ld(args, 0);
146 
147     finish_read_pci_config(spapr, buid, addr, size, rets);
148 }
149 
150 static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
151                                  uint32_t token, uint32_t nargs,
152                                  target_ulong args,
153                                  uint32_t nret, target_ulong rets)
154 {
155     uint32_t size, addr;
156 
157     if ((nargs != 2) || (nret != 2)) {
158         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
159         return;
160     }
161 
162     size = rtas_ld(args, 1);
163     addr = rtas_ld(args, 0);
164 
165     finish_read_pci_config(spapr, 0, addr, size, rets);
166 }
167 
168 static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid,
169                                     uint32_t addr, uint32_t size,
170                                     uint32_t val, target_ulong rets)
171 {
172     PCIDevice *pci_dev;
173 
174     if ((size != 1) && (size != 2) && (size != 4)) {
175         /* access must be 1, 2 or 4 bytes */
176         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
177         return;
178     }
179 
180     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
181     addr = rtas_pci_cfgaddr(addr);
182 
183     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
184         /* Access must be to a valid device, within bounds and
185          * naturally aligned */
186         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
187         return;
188     }
189 
190     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
191                                  val, size);
192 
193     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
194 }
195 
196 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
197                                       uint32_t token, uint32_t nargs,
198                                       target_ulong args,
199                                       uint32_t nret, target_ulong rets)
200 {
201     uint64_t buid;
202     uint32_t val, size, addr;
203 
204     if ((nargs != 5) || (nret != 1)) {
205         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
206         return;
207     }
208 
209     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
210     val = rtas_ld(args, 4);
211     size = rtas_ld(args, 3);
212     addr = rtas_ld(args, 0);
213 
214     finish_write_pci_config(spapr, buid, addr, size, val, rets);
215 }
216 
217 static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
218                                   uint32_t token, uint32_t nargs,
219                                   target_ulong args,
220                                   uint32_t nret, target_ulong rets)
221 {
222     uint32_t val, size, addr;
223 
224     if ((nargs != 3) || (nret != 1)) {
225         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
226         return;
227     }
228 
229 
230     val = rtas_ld(args, 2);
231     size = rtas_ld(args, 1);
232     addr = rtas_ld(args, 0);
233 
234     finish_write_pci_config(spapr, 0, addr, size, val, rets);
235 }
236 
237 /*
238  * Set MSI/MSIX message data.
239  * This is required for msi_notify()/msix_notify(), which
240  * will write to these addresses via spapr_msi_write().
241  *
242  * If addr == 0, all entries will have .data == first_irq, i.e.
243  * the table will be reset.
244  */
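/*
 * For illustration (IRQ numbers are hypothetical): with addr set to the MSI
 * window and first_irq == 0x1200, req_num == 3, MSI-X vectors 0..2 are
 * programmed with .data == 0x1200, 0x1201 and 0x1202 respectively.
 */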
245 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
246                              unsigned first_irq, unsigned req_num)
247 {
248     unsigned i;
249     MSIMessage msg = { .address = addr, .data = first_irq };
250 
251     if (!msix) {
252         msi_set_message(pdev, msg);
253         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
254         return;
255     }
256 
257     for (i = 0; i < req_num; ++i) {
258         msix_set_message(pdev, i, msg);
259         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
260         if (addr) {
261             ++msg.data;
262         }
263     }
264 }
265 
266 static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
267                                 uint32_t token, uint32_t nargs,
268                                 target_ulong args, uint32_t nret,
269                                 target_ulong rets)
270 {
271     uint32_t config_addr = rtas_ld(args, 0);
272     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
273     unsigned int func = rtas_ld(args, 3);
274     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
275     unsigned int seq_num = rtas_ld(args, 5);
276     unsigned int ret_intr_type;
277     unsigned int irq, max_irqs = 0, num = 0;
278     sPAPRPHBState *phb = NULL;
279     PCIDevice *pdev = NULL;
280     spapr_pci_msi *msi;
281     int *config_addr_key;
282 
283     switch (func) {
284     case RTAS_CHANGE_MSI_FN:
285     case RTAS_CHANGE_FN:
286         ret_intr_type = RTAS_TYPE_MSI;
287         break;
288     case RTAS_CHANGE_MSIX_FN:
289         ret_intr_type = RTAS_TYPE_MSIX;
290         break;
291     default:
292         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
293         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
294         return;
295     }
296 
297     /* Find sPAPRPHBState */
298     phb = spapr_pci_find_phb(spapr, buid);
299     if (phb) {
300         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
301     }
302     if (!phb || !pdev) {
303         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
304         return;
305     }
306 
307     /* Releasing MSIs */
308     if (!req_num) {
309         msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
310         if (!msi) {
311             trace_spapr_pci_msi("Releasing wrong config", config_addr);
312             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
313             return;
314         }
315 
316         xics_free(spapr->icp, msi->first_irq, msi->num);
317         if (msi_present(pdev)) {
318             spapr_msi_setmsg(pdev, 0, false, 0, num);
319         }
320         if (msix_present(pdev)) {
321             spapr_msi_setmsg(pdev, 0, true, 0, num);
322         }
323         g_hash_table_remove(phb->msi, &config_addr);
324 
325         trace_spapr_pci_msi("Released MSIs", config_addr);
326         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
327         rtas_st(rets, 1, 0);
328         return;
329     }
330 
331     /* Enabling MSI */
332 
333     /* Check if the device supports as many IRQs as requested */
334     if (ret_intr_type == RTAS_TYPE_MSI) {
335         max_irqs = msi_nr_vectors_allocated(pdev);
336     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
337         max_irqs = pdev->msix_entries_nr;
338     }
339     if (!max_irqs) {
340         error_report("Requested interrupt type %d is not enabled for device %x",
341                      ret_intr_type, config_addr);
342         rtas_st(rets, 0, -1); /* Hardware error */
343         return;
344     }
345     /* Correct the number if the guest asked for too many */
346     if (req_num > max_irqs) {
347         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
348         req_num = max_irqs;
349         irq = 0; /* to avoid misleading trace */
350         goto out;
351     }
352 
353     /* Allocate MSIs */
354     irq = xics_alloc_block(spapr->icp, 0, req_num, false,
355                            ret_intr_type == RTAS_TYPE_MSI);
356     if (!irq) {
357         error_report("Cannot allocate MSIs for device %x", config_addr);
358         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
359         return;
360     }
361 
362     /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
363     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
364                      irq, req_num);
365 
366     /* Add MSI device to cache */
367     msi = g_new(spapr_pci_msi, 1);
368     msi->first_irq = irq;
369     msi->num = req_num;
370     config_addr_key = g_new(int, 1);
371     *config_addr_key = config_addr;
372     g_hash_table_insert(phb->msi, config_addr_key, msi);
373 
374 out:
375     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
376     rtas_st(rets, 1, req_num);
377     rtas_st(rets, 2, ++seq_num);
378     rtas_st(rets, 3, ret_intr_type);
379 
380     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
381 }
382 
383 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
384                                                    sPAPRMachineState *spapr,
385                                                    uint32_t token,
386                                                    uint32_t nargs,
387                                                    target_ulong args,
388                                                    uint32_t nret,
389                                                    target_ulong rets)
390 {
391     uint32_t config_addr = rtas_ld(args, 0);
392     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
393     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
394     sPAPRPHBState *phb = NULL;
395     PCIDevice *pdev = NULL;
396     spapr_pci_msi *msi;
397 
398     /* Find sPAPRPHBState */
399     phb = spapr_pci_find_phb(spapr, buid);
400     if (phb) {
401         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
402     }
403     if (!phb || !pdev) {
404         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
405         return;
406     }
407 
408     /* Find device descriptor and start IRQ */
409     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
410     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
411         trace_spapr_pci_msi("Failed to return vector", config_addr);
412         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
413         return;
414     }
415     intr_src_num = msi->first_irq + ioa_intr_num;
416     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
417                                                            intr_src_num);
418 
419     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
420     rtas_st(rets, 1, intr_src_num);
421     rtas_st(rets, 2, 1);/* 0 == level; 1 == edge */
422 }
423 
424 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
425                                     sPAPRMachineState *spapr,
426                                     uint32_t token, uint32_t nargs,
427                                     target_ulong args, uint32_t nret,
428                                     target_ulong rets)
429 {
430     sPAPRPHBState *sphb;
431     sPAPRPHBClass *spc;
432     uint32_t addr, option;
433     uint64_t buid;
434     int ret;
435 
436     if ((nargs != 4) || (nret != 1)) {
437         goto param_error_exit;
438     }
439 
440     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
441     addr = rtas_ld(args, 0);
442     option = rtas_ld(args, 3);
443 
444     sphb = spapr_pci_find_phb(spapr, buid);
445     if (!sphb) {
446         goto param_error_exit;
447     }
448 
449     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
450     if (!spc->eeh_set_option) {
451         goto param_error_exit;
452     }
453 
454     ret = spc->eeh_set_option(sphb, addr, option);
455     rtas_st(rets, 0, ret);
456     return;
457 
458 param_error_exit:
459     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
460 }
461 
462 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
463                                            sPAPRMachineState *spapr,
464                                            uint32_t token, uint32_t nargs,
465                                            target_ulong args, uint32_t nret,
466                                            target_ulong rets)
467 {
468     sPAPRPHBState *sphb;
469     sPAPRPHBClass *spc;
470     PCIDevice *pdev;
471     uint32_t addr, option;
472     uint64_t buid;
473 
474     if ((nargs != 4) || (nret != 2)) {
475         goto param_error_exit;
476     }
477 
478     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
479     sphb = spapr_pci_find_phb(spapr, buid);
480     if (!sphb) {
481         goto param_error_exit;
482     }
483 
484     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
485     if (!spc->eeh_set_option) {
486         goto param_error_exit;
487     }
488 
489     /*
490      * We always return a PE address of the form "00BB0001", where "BB"
491      * is the bus number of the PE's primary bus.
492      */
493     option = rtas_ld(args, 3);
494     switch (option) {
495     case RTAS_GET_PE_ADDR:
496         addr = rtas_ld(args, 0);
497         pdev = spapr_pci_find_dev(spapr, buid, addr);
498         if (!pdev) {
499             goto param_error_exit;
500         }
501 
502         rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
503         break;
504     case RTAS_GET_PE_MODE:
505         rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
506         break;
507     default:
508         goto param_error_exit;
509     }
510 
511     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
512     return;
513 
514 param_error_exit:
515     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
516 }
517 
518 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
519                                             sPAPRMachineState *spapr,
520                                             uint32_t token, uint32_t nargs,
521                                             target_ulong args, uint32_t nret,
522                                             target_ulong rets)
523 {
524     sPAPRPHBState *sphb;
525     sPAPRPHBClass *spc;
526     uint64_t buid;
527     int state, ret;
528 
529     if ((nargs != 3) || (nret != 4 && nret != 5)) {
530         goto param_error_exit;
531     }
532 
533     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
534     sphb = spapr_pci_find_phb(spapr, buid);
535     if (!sphb) {
536         goto param_error_exit;
537     }
538 
539     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
540     if (!spc->eeh_get_state) {
541         goto param_error_exit;
542     }
543 
544     ret = spc->eeh_get_state(sphb, &state);
545     rtas_st(rets, 0, ret);
546     if (ret != RTAS_OUT_SUCCESS) {
547         return;
548     }
549 
550     rtas_st(rets, 1, state);
551     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
552     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
553     if (nret >= 5) {
554         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
555     }
556     return;
557 
558 param_error_exit:
559     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
560 }
561 
562 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
563                                     sPAPRMachineState *spapr,
564                                     uint32_t token, uint32_t nargs,
565                                     target_ulong args, uint32_t nret,
566                                     target_ulong rets)
567 {
568     sPAPRPHBState *sphb;
569     sPAPRPHBClass *spc;
570     uint32_t option;
571     uint64_t buid;
572     int ret;
573 
574     if ((nargs != 4) || (nret != 1)) {
575         goto param_error_exit;
576     }
577 
578     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
579     option = rtas_ld(args, 3);
580     sphb = spapr_pci_find_phb(spapr, buid);
581     if (!sphb) {
582         goto param_error_exit;
583     }
584 
585     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
586     if (!spc->eeh_reset) {
587         goto param_error_exit;
588     }
589 
590     ret = spc->eeh_reset(sphb, option);
591     rtas_st(rets, 0, ret);
592     return;
593 
594 param_error_exit:
595     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
596 }
597 
598 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
599                                   sPAPRMachineState *spapr,
600                                   uint32_t token, uint32_t nargs,
601                                   target_ulong args, uint32_t nret,
602                                   target_ulong rets)
603 {
604     sPAPRPHBState *sphb;
605     sPAPRPHBClass *spc;
606     uint64_t buid;
607     int ret;
608 
609     if ((nargs != 3) || (nret != 1)) {
610         goto param_error_exit;
611     }
612 
613     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
614     sphb = spapr_pci_find_phb(spapr, buid);
615     if (!sphb) {
616         goto param_error_exit;
617     }
618 
619     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
620     if (!spc->eeh_configure) {
621         goto param_error_exit;
622     }
623 
624     ret = spc->eeh_configure(sphb);
625     rtas_st(rets, 0, ret);
626     return;
627 
628 param_error_exit:
629     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
630 }
631 
632 /* Stub for now, to be fully supported later */
633 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
634                                        sPAPRMachineState *spapr,
635                                        uint32_t token, uint32_t nargs,
636                                        target_ulong args, uint32_t nret,
637                                        target_ulong rets)
638 {
639     sPAPRPHBState *sphb;
640     sPAPRPHBClass *spc;
641     int option;
642     uint64_t buid;
643 
644     if ((nargs != 8) || (nret != 1)) {
645         goto param_error_exit;
646     }
647 
648     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
649     sphb = spapr_pci_find_phb(spapr, buid);
650     if (!sphb) {
651         goto param_error_exit;
652     }
653 
654     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
655     if (!spc->eeh_set_option) {
656         goto param_error_exit;
657     }
658 
659     option = rtas_ld(args, 7);
660     switch (option) {
661     case RTAS_SLOT_TEMP_ERR_LOG:
662     case RTAS_SLOT_PERM_ERR_LOG:
663         break;
664     default:
665         goto param_error_exit;
666     }
667 
668     /* We don't have error log yet */
669     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
670     return;
671 
672 param_error_exit:
673     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
674 }
675 
676 static int pci_spapr_swizzle(int slot, int pin)
677 {
678     return (slot + pin) % PCI_NUM_PINS;
679 }
680 
681 static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
682 {
683     /*
684      * Here we need to convert pci_dev + irq_num to some unique value
685      * which is less than the number of IRQs on the specific bus (4).  We
686      * use standard PCI swizzling, that is (slot number + pin number)
687      * % 4.
688      */
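    /*
     * e.g. a device in slot 3 using INTB (irq_num 1) is routed to
     * LSI (3 + 1) % 4 == 0.
     */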
689     return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
690 }
691 
692 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
693 {
694     /*
695      * Here we use the number returned by pci_spapr_map_irq to find a
696      * corresponding qemu_irq.
697      */
698     sPAPRPHBState *phb = opaque;
699 
700     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
701     qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
702 }
703 
704 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
705 {
706     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
707     PCIINTxRoute route;
708 
709     route.mode = PCI_INTX_ENABLED;
710     route.irq = sphb->lsi_table[pin].irq;
711 
712     return route;
713 }
714 
715 /*
716  * MSI/MSIX memory region implementation.
717  * The handler handles both MSI and MSIX.
718  * For both MSI and MSI-X, the interrupt (VIRQ) number is encoded in the
719  * least significant bits of the data, as programmed into the vectors by
720  * spapr_msi_setmsg().
721  */
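/*
 * For example (VIRQ number hypothetical), a message with data 0x1205 written
 * anywhere inside the MSI window simply pulses XICS IRQ 0x1205.
 */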
722 static void spapr_msi_write(void *opaque, hwaddr addr,
723                             uint64_t data, unsigned size)
724 {
725     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
726     uint32_t irq = data;
727 
728     trace_spapr_pci_msi_write(addr, data, irq);
729 
730     qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
731 }
732 
733 static const MemoryRegionOps spapr_msi_ops = {
734     /* There is no .read as the read result is undefined by PCI spec */
735     .read = NULL,
736     .write = spapr_msi_write,
737     .endianness = DEVICE_LITTLE_ENDIAN
738 };
739 
740 /*
741  * PHB PCI device
742  */
743 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
744 {
745     sPAPRPHBState *phb = opaque;
746 
747     return &phb->iommu_as;
748 }
749 
750 /* Macros to operate with address in OF binding to PCI */
751 #define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
752 #define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
753 #define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
754 #define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
755 #define b_ss(x)         b_x((x), 24, 2) /* the space code */
756 #define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
757 #define b_ddddd(x)      b_x((x), 11, 5) /* device number */
758 #define b_fff(x)        b_x((x), 8, 3)  /* function number */
759 #define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
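/*
 * For instance, a 32-bit MEM BAR at config offset 0x14 of device 00:02.0
 * encodes as b_ss(2) | b_ddddd(2) | b_rrrrrrrr(0x14) == 0x02001014.
 */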
760 
761 /* for 'reg'/'assigned-addresses' OF properties */
762 #define RESOURCE_CELLS_SIZE 2
763 #define RESOURCE_CELLS_ADDRESS 3
764 
765 typedef struct ResourceFields {
766     uint32_t phys_hi;
767     uint32_t phys_mid;
768     uint32_t phys_lo;
769     uint32_t size_hi;
770     uint32_t size_lo;
771 } QEMU_PACKED ResourceFields;
772 
773 typedef struct ResourceProps {
774     ResourceFields reg[8];
775     ResourceFields assigned[7];
776     uint32_t reg_len;
777     uint32_t assigned_len;
778 } ResourceProps;
779 
780 /* fill in the 'reg'/'assigned-addresses' OF properties for
781  * a PCI device. 'reg' describes resource requirements for a
782  * device's IO/MEM regions, 'assigned-addresses' describes the
783  * actual resource assignments.
784  *
785  * the properties are arrays of ('phys-addr', 'size') pairs describing
786  * the addressable regions of the PCI device, where 'phys-addr' is a
787  * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
788  * (phys.hi, phys.mid, phys.lo), and 'size' is a
789  * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
790  *
791  * phys.hi = 0xYYXXXXZZ, where:
792  *   0xYY = npt000ss
793  *          |||   |
794  *          |||   +-- space code
795  *          |||               |
796  *          |||               +  00 if configuration space
797  *          |||               +  01 if IO region,
798  *          |||               +  10 if 32-bit MEM region
799  *          |||               +  11 if 64-bit MEM region
800  *          |||
801  *          ||+------ for non-relocatable IO: 1 if aliased
802  *          ||        for relocatable IO: 1 if below 64KB
803  *          ||        for MEM: 1 if below 1MB
804  *          |+------- 1 if region is prefetchable
805  *          +-------- 1 if region is non-relocatable
806  *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
807  *            bits respectively
808  *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
809  *          to the region
810  *
811  * phys.mid and phys.lo correspond respectively to the hi/lo portions
812  * of the actual address of the region.
813  *
814  * how the phys-addr/size values are used differs slightly between
815  * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
816  * an additional description for the config space region of the
817  * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
818  * to describe the region as relocatable, with an address-mapping
819  * that corresponds directly to the PHB's address space for the
820  * resource. 'assigned-addresses' always has n=1 set with an absolute
821  * address assigned for the resource. in general, 'assigned-addresses'
822  * won't be populated, since addresses for PCI devices are generally
823  * unmapped initially and left to the guest to assign.
824  *
825  * note also that addresses defined in these properties are, at least
826  * for PAPR guests, relative to the PHB's IO/MEM windows, and
827  * correspond directly to the addresses in the BARs.
828  *
829  * in accordance with PCI Bus Binding to Open Firmware,
830  * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
831  * Appendix C.
832  */
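/*
 * As an illustration (all values hypothetical): for a function 0 device in
 * slot 2 on bus 0 with a single 4KB 32-bit MEM BAR 0 assigned at bus address
 * 0xc0000000, the properties would read:
 *
 *   reg = <0x00001000 0 0 0 0
 *          0x02001010 0 0 0 0x1000>;
 *   assigned-addresses = <0x82001010 0 0xc0000000 0 0x1000>;
 */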
833 static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
834 {
835     int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
836     uint32_t dev_id = (b_bbbbbbbb(bus_num) |
837                        b_ddddd(PCI_SLOT(d->devfn)) |
838                        b_fff(PCI_FUNC(d->devfn)));
839     ResourceFields *reg, *assigned;
840     int i, reg_idx = 0, assigned_idx = 0;
841 
842     /* config space region */
843     reg = &rp->reg[reg_idx++];
844     reg->phys_hi = cpu_to_be32(dev_id);
845     reg->phys_mid = 0;
846     reg->phys_lo = 0;
847     reg->size_hi = 0;
848     reg->size_lo = 0;
849 
850     for (i = 0; i < PCI_NUM_REGIONS; i++) {
851         if (!d->io_regions[i].size) {
852             continue;
853         }
854 
855         reg = &rp->reg[reg_idx++];
856 
857         reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
858         if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
859             reg->phys_hi |= cpu_to_be32(b_ss(1));
860         } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
861             reg->phys_hi |= cpu_to_be32(b_ss(3));
862         } else {
863             reg->phys_hi |= cpu_to_be32(b_ss(2));
864         }
865         reg->phys_mid = 0;
866         reg->phys_lo = 0;
867         reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
868         reg->size_lo = cpu_to_be32(d->io_regions[i].size);
869 
870         if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
871             continue;
872         }
873 
874         assigned = &rp->assigned[assigned_idx++];
875         assigned->phys_hi = cpu_to_be32(be32_to_cpu(reg->phys_hi) | b_n(1));
876         assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
877         assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
878         assigned->size_hi = reg->size_hi;
879         assigned->size_lo = reg->size_lo;
880     }
881 
882     rp->reg_len = reg_idx * sizeof(ResourceFields);
883     rp->assigned_len = assigned_idx * sizeof(ResourceFields);
884 }
885 
886 static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
887                                        int phb_index, int drc_index,
888                                        const char *drc_name)
889 {
890     ResourceProps rp;
891     bool is_bridge = false;
892     int pci_status;
893 
894     if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
895         PCI_HEADER_TYPE_BRIDGE) {
896         is_bridge = true;
897     }
898 
899     /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
900     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
901                           pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
902     _FDT(fdt_setprop_cell(fdt, offset, "device-id",
903                           pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
904     _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
905                           pci_default_read_config(dev, PCI_REVISION_ID, 1)));
906     _FDT(fdt_setprop_cell(fdt, offset, "class-code",
907                           pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
908     if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
909         _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
910                  pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
911     }
912 
913     if (!is_bridge) {
914         _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
915             pci_default_read_config(dev, PCI_MIN_GNT, 1)));
916         _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
917             pci_default_read_config(dev, PCI_MAX_LAT, 1)));
918     }
919 
920     if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
921         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
922                  pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
923     }
924 
925     if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
926         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
927                  pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
928     }
929 
930     _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
931         pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
932 
933     /* the following fdt cells are masked off the pci status register */
934     pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
935     _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
936                           PCI_STATUS_DEVSEL_MASK & pci_status));
937 
938     if (pci_status & PCI_STATUS_FAST_BACK) {
939         _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
940     }
941     if (pci_status & PCI_STATUS_66MHZ) {
942         _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
943     }
944     if (pci_status & PCI_STATUS_UDF) {
945         _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
946     }
947 
948     /* NOTE: this is normally generated by firmware via path/unit name,
949      * but in our case we must set it manually since it does not get
950      * processed by OF beforehand
951      */
952     _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
953     _FDT(fdt_setprop(fdt, offset, "ibm,loc-code", drc_name, strlen(drc_name)));
954     _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
955 
956     _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
957                           RESOURCE_CELLS_ADDRESS));
958     _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
959                           RESOURCE_CELLS_SIZE));
960     _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x",
961                           RESOURCE_CELLS_SIZE));
962 
963     populate_resource_props(dev, &rp);
964     _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
965     _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
966                      (uint8_t *)rp.assigned, rp.assigned_len));
967 
968     return 0;
969 }
970 
971 static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
972                                             PCIDevice *pdev);
973 
974 /* create OF node for pci device and required OF DT properties */
975 static int spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
976                                      int drc_index, const char *drc_name,
977                                      void *fdt, int node_offset)
978 {
979     int offset, ret;
980     int slot = PCI_SLOT(dev->devfn);
981     int func = PCI_FUNC(dev->devfn);
982     char nodename[FDT_NAME_MAX];
983 
984     if (func != 0) {
985         snprintf(nodename, FDT_NAME_MAX, "pci@%x,%x", slot, func);
986     } else {
987         snprintf(nodename, FDT_NAME_MAX, "pci@%x", slot);
988     }
989     offset = fdt_add_subnode(fdt, node_offset, nodename);
990     ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb->index, drc_index,
991                                       drc_name);
992     g_assert(!ret);
993     if (ret) {
994         return 0;
995     }
996     return offset;
997 }
998 
999 static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
1000                                      sPAPRPHBState *phb,
1001                                      PCIDevice *pdev,
1002                                      Error **errp)
1003 {
1004     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1005     DeviceState *dev = DEVICE(pdev);
1006     int drc_index = drck->get_index(drc);
1007     const char *drc_name = drck->get_name(drc);
1008     void *fdt = NULL;
1009     int fdt_start_offset = 0, fdt_size;
1010 
1011     if (dev->hotplugged) {
1012         fdt = create_device_tree(&fdt_size);
1013         fdt_start_offset = spapr_create_pci_child_dt(phb, pdev,
1014                                                      drc_index, drc_name,
1015                                                      fdt, 0);
1016         if (!fdt_start_offset) {
1017             error_setg(errp, "Failed to create pci child device tree node");
1018             goto out;
1019         }
1020     }
1021 
1022     drck->attach(drc, DEVICE(pdev),
1023                  fdt, fdt_start_offset, !dev->hotplugged, errp);
1024 out:
1025     if (*errp) {
1026         g_free(fdt);
1027     }
1028 }
1029 
1030 static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
1031 {
1032     /* some guest versions do not wait for completion of a device
1033      * cleanup (generally done asynchronously by the kernel) before
1034      * signaling to QEMU that the device is safe to remove, but instead
1035      * sleep for some 'safe' period of time. unfortunately on a busy host
1036      * this sleep isn't guaranteed to be long enough, resulting in
1037      * bad things like IRQ lines being left asserted during final
1038      * device removal. to deal with this we call reset just prior
1039      * to finalizing the device, which will put the device back into
1040      * an 'idle' state, as the device cleanup code expects.
1041      */
1042     pci_device_reset(PCI_DEVICE(dev));
1043     object_unparent(OBJECT(dev));
1044 }
1045 
1046 static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
1047                                         sPAPRPHBState *phb,
1048                                         PCIDevice *pdev,
1049                                         Error **errp)
1050 {
1051     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1052 
1053     drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
1054 }
1055 
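/*
 * Look up the DR connector for a PCI device: the connector id is composed
 * of the PHB index, the device's bus number and its devfn.
 */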
1056 static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
1057                                                PCIDevice *pdev)
1058 {
1059     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
1060     return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
1061                                     (phb->index << 16) |
1062                                     (busnr << 8) |
1063                                     pdev->devfn);
1064 }
1065 
1066 static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
1067                                             PCIDevice *pdev)
1068 {
1069     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1070     sPAPRDRConnectorClass *drck;
1071 
1072     if (!drc) {
1073         return 0;
1074     }
1075 
1076     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1077     return drck->get_index(drc);
1078 }
1079 
1080 static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
1081                                      DeviceState *plugged_dev, Error **errp)
1082 {
1083     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1084     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1085     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1086     Error *local_err = NULL;
1087 
1088     /* if DR is disabled we don't need to do anything in the case of
1089      * hotplug or coldplug callbacks
1090      */
1091     if (!phb->dr_enabled) {
1092         /* if this is a hotplug operation initiated by the user
1093          * we need to let them know it's not enabled
1094          */
1095         if (plugged_dev->hotplugged) {
1096             error_setg(errp, QERR_BUS_NO_HOTPLUG,
1097                        object_get_typename(OBJECT(phb)));
1098         }
1099         return;
1100     }
1101 
1102     g_assert(drc);
1103 
1104     spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
1105     if (local_err) {
1106         error_propagate(errp, local_err);
1107         return;
1108     }
1109     if (plugged_dev->hotplugged) {
1110         spapr_hotplug_req_add_event(drc);
1111     }
1112 }
1113 
1114 static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
1115                                        DeviceState *plugged_dev, Error **errp)
1116 {
1117     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1118     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1119     sPAPRDRConnectorClass *drck;
1120     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1121     Error *local_err = NULL;
1122 
1123     if (!phb->dr_enabled) {
1124         error_setg(errp, QERR_BUS_NO_HOTPLUG,
1125                    object_get_typename(OBJECT(phb)));
1126         return;
1127     }
1128 
1129     g_assert(drc);
1130 
1131     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1132     if (!drck->release_pending(drc)) {
1133         spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
1134         if (local_err) {
1135             error_propagate(errp, local_err);
1136             return;
1137         }
1138         spapr_hotplug_req_remove_event(drc);
1139     }
1140 }
1141 
1142 static void spapr_phb_realize(DeviceState *dev, Error **errp)
1143 {
1144     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1145     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1146     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
1147     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1148     sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s);
1149     char *namebuf;
1150     int i;
1151     PCIBus *bus;
1152     uint64_t msi_window_size = 4096;
1153 
1154     if (sphb->index != (uint32_t)-1) {
1155         hwaddr windows_base;
1156 
1157         if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
1158             || (sphb->mem_win_addr != (hwaddr)-1)
1159             || (sphb->io_win_addr != (hwaddr)-1)) {
1160             error_setg(errp, "Either \"index\" or other parameters must"
1161                        " be specified for PAPR PHB, not both");
1162             return;
1163         }
1164 
1165         if (sphb->index > SPAPR_PCI_MAX_INDEX) {
1166             error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
1167                        SPAPR_PCI_MAX_INDEX);
1168             return;
1169         }
1170 
1171         sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
1172         sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
1173 
1174         windows_base = SPAPR_PCI_WINDOW_BASE
1175             + sphb->index * SPAPR_PCI_WINDOW_SPACING;
1176         sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
1177         sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
1178     }
1179 
1180     if (sphb->buid == (uint64_t)-1) {
1181         error_setg(errp, "BUID not specified for PHB");
1182         return;
1183     }
1184 
1185     if (sphb->dma_liobn == (uint32_t)-1) {
1186         error_setg(errp, "LIOBN not specified for PHB");
1187         return;
1188     }
1189 
1190     if (sphb->mem_win_addr == (hwaddr)-1) {
1191         error_setg(errp, "Memory window address not specified for PHB");
1192         return;
1193     }
1194 
1195     if (sphb->io_win_addr == (hwaddr)-1) {
1196         error_setg(errp, "IO window address not specified for PHB");
1197         return;
1198     }
1199 
1200     if (spapr_pci_find_phb(spapr, sphb->buid)) {
1201         error_setg(errp, "PCI host bridges must have unique BUIDs");
1202         return;
1203     }
1204 
1205     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1206 
1207     namebuf = alloca(strlen(sphb->dtbusname) + 32);
1208 
1209     /* Initialize memory regions */
1210     sprintf(namebuf, "%s.mmio", sphb->dtbusname);
1211     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1212 
1213     sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
1214     memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
1215                              namebuf, &sphb->memspace,
1216                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1217     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1218                                 &sphb->memwindow);
1219 
1220     /* Initialize IO regions */
1221     sprintf(namebuf, "%s.io", sphb->dtbusname);
1222     memory_region_init(&sphb->iospace, OBJECT(sphb),
1223                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
1224 
1225     sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
1226     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1227                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1228     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1229                                 &sphb->iowindow);
1230 
1231     bus = pci_register_bus(dev, NULL,
1232                            pci_spapr_set_irq, pci_spapr_map_irq, sphb,
1233                            &sphb->memspace, &sphb->iospace,
1234                            PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
1235     phb->bus = bus;
1236     qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
1237 
1238     /*
1239      * Initialize PHB address space.
1240      * By default there will be at least one subregion for the default
1241      * 32bit DMA window.
1242      * Later the guest might want to create another DMA window
1243      * which will become another memory subregion.
1244      */
1245     sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
1246 
1247     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1248                        namebuf, UINT64_MAX);
1249     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1250                        sphb->dtbusname);
1251 
1252     /*
1253      * As MSI/MSIX interrupts are triggered by writing to MSI/MSIX vectors,
1254      * we need to allocate some memory to catch those writes coming
1255      * from msi_notify()/msix_notify().
1256      * As MSIMessage::addr is going to be the same and MSIMessage::data
1257      * is going to be a VIRQ number, only 4 bytes of the MSI MR will
1258      * actually be used.
1259      *
1260      * For KVM we want to ensure that this memory is a full page so that
1261      * our memory slot is of page size granularity.
1262      */
1263 #ifdef CONFIG_KVM
1264     if (kvm_enabled()) {
1265         msi_window_size = getpagesize();
1266     }
1267 #endif
1268 
1269     memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
1270                           "msi", msi_window_size);
1271     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1272                                 &sphb->msiwindow);
1273 
1274     pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
1275 
1276     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1277 
1278     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1279 
1280     /* Initialize the LSI table */
1281     for (i = 0; i < PCI_NUM_PINS; i++) {
1282         uint32_t irq;
1283 
1284         irq = xics_alloc_block(spapr->icp, 0, 1, true, false);
1285         if (!irq) {
1286             error_setg(errp, "spapr_allocate_lsi failed");
1287             return;
1288         }
1289 
1290         sphb->lsi_table[i].irq = irq;
1291     }
1292 
1293     /* allocate connectors for child PCI devices */
1294     if (sphb->dr_enabled) {
1295         for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
1296             spapr_dr_connector_new(OBJECT(phb),
1297                                    SPAPR_DR_CONNECTOR_TYPE_PCI,
1298                                    (sphb->index << 16) | i);
1299         }
1300     }
1301 
1302     if (!info->finish_realize) {
1303         error_setg(errp, "finish_realize not defined");
1304         return;
1305     }
1306 
1307     info->finish_realize(sphb, errp);
1308 
1309     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
1310 }
1311 
1312 static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
1313 {
1314     sPAPRTCETable *tcet;
1315     uint32_t nb_table;
1316 
1317     nb_table = SPAPR_PCI_DMA32_SIZE >> SPAPR_TCE_PAGE_SHIFT;
1318     tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
1319                                0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
1320     if (!tcet) {
1321         error_setg(errp, "Unable to create TCE table for %s",
1322                    sphb->dtbusname);
1323         return;
1324     }
1325 
1326     /* Register default 32bit DMA window */
1327     memory_region_add_subregion(&sphb->iommu_root, 0,
1328                                 spapr_tce_get_iommu(tcet));
1329 }
1330 
1331 static int spapr_phb_children_reset(Object *child, void *opaque)
1332 {
1333     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
1334 
1335     if (dev) {
1336         device_reset(dev);
1337     }
1338 
1339     return 0;
1340 }
1341 
1342 static void spapr_phb_reset(DeviceState *qdev)
1343 {
1344     /* Reset the IOMMU state */
1345     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
1346 }
1347 
1348 static Property spapr_phb_properties[] = {
1349     DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
1350     DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
1351     DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
1352     DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
1353     DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
1354                        SPAPR_PCI_MMIO_WIN_SIZE),
1355     DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
1356     DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
1357                        SPAPR_PCI_IO_WIN_SIZE),
1358     DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
1359                      true),
1360     DEFINE_PROP_END_OF_LIST(),
1361 };
1362 
1363 static const VMStateDescription vmstate_spapr_pci_lsi = {
1364     .name = "spapr_pci/lsi",
1365     .version_id = 1,
1366     .minimum_version_id = 1,
1367     .fields = (VMStateField[]) {
1368         VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),
1369 
1370         VMSTATE_END_OF_LIST()
1371     },
1372 };
1373 
1374 static const VMStateDescription vmstate_spapr_pci_msi = {
1375     .name = "spapr_pci/msi",
1376     .version_id = 1,
1377     .minimum_version_id = 1,
1378     .fields = (VMStateField []) {
1379         VMSTATE_UINT32(key, spapr_pci_msi_mig),
1380         VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
1381         VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
1382         VMSTATE_END_OF_LIST()
1383     },
1384 };
1385 
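/*
 * The MSI configuration hash table cannot be migrated directly, so it is
 * flattened into the msi_devs array in pre_save and rebuilt in post_load.
 */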
1386 static void spapr_pci_pre_save(void *opaque)
1387 {
1388     sPAPRPHBState *sphb = opaque;
1389     GHashTableIter iter;
1390     gpointer key, value;
1391     int i;
1392 
1393     if (sphb->msi_devs) {
1394         g_free(sphb->msi_devs);
1395         sphb->msi_devs = NULL;
1396     }
1397     sphb->msi_devs_num = g_hash_table_size(sphb->msi);
1398     if (!sphb->msi_devs_num) {
1399         return;
1400     }
1401     sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
1402 
1403     g_hash_table_iter_init(&iter, sphb->msi);
1404     for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
1405         sphb->msi_devs[i].key = *(uint32_t *) key;
1406         sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
1407     }
1408 }
1409 
1410 static int spapr_pci_post_load(void *opaque, int version_id)
1411 {
1412     sPAPRPHBState *sphb = opaque;
1413     gpointer key, value;
1414     int i;
1415 
1416     for (i = 0; i < sphb->msi_devs_num; ++i) {
1417         key = g_memdup(&sphb->msi_devs[i].key,
1418                        sizeof(sphb->msi_devs[i].key));
1419         value = g_memdup(&sphb->msi_devs[i].value,
1420                          sizeof(sphb->msi_devs[i].value));
1421         g_hash_table_insert(sphb->msi, key, value);
1422     }
1423     if (sphb->msi_devs) {
1424         g_free(sphb->msi_devs);
1425         sphb->msi_devs = NULL;
1426     }
1427     sphb->msi_devs_num = 0;
1428 
1429     return 0;
1430 }
1431 
1432 static const VMStateDescription vmstate_spapr_pci = {
1433     .name = "spapr_pci",
1434     .version_id = 2,
1435     .minimum_version_id = 2,
1436     .pre_save = spapr_pci_pre_save,
1437     .post_load = spapr_pci_post_load,
1438     .fields = (VMStateField[]) {
1439         VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
1440         VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
1441         VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
1442         VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
1443         VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
1444         VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
1445         VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
1446                              vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
1447         VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
1448         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
1449                                     vmstate_spapr_pci_msi, spapr_pci_msi_mig),
1450         VMSTATE_END_OF_LIST()
1451     },
1452 };
1453 
1454 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
1455                                            PCIBus *rootbus)
1456 {
1457     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
1458 
1459     return sphb->dtbusname;
1460 }
1461 
1462 static void spapr_phb_class_init(ObjectClass *klass, void *data)
1463 {
1464     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
1465     DeviceClass *dc = DEVICE_CLASS(klass);
1466     sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
1467     HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
1468 
1469     hc->root_bus_path = spapr_phb_root_bus_path;
1470     dc->realize = spapr_phb_realize;
1471     dc->props = spapr_phb_properties;
1472     dc->reset = spapr_phb_reset;
1473     dc->vmsd = &vmstate_spapr_pci;
1474     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
1475     dc->cannot_instantiate_with_device_add_yet = false;
1476     spc->finish_realize = spapr_phb_finish_realize;
1477     hp->plug = spapr_phb_hot_plug_child;
1478     hp->unplug = spapr_phb_hot_unplug_child;
1479 }
1480 
1481 static const TypeInfo spapr_phb_info = {
1482     .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
1483     .parent        = TYPE_PCI_HOST_BRIDGE,
1484     .instance_size = sizeof(sPAPRPHBState),
1485     .class_init    = spapr_phb_class_init,
1486     .class_size    = sizeof(sPAPRPHBClass),
1487     .interfaces    = (InterfaceInfo[]) {
1488         { TYPE_HOTPLUG_HANDLER },
1489         { }
1490     }
1491 };
1492 
1493 PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
1494 {
1495     DeviceState *dev;
1496 
1497     dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
1498     qdev_prop_set_uint32(dev, "index", index);
1499     qdev_init_nofail(dev);
1500 
1501     return PCI_HOST_BRIDGE(dev);
1502 }
1503 
1504 typedef struct sPAPRFDT {
1505     void *fdt;
1506     int node_off;
1507     sPAPRPHBState *sphb;
1508 } sPAPRFDT;
1509 
1510 static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev,
1511                                           void *opaque)
1512 {
1513     PCIBus *sec_bus;
1514     sPAPRFDT *p = opaque;
1515     int offset;
1516     sPAPRFDT s_fdt;
1517     uint32_t drc_index = spapr_phb_get_pci_drc_index(p->sphb, pdev);
1518 
1519     offset = spapr_create_pci_child_dt(p->sphb, pdev,
1520                                        drc_index, NULL,
1521                                        p->fdt, p->node_off);
1522     if (!offset) {
1523         error_report("Failed to create pci child device tree node");
1524         return;
1525     }
1526 
1527     if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
1528          PCI_HEADER_TYPE_BRIDGE)) {
1529         return;
1530     }
1531 
1532     sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
1533     if (!sec_bus) {
1534         return;
1535     }
1536 
1537     s_fdt.fdt = p->fdt;
1538     s_fdt.node_off = offset;
1539     s_fdt.sphb = p->sphb;
1540     pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
1541                         spapr_populate_pci_devices_dt,
1542                         &s_fdt);
1543 }
1544 
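/* pci_for_each_device() callback: assign primary/secondary/subordinate
 * bus numbers to each bridge, descending depth-first so that the final
 * subordinate number covers every bus behind the bridge. */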
1545 static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
1546                                            void *opaque)
1547 {
1548     unsigned int *bus_no = opaque;
1549     unsigned int primary = *bus_no;
1550     unsigned int subordinate = 0xff;
1551     PCIBus *sec_bus = NULL;
1552 
1553     if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
1554          PCI_HEADER_TYPE_BRIDGE)) {
1555         return;
1556     }
1557 
1558     (*bus_no)++;
1559     pci_default_write_config(pdev, PCI_PRIMARY_BUS, primary, 1);
1560     pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1);
1561     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
1562 
1563     sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
1564     if (!sec_bus) {
1565         return;
1566     }
1567 
1568     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, subordinate, 1);
1569     pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
1570                         spapr_phb_pci_enumerate_bridge, bus_no);
1571     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
1572 }
1573 
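/* Number all bridges below the PHB's root bus, starting from bus 0. */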
1574 static void spapr_phb_pci_enumerate(sPAPRPHBState *phb)
1575 {
1576     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
1577     unsigned int bus_no = 0;
1578 
1579     pci_for_each_device(bus, pci_bus_num(bus),
1580                         spapr_phb_pci_enumerate_bridge,
1581                         &bus_no);
1582 
1583 }
1584 
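/* Create the device tree node for this PHB: bus ranges, MMIO/IO
 * windows ("ranges"), the LSI interrupt map, the default DMA window
 * and child nodes for all PCI devices currently attached. */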
1585 int spapr_populate_pci_dt(sPAPRPHBState *phb,
1586                           uint32_t xics_phandle,
1587                           void *fdt)
1588 {
1589     int bus_off, i, j, ret;
1590     char nodename[FDT_NAME_MAX];
1591     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
1592     const uint64_t mmiosize = memory_region_size(&phb->memwindow);
1593     const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
1594     const uint64_t w32size = MIN(w32max, mmiosize);
1595     const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
1596     struct {
1597         uint32_t hi;
1598         uint64_t child;
1599         uint64_t parent;
1600         uint64_t size;
1601     } QEMU_PACKED ranges[] = {
1602         {
1603             cpu_to_be32(b_ss(1)), cpu_to_be64(0),
1604             cpu_to_be64(phb->io_win_addr),
1605             cpu_to_be64(memory_region_size(&phb->iospace)),
1606         },
1607         {
1608             cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
1609             cpu_to_be64(phb->mem_win_addr),
1610             cpu_to_be64(w32size),
1611         },
1612         {
1613             cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
1614             cpu_to_be64(phb->mem_win_addr + w32size),
1615             cpu_to_be64(w64size)
1616         },
1617     };
1618     const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
1619     uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
1620     uint32_t interrupt_map_mask[] = {
1621         cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
1622     uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
1623     sPAPRTCETable *tcet;
1624     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
1625     sPAPRFDT s_fdt;
1626 
1627     /* Start populating the FDT */
1628     snprintf(nodename, FDT_NAME_MAX, "pci@%" PRIx64, phb->buid);
1629     bus_off = fdt_add_subnode(fdt, 0, nodename);
1630     if (bus_off < 0) {
1631         return bus_off;
1632     }
1633 
1634     /* Write PHB properties */
1635     _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
1636     _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
1637     _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
1638     _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
1639     _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
1640     _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
1641     _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
1642     _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
1643     _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
1644     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
1645     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));
1646 
1647     /* Build the interrupt-map; this must match what is done
1648      * in pci_spapr_map_irq
1649      */
1650     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
1651                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
1652     for (i = 0; i < PCI_SLOT_MAX; i++) {
1653         for (j = 0; j < PCI_NUM_PINS; j++) {
1654             uint32_t *irqmap = interrupt_map[i * PCI_NUM_PINS + j];
1655             int lsi_num = pci_spapr_swizzle(i, j);
1656 
1657             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
1658             irqmap[1] = 0;
1659             irqmap[2] = 0;
1660             irqmap[3] = cpu_to_be32(j + 1);
1661             irqmap[4] = cpu_to_be32(xics_phandle);
1662             irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
1663             irqmap[6] = cpu_to_be32(0x8);
1664         }
1665     }
1666     /* Write interrupt map */
1667     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
1668                      sizeof(interrupt_map)));
1669 
1670     tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
1671     spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
1672                  tcet->liobn, tcet->bus_offset,
1673                  tcet->nb_table << tcet->page_shift);
1674 
1675     /* Walk the bridges and program the bus numbers */
1676     spapr_phb_pci_enumerate(phb);
1677     _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
1678 
1679     /* Populate tree nodes with PCI devices attached */
1680     s_fdt.fdt = fdt;
1681     s_fdt.node_off = bus_off;
1682     s_fdt.sphb = phb;
1683     pci_for_each_device(bus, pci_bus_num(bus),
1684                         spapr_populate_pci_devices_dt,
1685                         &s_fdt);
1686 
1687     ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
1688                                 SPAPR_DR_CONNECTOR_TYPE_PCI);
1689     if (ret) {
1690         return ret;
1691     }
1692 
1693     return 0;
1694 }
1695 
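/* Register the PCI and EEH related RTAS calls; the MSI calls are only
 * registered when MSI support is enabled. */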
1696 void spapr_pci_rtas_init(void)
1697 {
1698     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
1699                         rtas_read_pci_config);
1700     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
1701                         rtas_write_pci_config);
1702     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
1703                         rtas_ibm_read_pci_config);
1704     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
1705                         rtas_ibm_write_pci_config);
1706     if (msi_supported) {
1707         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
1708                             "ibm,query-interrupt-source-number",
1709                             rtas_ibm_query_interrupt_source_number);
1710         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
1711                             rtas_ibm_change_msi);
1712     }
1713 
1714     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
1715                         "ibm,set-eeh-option",
1716                         rtas_ibm_set_eeh_option);
1717     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
1718                         "ibm,get-config-addr-info2",
1719                         rtas_ibm_get_config_addr_info2);
1720     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
1721                         "ibm,read-slot-reset-state2",
1722                         rtas_ibm_read_slot_reset_state2);
1723     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
1724                         "ibm,set-slot-reset",
1725                         rtas_ibm_set_slot_reset);
1726     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
1727                         "ibm,configure-pe",
1728                         rtas_ibm_configure_pe);
1729     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
1730                         "ibm,slot-error-detail",
1731                         rtas_ibm_slot_error_detail);
1732 }
1733 
1734 static void spapr_pci_register_types(void)
1735 {
1736     type_register_static(&spapr_phb_info);
1737 }
1738 
1739 type_init(spapr_pci_register_types)
1740 
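/* qbus_walk_children() callback: flip the framebuffer endianness of
 * any (secondary-)VGA device found. */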
1741 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
1742 {
1743     bool be = *(bool *)opaque;
1744 
1745     if (object_dynamic_cast(OBJECT(dev), "VGA")
1746         || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
1747         object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
1748                                  &error_abort);
1749     }
1750     return 0;
1751 }
1752 
1753 void spapr_pci_switch_vga(bool big_endian)
1754 {
1755     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1756     sPAPRPHBState *sphb;
1757 
1758     /*
1759      * For backward compatibility with existing guests, we switch
1760      * the endianness of the VGA controller when changing the guest
1761      * interrupt mode
1762      */
1763     QLIST_FOREACH(sphb, &spapr->phbs, list) {
1764         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
1765         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
1766                            &big_endian);
1767     }
1768 }
1769