xref: /qemu/hw/ppc/spapr_pci.c (revision 62083979b0471ac07da6d94944bf12a9b18baa1f)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "hw/hw.h"
26 #include "hw/pci/pci.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/pci/pci_host.h"
30 #include "hw/ppc/spapr.h"
31 #include "hw/pci-host/spapr.h"
32 #include "exec/address-spaces.h"
33 #include <libfdt.h>
34 #include "trace.h"
35 #include "qemu/error-report.h"
36 
37 #include "hw/pci/pci_bus.h"
38 #include "hw/ppc/spapr_drc.h"
39 
40 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
41 #define RTAS_QUERY_FN           0
42 #define RTAS_CHANGE_FN          1
43 #define RTAS_RESET_FN           2
44 #define RTAS_CHANGE_MSI_FN      3
45 #define RTAS_CHANGE_MSIX_FN     4
46 
47 /* Interrupt types to return on RTAS_CHANGE_* */
48 #define RTAS_TYPE_MSI           1
49 #define RTAS_TYPE_MSIX          2
50 
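/* Look up a PCI host bridge by its Bus Unit ID (BUID); returns NULL if none matches. */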
51 sPAPRPHBState *spapr_pci_find_phb(sPAPREnvironment *spapr, uint64_t buid)
52 {
53     sPAPRPHBState *sphb;
54 
55     QLIST_FOREACH(sphb, &spapr->phbs, list) {
56         if (sphb->buid != buid) {
57             continue;
58         }
59         return sphb;
60     }
61 
62     return NULL;
63 }
64 
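/*
 * Resolve a (BUID, config address) pair to a PCIDevice.  The RTAS config
 * address encodes the bus number in bits 16-23 and devfn in bits 8-15.
 */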
65 PCIDevice *spapr_pci_find_dev(sPAPREnvironment *spapr, uint64_t buid,
66                               uint32_t config_addr)
67 {
68     sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
69     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
70     int bus_num = (config_addr >> 16) & 0xFF;
71     int devfn = (config_addr >> 8) & 0xFF;
72 
73     if (!phb) {
74         return NULL;
75     }
76 
77     return pci_find_device(phb->bus, bus_num, devfn);
78 }
79 
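/*
 * An RTAS config address carries the low byte of the register number in
 * bits 0-7 and the extended register bits 8-11 in bits 28-31; this helper
 * recombines them into a plain config space offset.
 */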
80 static uint32_t rtas_pci_cfgaddr(uint32_t arg)
81 {
82     /* This handles the encoding of extended config space addresses */
83     return ((arg >> 20) & 0xf00) | (arg & 0xff);
84 }
85 
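/*
 * Common tail of read-pci-config and ibm,read-pci-config: validate the
 * access size and alignment, perform the config space read and store the
 * status and value into the RTAS return buffer.
 */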
86 static void finish_read_pci_config(sPAPREnvironment *spapr, uint64_t buid,
87                                    uint32_t addr, uint32_t size,
88                                    target_ulong rets)
89 {
90     PCIDevice *pci_dev;
91     uint32_t val;
92 
93     if ((size != 1) && (size != 2) && (size != 4)) {
94         /* access must be 1, 2 or 4 bytes */
95         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
96         return;
97     }
98 
99     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
100     addr = rtas_pci_cfgaddr(addr);
101 
102     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
103         /* Access must be to a valid device, within bounds and
104          * naturally aligned */
105         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
106         return;
107     }
108 
109     val = pci_host_config_read_common(pci_dev, addr,
110                                       pci_config_size(pci_dev), size);
111 
112     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
113     rtas_st(rets, 1, val);
114 }
115 
116 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
117                                      uint32_t token, uint32_t nargs,
118                                      target_ulong args,
119                                      uint32_t nret, target_ulong rets)
120 {
121     uint64_t buid;
122     uint32_t size, addr;
123 
124     if ((nargs != 4) || (nret != 2)) {
125         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
126         return;
127     }
128 
129     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
130     size = rtas_ld(args, 3);
131     addr = rtas_ld(args, 0);
132 
133     finish_read_pci_config(spapr, buid, addr, size, rets);
134 }
135 
136 static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
137                                  uint32_t token, uint32_t nargs,
138                                  target_ulong args,
139                                  uint32_t nret, target_ulong rets)
140 {
141     uint32_t size, addr;
142 
143     if ((nargs != 2) || (nret != 2)) {
144         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
145         return;
146     }
147 
148     size = rtas_ld(args, 1);
149     addr = rtas_ld(args, 0);
150 
151     finish_read_pci_config(spapr, 0, addr, size, rets);
152 }
153 
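/* Write counterpart of finish_read_pci_config(): same checks, then a config space write. */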
154 static void finish_write_pci_config(sPAPREnvironment *spapr, uint64_t buid,
155                                     uint32_t addr, uint32_t size,
156                                     uint32_t val, target_ulong rets)
157 {
158     PCIDevice *pci_dev;
159 
160     if ((size != 1) && (size != 2) && (size != 4)) {
161         /* access must be 1, 2 or 4 bytes */
162         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
163         return;
164     }
165 
166     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
167     addr = rtas_pci_cfgaddr(addr);
168 
169     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
170         /* Access must be to a valid device, within bounds and
171          * naturally aligned */
172         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
173         return;
174     }
175 
176     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
177                                  val, size);
178 
179     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
180 }
181 
182 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
183                                       uint32_t token, uint32_t nargs,
184                                       target_ulong args,
185                                       uint32_t nret, target_ulong rets)
186 {
187     uint64_t buid;
188     uint32_t val, size, addr;
189 
190     if ((nargs != 5) || (nret != 1)) {
191         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
192         return;
193     }
194 
195     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
196     val = rtas_ld(args, 4);
197     size = rtas_ld(args, 3);
198     addr = rtas_ld(args, 0);
199 
200     finish_write_pci_config(spapr, buid, addr, size, val, rets);
201 }
202 
203 static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
204                                   uint32_t token, uint32_t nargs,
205                                   target_ulong args,
206                                   uint32_t nret, target_ulong rets)
207 {
208     uint32_t val, size, addr;
209 
210     if ((nargs != 3) || (nret != 1)) {
211         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
212         return;
213     }
214 
215 
216     val = rtas_ld(args, 2);
217     size = rtas_ld(args, 1);
218     addr = rtas_ld(args, 0);
219 
220     finish_write_pci_config(spapr, 0, addr, size, val, rets);
221 }
222 
223 /*
224  * Set MSI/MSIX message data.
225  * This is required by msi_notify()/msix_notify(), which
226  * write to these addresses via spapr_msi_write().
227  *
228  * If addr == 0, all entries get .data == first_irq, i.e. the
229  * table is reset.
230  */
231 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
232                              unsigned first_irq, unsigned req_num)
233 {
234     unsigned i;
235     MSIMessage msg = { .address = addr, .data = first_irq };
236 
237     if (!msix) {
238         msi_set_message(pdev, msg);
239         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
240         return;
241     }
242 
243     for (i = 0; i < req_num; ++i) {
244         msix_set_message(pdev, i, msg);
245         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
246         if (addr) {
247             ++msg.data;
248         }
249     }
250 }
251 
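/*
 * ibm,change-msi RTAS call: "func" selects MSI or MSI-X, and req_num == 0
 * releases any interrupts previously configured for the device.  Otherwise
 * a block of IRQs is allocated from XICS, the device vectors are pointed
 * at the MSI window and the allocation is cached in phb->msi, keyed by the
 * config address.
 */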
252 static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
253                                 uint32_t token, uint32_t nargs,
254                                 target_ulong args, uint32_t nret,
255                                 target_ulong rets)
256 {
257     uint32_t config_addr = rtas_ld(args, 0);
258     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
259     unsigned int func = rtas_ld(args, 3);
260     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
261     unsigned int seq_num = rtas_ld(args, 5);
262     unsigned int ret_intr_type;
263     unsigned int irq, max_irqs = 0, num = 0;
264     sPAPRPHBState *phb = NULL;
265     PCIDevice *pdev = NULL;
266     spapr_pci_msi *msi;
267     int *config_addr_key;
268 
269     switch (func) {
270     case RTAS_CHANGE_MSI_FN:
271     case RTAS_CHANGE_FN:
272         ret_intr_type = RTAS_TYPE_MSI;
273         break;
274     case RTAS_CHANGE_MSIX_FN:
275         ret_intr_type = RTAS_TYPE_MSIX;
276         break;
277     default:
278         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
279         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
280         return;
281     }
282 
283     /* Find sPAPRPHBState */
284     phb = spapr_pci_find_phb(spapr, buid);
285     if (phb) {
286         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
287     }
288     if (!phb || !pdev) {
289         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
290         return;
291     }
292 
293     /* Releasing MSIs */
294     if (!req_num) {
295         msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
296         if (!msi) {
297             trace_spapr_pci_msi("Releasing wrong config", config_addr);
298             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
299             return;
300         }
301 
302         xics_free(spapr->icp, msi->first_irq, msi->num);
303         if (msi_present(pdev)) {
304             spapr_msi_setmsg(pdev, 0, false, 0, num);
305         }
306         if (msix_present(pdev)) {
307             spapr_msi_setmsg(pdev, 0, true, 0, num);
308         }
309         g_hash_table_remove(phb->msi, &config_addr);
310 
311         trace_spapr_pci_msi("Released MSIs", config_addr);
312         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
313         rtas_st(rets, 1, 0);
314         return;
315     }
316 
317     /* Enabling MSI */
318 
319     /* Check if the device supports as many IRQs as requested */
320     if (ret_intr_type == RTAS_TYPE_MSI) {
321         max_irqs = msi_nr_vectors_allocated(pdev);
322     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
323         max_irqs = pdev->msix_entries_nr;
324     }
325     if (!max_irqs) {
326         error_report("Requested interrupt type %d is not enabled for device %x",
327                      ret_intr_type, config_addr);
328         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
329         return;
330     }
331     /* Correct the number if the guest asked for too many */
332     if (req_num > max_irqs) {
333         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
334         req_num = max_irqs;
335         irq = 0; /* to avoid misleading trace */
336         goto out;
337     }
338 
339     /* Allocate MSIs */
340     irq = xics_alloc_block(spapr->icp, 0, req_num, false,
341                            ret_intr_type == RTAS_TYPE_MSI);
342     if (!irq) {
343         error_report("Cannot allocate MSIs for device %x", config_addr);
344         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
345         return;
346     }
347 
348     /* Set up MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
349     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
350                      irq, req_num);
351 
352     /* Add MSI device to cache */
353     msi = g_new(spapr_pci_msi, 1);
354     msi->first_irq = irq;
355     msi->num = req_num;
356     config_addr_key = g_new(int, 1);
357     *config_addr_key = config_addr;
358     g_hash_table_insert(phb->msi, config_addr_key, msi);
359 
360 out:
361     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
362     rtas_st(rets, 1, req_num);
363     rtas_st(rets, 2, ++seq_num);
364     rtas_st(rets, 3, ret_intr_type);
365 
366     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
367 }
368 
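/*
 * ibm,query-interrupt-source-number RTAS call: translate a device-relative
 * MSI vector number into the global IRQ number recorded by ibm,change-msi.
 */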
369 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
370                                                    sPAPREnvironment *spapr,
371                                                    uint32_t token,
372                                                    uint32_t nargs,
373                                                    target_ulong args,
374                                                    uint32_t nret,
375                                                    target_ulong rets)
376 {
377     uint32_t config_addr = rtas_ld(args, 0);
378     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
379     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
380     sPAPRPHBState *phb = NULL;
381     PCIDevice *pdev = NULL;
382     spapr_pci_msi *msi;
383 
384     /* Find sPAPRPHBState */
385     phb = spapr_pci_find_phb(spapr, buid);
386     if (phb) {
387         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
388     }
389     if (!phb || !pdev) {
390         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
391         return;
392     }
393 
394     /* Find the device descriptor and its first IRQ */
395     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
396     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
397         trace_spapr_pci_msi("Failed to return vector", config_addr);
398         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
399         return;
400     }
401     intr_src_num = msi->first_irq + ioa_intr_num;
402     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
403                                                            intr_src_num);
404 
405     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
406     rtas_st(rets, 1, intr_src_num);
407     rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
408 }
409 
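/*
 * The EEH-related RTAS calls below validate their parameters and then
 * dispatch to the PHB class's eeh_* callbacks when the backend provides
 * them; otherwise they fail with a parameter error.
 */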
410 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
411                                     sPAPREnvironment *spapr,
412                                     uint32_t token, uint32_t nargs,
413                                     target_ulong args, uint32_t nret,
414                                     target_ulong rets)
415 {
416     sPAPRPHBState *sphb;
417     sPAPRPHBClass *spc;
418     uint32_t addr, option;
419     uint64_t buid;
420     int ret;
421 
422     if ((nargs != 4) || (nret != 1)) {
423         goto param_error_exit;
424     }
425 
426     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
427     addr = rtas_ld(args, 0);
428     option = rtas_ld(args, 3);
429 
430     sphb = spapr_pci_find_phb(spapr, buid);
431     if (!sphb) {
432         goto param_error_exit;
433     }
434 
435     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
436     if (!spc->eeh_set_option) {
437         goto param_error_exit;
438     }
439 
440     ret = spc->eeh_set_option(sphb, addr, option);
441     rtas_st(rets, 0, ret);
442     return;
443 
444 param_error_exit:
445     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
446 }
447 
448 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
449                                            sPAPREnvironment *spapr,
450                                            uint32_t token, uint32_t nargs,
451                                            target_ulong args, uint32_t nret,
452                                            target_ulong rets)
453 {
454     sPAPRPHBState *sphb;
455     sPAPRPHBClass *spc;
456     PCIDevice *pdev;
457     uint32_t addr, option;
458     uint64_t buid;
459 
460     if ((nargs != 4) || (nret != 2)) {
461         goto param_error_exit;
462     }
463 
464     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
465     sphb = spapr_pci_find_phb(spapr, buid);
466     if (!sphb) {
467         goto param_error_exit;
468     }
469 
470     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
471     if (!spc->eeh_set_option) {
472         goto param_error_exit;
473     }
474 
475     /*
476      * We always have PE address of form "00BB0001". "BB"
477      * represents the bus number of PE's primary bus.
478      */
479     option = rtas_ld(args, 3);
480     switch (option) {
481     case RTAS_GET_PE_ADDR:
482         addr = rtas_ld(args, 0);
483         pdev = spapr_pci_find_dev(spapr, buid, addr);
484         if (!pdev) {
485             goto param_error_exit;
486         }
487 
488         rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
489         break;
490     case RTAS_GET_PE_MODE:
491         rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
492         break;
493     default:
494         goto param_error_exit;
495     }
496 
497     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
498     return;
499 
500 param_error_exit:
501     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
502 }
503 
504 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
505                                             sPAPREnvironment *spapr,
506                                             uint32_t token, uint32_t nargs,
507                                             target_ulong args, uint32_t nret,
508                                             target_ulong rets)
509 {
510     sPAPRPHBState *sphb;
511     sPAPRPHBClass *spc;
512     uint64_t buid;
513     int state, ret;
514 
515     if ((nargs != 3) || (nret != 4 && nret != 5)) {
516         goto param_error_exit;
517     }
518 
519     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
520     sphb = spapr_pci_find_phb(spapr, buid);
521     if (!sphb) {
522         goto param_error_exit;
523     }
524 
525     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
526     if (!spc->eeh_get_state) {
527         goto param_error_exit;
528     }
529 
530     ret = spc->eeh_get_state(sphb, &state);
531     rtas_st(rets, 0, ret);
532     if (ret != RTAS_OUT_SUCCESS) {
533         return;
534     }
535 
536     rtas_st(rets, 1, state);
537     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
538     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
539     if (nret >= 5) {
540         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
541     }
542     return;
543 
544 param_error_exit:
545     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
546 }
547 
548 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
549                                     sPAPREnvironment *spapr,
550                                     uint32_t token, uint32_t nargs,
551                                     target_ulong args, uint32_t nret,
552                                     target_ulong rets)
553 {
554     sPAPRPHBState *sphb;
555     sPAPRPHBClass *spc;
556     uint32_t option;
557     uint64_t buid;
558     int ret;
559 
560     if ((nargs != 4) || (nret != 1)) {
561         goto param_error_exit;
562     }
563 
564     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
565     option = rtas_ld(args, 3);
566     sphb = spapr_pci_find_phb(spapr, buid);
567     if (!sphb) {
568         goto param_error_exit;
569     }
570 
571     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
572     if (!spc->eeh_reset) {
573         goto param_error_exit;
574     }
575 
576     ret = spc->eeh_reset(sphb, option);
577     rtas_st(rets, 0, ret);
578     return;
579 
580 param_error_exit:
581     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
582 }
583 
584 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
585                                   sPAPREnvironment *spapr,
586                                   uint32_t token, uint32_t nargs,
587                                   target_ulong args, uint32_t nret,
588                                   target_ulong rets)
589 {
590     sPAPRPHBState *sphb;
591     sPAPRPHBClass *spc;
592     uint64_t buid;
593     int ret;
594 
595     if ((nargs != 3) || (nret != 1)) {
596         goto param_error_exit;
597     }
598 
599     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
600     sphb = spapr_pci_find_phb(spapr, buid);
601     if (!sphb) {
602         goto param_error_exit;
603     }
604 
605     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
606     if (!spc->eeh_configure) {
607         goto param_error_exit;
608     }
609 
610     ret = spc->eeh_configure(sphb);
611     rtas_st(rets, 0, ret);
612     return;
613 
614 param_error_exit:
615     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
616 }
617 
618 /* To be supported later */
619 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
620                                        sPAPREnvironment *spapr,
621                                        uint32_t token, uint32_t nargs,
622                                        target_ulong args, uint32_t nret,
623                                        target_ulong rets)
624 {
625     sPAPRPHBState *sphb;
626     sPAPRPHBClass *spc;
627     int option;
628     uint64_t buid;
629 
630     if ((nargs != 8) || (nret != 1)) {
631         goto param_error_exit;
632     }
633 
634     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
635     sphb = spapr_pci_find_phb(spapr, buid);
636     if (!sphb) {
637         goto param_error_exit;
638     }
639 
640     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
641     if (!spc->eeh_set_option) {
642         goto param_error_exit;
643     }
644 
645     option = rtas_ld(args, 7);
646     switch (option) {
647     case RTAS_SLOT_TEMP_ERR_LOG:
648     case RTAS_SLOT_PERM_ERR_LOG:
649         break;
650     default:
651         goto param_error_exit;
652     }
653 
654     /* We don't have an error log yet */
655     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
656     return;
657 
658 param_error_exit:
659     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
660 }
661 
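/*
 * Standard PCI INTx swizzling: for example, a device in slot 3 raising
 * INTB (pin index 1) maps to LSI (3 + 1) % 4 = 0.
 */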
662 static int pci_spapr_swizzle(int slot, int pin)
663 {
664     return (slot + pin) % PCI_NUM_PINS;
665 }
666 
667 static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
668 {
669     /*
670      * Here we need to convert pci_dev + irq_num to some unique value
671      * which is less than number of IRQs on the specific bus (4).  We
672      * use standard PCI swizzling, that is (slot number + pin number)
673      * % 4.
674      */
675     return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
676 }
677 
678 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
679 {
680     /*
681      * Here we use the number returned by pci_spapr_map_irq to find a
682      * corresponding qemu_irq.
683      */
684     sPAPRPHBState *phb = opaque;
685 
686     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
687     qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
688 }
689 
690 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
691 {
692     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
693     PCIINTxRoute route;
694 
695     route.mode = PCI_INTX_ENABLED;
696     route.irq = sphb->lsi_table[pin].irq;
697 
698     return route;
699 }
700 
701 /*
702  * MSI/MSIX memory region implementation.
703  * The same handler serves both MSI and MSIX.
704  * For MSI-X, the vector number is encoded as part of the address
705  * and data is set to 0.
706  * For MSI, the vector number is encoded in the least significant bits of data.
707  */
708 static void spapr_msi_write(void *opaque, hwaddr addr,
709                             uint64_t data, unsigned size)
710 {
711     uint32_t irq = data;
712 
713     trace_spapr_pci_msi_write(addr, data, irq);
714 
715     qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
716 }
717 
718 static const MemoryRegionOps spapr_msi_ops = {
719     /* There is no .read as the read result is undefined by the PCI spec */
720     .read = NULL,
721     .write = spapr_msi_write,
722     .endianness = DEVICE_LITTLE_ENDIAN
723 };
724 
725 /*
726  * PHB PCI device
727  */
728 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
729 {
730     sPAPRPHBState *phb = opaque;
731 
732     return &phb->iommu_as;
733 }
734 
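/*
 * Realize the PHB: derive BUID, LIOBN and window addresses from "index"
 * (unless given explicitly), create the MMIO and IO windows, register the
 * PCI bus, set up the IOMMU address space with its MSI window, allocate
 * the LSIs and optional hotplug connectors, then call finish_realize.
 */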
735 static void spapr_phb_realize(DeviceState *dev, Error **errp)
736 {
737     SysBusDevice *s = SYS_BUS_DEVICE(dev);
738     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
739     PCIHostState *phb = PCI_HOST_BRIDGE(s);
740     sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s);
741     char *namebuf;
742     int i;
743     PCIBus *bus;
744     uint64_t msi_window_size = 4096;
745 
746     if (sphb->index != (uint32_t)-1) {
747         hwaddr windows_base;
748 
749         if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
750             || (sphb->mem_win_addr != (hwaddr)-1)
751             || (sphb->io_win_addr != (hwaddr)-1)) {
752             error_setg(errp, "Either \"index\" or other parameters must"
753                        " be specified for PAPR PHB, not both");
754             return;
755         }
756 
757         if (sphb->index > SPAPR_PCI_MAX_INDEX) {
758             error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
759                        SPAPR_PCI_MAX_INDEX);
760             return;
761         }
762 
763         sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
764         sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
765 
766         windows_base = SPAPR_PCI_WINDOW_BASE
767             + sphb->index * SPAPR_PCI_WINDOW_SPACING;
768         sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
769         sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
770     }
771 
772     if (sphb->buid == (uint64_t)-1) {
773         error_setg(errp, "BUID not specified for PHB");
774         return;
775     }
776 
777     if (sphb->dma_liobn == (uint32_t)-1) {
778         error_setg(errp, "LIOBN not specified for PHB");
779         return;
780     }
781 
782     if (sphb->mem_win_addr == (hwaddr)-1) {
783         error_setg(errp, "Memory window address not specified for PHB");
784         return;
785     }
786 
787     if (sphb->io_win_addr == (hwaddr)-1) {
788         error_setg(errp, "IO window address not specified for PHB");
789         return;
790     }
791 
792     if (spapr_pci_find_phb(spapr, sphb->buid)) {
793         error_setg(errp, "PCI host bridges must have unique BUIDs");
794         return;
795     }
796 
797     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
798 
799     namebuf = alloca(strlen(sphb->dtbusname) + 32);
800 
801     /* Initialize memory regions */
802     sprintf(namebuf, "%s.mmio", sphb->dtbusname);
803     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
804 
805     sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
806     memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
807                              namebuf, &sphb->memspace,
808                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
809     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
810                                 &sphb->memwindow);
811 
812     /* Initialize IO regions */
813     sprintf(namebuf, "%s.io", sphb->dtbusname);
814     memory_region_init(&sphb->iospace, OBJECT(sphb),
815                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
816 
817     sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
818     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
819                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
820     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
821                                 &sphb->iowindow);
822 
823     bus = pci_register_bus(dev, NULL,
824                            pci_spapr_set_irq, pci_spapr_map_irq, sphb,
825                            &sphb->memspace, &sphb->iospace,
826                            PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
827     phb->bus = bus;
828 
829     /*
830      * Initialize PHB address space.
831      * By default there will be at least one subregion for the default
832      * 32bit DMA window.
833      * Later the guest might want to create another DMA window,
834      * which will become another memory subregion.
835      */
836     sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
837 
838     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
839                        namebuf, UINT64_MAX);
840     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
841                        sphb->dtbusname);
842 
843     /*
844      * As MSI/MSIX interrupts are triggered by writing to the MSI/MSIX
845      * vectors, we need to allocate some memory to catch those writes
846      * coming from msi_notify()/msix_notify().
847      * As MSIMessage::addr is the same for every vector and
848      * MSIMessage::data is a VIRQ number, only 4 bytes of the MSI MR
849      * will ever be used.
850      *
851      * For KVM we want to ensure that this memory is a full page so that
852      * our memory slot is of page size granularity.
853      */
854 #ifdef CONFIG_KVM
855     if (kvm_enabled()) {
856         msi_window_size = getpagesize();
857     }
858 #endif
859 
860     memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
861                           "msi", msi_window_size);
862     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
863                                 &sphb->msiwindow);
864 
865     pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
866 
867     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
868 
869     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
870 
871     /* Initialize the LSI table */
872     for (i = 0; i < PCI_NUM_PINS; i++) {
873         uint32_t irq;
874 
875         irq = xics_alloc_block(spapr->icp, 0, 1, true, false);
876         if (!irq) {
877             error_setg(errp, "spapr_allocate_lsi failed");
878             return;
879         }
880 
881         sphb->lsi_table[i].irq = irq;
882     }
883 
884     /* allocate connectors for child PCI devices */
885     if (sphb->dr_enabled) {
886         for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
887             spapr_dr_connector_new(OBJECT(phb),
888                                    SPAPR_DR_CONNECTOR_TYPE_PCI,
889                                    (sphb->index << 16) | i);
890         }
891     }
892 
893     if (!info->finish_realize) {
894         error_setg(errp, "finish_realize not defined");
895         return;
896     }
897 
898     info->finish_realize(sphb, errp);
899 
900     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
901 }
902 
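/*
 * Default finish_realize hook: create the 32bit TCE table for the PHB's
 * LIOBN and map it at offset 0 of the IOMMU root region as the default
 * DMA window.
 */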
903 static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
904 {
905     sPAPRTCETable *tcet;
906     uint32_t nb_table;
907 
908     nb_table = SPAPR_PCI_DMA32_SIZE >> SPAPR_TCE_PAGE_SHIFT;
909     tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
910                                0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
911     if (!tcet) {
912         error_setg(errp, "Unable to create TCE table for %s",
913                    sphb->dtbusname);
914         return;
915     }
916 
917     /* Register default 32bit DMA window */
918     memory_region_add_subregion(&sphb->iommu_root, 0,
919                                 spapr_tce_get_iommu(tcet));
920 }
921 
922 static int spapr_phb_children_reset(Object *child, void *opaque)
923 {
924     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
925 
926     if (dev) {
927         device_reset(dev);
928     }
929 
930     return 0;
931 }
932 
933 static void spapr_phb_reset(DeviceState *qdev)
934 {
935     /* Reset the IOMMU state */
936     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
937 }
938 
939 static Property spapr_phb_properties[] = {
940     DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
941     DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
942     DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
943     DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
944     DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
945                        SPAPR_PCI_MMIO_WIN_SIZE),
946     DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
947     DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
948                        SPAPR_PCI_IO_WIN_SIZE),
949     DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
950                      true),
951     DEFINE_PROP_END_OF_LIST(),
952 };
953 
954 static const VMStateDescription vmstate_spapr_pci_lsi = {
955     .name = "spapr_pci/lsi",
956     .version_id = 1,
957     .minimum_version_id = 1,
958     .fields = (VMStateField[]) {
959         VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),
960 
961         VMSTATE_END_OF_LIST()
962     },
963 };
964 
965 static const VMStateDescription vmstate_spapr_pci_msi = {
966     .name = "spapr_pci/msi",
967     .version_id = 1,
968     .minimum_version_id = 1,
969     .fields = (VMStateField []) {
970         VMSTATE_UINT32(key, spapr_pci_msi_mig),
971         VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
972         VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
973         VMSTATE_END_OF_LIST()
974     },
975 };
976 
977 static void spapr_pci_fill_msi_devs(gpointer key, gpointer value,
978                                     gpointer opaque)
979 {
980     sPAPRPHBState *sphb = opaque;
981 
982     sphb->msi_devs[sphb->msi_devs_num].key = *(uint32_t *)key;
983     sphb->msi_devs[sphb->msi_devs_num].value = *(spapr_pci_msi *)value;
984     sphb->msi_devs_num++;
985 }
986 
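/* Flatten the MSI hash table into the msi_devs array for migration. */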
987 static void spapr_pci_pre_save(void *opaque)
988 {
989     sPAPRPHBState *sphb = opaque;
990     int msi_devs_num;
991 
992     if (sphb->msi_devs) {
993         g_free(sphb->msi_devs);
994         sphb->msi_devs = NULL;
995     }
996     sphb->msi_devs_num = 0;
997     msi_devs_num = g_hash_table_size(sphb->msi);
998     if (!msi_devs_num) {
999         return;
1000     }
1001     sphb->msi_devs = g_malloc(msi_devs_num * sizeof(spapr_pci_msi_mig));
1002 
1003     g_hash_table_foreach(sphb->msi, spapr_pci_fill_msi_devs, sphb);
1004     assert(sphb->msi_devs_num == msi_devs_num);
1005 }
1006 
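/* Rebuild the MSI hash table from the migrated msi_devs array, then free it. */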
1007 static int spapr_pci_post_load(void *opaque, int version_id)
1008 {
1009     sPAPRPHBState *sphb = opaque;
1010     gpointer key, value;
1011     int i;
1012 
1013     for (i = 0; i < sphb->msi_devs_num; ++i) {
1014         key = g_memdup(&sphb->msi_devs[i].key,
1015                        sizeof(sphb->msi_devs[i].key));
1016         value = g_memdup(&sphb->msi_devs[i].value,
1017                          sizeof(sphb->msi_devs[i].value));
1018         g_hash_table_insert(sphb->msi, key, value);
1019     }
1020     if (sphb->msi_devs) {
1021         g_free(sphb->msi_devs);
1022         sphb->msi_devs = NULL;
1023     }
1024     sphb->msi_devs_num = 0;
1025 
1026     return 0;
1027 }
1028 
1029 static const VMStateDescription vmstate_spapr_pci = {
1030     .name = "spapr_pci",
1031     .version_id = 2,
1032     .minimum_version_id = 2,
1033     .pre_save = spapr_pci_pre_save,
1034     .post_load = spapr_pci_post_load,
1035     .fields = (VMStateField[]) {
1036         VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
1037         VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
1038         VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
1039         VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
1040         VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
1041         VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
1042         VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
1043                              vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
1044         VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
1045         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
1046                                     vmstate_spapr_pci_msi, spapr_pci_msi_mig),
1047         VMSTATE_END_OF_LIST()
1048     },
1049 };
1050 
1051 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
1052                                            PCIBus *rootbus)
1053 {
1054     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
1055 
1056     return sphb->dtbusname;
1057 }
1058 
1059 static void spapr_phb_class_init(ObjectClass *klass, void *data)
1060 {
1061     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
1062     DeviceClass *dc = DEVICE_CLASS(klass);
1063     sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
1064 
1065     hc->root_bus_path = spapr_phb_root_bus_path;
1066     dc->realize = spapr_phb_realize;
1067     dc->props = spapr_phb_properties;
1068     dc->reset = spapr_phb_reset;
1069     dc->vmsd = &vmstate_spapr_pci;
1070     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
1071     dc->cannot_instantiate_with_device_add_yet = false;
1072     spc->finish_realize = spapr_phb_finish_realize;
1073 }
1074 
1075 static const TypeInfo spapr_phb_info = {
1076     .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
1077     .parent        = TYPE_PCI_HOST_BRIDGE,
1078     .instance_size = sizeof(sPAPRPHBState),
1079     .class_init    = spapr_phb_class_init,
1080     .class_size    = sizeof(sPAPRPHBClass),
1081 };
1082 
1083 PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index)
1084 {
1085     DeviceState *dev;
1086 
1087     dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
1088     qdev_prop_set_uint32(dev, "index", index);
1089     qdev_init_nofail(dev);
1090 
1091     return PCI_HOST_BRIDGE(dev);
1092 }
1093 
1094 /* Macros to operate with address in OF binding to PCI */
1095 #define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
1096 #define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
1097 #define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
1098 #define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
1099 #define b_ss(x)         b_x((x), 24, 2) /* the space code */
1100 #define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
1101 #define b_ddddd(x)      b_x((x), 11, 5) /* device number */
1102 #define b_fff(x)        b_x((x), 8, 3)  /* function number */
1103 #define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
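/*
 * The "ss" space code selects the address space: the "ranges" property
 * below uses b_ss(1) for I/O space, b_ss(2) for 32-bit memory and
 * b_ss(3) for 64-bit memory.
 */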
1104 
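/*
 * Add the PHB node with its standard properties (ranges, reg, interrupt
 * map, DMA window and hotplug DR connectors) to the flattened device tree.
 */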
1105 int spapr_populate_pci_dt(sPAPRPHBState *phb,
1106                           uint32_t xics_phandle,
1107                           void *fdt)
1108 {
1109     int bus_off, i, j, ret;
1110     char nodename[256];
1111     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
1112     const uint64_t mmiosize = memory_region_size(&phb->memwindow);
1113     const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
1114     const uint64_t w32size = MIN(w32max, mmiosize);
1115     const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
1116     struct {
1117         uint32_t hi;
1118         uint64_t child;
1119         uint64_t parent;
1120         uint64_t size;
1121     } QEMU_PACKED ranges[] = {
1122         {
1123             cpu_to_be32(b_ss(1)), cpu_to_be64(0),
1124             cpu_to_be64(phb->io_win_addr),
1125             cpu_to_be64(memory_region_size(&phb->iospace)),
1126         },
1127         {
1128             cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
1129             cpu_to_be64(phb->mem_win_addr),
1130             cpu_to_be64(w32size),
1131         },
1132         {
1133             cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
1134             cpu_to_be64(phb->mem_win_addr + w32size),
1135             cpu_to_be64(w64size)
1136         },
1137     };
1138     const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
1139     uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
1140     uint32_t interrupt_map_mask[] = {
1141         cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
1142     uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
1143     sPAPRTCETable *tcet;
1144 
1145     /* Start populating the FDT */
1146     sprintf(nodename, "pci@%" PRIx64, phb->buid);
1147     bus_off = fdt_add_subnode(fdt, 0, nodename);
1148     if (bus_off < 0) {
1149         return bus_off;
1150     }
1151 
1152 #define _FDT(exp) \
1153     do { \
1154         int ret = (exp);                                           \
1155         if (ret < 0) {                                             \
1156             return ret;                                            \
1157         }                                                          \
1158     } while (0)
1159 
1160     /* Write PHB properties */
1161     _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
1162     _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
1163     _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
1164     _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
1165     _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
1166     _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
1167     _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
1168     _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
1169     _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
1170     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
1171     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));
1172 
1173     /* Build the interrupt-map; this must match what is done
1174      * in pci_spapr_map_irq
1175      */
1176     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
1177                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
1178     for (i = 0; i < PCI_SLOT_MAX; i++) {
1179         for (j = 0; j < PCI_NUM_PINS; j++) {
1180             uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
1181             int lsi_num = pci_spapr_swizzle(i, j);
1182 
1183             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
1184             irqmap[1] = 0;
1185             irqmap[2] = 0;
1186             irqmap[3] = cpu_to_be32(j+1);
1187             irqmap[4] = cpu_to_be32(xics_phandle);
1188             irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
1189             irqmap[6] = cpu_to_be32(0x8);
1190         }
1191     }
1192     /* Write interrupt map */
1193     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
1194                      sizeof(interrupt_map)));
1195 
1196     tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
1197     spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
1198                  tcet->liobn, tcet->bus_offset,
1199                  tcet->nb_table << tcet->page_shift);
1200 
1201     ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
1202                                 SPAPR_DR_CONNECTOR_TYPE_PCI);
1203     if (ret) {
1204         return ret;
1205     }
1206 
1207     return 0;
1208 }
1209 
1210 void spapr_pci_rtas_init(void)
1211 {
1212     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
1213                         rtas_read_pci_config);
1214     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
1215                         rtas_write_pci_config);
1216     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
1217                         rtas_ibm_read_pci_config);
1218     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
1219                         rtas_ibm_write_pci_config);
1220     if (msi_supported) {
1221         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
1222                             "ibm,query-interrupt-source-number",
1223                             rtas_ibm_query_interrupt_source_number);
1224         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
1225                             rtas_ibm_change_msi);
1226     }
1227 
1228     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
1229                         "ibm,set-eeh-option",
1230                         rtas_ibm_set_eeh_option);
1231     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
1232                         "ibm,get-config-addr-info2",
1233                         rtas_ibm_get_config_addr_info2);
1234     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
1235                         "ibm,read-slot-reset-state2",
1236                         rtas_ibm_read_slot_reset_state2);
1237     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
1238                         "ibm,set-slot-reset",
1239                         rtas_ibm_set_slot_reset);
1240     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
1241                         "ibm,configure-pe",
1242                         rtas_ibm_configure_pe);
1243     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
1244                         "ibm,slot-error-detail",
1245                         rtas_ibm_slot_error_detail);
1246 }
1247 
1248 static void spapr_pci_register_types(void)
1249 {
1250     type_register_static(&spapr_phb_info);
1251 }
1252 
1253 type_init(spapr_pci_register_types)
1254 
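/*
 * qbus_walk_children() callback: flip the framebuffer endianness of any
 * VGA or secondary-vga device found on the bus.
 */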
1255 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
1256 {
1257     bool be = *(bool *)opaque;
1258 
1259     if (object_dynamic_cast(OBJECT(dev), "VGA")
1260         || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
1261         object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
1262                                  &error_abort);
1263     }
1264     return 0;
1265 }
1266 
1267 void spapr_pci_switch_vga(bool big_endian)
1268 {
1269     sPAPRPHBState *sphb;
1270 
1271     /*
1272      * For backward compatibility with existing guests, we switch
1273      * the endianness of the VGA controller when changing the guest
1274      * interrupt mode
1275      */
1276     QLIST_FOREACH(sphb, &spapr->phbs, list) {
1277         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
1278         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
1279                            &big_endian);
1280     }
1281 }
1282