xref: /qemu/hw/ppc/spapr_pci.c (revision 9b7d9284c3b114112a7759ce0a885df0767fe8d9)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "hw/hw.h"
26 #include "hw/pci/pci.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/pci/pci_host.h"
30 #include "hw/ppc/spapr.h"
31 #include "hw/pci-host/spapr.h"
32 #include "exec/address-spaces.h"
33 #include <libfdt.h>
34 #include "trace.h"
35 #include "qemu/error-report.h"
36 #include "qapi/qmp/qerror.h"
37 
38 #include "hw/pci/pci_bus.h"
39 #include "hw/ppc/spapr_drc.h"
40 #include "sysemu/device_tree.h"
41 
42 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
43 #define RTAS_QUERY_FN           0
44 #define RTAS_CHANGE_FN          1
45 #define RTAS_RESET_FN           2
46 #define RTAS_CHANGE_MSI_FN      3
47 #define RTAS_CHANGE_MSIX_FN     4
48 
49 /* Interrupt types to return on RTAS_CHANGE_* */
50 #define RTAS_TYPE_MSI           1
51 #define RTAS_TYPE_MSIX          2
52 
53 #define FDT_NAME_MAX          128
54 
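/* Bail out of the enclosing function with the libfdt error code on failure */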
55 #define _FDT(exp) \
56     do { \
57         int ret = (exp);                                           \
58         if (ret < 0) {                                             \
59             return ret;                                            \
60         }                                                          \
61     } while (0)
62 
63 sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid)
64 {
65     sPAPRPHBState *sphb;
66 
67     QLIST_FOREACH(sphb, &spapr->phbs, list) {
68         if (sphb->buid != buid) {
69             continue;
70         }
71         return sphb;
72     }
73 
74     return NULL;
75 }
76 
77 PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
78                               uint32_t config_addr)
79 {
80     sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
81     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
82     int bus_num = (config_addr >> 16) & 0xFF;
83     int devfn = (config_addr >> 8) & 0xFF;
84 
85     if (!phb) {
86         return NULL;
87     }
88 
89     return pci_find_device(phb->bus, bus_num, devfn);
90 }
91 
92 static uint32_t rtas_pci_cfgaddr(uint32_t arg)
93 {
94     /* This handles the encoding of extended config space addresses */
95     return ((arg >> 20) & 0xf00) | (arg & 0xff);
96 }
97 
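/*
 * Common back end for the read-pci-config and ibm,read-pci-config RTAS
 * calls: validates the access size and address, reads the device's
 * config space and stores the status and value into the return area.
 */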
98 static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid,
99                                    uint32_t addr, uint32_t size,
100                                    target_ulong rets)
101 {
102     PCIDevice *pci_dev;
103     uint32_t val;
104 
105     if ((size != 1) && (size != 2) && (size != 4)) {
106         /* access must be 1, 2 or 4 bytes */
107         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
108         return;
109     }
110 
111     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
112     addr = rtas_pci_cfgaddr(addr);
113 
114     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
115         /* Access must be to a valid device, within bounds and
116          * naturally aligned */
117         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
118         return;
119     }
120 
121     val = pci_host_config_read_common(pci_dev, addr,
122                                       pci_config_size(pci_dev), size);
123 
124     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
125     rtas_st(rets, 1, val);
126 }
127 
128 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
129                                      uint32_t token, uint32_t nargs,
130                                      target_ulong args,
131                                      uint32_t nret, target_ulong rets)
132 {
133     uint64_t buid;
134     uint32_t size, addr;
135 
136     if ((nargs != 4) || (nret != 2)) {
137         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
138         return;
139     }
140 
141     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
142     size = rtas_ld(args, 3);
143     addr = rtas_ld(args, 0);
144 
145     finish_read_pci_config(spapr, buid, addr, size, rets);
146 }
147 
148 static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
149                                  uint32_t token, uint32_t nargs,
150                                  target_ulong args,
151                                  uint32_t nret, target_ulong rets)
152 {
153     uint32_t size, addr;
154 
155     if ((nargs != 2) || (nret != 2)) {
156         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
157         return;
158     }
159 
160     size = rtas_ld(args, 1);
161     addr = rtas_ld(args, 0);
162 
163     finish_read_pci_config(spapr, 0, addr, size, rets);
164 }
165 
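/*
 * Common back end for the write-pci-config and ibm,write-pci-config RTAS
 * calls: validates the access size and address and writes the value into
 * the device's config space.
 */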
166 static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid,
167                                     uint32_t addr, uint32_t size,
168                                     uint32_t val, target_ulong rets)
169 {
170     PCIDevice *pci_dev;
171 
172     if ((size != 1) && (size != 2) && (size != 4)) {
173         /* access must be 1, 2 or 4 bytes */
174         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
175         return;
176     }
177 
178     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
179     addr = rtas_pci_cfgaddr(addr);
180 
181     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
182         /* Access must be to a valid device, within bounds and
183          * naturally aligned */
184         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
185         return;
186     }
187 
188     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
189                                  val, size);
190 
191     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
192 }
193 
194 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
195                                       uint32_t token, uint32_t nargs,
196                                       target_ulong args,
197                                       uint32_t nret, target_ulong rets)
198 {
199     uint64_t buid;
200     uint32_t val, size, addr;
201 
202     if ((nargs != 5) || (nret != 1)) {
203         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
204         return;
205     }
206 
207     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
208     val = rtas_ld(args, 4);
209     size = rtas_ld(args, 3);
210     addr = rtas_ld(args, 0);
211 
212     finish_write_pci_config(spapr, buid, addr, size, val, rets);
213 }
214 
215 static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
216                                   uint32_t token, uint32_t nargs,
217                                   target_ulong args,
218                                   uint32_t nret, target_ulong rets)
219 {
220     uint32_t val, size, addr;
221 
222     if ((nargs != 3) || (nret != 1)) {
223         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
224         return;
225     }
226 
227 
228     val = rtas_ld(args, 2);
229     size = rtas_ld(args, 1);
230     addr = rtas_ld(args, 0);
231 
232     finish_write_pci_config(spapr, 0, addr, size, val, rets);
233 }
234 
235 /*
236  * Set MSI/MSIX message data.
237  * This is required for msi_notify()/msix_notify(), which
238  * will write to these addresses via spapr_msi_write().
239  *
240  * If @addr == 0, all entries will have .data == first_irq, i.e.
241  * the table will be reset.
242  */
243 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
244                              unsigned first_irq, unsigned req_num)
245 {
246     unsigned i;
247     MSIMessage msg = { .address = addr, .data = first_irq };
248 
249     if (!msix) {
250         msi_set_message(pdev, msg);
251         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
252         return;
253     }
254 
255     for (i = 0; i < req_num; ++i) {
256         msix_set_message(pdev, i, msg);
257         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
258         if (addr) {
259             ++msg.data;
260         }
261     }
262 }
263 
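/*
 * ibm,change-msi RTAS call: allocates a block of MSI/MSI-X interrupts
 * for the device identified by buid/config_addr (or releases them when
 * req_num == 0) and points the device's vectors at the PHB's MSI window.
 */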
264 static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
265                                 uint32_t token, uint32_t nargs,
266                                 target_ulong args, uint32_t nret,
267                                 target_ulong rets)
268 {
269     uint32_t config_addr = rtas_ld(args, 0);
270     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
271     unsigned int func = rtas_ld(args, 3);
272     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
273     unsigned int seq_num = rtas_ld(args, 5);
274     unsigned int ret_intr_type;
275     unsigned int irq, max_irqs = 0, num = 0;
276     sPAPRPHBState *phb = NULL;
277     PCIDevice *pdev = NULL;
278     spapr_pci_msi *msi;
279     int *config_addr_key;
280 
281     switch (func) {
282     case RTAS_CHANGE_MSI_FN:
283     case RTAS_CHANGE_FN:
284         ret_intr_type = RTAS_TYPE_MSI;
285         break;
286     case RTAS_CHANGE_MSIX_FN:
287         ret_intr_type = RTAS_TYPE_MSIX;
288         break;
289     default:
290         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
291         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
292         return;
293     }
294 
295     /* Find sPAPRPHBState */
296     phb = spapr_pci_find_phb(spapr, buid);
297     if (phb) {
298         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
299     }
300     if (!phb || !pdev) {
301         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
302         return;
303     }
304 
305     /* Releasing MSIs */
306     if (!req_num) {
307         msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
308         if (!msi) {
309             trace_spapr_pci_msi("Releasing wrong config", config_addr);
310             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
311             return;
312         }
313 
314         xics_free(spapr->icp, msi->first_irq, msi->num);
315         if (msi_present(pdev)) {
316             spapr_msi_setmsg(pdev, 0, false, 0, num);
317         }
318         if (msix_present(pdev)) {
319             spapr_msi_setmsg(pdev, 0, true, 0, num);
320         }
321         g_hash_table_remove(phb->msi, &config_addr);
322 
323         trace_spapr_pci_msi("Released MSIs", config_addr);
324         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
325         rtas_st(rets, 1, 0);
326         return;
327     }
328 
329     /* Enabling MSI */
330 
331     /* Check if the device supports as many IRQs as requested */
332     if (ret_intr_type == RTAS_TYPE_MSI) {
333         max_irqs = msi_nr_vectors_allocated(pdev);
334     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
335         max_irqs = pdev->msix_entries_nr;
336     }
337     if (!max_irqs) {
338         error_report("Requested interrupt type %d is not enabled for device %x",
339                      ret_intr_type, config_addr);
340         rtas_st(rets, 0, -1); /* Hardware error */
341         return;
342     }
343     /* Correct the number if the guest asked for too many */
344     if (req_num > max_irqs) {
345         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
346         req_num = max_irqs;
347         irq = 0; /* to avoid misleading trace */
348         goto out;
349     }
350 
351     /* Allocate MSIs */
352     irq = xics_alloc_block(spapr->icp, 0, req_num, false,
353                            ret_intr_type == RTAS_TYPE_MSI);
354     if (!irq) {
355         error_report("Cannot allocate MSIs for device %x", config_addr);
356         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
357         return;
358     }
359 
360     /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
361     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
362                      irq, req_num);
363 
364     /* Add MSI device to cache */
365     msi = g_new(spapr_pci_msi, 1);
366     msi->first_irq = irq;
367     msi->num = req_num;
368     config_addr_key = g_new(int, 1);
369     *config_addr_key = config_addr;
370     g_hash_table_insert(phb->msi, config_addr_key, msi);
371 
372 out:
373     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
374     rtas_st(rets, 1, req_num);
375     rtas_st(rets, 2, ++seq_num);
376     rtas_st(rets, 3, ret_intr_type);
377 
378     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
379 }
380 
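/*
 * ibm,query-interrupt-source-number RTAS call: translates a per-device
 * MSI vector number into the global interrupt source number that was
 * allocated by ibm,change-msi.
 */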
381 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
382                                                    sPAPRMachineState *spapr,
383                                                    uint32_t token,
384                                                    uint32_t nargs,
385                                                    target_ulong args,
386                                                    uint32_t nret,
387                                                    target_ulong rets)
388 {
389     uint32_t config_addr = rtas_ld(args, 0);
390     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
391     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
392     sPAPRPHBState *phb = NULL;
393     PCIDevice *pdev = NULL;
394     spapr_pci_msi *msi;
395 
396     /* Find sPAPRPHBState */
397     phb = spapr_pci_find_phb(spapr, buid);
398     if (phb) {
399         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
400     }
401     if (!phb || !pdev) {
402         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
403         return;
404     }
405 
406     /* Find the device descriptor and the starting IRQ */
407     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
408     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
409         trace_spapr_pci_msi("Failed to return vector", config_addr);
410         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
411         return;
412     }
413     intr_src_num = msi->first_irq + ioa_intr_num;
414     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
415                                                            intr_src_num);
416 
417     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
418     rtas_st(rets, 1, intr_src_num);
419     rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
420 }
421 
422 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
423                                     sPAPRMachineState *spapr,
424                                     uint32_t token, uint32_t nargs,
425                                     target_ulong args, uint32_t nret,
426                                     target_ulong rets)
427 {
428     sPAPRPHBState *sphb;
429     sPAPRPHBClass *spc;
430     uint32_t addr, option;
431     uint64_t buid;
432     int ret;
433 
434     if ((nargs != 4) || (nret != 1)) {
435         goto param_error_exit;
436     }
437 
438     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
439     addr = rtas_ld(args, 0);
440     option = rtas_ld(args, 3);
441 
442     sphb = spapr_pci_find_phb(spapr, buid);
443     if (!sphb) {
444         goto param_error_exit;
445     }
446 
447     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
448     if (!spc->eeh_set_option) {
449         goto param_error_exit;
450     }
451 
452     ret = spc->eeh_set_option(sphb, addr, option);
453     rtas_st(rets, 0, ret);
454     return;
455 
456 param_error_exit:
457     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
458 }
459 
460 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
461                                            sPAPRMachineState *spapr,
462                                            uint32_t token, uint32_t nargs,
463                                            target_ulong args, uint32_t nret,
464                                            target_ulong rets)
465 {
466     sPAPRPHBState *sphb;
467     sPAPRPHBClass *spc;
468     PCIDevice *pdev;
469     uint32_t addr, option;
470     uint64_t buid;
471 
472     if ((nargs != 4) || (nret != 2)) {
473         goto param_error_exit;
474     }
475 
476     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
477     sphb = spapr_pci_find_phb(spapr, buid);
478     if (!sphb) {
479         goto param_error_exit;
480     }
481 
482     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
483     if (!spc->eeh_set_option) {
484         goto param_error_exit;
485     }
486 
487     /*
488      * We always have a PE address of the form "00BB0001". "BB"
489      * represents the bus number of PE's primary bus.
490      */
491     option = rtas_ld(args, 3);
492     switch (option) {
493     case RTAS_GET_PE_ADDR:
494         addr = rtas_ld(args, 0);
495         pdev = spapr_pci_find_dev(spapr, buid, addr);
496         if (!pdev) {
497             goto param_error_exit;
498         }
499 
500         rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
501         break;
502     case RTAS_GET_PE_MODE:
503         rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
504         break;
505     default:
506         goto param_error_exit;
507     }
508 
509     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
510     return;
511 
512 param_error_exit:
513     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
514 }
515 
516 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
517                                             sPAPRMachineState *spapr,
518                                             uint32_t token, uint32_t nargs,
519                                             target_ulong args, uint32_t nret,
520                                             target_ulong rets)
521 {
522     sPAPRPHBState *sphb;
523     sPAPRPHBClass *spc;
524     uint64_t buid;
525     int state, ret;
526 
527     if ((nargs != 3) || (nret != 4 && nret != 5)) {
528         goto param_error_exit;
529     }
530 
531     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
532     sphb = spapr_pci_find_phb(spapr, buid);
533     if (!sphb) {
534         goto param_error_exit;
535     }
536 
537     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
538     if (!spc->eeh_get_state) {
539         goto param_error_exit;
540     }
541 
542     ret = spc->eeh_get_state(sphb, &state);
543     rtas_st(rets, 0, ret);
544     if (ret != RTAS_OUT_SUCCESS) {
545         return;
546     }
547 
548     rtas_st(rets, 1, state);
549     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
550     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
551     if (nret >= 5) {
552         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
553     }
554     return;
555 
556 param_error_exit:
557     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
558 }
559 
560 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
561                                     sPAPRMachineState *spapr,
562                                     uint32_t token, uint32_t nargs,
563                                     target_ulong args, uint32_t nret,
564                                     target_ulong rets)
565 {
566     sPAPRPHBState *sphb;
567     sPAPRPHBClass *spc;
568     uint32_t option;
569     uint64_t buid;
570     int ret;
571 
572     if ((nargs != 4) || (nret != 1)) {
573         goto param_error_exit;
574     }
575 
576     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
577     option = rtas_ld(args, 3);
578     sphb = spapr_pci_find_phb(spapr, buid);
579     if (!sphb) {
580         goto param_error_exit;
581     }
582 
583     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
584     if (!spc->eeh_reset) {
585         goto param_error_exit;
586     }
587 
588     ret = spc->eeh_reset(sphb, option);
589     rtas_st(rets, 0, ret);
590     return;
591 
592 param_error_exit:
593     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
594 }
595 
596 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
597                                   sPAPRMachineState *spapr,
598                                   uint32_t token, uint32_t nargs,
599                                   target_ulong args, uint32_t nret,
600                                   target_ulong rets)
601 {
602     sPAPRPHBState *sphb;
603     sPAPRPHBClass *spc;
604     uint64_t buid;
605     int ret;
606 
607     if ((nargs != 3) || (nret != 1)) {
608         goto param_error_exit;
609     }
610 
611     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
612     sphb = spapr_pci_find_phb(spapr, buid);
613     if (!sphb) {
614         goto param_error_exit;
615     }
616 
617     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
618     if (!spc->eeh_configure) {
619         goto param_error_exit;
620     }
621 
622     ret = spc->eeh_configure(sphb);
623     rtas_st(rets, 0, ret);
624     return;
625 
626 param_error_exit:
627     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
628 }
629 
630 /* Stub; full ibm,slot-error-detail support is to be added later */
631 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
632                                        sPAPRMachineState *spapr,
633                                        uint32_t token, uint32_t nargs,
634                                        target_ulong args, uint32_t nret,
635                                        target_ulong rets)
636 {
637     sPAPRPHBState *sphb;
638     sPAPRPHBClass *spc;
639     int option;
640     uint64_t buid;
641 
642     if ((nargs != 8) || (nret != 1)) {
643         goto param_error_exit;
644     }
645 
646     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
647     sphb = spapr_pci_find_phb(spapr, buid);
648     if (!sphb) {
649         goto param_error_exit;
650     }
651 
652     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
653     if (!spc->eeh_set_option) {
654         goto param_error_exit;
655     }
656 
657     option = rtas_ld(args, 7);
658     switch (option) {
659     case RTAS_SLOT_TEMP_ERR_LOG:
660     case RTAS_SLOT_PERM_ERR_LOG:
661         break;
662     default:
663         goto param_error_exit;
664     }
665 
666     /* We don't have an error log yet */
667     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
668     return;
669 
670 param_error_exit:
671     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
672 }
673 
674 static int pci_spapr_swizzle(int slot, int pin)
675 {
676     return (slot + pin) % PCI_NUM_PINS;
677 }
678 
679 static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
680 {
681     /*
682      * Here we need to convert pci_dev + irq_num to some unique value
683      * which is less than the number of IRQs on the specific bus (4).  We
684      * use standard PCI swizzling, that is (slot number + pin number)
685      * % 4.
686      */
687     return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
688 }
689 
690 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
691 {
692     /*
693      * Here we use the number returned by pci_spapr_map_irq to find a
694      * corresponding qemu_irq.
695      */
696     sPAPRPHBState *phb = opaque;
697 
698     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
699     qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
700 }
701 
702 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
703 {
704     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
705     PCIINTxRoute route;
706 
707     route.mode = PCI_INTX_ENABLED;
708     route.irq = sphb->lsi_table[pin].irq;
709 
710     return route;
711 }
712 
713 /*
714  * MSI/MSIX memory region implementation.
715  * The handler handles both MSI and MSIX.
716  * For MSI-X, the vector number is encoded as a part of the address,
717  * data is set to 0.
718  * For MSI, the vector number is encoded in the least significant bits of the data.
719  */
720 static void spapr_msi_write(void *opaque, hwaddr addr,
721                             uint64_t data, unsigned size)
722 {
723     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
724     uint32_t irq = data;
725 
726     trace_spapr_pci_msi_write(addr, data, irq);
727 
728     qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
729 }
730 
731 static const MemoryRegionOps spapr_msi_ops = {
732     /* There is no .read as the read result is undefined by the PCI spec */
733     .read = NULL,
734     .write = spapr_msi_write,
735     .endianness = DEVICE_LITTLE_ENDIAN
736 };
737 
738 /*
739  * PHB PCI device
740  */
741 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
742 {
743     sPAPRPHBState *phb = opaque;
744 
745     return &phb->iommu_as;
746 }
747 
748 /* Macros to operate on addresses in the OF binding to PCI */
749 #define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
750 #define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
751 #define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
752 #define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
753 #define b_ss(x)         b_x((x), 24, 2) /* the space code */
754 #define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
755 #define b_ddddd(x)      b_x((x), 11, 5) /* device number */
756 #define b_fff(x)        b_x((x), 8, 3)  /* function number */
757 #define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
758 
759 /* for 'reg'/'assigned-addresses' OF properties */
760 #define RESOURCE_CELLS_SIZE 2
761 #define RESOURCE_CELLS_ADDRESS 3
762 
763 typedef struct ResourceFields {
764     uint32_t phys_hi;
765     uint32_t phys_mid;
766     uint32_t phys_lo;
767     uint32_t size_hi;
768     uint32_t size_lo;
769 } QEMU_PACKED ResourceFields;
770 
771 typedef struct ResourceProps {
772     ResourceFields reg[8];
773     ResourceFields assigned[7];
774     uint32_t reg_len;
775     uint32_t assigned_len;
776 } ResourceProps;
777 
778 /* fill in the 'reg'/'assigned-addresses' OF properties for
779  * a PCI device. 'reg' describes resource requirements for a
780  * device's IO/MEM regions, 'assigned-addresses' describes the
781  * actual resource assignments.
782  *
783  * the properties are arrays of ('phys-addr', 'size') pairs describing
784  * the addressable regions of the PCI device, where 'phys-addr' is a
785  * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
786  * (phys.hi, phys.mid, phys.lo), and 'size' is a
787  * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
788  *
789  * phys.hi = 0xYYXXXXZZ, where:
790  *   0xYY = npt000ss
791  *          |||   |
792  *          |||   +-- space code
793  *          |||               |
794  *          |||               +  00 if configuration space
795  *          |||               +  01 if IO region,
796  *          |||               +  10 if 32-bit MEM region
797  *          |||               +  11 if 64-bit MEM region
798  *          |||
799  *          ||+------ for non-relocatable IO: 1 if aliased
800  *          ||        for relocatable IO: 1 if below 64KB
801  *          ||        for MEM: 1 if below 1MB
802  *          |+------- 1 if region is prefetchable
803  *          +-------- 1 if region is non-relocatable
804  *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
805  *            bits respectively
806  *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
807  *          to the region
808  *
809  * phys.mid and phys.lo correspond respectively to the hi/lo portions
810  * of the actual address of the region.
811  *
812  * how the phys-addr/size values are used differs slightly between
813  * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
814  * an additional description for the config space region of the
815  * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
816  * to describe the region as relocatable, with an address-mapping
817  * that corresponds directly to the PHB's address space for the
818  * resource. 'assigned-addresses' always has n=1 set with an absolute
819  * address assigned for the resource. in general, 'assigned-addresses'
820  * won't be populated, since addresses for PCI devices are generally
821  * unmapped initially and left to the guest to assign.
822  *
823  * note also that addresses defined in these properties are, at least
824  * for PAPR guests, relative to the PHB's IO/MEM windows, and
825  * correspond directly to the addresses in the BARs.
826  *
827  * in accordance with PCI Bus Binding to Open Firmware,
828  * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
829  * Appendix C.
830  */
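/*
 * Worked example (illustrative): for a non-prefetchable 32-bit MEM BAR 1
 * of the device at bus 0, slot 3, function 0, the 'reg' entry has
 *   phys.hi = b_ss(2) | b_ddddd(3) | b_rrrrrrrr(0x14) = 0x02001814
 * i.e. a relocatable 32-bit memory region whose BAR sits at config
 * space offset 0x14.
 */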
831 static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
832 {
833     int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
834     uint32_t dev_id = (b_bbbbbbbb(bus_num) |
835                        b_ddddd(PCI_SLOT(d->devfn)) |
836                        b_fff(PCI_FUNC(d->devfn)));
837     ResourceFields *reg, *assigned;
838     int i, reg_idx = 0, assigned_idx = 0;
839 
840     /* config space region */
841     reg = &rp->reg[reg_idx++];
842     reg->phys_hi = cpu_to_be32(dev_id);
843     reg->phys_mid = 0;
844     reg->phys_lo = 0;
845     reg->size_hi = 0;
846     reg->size_lo = 0;
847 
848     for (i = 0; i < PCI_NUM_REGIONS; i++) {
849         if (!d->io_regions[i].size) {
850             continue;
851         }
852 
853         reg = &rp->reg[reg_idx++];
854 
855         reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
856         if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
857             reg->phys_hi |= cpu_to_be32(b_ss(1));
858         } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
859             reg->phys_hi |= cpu_to_be32(b_ss(3));
860         } else {
861             reg->phys_hi |= cpu_to_be32(b_ss(2));
862         }
863         reg->phys_mid = 0;
864         reg->phys_lo = 0;
865         reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
866         reg->size_lo = cpu_to_be32(d->io_regions[i].size);
867 
868         if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
869             continue;
870         }
871 
872         assigned = &rp->assigned[assigned_idx++];
873         assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
874         assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
875         assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
876         assigned->size_hi = reg->size_hi;
877         assigned->size_lo = reg->size_lo;
878     }
879 
880     rp->reg_len = reg_idx * sizeof(ResourceFields);
881     rp->assigned_len = assigned_idx * sizeof(ResourceFields);
882 }
883 
884 static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
885                                        int phb_index, int drc_index,
886                                        const char *drc_name)
887 {
888     ResourceProps rp;
889     bool is_bridge = false;
890     int pci_status;
891 
892     if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
893         PCI_HEADER_TYPE_BRIDGE) {
894         is_bridge = true;
895     }
896 
897     /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
898     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
899                           pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
900     _FDT(fdt_setprop_cell(fdt, offset, "device-id",
901                           pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
902     _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
903                           pci_default_read_config(dev, PCI_REVISION_ID, 1)));
904     _FDT(fdt_setprop_cell(fdt, offset, "class-code",
905                           pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
906     if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
907         _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
908                  pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
909     }
910 
911     if (!is_bridge) {
912         _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
913             pci_default_read_config(dev, PCI_MIN_GNT, 1)));
914         _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
915             pci_default_read_config(dev, PCI_MAX_LAT, 1)));
916     }
917 
918     if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
919         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
920                  pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
921     }
922 
923     if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
924         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
925                  pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
926     }
927 
928     _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
929         pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
930 
931     /* the following fdt cells are masked out of the PCI status register */
932     pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
933     _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
934                           PCI_STATUS_DEVSEL_MASK & pci_status));
935 
936     if (pci_status & PCI_STATUS_FAST_BACK) {
937         _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
938     }
939     if (pci_status & PCI_STATUS_66MHZ) {
940         _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
941     }
942     if (pci_status & PCI_STATUS_UDF) {
943         _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
944     }
945 
946     /* NOTE: this is normally generated by firmware via path/unit name,
947      * but in our case we must set it manually since it does not get
948      * processed by OF beforehand
949      */
950     _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
951     _FDT(fdt_setprop(fdt, offset, "ibm,loc-code", drc_name, strlen(drc_name)));
952     _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
953 
954     _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
955                           RESOURCE_CELLS_ADDRESS));
956     _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
957                           RESOURCE_CELLS_SIZE));
958     _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x",
959                           RESOURCE_CELLS_SIZE));
960 
961     populate_resource_props(dev, &rp);
962     _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
963     _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
964                      (uint8_t *)rp.assigned, rp.assigned_len));
965 
966     return 0;
967 }
968 
969 /* create the OF node for a PCI device, with the required OF DT properties */
970 static void *spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
971                                        int drc_index, const char *drc_name,
972                                        int *dt_offset)
973 {
974     void *fdt;
975     int offset, ret, fdt_size;
976     int slot = PCI_SLOT(dev->devfn);
977     int func = PCI_FUNC(dev->devfn);
978     char nodename[FDT_NAME_MAX];
979 
980     fdt = create_device_tree(&fdt_size);
981     if (func != 0) {
982         snprintf(nodename, FDT_NAME_MAX, "pci@%x,%x", slot, func);
983     } else {
984         snprintf(nodename, FDT_NAME_MAX, "pci@%x", slot);
985     }
986     offset = fdt_add_subnode(fdt, 0, nodename);
987     ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb->index, drc_index,
988                                       drc_name);
989     g_assert(!ret);
990 
991     *dt_offset = offset;
992     return fdt;
993 }
994 
995 static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
996                                      sPAPRPHBState *phb,
997                                      PCIDevice *pdev,
998                                      Error **errp)
999 {
1000     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1001     DeviceState *dev = DEVICE(pdev);
1002     int drc_index = drck->get_index(drc);
1003     const char *drc_name = drck->get_name(drc);
1004     void *fdt = NULL;
1005     int fdt_start_offset = 0;
1006 
1007     /* boot-time devices get their device tree node created by SLOF, but for
1008      * hotplugged devices we need QEMU to generate it so the guest can fetch
1009      * it via RTAS
1010      */
1011     if (dev->hotplugged) {
1012         fdt = spapr_create_pci_child_dt(phb, pdev, drc_index, drc_name,
1013                                         &fdt_start_offset);
1014     }
1015 
1016     drck->attach(drc, DEVICE(pdev),
1017                  fdt, fdt_start_offset, !dev->hotplugged, errp);
1018     if (*errp) {
1019         g_free(fdt);
1020     }
1021 }
1022 
1023 static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
1024 {
1025     /* some guest versions do not wait for completion of a device
1026      * cleanup (generally done asynchronously by the kernel) before
1027      * signaling to QEMU that the device is safe to remove; instead they sleep
1028      * for some 'safe' period of time. unfortunately on a busy host
1029      * this sleep isn't guaranteed to be long enough, resulting in
1030      * bad things like IRQ lines being left asserted during final
1031      * device removal. to deal with this we call reset just prior
1032      * to finalizing the device, which will put the device back into
1033      * an 'idle' state, as the device cleanup code expects.
1034      */
1035     pci_device_reset(PCI_DEVICE(dev));
1036     object_unparent(OBJECT(dev));
1037 }
1038 
1039 static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
1040                                         sPAPRPHBState *phb,
1041                                         PCIDevice *pdev,
1042                                         Error **errp)
1043 {
1044     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1045 
1046     drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
1047 }
1048 
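/*
 * A PCI function's DR connector id is (phb->index << 16) | (bus << 8) |
 * devfn; for devices on the root bus this matches the connectors
 * registered in spapr_phb_realize().
 */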
1049 static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
1050                                                PCIDevice *pdev)
1051 {
1052     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
1053     return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
1054                                     (phb->index << 16) |
1055                                     (busnr << 8) |
1056                                     pdev->devfn);
1057 }
1058 
1059 static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
1060                                      DeviceState *plugged_dev, Error **errp)
1061 {
1062     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1063     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1064     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1065     Error *local_err = NULL;
1066 
1067     /* if DR is disabled we don't need to do anything in the case of
1068      * hotplug or coldplug callbacks
1069      */
1070     if (!phb->dr_enabled) {
1071         /* if this is a hotplug operation initiated by the user
1072          * we need to let them know it's not enabled
1073          */
1074         if (plugged_dev->hotplugged) {
1075             error_setg(errp, QERR_BUS_NO_HOTPLUG,
1076                        object_get_typename(OBJECT(phb)));
1077         }
1078         return;
1079     }
1080 
1081     g_assert(drc);
1082 
1083     spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
1084     if (local_err) {
1085         error_propagate(errp, local_err);
1086         return;
1087     }
1088     if (plugged_dev->hotplugged) {
1089         spapr_hotplug_req_add_event(drc);
1090     }
1091 }
1092 
1093 static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
1094                                        DeviceState *plugged_dev, Error **errp)
1095 {
1096     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1097     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1098     sPAPRDRConnectorClass *drck;
1099     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1100     Error *local_err = NULL;
1101 
1102     if (!phb->dr_enabled) {
1103         error_setg(errp, QERR_BUS_NO_HOTPLUG,
1104                    object_get_typename(OBJECT(phb)));
1105         return;
1106     }
1107 
1108     g_assert(drc);
1109 
1110     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1111     if (!drck->release_pending(drc)) {
1112         spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
1113         if (local_err) {
1114             error_propagate(errp, local_err);
1115             return;
1116         }
1117         spapr_hotplug_req_remove_event(drc);
1118     }
1119 }
1120 
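/*
 * Realize the PHB: resolve the "index" shorthand into BUID, LIOBN and
 * window addresses, create the MMIO/IO/IOMMU/MSI memory regions,
 * register the PCI bus, allocate the LSIs and (if enabled) the DR
 * connectors, then call the class-specific finish_realize hook.
 */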
1121 static void spapr_phb_realize(DeviceState *dev, Error **errp)
1122 {
1123     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1124     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1125     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
1126     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1127     sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s);
1128     char *namebuf;
1129     int i;
1130     PCIBus *bus;
1131     uint64_t msi_window_size = 4096;
1132 
1133     if (sphb->index != (uint32_t)-1) {
1134         hwaddr windows_base;
1135 
1136         if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
1137             || (sphb->mem_win_addr != (hwaddr)-1)
1138             || (sphb->io_win_addr != (hwaddr)-1)) {
1139             error_setg(errp, "Either \"index\" or other parameters must"
1140                        " be specified for PAPR PHB, not both");
1141             return;
1142         }
1143 
1144         if (sphb->index > SPAPR_PCI_MAX_INDEX) {
1145             error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
1146                        SPAPR_PCI_MAX_INDEX);
1147             return;
1148         }
1149 
1150         sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
1151         sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
1152 
1153         windows_base = SPAPR_PCI_WINDOW_BASE
1154             + sphb->index * SPAPR_PCI_WINDOW_SPACING;
1155         sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
1156         sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
1157     }
1158 
1159     if (sphb->buid == (uint64_t)-1) {
1160         error_setg(errp, "BUID not specified for PHB");
1161         return;
1162     }
1163 
1164     if (sphb->dma_liobn == (uint32_t)-1) {
1165         error_setg(errp, "LIOBN not specified for PHB");
1166         return;
1167     }
1168 
1169     if (sphb->mem_win_addr == (hwaddr)-1) {
1170         error_setg(errp, "Memory window address not specified for PHB");
1171         return;
1172     }
1173 
1174     if (sphb->io_win_addr == (hwaddr)-1) {
1175         error_setg(errp, "IO window address not specified for PHB");
1176         return;
1177     }
1178 
1179     if (spapr_pci_find_phb(spapr, sphb->buid)) {
1180         error_setg(errp, "PCI host bridges must have unique BUIDs");
1181         return;
1182     }
1183 
1184     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1185 
1186     namebuf = alloca(strlen(sphb->dtbusname) + 32);
1187 
1188     /* Initialize memory regions */
1189     sprintf(namebuf, "%s.mmio", sphb->dtbusname);
1190     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1191 
1192     sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
1193     memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
1194                              namebuf, &sphb->memspace,
1195                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1196     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1197                                 &sphb->memwindow);
1198 
1199     /* Initialize IO regions */
1200     sprintf(namebuf, "%s.io", sphb->dtbusname);
1201     memory_region_init(&sphb->iospace, OBJECT(sphb),
1202                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
1203 
1204     sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
1205     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1206                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1207     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1208                                 &sphb->iowindow);
1209 
1210     bus = pci_register_bus(dev, NULL,
1211                            pci_spapr_set_irq, pci_spapr_map_irq, sphb,
1212                            &sphb->memspace, &sphb->iospace,
1213                            PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
1214     phb->bus = bus;
1215     qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
1216 
1217     /*
1218      * Initialize PHB address space.
1219      * By default there will be at least one subregion for the default
1220      * 32-bit DMA window.
1221      * Later the guest might want to create another DMA window
1222      * which will become another memory subregion.
1223      */
1224     sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
1225 
1226     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1227                        namebuf, UINT64_MAX);
1228     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1229                        sphb->dtbusname);
1230 
1231     /*
1232      * As MSI/MSIX interrupts are triggered by writes to the MSI/MSIX
1233      * vectors, we need to allocate some memory to catch those writes
1234      * coming from msi_notify()/msix_notify().
1235      * As MSIMessage:addr is going to be the same and MSIMessage:data
1236      * is going to be a VIRQ number, only 4 bytes of the MSI MR will
1237      * actually be used.
1238      *
1239      * For KVM we want to ensure that this memory is a full page so that
1240      * our memory slot is of page size granularity.
1241      */
1242 #ifdef CONFIG_KVM
1243     if (kvm_enabled()) {
1244         msi_window_size = getpagesize();
1245     }
1246 #endif
1247 
1248     memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
1249                           "msi", msi_window_size);
1250     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1251                                 &sphb->msiwindow);
1252 
1253     pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
1254 
1255     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1256 
1257     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1258 
1259     /* Initialize the LSI table */
1260     for (i = 0; i < PCI_NUM_PINS; i++) {
1261         uint32_t irq;
1262 
1263         irq = xics_alloc_block(spapr->icp, 0, 1, true, false);
1264         if (!irq) {
1265             error_setg(errp, "spapr_allocate_lsi failed");
1266             return;
1267         }
1268 
1269         sphb->lsi_table[i].irq = irq;
1270     }
1271 
1272     /* allocate connectors for child PCI devices */
1273     if (sphb->dr_enabled) {
1274         for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
1275             spapr_dr_connector_new(OBJECT(phb),
1276                                    SPAPR_DR_CONNECTOR_TYPE_PCI,
1277                                    (sphb->index << 16) | i);
1278         }
1279     }
1280 
1281     if (!info->finish_realize) {
1282         error_setg(errp, "finish_realize not defined");
1283         return;
1284     }
1285 
1286     info->finish_realize(sphb, errp);
1287 
1288     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
1289 }
1290 
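/*
 * Default finish_realize hook: create the default 32-bit DMA window
 * (TCE table) and map it at the bottom of the PHB's IOMMU address space.
 */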
1291 static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
1292 {
1293     sPAPRTCETable *tcet;
1294     uint32_t nb_table;
1295 
1296     nb_table = SPAPR_PCI_DMA32_SIZE >> SPAPR_TCE_PAGE_SHIFT;
1297     tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
1298                                0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
1299     if (!tcet) {
1300         error_setg(errp, "Unable to create TCE table for %s",
1301                    sphb->dtbusname);
1302         return;
1303     }
1304 
1305     /* Register default 32bit DMA window */
1306     memory_region_add_subregion(&sphb->iommu_root, 0,
1307                                 spapr_tce_get_iommu(tcet));
1308 }
1309 
1310 static int spapr_phb_children_reset(Object *child, void *opaque)
1311 {
1312     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
1313 
1314     if (dev) {
1315         device_reset(dev);
1316     }
1317 
1318     return 0;
1319 }
1320 
1321 static void spapr_phb_reset(DeviceState *qdev)
1322 {
1323     /* Reset the IOMMU state */
1324     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
1325 }
1326 
1327 static Property spapr_phb_properties[] = {
1328     DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
1329     DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
1330     DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
1331     DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
1332     DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
1333                        SPAPR_PCI_MMIO_WIN_SIZE),
1334     DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
1335     DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
1336                        SPAPR_PCI_IO_WIN_SIZE),
1337     DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
1338                      true),
1339     DEFINE_PROP_END_OF_LIST(),
1340 };
1341 
1342 static const VMStateDescription vmstate_spapr_pci_lsi = {
1343     .name = "spapr_pci/lsi",
1344     .version_id = 1,
1345     .minimum_version_id = 1,
1346     .fields = (VMStateField[]) {
1347         VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),
1348 
1349         VMSTATE_END_OF_LIST()
1350     },
1351 };
1352 
1353 static const VMStateDescription vmstate_spapr_pci_msi = {
1354     .name = "spapr_pci/msi",
1355     .version_id = 1,
1356     .minimum_version_id = 1,
1357     .fields = (VMStateField []) {
1358         VMSTATE_UINT32(key, spapr_pci_msi_mig),
1359         VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
1360         VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
1361         VMSTATE_END_OF_LIST()
1362     },
1363 };
1364 
1365 static void spapr_pci_fill_msi_devs(gpointer key, gpointer value,
1366                                     gpointer opaque)
1367 {
1368     sPAPRPHBState *sphb = opaque;
1369 
1370     sphb->msi_devs[sphb->msi_devs_num].key = *(uint32_t *)key;
1371     sphb->msi_devs[sphb->msi_devs_num].value = *(spapr_pci_msi *)value;
1372     sphb->msi_devs_num++;
1373 }
1374 
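/*
 * The MSI configuration is kept in a GHashTable, which cannot be
 * migrated directly: pre_save flattens it into the msi_devs array and
 * post_load rebuilds the hash table on the destination.
 */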
1375 static void spapr_pci_pre_save(void *opaque)
1376 {
1377     sPAPRPHBState *sphb = opaque;
1378     int msi_devs_num;
1379 
1380     if (sphb->msi_devs) {
1381         g_free(sphb->msi_devs);
1382         sphb->msi_devs = NULL;
1383     }
1384     sphb->msi_devs_num = 0;
1385     msi_devs_num = g_hash_table_size(sphb->msi);
1386     if (!msi_devs_num) {
1387         return;
1388     }
1389     sphb->msi_devs = g_malloc(msi_devs_num * sizeof(spapr_pci_msi_mig));
1390 
1391     g_hash_table_foreach(sphb->msi, spapr_pci_fill_msi_devs, sphb);
1392     assert(sphb->msi_devs_num == msi_devs_num);
1393 }
1394 
1395 static int spapr_pci_post_load(void *opaque, int version_id)
1396 {
1397     sPAPRPHBState *sphb = opaque;
1398     gpointer key, value;
1399     int i;
1400 
1401     for (i = 0; i < sphb->msi_devs_num; ++i) {
1402         key = g_memdup(&sphb->msi_devs[i].key,
1403                        sizeof(sphb->msi_devs[i].key));
1404         value = g_memdup(&sphb->msi_devs[i].value,
1405                          sizeof(sphb->msi_devs[i].value));
1406         g_hash_table_insert(sphb->msi, key, value);
1407     }
1408     if (sphb->msi_devs) {
1409         g_free(sphb->msi_devs);
1410         sphb->msi_devs = NULL;
1411     }
1412     sphb->msi_devs_num = 0;
1413 
1414     return 0;
1415 }
1416 
1417 static const VMStateDescription vmstate_spapr_pci = {
1418     .name = "spapr_pci",
1419     .version_id = 2,
1420     .minimum_version_id = 2,
1421     .pre_save = spapr_pci_pre_save,
1422     .post_load = spapr_pci_post_load,
1423     .fields = (VMStateField[]) {
1424         VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
1425         VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
1426         VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
1427         VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
1428         VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
1429         VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
1430         VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
1431                              vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
1432         VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
1433         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
1434                                     vmstate_spapr_pci_msi, spapr_pci_msi_mig),
1435         VMSTATE_END_OF_LIST()
1436     },
1437 };
1438 
1439 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
1440                                            PCIBus *rootbus)
1441 {
1442     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
1443 
1444     return sphb->dtbusname;
1445 }
1446 
1447 static void spapr_phb_class_init(ObjectClass *klass, void *data)
1448 {
1449     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
1450     DeviceClass *dc = DEVICE_CLASS(klass);
1451     sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
1452     HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
1453 
1454     hc->root_bus_path = spapr_phb_root_bus_path;
1455     dc->realize = spapr_phb_realize;
1456     dc->props = spapr_phb_properties;
1457     dc->reset = spapr_phb_reset;
1458     dc->vmsd = &vmstate_spapr_pci;
1459     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
1460     dc->cannot_instantiate_with_device_add_yet = false;
1461     spc->finish_realize = spapr_phb_finish_realize;
1462     hp->plug = spapr_phb_hot_plug_child;
1463     hp->unplug = spapr_phb_hot_unplug_child;
1464 }
1465 
1466 static const TypeInfo spapr_phb_info = {
1467     .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
1468     .parent        = TYPE_PCI_HOST_BRIDGE,
1469     .instance_size = sizeof(sPAPRPHBState),
1470     .class_init    = spapr_phb_class_init,
1471     .class_size    = sizeof(sPAPRPHBClass),
1472     .interfaces    = (InterfaceInfo[]) {
1473         { TYPE_HOTPLUG_HANDLER },
1474         { }
1475     }
1476 };
1477 
1478 PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
1479 {
1480     DeviceState *dev;
1481 
1482     dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
1483     qdev_prop_set_uint32(dev, "index", index);
1484     qdev_init_nofail(dev);
1485 
1486     return PCI_HOST_BRIDGE(dev);
1487 }
1488 
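/*
 * Add the PHB's node to the device tree: "ranges" for the IO window and
 * the 32-bit/64-bit memory windows, the LSI interrupt map, the default
 * DMA window and the DR connectors for hotpluggable slots.
 */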
1489 int spapr_populate_pci_dt(sPAPRPHBState *phb,
1490                           uint32_t xics_phandle,
1491                           void *fdt)
1492 {
1493     int bus_off, i, j, ret;
1494     char nodename[FDT_NAME_MAX];
1495     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
1496     const uint64_t mmiosize = memory_region_size(&phb->memwindow);
1497     const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
1498     const uint64_t w32size = MIN(w32max, mmiosize);
1499     const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
1500     struct {
1501         uint32_t hi;
1502         uint64_t child;
1503         uint64_t parent;
1504         uint64_t size;
1505     } QEMU_PACKED ranges[] = {
1506         {
1507             cpu_to_be32(b_ss(1)), cpu_to_be64(0),
1508             cpu_to_be64(phb->io_win_addr),
1509             cpu_to_be64(memory_region_size(&phb->iospace)),
1510         },
1511         {
1512             cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
1513             cpu_to_be64(phb->mem_win_addr),
1514             cpu_to_be64(w32size),
1515         },
1516         {
1517             cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
1518             cpu_to_be64(phb->mem_win_addr + w32size),
1519             cpu_to_be64(w64size)
1520         },
1521     };
1522     const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
1523     uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
1524     uint32_t interrupt_map_mask[] = {
1525         cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
1526     uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
1527     sPAPRTCETable *tcet;
1528 
1529     /* Start populating the FDT */
1530     snprintf(nodename, FDT_NAME_MAX, "pci@%" PRIx64, phb->buid);
1531     bus_off = fdt_add_subnode(fdt, 0, nodename);
1532     if (bus_off < 0) {
1533         return bus_off;
1534     }
1535 
1536     /* Write PHB properties */
1537     _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
1538     _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
1539     _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
1540     _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
1541     _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
1542     _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
1543     _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
1544     _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
1545     _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
1546     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
1547     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));
1548 
1549     /* Build the interrupt-map; this must match what is done
1550      * in pci_spapr_map_irq
1551      */
1552     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
1553                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
1554     for (i = 0; i < PCI_SLOT_MAX; i++) {
1555         for (j = 0; j < PCI_NUM_PINS; j++) {
1556             uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
1557             int lsi_num = pci_spapr_swizzle(i, j);
1558 
1559             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
1560             irqmap[1] = 0;
1561             irqmap[2] = 0;
1562             irqmap[3] = cpu_to_be32(j+1);
1563             irqmap[4] = cpu_to_be32(xics_phandle);
1564             irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
1565             irqmap[6] = cpu_to_be32(0x8);
1566         }
1567     }
1568     /* Write interrupt map */
1569     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
1570                      sizeof(interrupt_map)));
1571 
1572     tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
1573     spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
1574                  tcet->liobn, tcet->bus_offset,
1575                  tcet->nb_table << tcet->page_shift);
1576 
1577     ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
1578                                 SPAPR_DR_CONNECTOR_TYPE_PCI);
1579     if (ret) {
1580         return ret;
1581     }
1582 
1583     return 0;
1584 }
1585 
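/* Register the PCI and EEH related RTAS calls with the RTAS dispatcher */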
1586 void spapr_pci_rtas_init(void)
1587 {
1588     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
1589                         rtas_read_pci_config);
1590     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
1591                         rtas_write_pci_config);
1592     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
1593                         rtas_ibm_read_pci_config);
1594     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
1595                         rtas_ibm_write_pci_config);
1596     if (msi_supported) {
1597         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
1598                             "ibm,query-interrupt-source-number",
1599                             rtas_ibm_query_interrupt_source_number);
1600         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
1601                             rtas_ibm_change_msi);
1602     }
1603 
1604     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
1605                         "ibm,set-eeh-option",
1606                         rtas_ibm_set_eeh_option);
1607     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
1608                         "ibm,get-config-addr-info2",
1609                         rtas_ibm_get_config_addr_info2);
1610     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
1611                         "ibm,read-slot-reset-state2",
1612                         rtas_ibm_read_slot_reset_state2);
1613     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
1614                         "ibm,set-slot-reset",
1615                         rtas_ibm_set_slot_reset);
1616     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
1617                         "ibm,configure-pe",
1618                         rtas_ibm_configure_pe);
1619     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
1620                         "ibm,slot-error-detail",
1621                         rtas_ibm_slot_error_detail);
1622 }
1623 
1624 static void spapr_pci_register_types(void)
1625 {
1626     type_register_static(&spapr_phb_info);
1627 }
1628 
1629 type_init(spapr_pci_register_types)
1630 
1631 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
1632 {
1633     bool be = *(bool *)opaque;
1634 
1635     if (object_dynamic_cast(OBJECT(dev), "VGA")
1636         || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
1637         object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
1638                                  &error_abort);
1639     }
1640     return 0;
1641 }
1642 
1643 void spapr_pci_switch_vga(bool big_endian)
1644 {
1645     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1646     sPAPRPHBState *sphb;
1647 
1648     /*
1649      * For backward compatibility with existing guests, we switch
1650      * the endianness of the VGA controller when changing the guest
1651      * interrupt mode
1652      */
1653     QLIST_FOREACH(sphb, &spapr->phbs, list) {
1654         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
1655         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
1656                            &big_endian);
1657     }
1658 }
1659