xref: /qemu/hw/ppc/spapr_pci.c (revision 72187935b475454792512d44782a33f112b120e6)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "hw/hw.h"
26 #include "hw/pci/pci.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/pci/pci_host.h"
30 #include "hw/ppc/spapr.h"
31 #include "hw/pci-host/spapr.h"
32 #include "exec/address-spaces.h"
33 #include <libfdt.h>
34 #include "trace.h"
35 #include "qemu/error-report.h"
36 #include "qapi/qmp/qerror.h"
37 
38 #include "hw/pci/pci_bus.h"
39 #include "hw/ppc/spapr_drc.h"
40 #include "sysemu/device_tree.h"
41 
42 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
43 #define RTAS_QUERY_FN           0
44 #define RTAS_CHANGE_FN          1
45 #define RTAS_RESET_FN           2
46 #define RTAS_CHANGE_MSI_FN      3
47 #define RTAS_CHANGE_MSIX_FN     4
48 
49 /* Interrupt types to return on RTAS_CHANGE_* */
50 #define RTAS_TYPE_MSI           1
51 #define RTAS_TYPE_MSIX          2
52 
/* Evaluate a libfdt call and, on failure, propagate its (negative)
 * error code by returning from the *enclosing* function. Only usable
 * inside functions returning int. */
#define _FDT(exp) \
    do { \
        int ret = (exp);                                           \
        if (ret < 0) {                                             \
            return ret;                                            \
        }                                                          \
    } while (0)
61 sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid)
62 {
63     sPAPRPHBState *sphb;
64 
65     QLIST_FOREACH(sphb, &spapr->phbs, list) {
66         if (sphb->buid != buid) {
67             continue;
68         }
69         return sphb;
70     }
71 
72     return NULL;
73 }
74 
75 PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
76                               uint32_t config_addr)
77 {
78     sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
79     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
80     int bus_num = (config_addr >> 16) & 0xFF;
81     int devfn = (config_addr >> 8) & 0xFF;
82 
83     if (!phb) {
84         return NULL;
85     }
86 
87     return pci_find_device(phb->bus, bus_num, devfn);
88 }
89 
/* Decode an RTAS config-space address argument: the extended-register
 * high nibble lives in bits 28-31 and is folded above the classic
 * 8-bit register number from the low byte. */
static uint32_t rtas_pci_cfgaddr(uint32_t arg)
{
    uint32_t reg_high = (arg >> 20) & 0xf00;
    uint32_t reg_low = arg & 0xff;

    return reg_high | reg_low;
}
95 
96 static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid,
97                                    uint32_t addr, uint32_t size,
98                                    target_ulong rets)
99 {
100     PCIDevice *pci_dev;
101     uint32_t val;
102 
103     if ((size != 1) && (size != 2) && (size != 4)) {
104         /* access must be 1, 2 or 4 bytes */
105         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
106         return;
107     }
108 
109     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
110     addr = rtas_pci_cfgaddr(addr);
111 
112     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
113         /* Access must be to a valid device, within bounds and
114          * naturally aligned */
115         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
116         return;
117     }
118 
119     val = pci_host_config_read_common(pci_dev, addr,
120                                       pci_config_size(pci_dev), size);
121 
122     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
123     rtas_st(rets, 1, val);
124 }
125 
126 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
127                                      uint32_t token, uint32_t nargs,
128                                      target_ulong args,
129                                      uint32_t nret, target_ulong rets)
130 {
131     uint64_t buid;
132     uint32_t size, addr;
133 
134     if ((nargs != 4) || (nret != 2)) {
135         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
136         return;
137     }
138 
139     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
140     size = rtas_ld(args, 3);
141     addr = rtas_ld(args, 0);
142 
143     finish_read_pci_config(spapr, buid, addr, size, rets);
144 }
145 
146 static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
147                                  uint32_t token, uint32_t nargs,
148                                  target_ulong args,
149                                  uint32_t nret, target_ulong rets)
150 {
151     uint32_t size, addr;
152 
153     if ((nargs != 2) || (nret != 2)) {
154         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
155         return;
156     }
157 
158     size = rtas_ld(args, 1);
159     addr = rtas_ld(args, 0);
160 
161     finish_read_pci_config(spapr, 0, addr, size, rets);
162 }
163 
164 static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid,
165                                     uint32_t addr, uint32_t size,
166                                     uint32_t val, target_ulong rets)
167 {
168     PCIDevice *pci_dev;
169 
170     if ((size != 1) && (size != 2) && (size != 4)) {
171         /* access must be 1, 2 or 4 bytes */
172         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
173         return;
174     }
175 
176     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
177     addr = rtas_pci_cfgaddr(addr);
178 
179     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
180         /* Access must be to a valid device, within bounds and
181          * naturally aligned */
182         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
183         return;
184     }
185 
186     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
187                                  val, size);
188 
189     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
190 }
191 
192 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
193                                       uint32_t token, uint32_t nargs,
194                                       target_ulong args,
195                                       uint32_t nret, target_ulong rets)
196 {
197     uint64_t buid;
198     uint32_t val, size, addr;
199 
200     if ((nargs != 5) || (nret != 1)) {
201         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
202         return;
203     }
204 
205     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
206     val = rtas_ld(args, 4);
207     size = rtas_ld(args, 3);
208     addr = rtas_ld(args, 0);
209 
210     finish_write_pci_config(spapr, buid, addr, size, val, rets);
211 }
212 
213 static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
214                                   uint32_t token, uint32_t nargs,
215                                   target_ulong args,
216                                   uint32_t nret, target_ulong rets)
217 {
218     uint32_t val, size, addr;
219 
220     if ((nargs != 3) || (nret != 1)) {
221         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
222         return;
223     }
224 
225 
226     val = rtas_ld(args, 2);
227     size = rtas_ld(args, 1);
228     addr = rtas_ld(args, 0);
229 
230     finish_write_pci_config(spapr, 0, addr, size, val, rets);
231 }
232 
233 /*
234  * Set MSI/MSIX message data.
235  * This is required for msi_notify()/msix_notify() which
236  * will write at the addresses via spapr_msi_write().
237  *
238  * If hwaddr == 0, all entries will have .data == first_irq i.e.
239  * table will be reset.
240  */
241 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
242                              unsigned first_irq, unsigned req_num)
243 {
244     unsigned i;
245     MSIMessage msg = { .address = addr, .data = first_irq };
246 
247     if (!msix) {
248         msi_set_message(pdev, msg);
249         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
250         return;
251     }
252 
253     for (i = 0; i < req_num; ++i) {
254         msix_set_message(pdev, i, msg);
255         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
256         if (addr) {
257             ++msg.data;
258         }
259     }
260 }
261 
262 static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
263                                 uint32_t token, uint32_t nargs,
264                                 target_ulong args, uint32_t nret,
265                                 target_ulong rets)
266 {
267     uint32_t config_addr = rtas_ld(args, 0);
268     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
269     unsigned int func = rtas_ld(args, 3);
270     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
271     unsigned int seq_num = rtas_ld(args, 5);
272     unsigned int ret_intr_type;
273     unsigned int irq, max_irqs = 0, num = 0;
274     sPAPRPHBState *phb = NULL;
275     PCIDevice *pdev = NULL;
276     spapr_pci_msi *msi;
277     int *config_addr_key;
278 
279     switch (func) {
280     case RTAS_CHANGE_MSI_FN:
281     case RTAS_CHANGE_FN:
282         ret_intr_type = RTAS_TYPE_MSI;
283         break;
284     case RTAS_CHANGE_MSIX_FN:
285         ret_intr_type = RTAS_TYPE_MSIX;
286         break;
287     default:
288         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
289         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
290         return;
291     }
292 
293     /* Fins sPAPRPHBState */
294     phb = spapr_pci_find_phb(spapr, buid);
295     if (phb) {
296         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
297     }
298     if (!phb || !pdev) {
299         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
300         return;
301     }
302 
303     /* Releasing MSIs */
304     if (!req_num) {
305         msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
306         if (!msi) {
307             trace_spapr_pci_msi("Releasing wrong config", config_addr);
308             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
309             return;
310         }
311 
312         xics_free(spapr->icp, msi->first_irq, msi->num);
313         if (msi_present(pdev)) {
314             spapr_msi_setmsg(pdev, 0, false, 0, num);
315         }
316         if (msix_present(pdev)) {
317             spapr_msi_setmsg(pdev, 0, true, 0, num);
318         }
319         g_hash_table_remove(phb->msi, &config_addr);
320 
321         trace_spapr_pci_msi("Released MSIs", config_addr);
322         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
323         rtas_st(rets, 1, 0);
324         return;
325     }
326 
327     /* Enabling MSI */
328 
329     /* Check if the device supports as many IRQs as requested */
330     if (ret_intr_type == RTAS_TYPE_MSI) {
331         max_irqs = msi_nr_vectors_allocated(pdev);
332     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
333         max_irqs = pdev->msix_entries_nr;
334     }
335     if (!max_irqs) {
336         error_report("Requested interrupt type %d is not enabled for device %x",
337                      ret_intr_type, config_addr);
338         rtas_st(rets, 0, -1); /* Hardware error */
339         return;
340     }
341     /* Correct the number if the guest asked for too many */
342     if (req_num > max_irqs) {
343         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
344         req_num = max_irqs;
345         irq = 0; /* to avoid misleading trace */
346         goto out;
347     }
348 
349     /* Allocate MSIs */
350     irq = xics_alloc_block(spapr->icp, 0, req_num, false,
351                            ret_intr_type == RTAS_TYPE_MSI);
352     if (!irq) {
353         error_report("Cannot allocate MSIs for device %x", config_addr);
354         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
355         return;
356     }
357 
358     /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
359     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
360                      irq, req_num);
361 
362     /* Add MSI device to cache */
363     msi = g_new(spapr_pci_msi, 1);
364     msi->first_irq = irq;
365     msi->num = req_num;
366     config_addr_key = g_new(int, 1);
367     *config_addr_key = config_addr;
368     g_hash_table_insert(phb->msi, config_addr_key, msi);
369 
370 out:
371     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
372     rtas_st(rets, 1, req_num);
373     rtas_st(rets, 2, ++seq_num);
374     rtas_st(rets, 3, ret_intr_type);
375 
376     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
377 }
378 
379 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
380                                                    sPAPRMachineState *spapr,
381                                                    uint32_t token,
382                                                    uint32_t nargs,
383                                                    target_ulong args,
384                                                    uint32_t nret,
385                                                    target_ulong rets)
386 {
387     uint32_t config_addr = rtas_ld(args, 0);
388     uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
389     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
390     sPAPRPHBState *phb = NULL;
391     PCIDevice *pdev = NULL;
392     spapr_pci_msi *msi;
393 
394     /* Find sPAPRPHBState */
395     phb = spapr_pci_find_phb(spapr, buid);
396     if (phb) {
397         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
398     }
399     if (!phb || !pdev) {
400         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
401         return;
402     }
403 
404     /* Find device descriptor and start IRQ */
405     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
406     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
407         trace_spapr_pci_msi("Failed to return vector", config_addr);
408         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
409         return;
410     }
411     intr_src_num = msi->first_irq + ioa_intr_num;
412     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
413                                                            intr_src_num);
414 
415     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
416     rtas_st(rets, 1, intr_src_num);
417     rtas_st(rets, 2, 1);/* 0 == level; 1 == edge */
418 }
419 
420 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
421                                     sPAPRMachineState *spapr,
422                                     uint32_t token, uint32_t nargs,
423                                     target_ulong args, uint32_t nret,
424                                     target_ulong rets)
425 {
426     sPAPRPHBState *sphb;
427     sPAPRPHBClass *spc;
428     uint32_t addr, option;
429     uint64_t buid;
430     int ret;
431 
432     if ((nargs != 4) || (nret != 1)) {
433         goto param_error_exit;
434     }
435 
436     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
437     addr = rtas_ld(args, 0);
438     option = rtas_ld(args, 3);
439 
440     sphb = spapr_pci_find_phb(spapr, buid);
441     if (!sphb) {
442         goto param_error_exit;
443     }
444 
445     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
446     if (!spc->eeh_set_option) {
447         goto param_error_exit;
448     }
449 
450     ret = spc->eeh_set_option(sphb, addr, option);
451     rtas_st(rets, 0, ret);
452     return;
453 
454 param_error_exit:
455     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
456 }
457 
/* RTAS ibm,get-config-addr-info2: return either the PE address of a
 * device or the PE addressing mode of the PHB. */
static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
                                           sPAPRMachineState *spapr,
                                           uint32_t token, uint32_t nargs,
                                           target_ulong args, uint32_t nret,
                                           target_ulong rets)
{
    sPAPRPHBState *sphb;
    sPAPRPHBClass *spc;
    PCIDevice *pdev;
    uint32_t addr, option;
    uint64_t buid;

    /* 4 inputs, 2 outputs (status + info) */
    if ((nargs != 4) || (nret != 2)) {
        goto param_error_exit;
    }

    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    sphb = spapr_pci_find_phb(spapr, buid);
    if (!sphb) {
        goto param_error_exit;
    }

    /* NOTE(review): gates on eeh_set_option, i.e. "this PHB supports
     * EEH at all" — presumably intentional as a general EEH capability
     * check, but confirm no info2-specific hook was meant here. */
    spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
    if (!spc->eeh_set_option) {
        goto param_error_exit;
    }

    /*
     * We always have PE address of form "00BB0001". "BB"
     * represents the bus number of PE's primary bus.
     */
    option = rtas_ld(args, 3);
    switch (option) {
    case RTAS_GET_PE_ADDR:
        /* The queried device must exist under this PHB */
        addr = rtas_ld(args, 0);
        pdev = spapr_pci_find_dev(spapr, buid, addr);
        if (!pdev) {
            goto param_error_exit;
        }

        rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
        break;
    case RTAS_GET_PE_MODE:
        /* One shared PE per PHB */
        rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
        break;
    default:
        goto param_error_exit;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    return;

param_error_exit:
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
513 
514 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
515                                             sPAPRMachineState *spapr,
516                                             uint32_t token, uint32_t nargs,
517                                             target_ulong args, uint32_t nret,
518                                             target_ulong rets)
519 {
520     sPAPRPHBState *sphb;
521     sPAPRPHBClass *spc;
522     uint64_t buid;
523     int state, ret;
524 
525     if ((nargs != 3) || (nret != 4 && nret != 5)) {
526         goto param_error_exit;
527     }
528 
529     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
530     sphb = spapr_pci_find_phb(spapr, buid);
531     if (!sphb) {
532         goto param_error_exit;
533     }
534 
535     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
536     if (!spc->eeh_get_state) {
537         goto param_error_exit;
538     }
539 
540     ret = spc->eeh_get_state(sphb, &state);
541     rtas_st(rets, 0, ret);
542     if (ret != RTAS_OUT_SUCCESS) {
543         return;
544     }
545 
546     rtas_st(rets, 1, state);
547     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
548     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
549     if (nret >= 5) {
550         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
551     }
552     return;
553 
554 param_error_exit:
555     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
556 }
557 
558 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
559                                     sPAPRMachineState *spapr,
560                                     uint32_t token, uint32_t nargs,
561                                     target_ulong args, uint32_t nret,
562                                     target_ulong rets)
563 {
564     sPAPRPHBState *sphb;
565     sPAPRPHBClass *spc;
566     uint32_t option;
567     uint64_t buid;
568     int ret;
569 
570     if ((nargs != 4) || (nret != 1)) {
571         goto param_error_exit;
572     }
573 
574     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
575     option = rtas_ld(args, 3);
576     sphb = spapr_pci_find_phb(spapr, buid);
577     if (!sphb) {
578         goto param_error_exit;
579     }
580 
581     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
582     if (!spc->eeh_reset) {
583         goto param_error_exit;
584     }
585 
586     ret = spc->eeh_reset(sphb, option);
587     rtas_st(rets, 0, ret);
588     return;
589 
590 param_error_exit:
591     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
592 }
593 
594 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
595                                   sPAPRMachineState *spapr,
596                                   uint32_t token, uint32_t nargs,
597                                   target_ulong args, uint32_t nret,
598                                   target_ulong rets)
599 {
600     sPAPRPHBState *sphb;
601     sPAPRPHBClass *spc;
602     uint64_t buid;
603     int ret;
604 
605     if ((nargs != 3) || (nret != 1)) {
606         goto param_error_exit;
607     }
608 
609     buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
610     sphb = spapr_pci_find_phb(spapr, buid);
611     if (!sphb) {
612         goto param_error_exit;
613     }
614 
615     spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
616     if (!spc->eeh_configure) {
617         goto param_error_exit;
618     }
619 
620     ret = spc->eeh_configure(sphb);
621     rtas_st(rets, 0, ret);
622     return;
623 
624 param_error_exit:
625     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
626 }
627 
/* To support it later */
/* RTAS ibm,slot-error-detail: EEH error-log retrieval. Currently a
 * stub that validates the request and reports "no errors found". */
static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       uint32_t token, uint32_t nargs,
                                       target_ulong args, uint32_t nret,
                                       target_ulong rets)
{
    sPAPRPHBState *sphb;
    sPAPRPHBClass *spc;
    int option;
    uint64_t buid;

    /* 8 inputs, 1 status output */
    if ((nargs != 8) || (nret != 1)) {
        goto param_error_exit;
    }

    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    sphb = spapr_pci_find_phb(spapr, buid);
    if (!sphb) {
        goto param_error_exit;
    }

    /* NOTE(review): gates on eeh_set_option like the other EEH
     * handlers — looks like a generic "EEH supported" check rather
     * than an error-log specific hook; confirm this is intended. */
    spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
    if (!spc->eeh_set_option) {
        goto param_error_exit;
    }

    /* Only the temporary and permanent log types are accepted */
    option = rtas_ld(args, 7);
    switch (option) {
    case RTAS_SLOT_TEMP_ERR_LOG:
    case RTAS_SLOT_PERM_ERR_LOG:
        break;
    default:
        goto param_error_exit;
    }

    /* We don't have error log yet */
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
    return;

param_error_exit:
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
671 
672 static int pci_spapr_swizzle(int slot, int pin)
673 {
674     return (slot + pin) % PCI_NUM_PINS;
675 }
676 
677 static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
678 {
679     /*
680      * Here we need to convert pci_dev + irq_num to some unique value
681      * which is less than number of IRQs on the specific bus (4).  We
682      * use standard PCI swizzling, that is (slot number + pin number)
683      * % 4.
684      */
685     return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
686 }
687 
688 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
689 {
690     /*
691      * Here we use the number returned by pci_spapr_map_irq to find a
692      * corresponding qemu_irq.
693      */
694     sPAPRPHBState *phb = opaque;
695 
696     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
697     qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
698 }
699 
700 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
701 {
702     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
703     PCIINTxRoute route;
704 
705     route.mode = PCI_INTX_ENABLED;
706     route.irq = sphb->lsi_table[pin].irq;
707 
708     return route;
709 }
710 
711 /*
712  * MSI/MSIX memory region implementation.
713  * The handler handles both MSI and MSIX.
714  * For MSI-X, the vector number is encoded as a part of the address,
715  * data is set to 0.
716  * For MSI, the vector number is encoded in least bits in data.
717  */
718 static void spapr_msi_write(void *opaque, hwaddr addr,
719                             uint64_t data, unsigned size)
720 {
721     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
722     uint32_t irq = data;
723 
724     trace_spapr_pci_msi_write(addr, data, irq);
725 
726     qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
727 }
728 
/* Ops for the MSI window: guest/device writes land in spapr_msi_write */
static const MemoryRegionOps spapr_msi_ops = {
    /* There is no .read as the read result is undefined by PCI spec */
    .read = NULL,
    .write = spapr_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN
};
735 
736 /*
737  * PHB PCI device
738  */
739 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
740 {
741     sPAPRPHBState *phb = opaque;
742 
743     return &phb->iommu_as;
744 }
745 
/* Macros to operate with address in OF binding to PCI */
/* b_x packs value x into an l-bit field at bit position p of the
 * phys.hi cell of an OF PCI address */
#define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
#define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
#define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
#define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
#define b_ss(x)         b_x((x), 24, 2) /* the space code */
#define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
#define b_ddddd(x)      b_x((x), 11, 5) /* device number */
#define b_fff(x)        b_x((x), 8, 3)  /* function number */
#define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */

/* for 'reg'/'assigned-addresses' OF properties */
#define RESOURCE_CELLS_SIZE 2
#define RESOURCE_CELLS_ADDRESS 3
760 
/* One (phys-addr, size) entry of a 'reg'/'assigned-addresses' OF
 * property: a 3-cell address (phys.hi/mid/lo) plus a 2-cell size,
 * stored big-endian and packed for direct use as property data. */
typedef struct ResourceFields {
    uint32_t phys_hi;
    uint32_t phys_mid;
    uint32_t phys_lo;
    uint32_t size_hi;
    uint32_t size_lo;
} QEMU_PACKED ResourceFields;

/* Build buffer for one device's properties; reg holds one entry per
 * region plus the config-space entry (presumably sized to match
 * PCI_NUM_REGIONS + 1 and PCI_NUM_REGIONS — TODO confirm).
 * reg_len/assigned_len are the used byte lengths. */
typedef struct ResourceProps {
    ResourceFields reg[8];
    ResourceFields assigned[7];
    uint32_t reg_len;
    uint32_t assigned_len;
} ResourceProps;
775 
776 /* fill in the 'reg'/'assigned-resources' OF properties for
777  * a PCI device. 'reg' describes resource requirements for a
778  * device's IO/MEM regions, 'assigned-addresses' describes the
779  * actual resource assignments.
780  *
781  * the properties are arrays of ('phys-addr', 'size') pairs describing
782  * the addressable regions of the PCI device, where 'phys-addr' is a
783  * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
784  * (phys.hi, phys.mid, phys.lo), and 'size' is a
785  * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
786  *
787  * phys.hi = 0xYYXXXXZZ, where:
788  *   0xYY = npt000ss
789  *          |||   |
790  *          |||   +-- space code
791  *          |||               |
792  *          |||               +  00 if configuration space
793  *          |||               +  01 if IO region,
794  *          |||               +  10 if 32-bit MEM region
795  *          |||               +  11 if 64-bit MEM region
796  *          |||
797  *          ||+------ for non-relocatable IO: 1 if aliased
798  *          ||        for relocatable IO: 1 if below 64KB
799  *          ||        for MEM: 1 if below 1MB
800  *          |+------- 1 if region is prefetchable
801  *          +-------- 1 if region is non-relocatable
802  *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
803  *            bits respectively
804  *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
805  *          to the region
806  *
807  * phys.mid and phys.lo correspond respectively to the hi/lo portions
808  * of the actual address of the region.
809  *
810  * how the phys-addr/size values are used differ slightly between
811  * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
812  * an additional description for the config space region of the
813  * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
814  * to describe the region as relocatable, with an address-mapping
815  * that corresponds directly to the PHB's address space for the
816  * resource. 'assigned-addresses' always has n=1 set with an absolute
817  * address assigned for the resource. in general, 'assigned-addresses'
818  * won't be populated, since addresses for PCI devices are generally
819  * unmapped initially and left to the guest to assign.
820  *
821  * note also that addresses defined in these properties are, at least
822  * for PAPR guests, relative to the PHBs IO/MEM windows, and
823  * correspond directly to the addresses in the BARs.
824  *
825  * in accordance with PCI Bus Binding to Open Firmware,
826  * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
827  * Appendix C.
828  */
/* Fill in @rp's 'reg' and 'assigned-addresses' entries for device @d;
 * see the large comment above for the encoding. */
static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
{
    int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
    /* bus/slot/function packed into the phys.hi cell */
    uint32_t dev_id = (b_bbbbbbbb(bus_num) |
                       b_ddddd(PCI_SLOT(d->devfn)) |
                       b_fff(PCI_FUNC(d->devfn)));
    ResourceFields *reg, *assigned;
    int i, reg_idx = 0, assigned_idx = 0;

    /* config space region */
    reg = &rp->reg[reg_idx++];
    reg->phys_hi = cpu_to_be32(dev_id);
    reg->phys_mid = 0;
    reg->phys_lo = 0;
    reg->size_hi = 0;
    reg->size_lo = 0;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        /* skip unimplemented BARs */
        if (!d->io_regions[i].size) {
            continue;
        }

        reg = &rp->reg[reg_idx++];

        /* phys.hi: device id plus the BAR's register offset */
        reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
        /* space code: 01 = IO, 11 = 64-bit MEM, 10 = 32-bit MEM */
        if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
            reg->phys_hi |= cpu_to_be32(b_ss(1));
        } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            reg->phys_hi |= cpu_to_be32(b_ss(3));
        } else {
            reg->phys_hi |= cpu_to_be32(b_ss(2));
        }
        reg->phys_mid = 0;
        reg->phys_lo = 0;
        reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
        reg->size_lo = cpu_to_be32(d->io_regions[i].size);

        /* 'assigned-addresses' only lists regions with a mapped BAR */
        if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
            continue;
        }

        /* same descriptor, marked non-relocatable, with the actual
         * assigned address filled in */
        assigned = &rp->assigned[assigned_idx++];
        assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
        assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
        assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
        assigned->size_hi = reg->size_hi;
        assigned->size_lo = reg->size_lo;
    }

    /* byte lengths actually used, for the fdt_setprop calls */
    rp->reg_len = reg_idx * sizeof(ResourceFields);
    rp->assigned_len = assigned_idx * sizeof(ResourceFields);
}
881 
882 static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
883                                        int phb_index, int drc_index,
884                                        const char *drc_name)
885 {
886     ResourceProps rp;
887     bool is_bridge = false;
888     int pci_status;
889 
890     if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
891         PCI_HEADER_TYPE_BRIDGE) {
892         is_bridge = true;
893     }
894 
895     /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
896     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
897                           pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
898     _FDT(fdt_setprop_cell(fdt, offset, "device-id",
899                           pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
900     _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
901                           pci_default_read_config(dev, PCI_REVISION_ID, 1)));
902     _FDT(fdt_setprop_cell(fdt, offset, "class-code",
903                           pci_default_read_config(dev, PCI_CLASS_DEVICE, 2)
904                             << 8));
905     if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
906         _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
907                  pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
908     }
909 
910     if (!is_bridge) {
911         _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
912             pci_default_read_config(dev, PCI_MIN_GNT, 1)));
913         _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
914             pci_default_read_config(dev, PCI_MAX_LAT, 1)));
915     }
916 
917     if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
918         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
919                  pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
920     }
921 
922     if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
923         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
924                  pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
925     }
926 
927     _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
928         pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
929 
930     /* the following fdt cells are masked off the pci status register */
931     pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
932     _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
933                           PCI_STATUS_DEVSEL_MASK & pci_status));
934 
935     if (pci_status & PCI_STATUS_FAST_BACK) {
936         _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
937     }
938     if (pci_status & PCI_STATUS_66MHZ) {
939         _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
940     }
941     if (pci_status & PCI_STATUS_UDF) {
942         _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
943     }
944 
945     /* NOTE: this is normally generated by firmware via path/unit name,
946      * but in our case we must set it manually since it does not get
947      * processed by OF beforehand
948      */
949     _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
950     _FDT(fdt_setprop(fdt, offset, "ibm,loc-code", drc_name, strlen(drc_name)));
951     _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
952 
953     _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
954                           RESOURCE_CELLS_ADDRESS));
955     _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
956                           RESOURCE_CELLS_SIZE));
957     _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x",
958                           RESOURCE_CELLS_SIZE));
959 
960     populate_resource_props(dev, &rp);
961     _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
962     _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
963                      (uint8_t *)rp.assigned, rp.assigned_len));
964 
965     return 0;
966 }
967 
968 /* create OF node for pci device and required OF DT properties */
969 static void *spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
970                                        int drc_index, const char *drc_name,
971                                        int *dt_offset)
972 {
973     void *fdt;
974     int offset, ret, fdt_size;
975     int slot = PCI_SLOT(dev->devfn);
976     int func = PCI_FUNC(dev->devfn);
977     char nodename[512];
978 
979     fdt = create_device_tree(&fdt_size);
980     if (func != 0) {
981         sprintf(nodename, "pci@%d,%d", slot, func);
982     } else {
983         sprintf(nodename, "pci@%d", slot);
984     }
985     offset = fdt_add_subnode(fdt, 0, nodename);
986     ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb->index, drc_index,
987                                       drc_name);
988     g_assert(!ret);
989 
990     *dt_offset = offset;
991     return fdt;
992 }
993 
994 static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
995                                      sPAPRPHBState *phb,
996                                      PCIDevice *pdev,
997                                      Error **errp)
998 {
999     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1000     DeviceState *dev = DEVICE(pdev);
1001     int drc_index = drck->get_index(drc);
1002     const char *drc_name = drck->get_name(drc);
1003     void *fdt = NULL;
1004     int fdt_start_offset = 0;
1005 
1006     /* boot-time devices get their device tree node created by SLOF, but for
1007      * hotplugged devices we need QEMU to generate it so the guest can fetch
1008      * it via RTAS
1009      */
1010     if (dev->hotplugged) {
1011         fdt = spapr_create_pci_child_dt(phb, pdev, drc_index, drc_name,
1012                                         &fdt_start_offset);
1013     }
1014 
1015     drck->attach(drc, DEVICE(pdev),
1016                  fdt, fdt_start_offset, !dev->hotplugged, errp);
1017     if (*errp) {
1018         g_free(fdt);
1019     }
1020 }
1021 
1022 static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
1023 {
1024     /* some version guests do not wait for completion of a device
1025      * cleanup (generally done asynchronously by the kernel) before
1026      * signaling to QEMU that the device is safe, but instead sleep
1027      * for some 'safe' period of time. unfortunately on a busy host
1028      * this sleep isn't guaranteed to be long enough, resulting in
1029      * bad things like IRQ lines being left asserted during final
1030      * device removal. to deal with this we call reset just prior
1031      * to finalizing the device, which will put the device back into
1032      * an 'idle' state, as the device cleanup code expects.
1033      */
1034     pci_device_reset(PCI_DEVICE(dev));
1035     object_unparent(OBJECT(dev));
1036 }
1037 
1038 static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
1039                                         sPAPRPHBState *phb,
1040                                         PCIDevice *pdev,
1041                                         Error **errp)
1042 {
1043     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1044 
1045     drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
1046 }
1047 
1048 static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
1049                                                PCIDevice *pdev)
1050 {
1051     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
1052     return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
1053                                     (phb->index << 16) |
1054                                     (busnr << 8) |
1055                                     pdev->devfn);
1056 }
1057 
1058 static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
1059                                      DeviceState *plugged_dev, Error **errp)
1060 {
1061     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1062     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1063     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1064     Error *local_err = NULL;
1065 
1066     /* if DR is disabled we don't need to do anything in the case of
1067      * hotplug or coldplug callbacks
1068      */
1069     if (!phb->dr_enabled) {
1070         /* if this is a hotplug operation initiated by the user
1071          * we need to let them know it's not enabled
1072          */
1073         if (plugged_dev->hotplugged) {
1074             error_setg(errp, QERR_BUS_NO_HOTPLUG,
1075                        object_get_typename(OBJECT(phb)));
1076         }
1077         return;
1078     }
1079 
1080     g_assert(drc);
1081 
1082     spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
1083     if (local_err) {
1084         error_propagate(errp, local_err);
1085         return;
1086     }
1087     if (plugged_dev->hotplugged) {
1088         spapr_hotplug_req_add_event(drc);
1089     }
1090 }
1091 
1092 static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
1093                                        DeviceState *plugged_dev, Error **errp)
1094 {
1095     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1096     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1097     sPAPRDRConnectorClass *drck;
1098     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1099     Error *local_err = NULL;
1100 
1101     if (!phb->dr_enabled) {
1102         error_setg(errp, QERR_BUS_NO_HOTPLUG,
1103                    object_get_typename(OBJECT(phb)));
1104         return;
1105     }
1106 
1107     g_assert(drc);
1108 
1109     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1110     if (!drck->release_pending(drc)) {
1111         spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
1112         if (local_err) {
1113             error_propagate(errp, local_err);
1114             return;
1115         }
1116         spapr_hotplug_req_remove_event(drc);
1117     }
1118 }
1119 
1120 static void spapr_phb_realize(DeviceState *dev, Error **errp)
1121 {
1122     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1123     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1124     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
1125     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1126     sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s);
1127     char *namebuf;
1128     int i;
1129     PCIBus *bus;
1130     uint64_t msi_window_size = 4096;
1131 
1132     if (sphb->index != (uint32_t)-1) {
1133         hwaddr windows_base;
1134 
1135         if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
1136             || (sphb->mem_win_addr != (hwaddr)-1)
1137             || (sphb->io_win_addr != (hwaddr)-1)) {
1138             error_setg(errp, "Either \"index\" or other parameters must"
1139                        " be specified for PAPR PHB, not both");
1140             return;
1141         }
1142 
1143         if (sphb->index > SPAPR_PCI_MAX_INDEX) {
1144             error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
1145                        SPAPR_PCI_MAX_INDEX);
1146             return;
1147         }
1148 
1149         sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
1150         sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
1151 
1152         windows_base = SPAPR_PCI_WINDOW_BASE
1153             + sphb->index * SPAPR_PCI_WINDOW_SPACING;
1154         sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
1155         sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
1156     }
1157 
1158     if (sphb->buid == (uint64_t)-1) {
1159         error_setg(errp, "BUID not specified for PHB");
1160         return;
1161     }
1162 
1163     if (sphb->dma_liobn == (uint32_t)-1) {
1164         error_setg(errp, "LIOBN not specified for PHB");
1165         return;
1166     }
1167 
1168     if (sphb->mem_win_addr == (hwaddr)-1) {
1169         error_setg(errp, "Memory window address not specified for PHB");
1170         return;
1171     }
1172 
1173     if (sphb->io_win_addr == (hwaddr)-1) {
1174         error_setg(errp, "IO window address not specified for PHB");
1175         return;
1176     }
1177 
1178     if (spapr_pci_find_phb(spapr, sphb->buid)) {
1179         error_setg(errp, "PCI host bridges must have unique BUIDs");
1180         return;
1181     }
1182 
1183     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1184 
1185     namebuf = alloca(strlen(sphb->dtbusname) + 32);
1186 
1187     /* Initialize memory regions */
1188     sprintf(namebuf, "%s.mmio", sphb->dtbusname);
1189     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1190 
1191     sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
1192     memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
1193                              namebuf, &sphb->memspace,
1194                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1195     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1196                                 &sphb->memwindow);
1197 
1198     /* Initialize IO regions */
1199     sprintf(namebuf, "%s.io", sphb->dtbusname);
1200     memory_region_init(&sphb->iospace, OBJECT(sphb),
1201                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
1202 
1203     sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
1204     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1205                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1206     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1207                                 &sphb->iowindow);
1208 
1209     bus = pci_register_bus(dev, NULL,
1210                            pci_spapr_set_irq, pci_spapr_map_irq, sphb,
1211                            &sphb->memspace, &sphb->iospace,
1212                            PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
1213     phb->bus = bus;
1214     qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
1215 
1216     /*
1217      * Initialize PHB address space.
1218      * By default there will be at least one subregion for default
1219      * 32bit DMA window.
1220      * Later the guest might want to create another DMA window
1221      * which will become another memory subregion.
1222      */
1223     sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
1224 
1225     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1226                        namebuf, UINT64_MAX);
1227     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1228                        sphb->dtbusname);
1229 
1230     /*
1231      * As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors,
1232      * we need to allocate some memory to catch those writes coming
1233      * from msi_notify()/msix_notify().
1234      * As MSIMessage:addr is going to be the same and MSIMessage:data
1235      * is going to be a VIRQ number, 4 bytes of the MSI MR will only
1236      * be used.
1237      *
1238      * For KVM we want to ensure that this memory is a full page so that
1239      * our memory slot is of page size granularity.
1240      */
1241 #ifdef CONFIG_KVM
1242     if (kvm_enabled()) {
1243         msi_window_size = getpagesize();
1244     }
1245 #endif
1246 
1247     memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
1248                           "msi", msi_window_size);
1249     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1250                                 &sphb->msiwindow);
1251 
1252     pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
1253 
1254     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1255 
1256     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1257 
1258     /* Initialize the LSI table */
1259     for (i = 0; i < PCI_NUM_PINS; i++) {
1260         uint32_t irq;
1261 
1262         irq = xics_alloc_block(spapr->icp, 0, 1, true, false);
1263         if (!irq) {
1264             error_setg(errp, "spapr_allocate_lsi failed");
1265             return;
1266         }
1267 
1268         sphb->lsi_table[i].irq = irq;
1269     }
1270 
1271     /* allocate connectors for child PCI devices */
1272     if (sphb->dr_enabled) {
1273         for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
1274             spapr_dr_connector_new(OBJECT(phb),
1275                                    SPAPR_DR_CONNECTOR_TYPE_PCI,
1276                                    (sphb->index << 16) | i);
1277         }
1278     }
1279 
1280     if (!info->finish_realize) {
1281         error_setg(errp, "finish_realize not defined");
1282         return;
1283     }
1284 
1285     info->finish_realize(sphb, errp);
1286 
1287     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
1288 }
1289 
1290 static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
1291 {
1292     sPAPRTCETable *tcet;
1293     uint32_t nb_table;
1294 
1295     nb_table = SPAPR_PCI_DMA32_SIZE >> SPAPR_TCE_PAGE_SHIFT;
1296     tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
1297                                0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
1298     if (!tcet) {
1299         error_setg(errp, "Unable to create TCE table for %s",
1300                    sphb->dtbusname);
1301         return ;
1302     }
1303 
1304     /* Register default 32bit DMA window */
1305     memory_region_add_subregion(&sphb->iommu_root, 0,
1306                                 spapr_tce_get_iommu(tcet));
1307 }
1308 
1309 static int spapr_phb_children_reset(Object *child, void *opaque)
1310 {
1311     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
1312 
1313     if (dev) {
1314         device_reset(dev);
1315     }
1316 
1317     return 0;
1318 }
1319 
/* DeviceClass::reset: walk the PHB's QOM children and reset each
 * device among them. */
static void spapr_phb_reset(DeviceState *qdev)
{
    /* Reset the IOMMU state */
    object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
}
1325 
/* User-visible PHB properties.  -1 means "unset": either "index" is
 * given (and buid/liobn/window addresses are derived from it in
 * spapr_phb_realize), or the individual parameters are all given
 * explicitly — never both. */
static Property spapr_phb_properties[] = {
    DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
    DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
    DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
    DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
    DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
                       SPAPR_PCI_MMIO_WIN_SIZE),
    DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
    DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
                       SPAPR_PCI_IO_WIN_SIZE),
    DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};
1340 
/* Migration state for one LSI table entry.  The irq number is derived
 * from configuration at realize time, so it is only checked for
 * equality on the destination rather than transferred. */
static const VMStateDescription vmstate_spapr_pci_lsi = {
    .name = "spapr_pci/lsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),

        VMSTATE_END_OF_LIST()
    },
};
1351 
/* Migration state for one flattened MSI-configuration entry; the
 * array of these is built from the msi hash table in
 * spapr_pci_pre_save() and rebuilt into it by spapr_pci_post_load(). */
static const VMStateDescription vmstate_spapr_pci_msi = {
    .name = "spapr_pci/msi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(key, spapr_pci_msi_mig),
        VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
        VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
        VMSTATE_END_OF_LIST()
    },
};
1363 
1364 static void spapr_pci_fill_msi_devs(gpointer key, gpointer value,
1365                                     gpointer opaque)
1366 {
1367     sPAPRPHBState *sphb = opaque;
1368 
1369     sphb->msi_devs[sphb->msi_devs_num].key = *(uint32_t *)key;
1370     sphb->msi_devs[sphb->msi_devs_num].value = *(spapr_pci_msi *)value;
1371     sphb->msi_devs_num++;
1372 }
1373 
1374 static void spapr_pci_pre_save(void *opaque)
1375 {
1376     sPAPRPHBState *sphb = opaque;
1377     int msi_devs_num;
1378 
1379     if (sphb->msi_devs) {
1380         g_free(sphb->msi_devs);
1381         sphb->msi_devs = NULL;
1382     }
1383     sphb->msi_devs_num = 0;
1384     msi_devs_num = g_hash_table_size(sphb->msi);
1385     if (!msi_devs_num) {
1386         return;
1387     }
1388     sphb->msi_devs = g_malloc(msi_devs_num * sizeof(spapr_pci_msi_mig));
1389 
1390     g_hash_table_foreach(sphb->msi, spapr_pci_fill_msi_devs, sphb);
1391     assert(sphb->msi_devs_num == msi_devs_num);
1392 }
1393 
1394 static int spapr_pci_post_load(void *opaque, int version_id)
1395 {
1396     sPAPRPHBState *sphb = opaque;
1397     gpointer key, value;
1398     int i;
1399 
1400     for (i = 0; i < sphb->msi_devs_num; ++i) {
1401         key = g_memdup(&sphb->msi_devs[i].key,
1402                        sizeof(sphb->msi_devs[i].key));
1403         value = g_memdup(&sphb->msi_devs[i].value,
1404                          sizeof(sphb->msi_devs[i].value));
1405         g_hash_table_insert(sphb->msi, key, value);
1406     }
1407     if (sphb->msi_devs) {
1408         g_free(sphb->msi_devs);
1409         sphb->msi_devs = NULL;
1410     }
1411     sphb->msi_devs_num = 0;
1412 
1413     return 0;
1414 }
1415 
/* Top-level migration state for the PHB.  The *_EQUAL fields are
 * configuration and must match on the destination; msi_devs is the
 * scratch array produced by spapr_pci_pre_save() and consumed by
 * spapr_pci_post_load(). */
static const VMStateDescription vmstate_spapr_pci = {
    .name = "spapr_pci",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_pci_pre_save,
    .post_load = spapr_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
        VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
        VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
                             vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
        VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
        VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
                                    vmstate_spapr_pci_msi, spapr_pci_msi_mig),
        VMSTATE_END_OF_LIST()
    },
};
1437 
1438 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
1439                                            PCIBus *rootbus)
1440 {
1441     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
1442 
1443     return sphb->dtbusname;
1444 }
1445 
1446 static void spapr_phb_class_init(ObjectClass *klass, void *data)
1447 {
1448     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
1449     DeviceClass *dc = DEVICE_CLASS(klass);
1450     sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
1451     HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
1452 
1453     hc->root_bus_path = spapr_phb_root_bus_path;
1454     dc->realize = spapr_phb_realize;
1455     dc->props = spapr_phb_properties;
1456     dc->reset = spapr_phb_reset;
1457     dc->vmsd = &vmstate_spapr_pci;
1458     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
1459     dc->cannot_instantiate_with_device_add_yet = false;
1460     spc->finish_realize = spapr_phb_finish_realize;
1461     hp->plug = spapr_phb_hot_plug_child;
1462     hp->unplug = spapr_phb_hot_unplug_child;
1463 }
1464 
/* QOM type: the sPAPR PHB derives from the generic PCI host bridge
 * and implements the hotplug-handler interface for the bus it owns. */
static const TypeInfo spapr_phb_info = {
    .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
    .parent        = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(sPAPRPHBState),
    .class_init    = spapr_phb_class_init,
    .class_size    = sizeof(sPAPRPHBClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
1476 
1477 PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
1478 {
1479     DeviceState *dev;
1480 
1481     dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
1482     qdev_prop_set_uint32(dev, "index", index);
1483     qdev_init_nofail(dev);
1484 
1485     return PCI_HOST_BRIDGE(dev);
1486 }
1487 
/* Populate the flattened device tree node for a PHB: PHB properties,
 * MMIO/IO "ranges", the INTx "interrupt-map", the default DMA window
 * and the DR connector sub-properties.  Returns 0 on success or a
 * negative libfdt error from fdt_add_subnode()/spapr_drc_populate_dt().
 */
int spapr_populate_pci_dt(sPAPRPHBState *phb,
                          uint32_t xics_phandle,
                          void *fdt)
{
    int bus_off, i, j, ret;
    char nodename[256];
    uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
    /* split the MMIO window into the part below 4GiB of bus address
     * space (w32size) and the remainder above it (w64size) */
    const uint64_t mmiosize = memory_region_size(&phb->memwindow);
    const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
    const uint64_t w32size = MIN(w32max, mmiosize);
    const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
    /* "ranges" entries: IO window, 32bit MMIO window and (only when
     * w64size is non-zero, see sizeof_ranges below) a 64bit MMIO
     * window starting at bus address 4GiB */
    struct {
        uint32_t hi;
        uint64_t child;
        uint64_t parent;
        uint64_t size;
    } QEMU_PACKED ranges[] = {
        {
            cpu_to_be32(b_ss(1)), cpu_to_be64(0),
            cpu_to_be64(phb->io_win_addr),
            cpu_to_be64(memory_region_size(&phb->iospace)),
        },
        {
            cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
            cpu_to_be64(phb->mem_win_addr),
            cpu_to_be64(w32size),
        },
        {
            cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
            cpu_to_be64(phb->mem_win_addr + w32size),
            cpu_to_be64(w64size)
        },
    };
    const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
    uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
    /* match on device number only: function and pin are wildcarded */
    uint32_t interrupt_map_mask[] = {
        cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
    uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
    sPAPRTCETable *tcet;

    /* Start populating the FDT */
    sprintf(nodename, "pci@%" PRIx64, phb->buid);
    bus_off = fdt_add_subnode(fdt, 0, nodename);
    if (bus_off < 0) {
        return bus_off;
    }

    /* Write PHB properties */
    _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
    _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
    _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
    _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
    _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
    _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
    _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
    _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
    _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));

    /* Build the interrupt-map, this must match what is done
     * in pci_spapr_map_irq
     */
    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
                     &interrupt_map_mask, sizeof(interrupt_map_mask)));
    for (i = 0; i < PCI_SLOT_MAX; i++) {
        for (j = 0; j < PCI_NUM_PINS; j++) {
            uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
            int lsi_num = pci_spapr_swizzle(i, j);

            irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
            irqmap[1] = 0;
            irqmap[2] = 0;
            irqmap[3] = cpu_to_be32(j+1);
            irqmap[4] = cpu_to_be32(xics_phandle);
            irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
            irqmap[6] = cpu_to_be32(0x8);
        }
    }
    /* Write interrupt map */
    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
                     sizeof(interrupt_map)));

    /* NOTE(review): assumes the default DMA window's TCE table always
     * exists by the time this runs; tcet is dereferenced without a
     * NULL check — confirm against the realize path */
    tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
    spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
                 tcet->liobn, tcet->bus_offset,
                 tcet->nb_table << tcet->page_shift);

    ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
                                SPAPR_DR_CONNECTOR_TYPE_PCI);
    if (ret) {
        return ret;
    }

    return 0;
}
1584 
1585 void spapr_pci_rtas_init(void)
1586 {
1587     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
1588                         rtas_read_pci_config);
1589     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
1590                         rtas_write_pci_config);
1591     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
1592                         rtas_ibm_read_pci_config);
1593     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
1594                         rtas_ibm_write_pci_config);
1595     if (msi_supported) {
1596         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
1597                             "ibm,query-interrupt-source-number",
1598                             rtas_ibm_query_interrupt_source_number);
1599         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
1600                             rtas_ibm_change_msi);
1601     }
1602 
1603     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
1604                         "ibm,set-eeh-option",
1605                         rtas_ibm_set_eeh_option);
1606     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
1607                         "ibm,get-config-addr-info2",
1608                         rtas_ibm_get_config_addr_info2);
1609     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
1610                         "ibm,read-slot-reset-state2",
1611                         rtas_ibm_read_slot_reset_state2);
1612     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
1613                         "ibm,set-slot-reset",
1614                         rtas_ibm_set_slot_reset);
1615     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
1616                         "ibm,configure-pe",
1617                         rtas_ibm_configure_pe);
1618     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
1619                         "ibm,slot-error-detail",
1620                         rtas_ibm_slot_error_detail);
1621 }
1622 
/* register the sPAPR PHB QOM type at module-init time */
static void spapr_pci_register_types(void)
{
    type_register_static(&spapr_phb_info);
}

type_init(spapr_pci_register_types)
1629 
1630 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
1631 {
1632     bool be = *(bool *)opaque;
1633 
1634     if (object_dynamic_cast(OBJECT(dev), "VGA")
1635         || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
1636         object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
1637                                  &error_abort);
1638     }
1639     return 0;
1640 }
1641 
1642 void spapr_pci_switch_vga(bool big_endian)
1643 {
1644     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1645     sPAPRPHBState *sphb;
1646 
1647     /*
1648      * For backward compatibility with existing guests, we switch
1649      * the endianness of the VGA controller when changing the guest
1650      * interrupt mode
1651      */
1652     QLIST_FOREACH(sphb, &spapr->phbs, list) {
1653         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
1654         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
1655                            &big_endian);
1656     }
1657 }
1658