xref: /qemu/hw/ppc/spapr_pci.c (revision 06b40d250ecfa1633209c2e431a7a38acfd03a98)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "qapi/error.h"
28 #include "hw/irq.h"
29 #include "hw/sysbus.h"
30 #include "migration/vmstate.h"
31 #include "hw/pci/pci.h"
32 #include "hw/pci/msi.h"
33 #include "hw/pci/msix.h"
34 #include "hw/pci/pci_host.h"
35 #include "hw/ppc/spapr.h"
36 #include "hw/pci-host/spapr.h"
37 #include "system/ram_addr.h"
38 #include <libfdt.h>
39 #include "trace.h"
40 #include "qemu/error-report.h"
41 #include "qemu/module.h"
42 #include "hw/ppc/fdt.h"
43 #include "hw/pci/pci_bridge.h"
44 #include "hw/pci/pci_bus.h"
45 #include "hw/pci/pci_ids.h"
46 #include "hw/ppc/spapr_drc.h"
47 #include "hw/qdev-properties.h"
48 #include "system/device_tree.h"
49 #include "system/kvm.h"
50 #include "system/hostmem.h"
51 #include "system/numa.h"
52 #include "hw/ppc/spapr_numa.h"
53 #include "qemu/log.h"
54 
55 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
56 #define RTAS_QUERY_FN           0
57 #define RTAS_CHANGE_FN          1
58 #define RTAS_RESET_FN           2
59 #define RTAS_CHANGE_MSI_FN      3
60 #define RTAS_CHANGE_MSIX_FN     4
61 
62 /* Interrupt types to return on RTAS_CHANGE_* */
63 #define RTAS_TYPE_MSI           1
64 #define RTAS_TYPE_MSIX          2
65 
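/*
 * Argument layout of the ibm,change-msi RTAS call, as handled by
 * rtas_ibm_change_msi() below:
 *   in:  args[0] config_addr, args[1..2] PHB BUID (hi/lo),
 *        args[3] func (one of the RTAS_*_FN values above),
 *        args[4] number of interrupts requested (0 == release all),
 *        args[5] sequence number
 *   out: rets[0] status, rets[1] number granted, rets[2] next sequence
 *        number, rets[3] interrupt type (only if nret > 3)
 */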
66 SpaprPhbState *spapr_pci_find_phb(SpaprMachineState *spapr, uint64_t buid)
67 {
68     SpaprPhbState *sphb;
69 
70     QLIST_FOREACH(sphb, &spapr->phbs, list) {
71         if (sphb->buid != buid) {
72             continue;
73         }
74         return sphb;
75     }
76 
77     return NULL;
78 }
79 
80 PCIDevice *spapr_pci_find_dev(SpaprMachineState *spapr, uint64_t buid,
81                               uint32_t config_addr)
82 {
83     SpaprPhbState *sphb = spapr_pci_find_phb(spapr, buid);
84     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
85     int bus_num = (config_addr >> 16) & 0xFF;
86     int devfn = (config_addr >> 8) & 0xFF;
87 
88     if (!phb) {
89         return NULL;
90     }
91 
92     return pci_find_device(phb->bus, bus_num, devfn);
93 }
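/*
 * Worked example: config_addr 0x012800 selects bus 0x01 and devfn 0x28
 * (slot 5, function 0) on the PHB named by 'buid', since the bus number
 * sits in bits 16-23 and devfn in bits 8-15 of the config address.
 */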
94 
95 static uint32_t rtas_pci_cfgaddr(uint32_t arg)
96 {
97     /* This handles the encoding of extended config space addresses */
98     return ((arg >> 20) & 0xf00) | (arg & 0xff);
99 }
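/*
 * Worked example: for extended config space offset 0x144 the caller
 * (e.g. the Linux rtas_config_addr() helper) puts the upper nibble of
 * the register number into bits 28-31, so an incoming arg of 0x10012844
 * decodes as ((0x10012844 >> 20) & 0xf00) | 0x44 == 0x144.
 */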
100 
101 static void finish_read_pci_config(SpaprMachineState *spapr, uint64_t buid,
102                                    uint32_t addr, uint32_t size,
103                                    target_ulong rets)
104 {
105     PCIDevice *pci_dev;
106     uint32_t val;
107 
108     if ((size != 1) && (size != 2) && (size != 4)) {
109         /* access must be 1, 2 or 4 bytes */
110         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
111         return;
112     }
113 
114     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
115     addr = rtas_pci_cfgaddr(addr);
116 
117     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
118         /* Access must be to a valid device, within bounds and
119          * naturally aligned */
120         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
121         return;
122     }
123 
124     val = pci_host_config_read_common(pci_dev, addr,
125                                       pci_config_size(pci_dev), size);
126 
127     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
128     rtas_st(rets, 1, val);
129 }
130 
131 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
132                                      uint32_t token, uint32_t nargs,
133                                      target_ulong args,
134                                      uint32_t nret, target_ulong rets)
135 {
136     uint64_t buid;
137     uint32_t size, addr;
138 
139     if ((nargs != 4) || (nret != 2)) {
140         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
141         return;
142     }
143 
144     buid = rtas_ldq(args, 1);
145     size = rtas_ld(args, 3);
146     addr = rtas_ld(args, 0);
147 
148     finish_read_pci_config(spapr, buid, addr, size, rets);
149 }
150 
151 static void rtas_read_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
152                                  uint32_t token, uint32_t nargs,
153                                  target_ulong args,
154                                  uint32_t nret, target_ulong rets)
155 {
156     uint32_t size, addr;
157 
158     if ((nargs != 2) || (nret != 2)) {
159         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
160         return;
161     }
162 
163     size = rtas_ld(args, 1);
164     addr = rtas_ld(args, 0);
165 
166     finish_read_pci_config(spapr, 0, addr, size, rets);
167 }
168 
169 static void finish_write_pci_config(SpaprMachineState *spapr, uint64_t buid,
170                                     uint32_t addr, uint32_t size,
171                                     uint32_t val, target_ulong rets)
172 {
173     PCIDevice *pci_dev;
174 
175     if ((size != 1) && (size != 2) && (size != 4)) {
176         /* access must be 1, 2 or 4 bytes */
177         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
178         return;
179     }
180 
181     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
182     addr = rtas_pci_cfgaddr(addr);
183 
184     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
185         /* Access must be to a valid device, within bounds and
186          * naturally aligned */
187         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
188         return;
189     }
190 
191     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
192                                  val, size);
193 
194     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
195 }
196 
197 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
198                                       uint32_t token, uint32_t nargs,
199                                       target_ulong args,
200                                       uint32_t nret, target_ulong rets)
201 {
202     uint64_t buid;
203     uint32_t val, size, addr;
204 
205     if ((nargs != 5) || (nret != 1)) {
206         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
207         return;
208     }
209 
210     buid = rtas_ldq(args, 1);
211     val = rtas_ld(args, 4);
212     size = rtas_ld(args, 3);
213     addr = rtas_ld(args, 0);
214 
215     finish_write_pci_config(spapr, buid, addr, size, val, rets);
216 }
217 
218 static void rtas_write_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr,
219                                   uint32_t token, uint32_t nargs,
220                                   target_ulong args,
221                                   uint32_t nret, target_ulong rets)
222 {
223     uint32_t val, size, addr;
224 
225     if ((nargs != 3) || (nret != 1)) {
226         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
227         return;
228     }
229 
230 
231     val = rtas_ld(args, 2);
232     size = rtas_ld(args, 1);
233     addr = rtas_ld(args, 0);
234 
235     finish_write_pci_config(spapr, 0, addr, size, val, rets);
236 }
237 
238 /*
239  * Set MSI/MSIX message data.
240  * This is required for msi_notify()/msix_notify() which
241  * will write at the addresses via spapr_msi_write().
242  *
243  * If hwaddr == 0, all entries will have .data == first_irq i.e.
244  * table will be reset.
245  */
246 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
247                              unsigned first_irq, unsigned req_num)
248 {
249     unsigned i;
250     MSIMessage msg = { .address = addr, .data = first_irq };
251 
252     if (!msix) {
253         msi_set_message(pdev, msg);
254         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
255         return;
256     }
257 
258     for (i = 0; i < req_num; ++i) {
259         msix_set_message(pdev, i, msg);
260         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
261         if (addr) {
262             ++msg.data;
263         }
264     }
265 }
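/*
 * Example: spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, true, 5000, 3)
 * points three MSI-X vectors at the MSI window with .data values 5000,
 * 5001 and 5002, while addr == 0 leaves every .data equal to first_irq,
 * i.e. resets the table as described above.
 */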
266 
267 static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
268                                 uint32_t token, uint32_t nargs,
269                                 target_ulong args, uint32_t nret,
270                                 target_ulong rets)
271 {
272     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
273     uint32_t config_addr = rtas_ld(args, 0);
274     uint64_t buid = rtas_ldq(args, 1);
275     unsigned int func = rtas_ld(args, 3);
276     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
277     unsigned int seq_num = rtas_ld(args, 5);
278     unsigned int ret_intr_type;
279     unsigned int irq, max_irqs = 0;
280     SpaprPhbState *phb = NULL;
281     PCIDevice *pdev = NULL;
282     SpaprPciMsi *msi;
283     int *config_addr_key;
284     Error *err = NULL;
285     int i;
286 
287     /* Find SpaprPhbState */
288     phb = spapr_pci_find_phb(spapr, buid);
289     if (phb) {
290         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
291     }
292     if (!phb || !pdev) {
293         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
294         return;
295     }
296 
297     switch (func) {
298     case RTAS_CHANGE_FN:
299         if (msi_present(pdev)) {
300             ret_intr_type = RTAS_TYPE_MSI;
301         } else if (msix_present(pdev)) {
302             ret_intr_type = RTAS_TYPE_MSIX;
303         } else {
304             rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
305             return;
306         }
307         break;
308     case RTAS_CHANGE_MSI_FN:
309         if (msi_present(pdev)) {
310             ret_intr_type = RTAS_TYPE_MSI;
311         } else {
312             rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
313             return;
314         }
315         break;
316     case RTAS_CHANGE_MSIX_FN:
317         if (msix_present(pdev)) {
318             ret_intr_type = RTAS_TYPE_MSIX;
319         } else {
320             rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
321             return;
322         }
323         break;
324     default:
325         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
326         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
327         return;
328     }
329 
330     msi = (SpaprPciMsi *) g_hash_table_lookup(phb->msi, &config_addr);
331 
332     /* Releasing MSIs */
333     if (!req_num) {
334         if (!msi) {
335             trace_spapr_pci_msi("Releasing wrong config", config_addr);
336             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
337             return;
338         }
339 
340         if (msi_present(pdev)) {
341             spapr_msi_setmsg(pdev, 0, false, 0, 0);
342         }
343         if (msix_present(pdev)) {
344             spapr_msi_setmsg(pdev, 0, true, 0, 0);
345         }
346         g_hash_table_remove(phb->msi, &config_addr);
347 
348         trace_spapr_pci_msi("Released MSIs", config_addr);
349         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
350         rtas_st(rets, 1, 0);
351         return;
352     }
353 
354     /* Enabling MSI */
355 
356     /* Check if the device supports as many IRQs as requested */
357     if (ret_intr_type == RTAS_TYPE_MSI) {
358         max_irqs = msi_nr_vectors_allocated(pdev);
359     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
360         max_irqs = pdev->msix_entries_nr;
361     }
362     if (!max_irqs) {
363         error_report("Requested interrupt type %d is not enabled for device %x",
364                      ret_intr_type, config_addr);
365         rtas_st(rets, 0, -1); /* Hardware error */
366         return;
367     }
368     /* Correct the number if the guest asked for too many */
369     if (req_num > max_irqs) {
370         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
371         req_num = max_irqs;
372         irq = 0; /* to avoid misleading trace */
373         goto out;
374     }
375 
376     /* Allocate MSIs */
377     if (smc->legacy_irq_allocation) {
378         irq = spapr_irq_find(spapr, req_num, ret_intr_type == RTAS_TYPE_MSI,
379                              &err);
380     } else {
381         irq = spapr_irq_msi_alloc(spapr, req_num,
382                                   ret_intr_type == RTAS_TYPE_MSI, &err);
383     }
384     if (err) {
385         error_reportf_err(err, "Can't allocate MSIs for device %x: ",
386                           config_addr);
387         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
388         return;
389     }
390 
391     for (i = 0; i < req_num; i++) {
392         spapr_irq_claim(spapr, irq + i, false, &err);
393         if (err) {
394             if (i) {
395                 spapr_irq_free(spapr, irq, i);
396             }
397             if (!smc->legacy_irq_allocation) {
398                 spapr_irq_msi_free(spapr, irq, req_num);
399             }
400             error_reportf_err(err, "Can't allocate MSIs for device %x: ",
401                               config_addr);
402             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
403             return;
404         }
405     }
406 
407     /* Release previous MSIs */
408     if (msi) {
409         g_hash_table_remove(phb->msi, &config_addr);
410     }
411 
412     /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
413     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
414                      irq, req_num);
415 
416     /* Add MSI device to cache */
417     msi = g_new(SpaprPciMsi, 1);
418     msi->first_irq = irq;
419     msi->num = req_num;
420     config_addr_key = g_new(int, 1);
421     *config_addr_key = config_addr;
422     g_hash_table_insert(phb->msi, config_addr_key, msi);
423 
424 out:
425     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
426     rtas_st(rets, 1, req_num);
427     rtas_st(rets, 2, ++seq_num);
428     if (nret > 3) {
429         rtas_st(rets, 3, ret_intr_type);
430     }
431 
432     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
433 }
434 
435 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
436                                                    SpaprMachineState *spapr,
437                                                    uint32_t token,
438                                                    uint32_t nargs,
439                                                    target_ulong args,
440                                                    uint32_t nret,
441                                                    target_ulong rets)
442 {
443     uint32_t config_addr = rtas_ld(args, 0);
444     uint64_t buid = rtas_ldq(args, 1);
445     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
446     SpaprPhbState *phb = NULL;
447     PCIDevice *pdev = NULL;
448     SpaprPciMsi *msi;
449 
450     /* Find SpaprPhbState */
451     phb = spapr_pci_find_phb(spapr, buid);
452     if (phb) {
453         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
454     }
455     if (!phb || !pdev) {
456         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
457         return;
458     }
459 
460     /* Find device descriptor and start IRQ */
461     msi = (SpaprPciMsi *) g_hash_table_lookup(phb->msi, &config_addr);
462     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
463         trace_spapr_pci_msi("Failed to return vector", config_addr);
464         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
465         return;
466     }
467     intr_src_num = msi->first_irq + ioa_intr_num;
468     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
469                                                            intr_src_num);
470 
471     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
472     rtas_st(rets, 1, intr_src_num);
473     rtas_st(rets, 2, 1);/* 0 == level; 1 == edge */
474 }
475 
476 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
477                                     SpaprMachineState *spapr,
478                                     uint32_t token, uint32_t nargs,
479                                     target_ulong args, uint32_t nret,
480                                     target_ulong rets)
481 {
482     SpaprPhbState *sphb;
483     uint32_t addr, option;
484     uint64_t buid;
485     int ret;
486 
487     if ((nargs != 4) || (nret != 1)) {
488         goto param_error_exit;
489     }
490 
491     buid = rtas_ldq(args, 1);
492     addr = rtas_ld(args, 0);
493     option = rtas_ld(args, 3);
494 
495     sphb = spapr_pci_find_phb(spapr, buid);
496     if (!sphb) {
497         goto param_error_exit;
498     }
499 
500     if (!spapr_phb_eeh_available(sphb)) {
501         goto param_error_exit;
502     }
503 
504     ret = spapr_phb_vfio_eeh_set_option(sphb, addr, option);
505     rtas_st(rets, 0, ret);
506     return;
507 
508 param_error_exit:
509     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
510 }
511 
512 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
513                                            SpaprMachineState *spapr,
514                                            uint32_t token, uint32_t nargs,
515                                            target_ulong args, uint32_t nret,
516                                            target_ulong rets)
517 {
518     SpaprPhbState *sphb;
519     PCIDevice *pdev;
520     uint32_t addr, option;
521     uint64_t buid;
522 
523     if ((nargs != 4) || (nret != 2)) {
524         goto param_error_exit;
525     }
526 
527     buid = rtas_ldq(args, 1);
528     sphb = spapr_pci_find_phb(spapr, buid);
529     if (!sphb) {
530         goto param_error_exit;
531     }
532 
533     if (!spapr_phb_eeh_available(sphb)) {
534         goto param_error_exit;
535     }
536 
537     /*
538      * We always have PE address of form "00BB0001". "BB"
539      * represents the bus number of PE's primary bus.
540      */
541     option = rtas_ld(args, 3);
542     switch (option) {
543     case RTAS_GET_PE_ADDR:
544         addr = rtas_ld(args, 0);
545         pdev = spapr_pci_find_dev(spapr, buid, addr);
546         if (!pdev) {
547             goto param_error_exit;
548         }
549 
550         rtas_st(rets, 1, (pci_bus_num(pci_get_bus(pdev)) << 16) + 1);
551         break;
552     case RTAS_GET_PE_MODE:
553         rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
554         break;
555     default:
556         goto param_error_exit;
557     }
558 
559     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
560     return;
561 
562 param_error_exit:
563     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
564 }
565 
566 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
567                                             SpaprMachineState *spapr,
568                                             uint32_t token, uint32_t nargs,
569                                             target_ulong args, uint32_t nret,
570                                             target_ulong rets)
571 {
572     SpaprPhbState *sphb;
573     uint64_t buid;
574     int state, ret;
575 
576     if ((nargs != 3) || (nret != 4 && nret != 5)) {
577         goto param_error_exit;
578     }
579 
580     buid = rtas_ldq(args, 1);
581     sphb = spapr_pci_find_phb(spapr, buid);
582     if (!sphb) {
583         goto param_error_exit;
584     }
585 
586     if (!spapr_phb_eeh_available(sphb)) {
587         goto param_error_exit;
588     }
589 
590     ret = spapr_phb_vfio_eeh_get_state(sphb, &state);
591     rtas_st(rets, 0, ret);
592     if (ret != RTAS_OUT_SUCCESS) {
593         return;
594     }
595 
596     rtas_st(rets, 1, state);
597     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
598     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
599     if (nret >= 5) {
600         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
601     }
602     return;
603 
604 param_error_exit:
605     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
606 }
607 
608 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
609                                     SpaprMachineState *spapr,
610                                     uint32_t token, uint32_t nargs,
611                                     target_ulong args, uint32_t nret,
612                                     target_ulong rets)
613 {
614     SpaprPhbState *sphb;
615     uint32_t option;
616     uint64_t buid;
617     int ret;
618 
619     if ((nargs != 4) || (nret != 1)) {
620         goto param_error_exit;
621     }
622 
623     buid = rtas_ldq(args, 1);
624     option = rtas_ld(args, 3);
625     sphb = spapr_pci_find_phb(spapr, buid);
626     if (!sphb) {
627         goto param_error_exit;
628     }
629 
630     if (!spapr_phb_eeh_available(sphb)) {
631         goto param_error_exit;
632     }
633 
634     ret = spapr_phb_vfio_eeh_reset(sphb, option);
635     rtas_st(rets, 0, ret);
636     return;
637 
638 param_error_exit:
639     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
640 }
641 
642 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
643                                   SpaprMachineState *spapr,
644                                   uint32_t token, uint32_t nargs,
645                                   target_ulong args, uint32_t nret,
646                                   target_ulong rets)
647 {
648     SpaprPhbState *sphb;
649     uint64_t buid;
650     int ret;
651 
652     if ((nargs != 3) || (nret != 1)) {
653         goto param_error_exit;
654     }
655 
656     buid = rtas_ldq(args, 1);
657     sphb = spapr_pci_find_phb(spapr, buid);
658     if (!sphb) {
659         goto param_error_exit;
660     }
661 
662     if (!spapr_phb_eeh_available(sphb)) {
663         goto param_error_exit;
664     }
665 
666     ret = spapr_phb_vfio_eeh_configure(sphb);
667     rtas_st(rets, 0, ret);
668     return;
669 
670 param_error_exit:
671     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
672 }
673 
674 /* To support it later */
675 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
676                                        SpaprMachineState *spapr,
677                                        uint32_t token, uint32_t nargs,
678                                        target_ulong args, uint32_t nret,
679                                        target_ulong rets)
680 {
681     SpaprPhbState *sphb;
682     int option;
683     uint64_t buid;
684 
685     if ((nargs != 8) || (nret != 1)) {
686         goto param_error_exit;
687     }
688 
689     buid = rtas_ldq(args, 1);
690     sphb = spapr_pci_find_phb(spapr, buid);
691     if (!sphb) {
692         goto param_error_exit;
693     }
694 
695     if (!spapr_phb_eeh_available(sphb)) {
696         goto param_error_exit;
697     }
698 
699     option = rtas_ld(args, 7);
700     switch (option) {
701     case RTAS_SLOT_TEMP_ERR_LOG:
702     case RTAS_SLOT_PERM_ERR_LOG:
703         break;
704     default:
705         goto param_error_exit;
706     }
707 
708     /* We don't have error log yet */
709     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
710     return;
711 
712 param_error_exit:
713     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
714 }
715 
716 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
717 {
718     /*
719      * Here we use the number returned by pci_swizzle_map_irq_fn to find a
720      * corresponding qemu_irq.
721      */
722     SpaprPhbState *phb = opaque;
723     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
724 
725     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
726     qemu_set_irq(spapr_qirq(spapr, phb->lsi_table[irq_num].irq), level);
727 }
728 
729 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
730 {
731     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
732     PCIINTxRoute route;
733 
734     route.mode = PCI_INTX_ENABLED;
735     route.irq = sphb->lsi_table[pin].irq;
736 
737     return route;
738 }
739 
740 static uint64_t spapr_msi_read(void *opaque, hwaddr addr, unsigned size)
741 {
742     qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
743     return 0;
744 }
745 
746 /*
747  * MSI/MSIX memory region implementation.
748  * The handler handles both MSI and MSIX.
749  * The vector number is encoded in least bits in data.
750  */
751 static void spapr_msi_write(void *opaque, hwaddr addr,
752                             uint64_t data, unsigned size)
753 {
754     SpaprMachineState *spapr = opaque;
755     uint32_t irq = data;
756 
757     trace_spapr_pci_msi_write(addr, data, irq);
758 
759     qemu_irq_pulse(spapr_qirq(spapr, irq));
760 }
761 
762 static const MemoryRegionOps spapr_msi_ops = {
763     /*
764      * .read result is undefined by PCI spec.
765      * define .read method to avoid assert failure in memory_region_init_io
766      */
767     .read = spapr_msi_read,
768     .write = spapr_msi_write,
769     .endianness = DEVICE_LITTLE_ENDIAN
770 };
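/*
 * Illustrative sketch (not part of the original file): one way the MSI
 * window could be wired up so that guest writes reach spapr_msi_write().
 * The local MemoryRegion, the parent region and the 4 KiB size are
 * assumptions made for the example; the real mapping is established when
 * the PHB is realized.
 */
static void G_GNUC_UNUSED example_map_msi_window(SpaprMachineState *spapr,
                                                 MemoryRegion *parent)
{
    static MemoryRegion msi_win;   /* hypothetical storage for the sketch */

    /* Route accesses to this window through spapr_msi_ops above */
    memory_region_init_io(&msi_win, OBJECT(spapr), &spapr_msi_ops, spapr,
                          "msi", 0x1000 /* assumed window size */);
    memory_region_add_subregion(parent, SPAPR_PCI_MSI_WINDOW, &msi_win);
}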
771 
772 /*
773  * PHB PCI device
774  */
775 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
776 {
777     SpaprPhbState *phb = opaque;
778 
779     return &phb->iommu_as;
780 }
781 
782 static const PCIIOMMUOps spapr_iommu_ops = {
783     .get_address_space = spapr_pci_dma_iommu,
784 };
785 
786 static char *spapr_phb_vfio_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev)
787 {
788     g_autofree char *path = NULL;
789     g_autofree char *host = NULL;
790     g_autofree char *devspec = NULL;
791     char *buf = NULL;
792 
793     /* Get the PCI VFIO host id */
794     host = object_property_get_str(OBJECT(pdev), "host", NULL);
795     if (!host) {
796         return NULL;
797     }
798 
799     /* Construct the path of the file that will give us the DT location */
800     path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host);
801     if (!g_file_get_contents(path, &devspec, NULL, NULL)) {
802         return NULL;
803     }
804 
805     /* Construct and read from host device tree the loc-code */
806     g_free(path);
807     path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", devspec);
808     if (!g_file_get_contents(path, &buf, NULL, NULL)) {
809         return NULL;
810     }
811     return buf;
812 }
813 
814 static char *spapr_phb_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev)
815 {
816     char *buf;
817     const char *devtype = "qemu";
818     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
819 
820     if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
821         buf = spapr_phb_vfio_get_loc_code(sphb, pdev);
822         if (buf) {
823             return buf;
824         }
825         devtype = "vfio";
826     }
827     /*
828      * For emulated devices and VFIO-failure case, make up
829      * the loc-code.
830      */
831     buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x",
832                           devtype, pdev->name, sphb->index, busnr,
833                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
834     return buf;
835 }
836 
837 /* Macros to operate with address in OF binding to PCI */
838 #define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
839 #define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
840 #define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
841 #define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
842 #define b_ss(x)         b_x((x), 24, 2) /* the space code */
843 #define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
844 #define b_ddddd(x)      b_x((x), 11, 5) /* device number */
845 #define b_fff(x)        b_x((x), 8, 3)  /* function number */
846 #define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
847 
848 /* for 'reg' OF properties */
849 #define RESOURCE_CELLS_SIZE 2
850 #define RESOURCE_CELLS_ADDRESS 3
851 
852 typedef struct ResourceFields {
853     uint32_t phys_hi;
854     uint32_t phys_mid;
855     uint32_t phys_lo;
856     uint32_t size_hi;
857     uint32_t size_lo;
858 } QEMU_PACKED ResourceFields;
859 
860 typedef struct ResourceProps {
861     ResourceFields reg[8];
862     uint32_t reg_len;
863 } ResourceProps;
864 
865 /* fill in the 'reg' OF properties for
866  * a PCI device. 'reg' describes resource requirements for a
867  * device's IO/MEM regions.
868  *
869  * the property is an array of ('phys-addr', 'size') pairs describing
870  * the addressable regions of the PCI device, where 'phys-addr' is a
871  * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
872  * (phys.hi, phys.mid, phys.lo), and 'size' is a
873  * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
874  *
875  * phys.hi = 0xYYXXXXZZ, where:
876  *   0xYY = npt000ss
877  *          |||   |
878  *          |||   +-- space code
879  *          |||               |
880  *          |||               +  00 if configuration space
881  *          |||               +  01 if IO region,
882  *          |||               +  10 if 32-bit MEM region
883  *          |||               +  11 if 64-bit MEM region
884  *          |||
885  *          ||+------ for non-relocatable IO: 1 if aliased
886  *          ||        for relocatable IO: 1 if below 64KB
887  *          ||        for MEM: 1 if below 1MB
888  *          |+------- 1 if region is prefetchable
889  *          +-------- 1 if region is non-relocatable
890  *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
891  *            bits respectively
892  *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
893  *          to the region
894  *
895  * phys.mid and phys.lo correspond respectively to the hi/lo portions
896  * of the actual address of the region.
897  *
898  * note also that addresses defined in this property are, at least
899  * for PAPR guests, relative to the PHBs IO/MEM windows, and
900  * correspond directly to the addresses in the BARs.
901  *
902  * in accordance with PCI Bus Binding to Open Firmware,
903  * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
904  * Appendix C.
905  */
906 static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
907 {
908     int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
909     uint32_t dev_id = (b_bbbbbbbb(bus_num) |
910                        b_ddddd(PCI_SLOT(d->devfn)) |
911                        b_fff(PCI_FUNC(d->devfn)));
912     ResourceFields *reg;
913     int i, reg_idx = 0;
914 
915     /* config space region */
916     reg = &rp->reg[reg_idx++];
917     reg->phys_hi = cpu_to_be32(dev_id);
918     reg->phys_mid = 0;
919     reg->phys_lo = 0;
920     reg->size_hi = 0;
921     reg->size_lo = 0;
922 
923     for (i = 0; i < PCI_NUM_REGIONS; i++) {
924         if (!d->io_regions[i].size) {
925             continue;
926         }
927 
928         reg = &rp->reg[reg_idx++];
929 
930         reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
931         if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
932             reg->phys_hi |= cpu_to_be32(b_ss(1));
933         } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
934             reg->phys_hi |= cpu_to_be32(b_ss(3));
935         } else {
936             reg->phys_hi |= cpu_to_be32(b_ss(2));
937         }
938         reg->phys_mid = 0;
939         reg->phys_lo = 0;
940         reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
941         reg->size_lo = cpu_to_be32(d->io_regions[i].size);
942     }
943 
944     rp->reg_len = reg_idx * sizeof(ResourceFields);
945 }
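/*
 * Worked example: a 64-bit memory BAR at config offset 0x10 on bus 0,
 * slot 5, function 0 is encoded as
 *   phys.hi = b_ss(3) | b_bbbbbbbb(0) | b_ddddd(5) | b_fff(0)
 *             | b_rrrrrrrr(0x10) == 0x03002810
 * with the prefetchable and non-relocatable bits left clear, as
 * populate_resource_props() above always does.
 */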
946 
947 typedef struct PCIClass PCIClass;
948 typedef struct PCISubClass PCISubClass;
949 typedef struct PCIIFace PCIIFace;
950 
951 struct PCIIFace {
952     int iface;
953     const char *name;
954 };
955 
956 struct PCISubClass {
957     int subclass;
958     const char *name;
959     const PCIIFace *iface;
960 };
961 
962 struct PCIClass {
963     const char *name;
964     const PCISubClass *subc;
965 };
966 
967 static const PCISubClass undef_subclass[] = {
968     { PCI_CLASS_NOT_DEFINED_VGA, "display", NULL },
969     { 0xFF, NULL, NULL },
970 };
971 
972 static const PCISubClass mass_subclass[] = {
973     { PCI_CLASS_STORAGE_SCSI, "scsi", NULL },
974     { PCI_CLASS_STORAGE_IDE, "ide", NULL },
975     { PCI_CLASS_STORAGE_FLOPPY, "fdc", NULL },
976     { PCI_CLASS_STORAGE_IPI, "ipi", NULL },
977     { PCI_CLASS_STORAGE_RAID, "raid", NULL },
978     { PCI_CLASS_STORAGE_ATA, "ata", NULL },
979     { PCI_CLASS_STORAGE_SATA, "sata", NULL },
980     { PCI_CLASS_STORAGE_SAS, "sas", NULL },
981     { 0xFF, NULL, NULL },
982 };
983 
984 static const PCISubClass net_subclass[] = {
985     { PCI_CLASS_NETWORK_ETHERNET, "ethernet", NULL },
986     { PCI_CLASS_NETWORK_TOKEN_RING, "token-ring", NULL },
987     { PCI_CLASS_NETWORK_FDDI, "fddi", NULL },
988     { PCI_CLASS_NETWORK_ATM, "atm", NULL },
989     { PCI_CLASS_NETWORK_ISDN, "isdn", NULL },
990     { PCI_CLASS_NETWORK_WORLDFIP, "worldfip", NULL },
991     { PCI_CLASS_NETWORK_PICMG214, "picmg", NULL },
992     { 0xFF, NULL, NULL },
993 };
994 
995 static const PCISubClass displ_subclass[] = {
996     { PCI_CLASS_DISPLAY_VGA, "vga", NULL },
997     { PCI_CLASS_DISPLAY_XGA, "xga", NULL },
998     { PCI_CLASS_DISPLAY_3D, "3d-controller", NULL },
999     { 0xFF, NULL, NULL },
1000 };
1001 
1002 static const PCISubClass media_subclass[] = {
1003     { PCI_CLASS_MULTIMEDIA_VIDEO, "video", NULL },
1004     { PCI_CLASS_MULTIMEDIA_AUDIO, "sound", NULL },
1005     { PCI_CLASS_MULTIMEDIA_PHONE, "telephony", NULL },
1006     { 0xFF, NULL, NULL },
1007 };
1008 
1009 static const PCISubClass mem_subclass[] = {
1010     { PCI_CLASS_MEMORY_RAM, "memory", NULL },
1011     { PCI_CLASS_MEMORY_FLASH, "flash", NULL },
1012     { 0xFF, NULL, NULL },
1013 };
1014 
1015 static const PCISubClass bridg_subclass[] = {
1016     { PCI_CLASS_BRIDGE_HOST, "host", NULL },
1017     { PCI_CLASS_BRIDGE_ISA, "isa", NULL },
1018     { PCI_CLASS_BRIDGE_EISA, "eisa", NULL },
1019     { PCI_CLASS_BRIDGE_MC, "mca", NULL },
1020     { PCI_CLASS_BRIDGE_PCI, "pci", NULL },
1021     { PCI_CLASS_BRIDGE_PCMCIA, "pcmcia", NULL },
1022     { PCI_CLASS_BRIDGE_NUBUS, "nubus", NULL },
1023     { PCI_CLASS_BRIDGE_CARDBUS, "cardbus", NULL },
1024     { PCI_CLASS_BRIDGE_RACEWAY, "raceway", NULL },
1025     { PCI_CLASS_BRIDGE_PCI_SEMITP, "semi-transparent-pci", NULL },
1026     { PCI_CLASS_BRIDGE_IB_PCI, "infiniband", NULL },
1027     { 0xFF, NULL, NULL },
1028 };
1029 
1030 static const PCISubClass comm_subclass[] = {
1031     { PCI_CLASS_COMMUNICATION_SERIAL, "serial", NULL },
1032     { PCI_CLASS_COMMUNICATION_PARALLEL, "parallel", NULL },
1033     { PCI_CLASS_COMMUNICATION_MULTISERIAL, "multiport-serial", NULL },
1034     { PCI_CLASS_COMMUNICATION_MODEM, "modem", NULL },
1035     { PCI_CLASS_COMMUNICATION_GPIB, "gpib", NULL },
1036     { PCI_CLASS_COMMUNICATION_SC, "smart-card", NULL },
1037     { 0xFF, NULL, NULL, },
1038 };
1039 
1040 static const PCIIFace pic_iface[] = {
1041     { PCI_CLASS_SYSTEM_PIC_IOAPIC, "io-apic" },
1042     { PCI_CLASS_SYSTEM_PIC_IOXAPIC, "io-xapic" },
1043     { 0xFF, NULL },
1044 };
1045 
1046 static const PCISubClass sys_subclass[] = {
1047     { PCI_CLASS_SYSTEM_PIC, "interrupt-controller", pic_iface },
1048     { PCI_CLASS_SYSTEM_DMA, "dma-controller", NULL },
1049     { PCI_CLASS_SYSTEM_TIMER, "timer", NULL },
1050     { PCI_CLASS_SYSTEM_RTC, "rtc", NULL },
1051     { PCI_CLASS_SYSTEM_PCI_HOTPLUG, "hot-plug-controller", NULL },
1052     { PCI_CLASS_SYSTEM_SDHCI, "sd-host-controller", NULL },
1053     { 0xFF, NULL, NULL },
1054 };
1055 
1056 static const PCISubClass inp_subclass[] = {
1057     { PCI_CLASS_INPUT_KEYBOARD, "keyboard", NULL },
1058     { PCI_CLASS_INPUT_PEN, "pen", NULL },
1059     { PCI_CLASS_INPUT_MOUSE, "mouse", NULL },
1060     { PCI_CLASS_INPUT_SCANNER, "scanner", NULL },
1061     { PCI_CLASS_INPUT_GAMEPORT, "gameport", NULL },
1062     { 0xFF, NULL, NULL },
1063 };
1064 
1065 static const PCISubClass dock_subclass[] = {
1066     { PCI_CLASS_DOCKING_GENERIC, "dock", NULL },
1067     { 0xFF, NULL, NULL },
1068 };
1069 
1070 static const PCISubClass cpu_subclass[] = {
1071     { PCI_CLASS_PROCESSOR_PENTIUM, "pentium", NULL },
1072     { PCI_CLASS_PROCESSOR_POWERPC, "powerpc", NULL },
1073     { PCI_CLASS_PROCESSOR_MIPS, "mips", NULL },
1074     { PCI_CLASS_PROCESSOR_CO, "co-processor", NULL },
1075     { 0xFF, NULL, NULL },
1076 };
1077 
1078 static const PCIIFace usb_iface[] = {
1079     { PCI_CLASS_SERIAL_USB_UHCI, "usb-uhci" },
1080     { PCI_CLASS_SERIAL_USB_OHCI, "usb-ohci", },
1081     { PCI_CLASS_SERIAL_USB_EHCI, "usb-ehci" },
1082     { PCI_CLASS_SERIAL_USB_XHCI, "usb-xhci" },
1083     { PCI_CLASS_SERIAL_USB_UNKNOWN, "usb-unknown" },
1084     { PCI_CLASS_SERIAL_USB_DEVICE, "usb-device" },
1085     { 0xFF, NULL },
1086 };
1087 
1088 static const PCISubClass ser_subclass[] = {
1089     { PCI_CLASS_SERIAL_FIREWIRE, "firewire", NULL },
1090     { PCI_CLASS_SERIAL_ACCESS, "access-bus", NULL },
1091     { PCI_CLASS_SERIAL_SSA, "ssa", NULL },
1092     { PCI_CLASS_SERIAL_USB, "usb", usb_iface },
1093     { PCI_CLASS_SERIAL_FIBER, "fibre-channel", NULL },
1094     { PCI_CLASS_SERIAL_SMBUS, "smb", NULL },
1095     { PCI_CLASS_SERIAL_IB, "infiniband", NULL },
1096     { PCI_CLASS_SERIAL_IPMI, "ipmi", NULL },
1097     { PCI_CLASS_SERIAL_SERCOS, "sercos", NULL },
1098     { PCI_CLASS_SERIAL_CANBUS, "canbus", NULL },
1099     { 0xFF, NULL, NULL },
1100 };
1101 
1102 static const PCISubClass wrl_subclass[] = {
1103     { PCI_CLASS_WIRELESS_IRDA, "irda", NULL },
1104     { PCI_CLASS_WIRELESS_CIR, "consumer-ir", NULL },
1105     { PCI_CLASS_WIRELESS_RF_CONTROLLER, "rf-controller", NULL },
1106     { PCI_CLASS_WIRELESS_BLUETOOTH, "bluetooth", NULL },
1107     { PCI_CLASS_WIRELESS_BROADBAND, "broadband", NULL },
1108     { 0xFF, NULL, NULL },
1109 };
1110 
1111 static const PCISubClass sat_subclass[] = {
1112     { PCI_CLASS_SATELLITE_TV, "satellite-tv", NULL },
1113     { PCI_CLASS_SATELLITE_AUDIO, "satellite-audio", NULL },
1114     { PCI_CLASS_SATELLITE_VOICE, "satellite-voice", NULL },
1115     { PCI_CLASS_SATELLITE_DATA, "satellite-data", NULL },
1116     { 0xFF, NULL, NULL },
1117 };
1118 
1119 static const PCISubClass crypt_subclass[] = {
1120     { PCI_CLASS_CRYPT_NETWORK, "network-encryption", NULL },
1121     { PCI_CLASS_CRYPT_ENTERTAINMENT,
1122       "entertainment-encryption", NULL },
1123     { 0xFF, NULL, NULL },
1124 };
1125 
1126 static const PCISubClass spc_subclass[] = {
1127     { PCI_CLASS_SP_DPIO, "dpio", NULL },
1128     { PCI_CLASS_SP_PERF, "counter", NULL },
1129     { PCI_CLASS_SP_SYNCH, "measurement", NULL },
1130     { PCI_CLASS_SP_MANAGEMENT, "management-card", NULL },
1131     { 0xFF, NULL, NULL },
1132 };
1133 
1134 static const PCIClass pci_classes[] = {
1135     { "legacy-device", undef_subclass },
1136     { "mass-storage",  mass_subclass },
1137     { "network", net_subclass },
1138     { "display", displ_subclass, },
1139     { "multimedia-device", media_subclass },
1140     { "memory-controller", mem_subclass },
1141     { "unknown-bridge", bridg_subclass },
1142     { "communication-controller", comm_subclass},
1143     { "system-peripheral", sys_subclass },
1144     { "input-controller", inp_subclass },
1145     { "docking-station", dock_subclass },
1146     { "cpu", cpu_subclass },
1147     { "serial-bus", ser_subclass },
1148     { "wireless-controller", wrl_subclass },
1149     { "intelligent-io", NULL },
1150     { "satellite-device", sat_subclass },
1151     { "encryption", crypt_subclass },
1152     { "data-processing-controller", spc_subclass },
1153 };
1154 
1155 static const char *dt_name_from_class(uint8_t class, uint8_t subclass,
1156                                       uint8_t iface)
1157 {
1158     const PCIClass *pclass;
1159     const PCISubClass *psubclass;
1160     const PCIIFace *piface;
1161     const char *name;
1162 
1163     if (class >= ARRAY_SIZE(pci_classes)) {
1164         return "pci";
1165     }
1166 
1167     pclass = pci_classes + class;
1168     name = pclass->name;
1169 
1170     if (pclass->subc == NULL) {
1171         return name;
1172     }
1173 
1174     psubclass = pclass->subc;
1175     while ((psubclass->subclass & 0xff) != 0xff) {
1176         if ((psubclass->subclass & 0xff) == subclass) {
1177             name = psubclass->name;
1178             break;
1179         }
1180         psubclass++;
1181     }
1182 
1183     piface = psubclass->iface;
1184     if (piface == NULL) {
1185         return name;
1186     }
1187     while ((piface->iface & 0xff) != 0xff) {
1188         if ((piface->iface & 0xff) == iface) {
1189             name = piface->name;
1190             break;
1191         }
1192         piface++;
1193     }
1194 
1195     return name;
1196 }
1197 
1198 /*
1199  * DRC helper functions
1200  */
1201 
1202 static uint32_t drc_id_from_devfn(SpaprPhbState *phb,
1203                                   uint8_t chassis, int32_t devfn)
1204 {
1205     return (phb->index << 16) | (chassis << 8) | devfn;
1206 }
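/*
 * Example: for a PHB with index 1, a device on the root bus (chassis 0)
 * at slot 5, function 0 (devfn 0x28) gets DRC id
 * (1 << 16) | (0 << 8) | 0x28 == 0x10028.
 */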
1207 
1208 static SpaprDrc *drc_from_devfn(SpaprPhbState *phb,
1209                                 uint8_t chassis, int32_t devfn)
1210 {
1211     return spapr_drc_by_id(TYPE_SPAPR_DRC_PCI,
1212                            drc_id_from_devfn(phb, chassis, devfn));
1213 }
1214 
1215 static uint8_t chassis_from_bus(PCIBus *bus)
1216 {
1217     if (pci_bus_is_root(bus)) {
1218         return 0;
1219     } else {
1220         PCIDevice *bridge = pci_bridge_get_device(bus);
1221 
1222         return object_property_get_uint(OBJECT(bridge), "chassis_nr",
1223                                         &error_abort);
1224     }
1225 }
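/*
 * Devices on the PHB's root bus use chassis 0; devices behind a PCI
 * bridge use that bridge's "chassis_nr" property (e.g. 4 for a bridge
 * created with -device pci-bridge,chassis_nr=4).
 */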
1226 
1227 static SpaprDrc *drc_from_dev(SpaprPhbState *phb, PCIDevice *dev)
1228 {
1229     uint8_t chassis = chassis_from_bus(pci_get_bus(dev));
1230 
1231     return drc_from_devfn(phb, chassis, dev->devfn);
1232 }
1233 
1234 static void add_drcs(SpaprPhbState *phb, PCIBus *bus)
1235 {
1236     Object *owner;
1237     int i;
1238     uint8_t chassis;
1239 
1240     chassis = chassis_from_bus(bus);
1241 
1242     if (pci_bus_is_root(bus)) {
1243         owner = OBJECT(phb);
1244     } else {
1245         owner = OBJECT(pci_bridge_get_device(bus));
1246     }
1247 
1248     for (i = 0; i < PCI_SLOT_MAX * PCI_FUNC_MAX; i++) {
1249         spapr_dr_connector_new(owner, TYPE_SPAPR_DRC_PCI,
1250                                drc_id_from_devfn(phb, chassis, i));
1251     }
1252 }
1253 
1254 static void remove_drcs(SpaprPhbState *phb, PCIBus *bus)
1255 {
1256     int i;
1257     uint8_t chassis;
1258 
1259     chassis = chassis_from_bus(bus);
1260 
1261     for (i = PCI_SLOT_MAX * PCI_FUNC_MAX - 1; i >= 0; i--) {
1262         SpaprDrc *drc = drc_from_devfn(phb, chassis, i);
1263 
1264         if (drc) {
1265             object_unparent(OBJECT(drc));
1266         }
1267     }
1268 }
1269 
1270 typedef struct PciWalkFdt {
1271     void *fdt;
1272     int offset;
1273     SpaprPhbState *sphb;
1274     int err;
1275 } PciWalkFdt;
1276 
1277 static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
1278                                void *fdt, int parent_offset);
1279 
1280 static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev,
1281                                    void *opaque)
1282 {
1283     PciWalkFdt *p = opaque;
1284     int err;
1285 
1286     if (p->err || !pdev->enabled) {
1287         return;
1288     }
1289 
1290     err = spapr_dt_pci_device(p->sphb, pdev, p->fdt, p->offset);
1291     if (err < 0) {
1292         p->err = err;
1293     }
1294 }
1295 
1296 /* Augment PCI device node with bridge specific information */
1297 static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus,
1298                                void *fdt, int offset)
1299 {
1300     Object *owner;
1301     PciWalkFdt cbinfo = {
1302         .fdt = fdt,
1303         .offset = offset,
1304         .sphb = sphb,
1305         .err = 0,
1306     };
1307     int ret;
1308 
1309     _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
1310                           RESOURCE_CELLS_ADDRESS));
1311     _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
1312                           RESOURCE_CELLS_SIZE));
1313 
1314     assert(bus);
1315     pci_for_each_device_under_bus_reverse(bus, spapr_dt_pci_device_cb, &cbinfo);
1316     if (cbinfo.err) {
1317         return cbinfo.err;
1318     }
1319 
1320     if (pci_bus_is_root(bus)) {
1321         owner = OBJECT(sphb);
1322     } else {
1323         owner = OBJECT(pci_bridge_get_device(bus));
1324     }
1325 
1326     ret = spapr_dt_drc(fdt, offset, owner,
1327                        SPAPR_DR_CONNECTOR_TYPE_PCI);
1328     if (ret) {
1329         return ret;
1330     }
1331 
1332     return offset;
1333 }
1334 
1335 char *spapr_pci_fw_dev_name(PCIDevice *dev)
1336 {
1337     const gchar *basename;
1338     int slot = PCI_SLOT(dev->devfn);
1339     int func = PCI_FUNC(dev->devfn);
1340     uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3);
1341 
1342     basename = dt_name_from_class((ccode >> 16) & 0xff, (ccode >> 8) & 0xff,
1343                                   ccode & 0xff);
1344 
1345     if (func != 0) {
1346         return g_strdup_printf("%s@%x,%x", basename, slot, func);
1347     } else {
1348         return g_strdup_printf("%s@%x", basename, slot);
1349     }
1350 }
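/*
 * Example: an Ethernet-class function (class 0x02, subclass 0x00) in
 * slot 2, function 0 is named "ethernet@2"; function 1 of the same slot
 * would be "ethernet@2,1".
 */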
1351 
1352 /* create OF node for pci device and required OF DT properties */
1353 static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
1354                                void *fdt, int parent_offset)
1355 {
1356     int offset;
1357     g_autofree gchar *nodename = spapr_pci_fw_dev_name(dev);
1358     ResourceProps rp;
1359     SpaprDrc *drc = drc_from_dev(sphb, dev);
1360     uint32_t vendor_id = pci_default_read_config(dev, PCI_VENDOR_ID, 2);
1361     uint32_t device_id = pci_default_read_config(dev, PCI_DEVICE_ID, 2);
1362     uint32_t revision_id = pci_default_read_config(dev, PCI_REVISION_ID, 1);
1363     uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3);
1364     uint32_t irq_pin = pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1);
1365     uint32_t subsystem_id = pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2);
1366     uint32_t subsystem_vendor_id =
1367         pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2);
1368     uint32_t cache_line_size =
1369         pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1);
1370     uint32_t pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
1371     gchar *loc_code;
1372 
1373     _FDT(offset = fdt_add_subnode(fdt, parent_offset, nodename));
1374 
1375     /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
1376     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id", vendor_id));
1377     _FDT(fdt_setprop_cell(fdt, offset, "device-id", device_id));
1378     _FDT(fdt_setprop_cell(fdt, offset, "revision-id", revision_id));
1379 
1380     _FDT(fdt_setprop_cell(fdt, offset, "class-code", ccode));
1381     if (irq_pin) {
1382         _FDT(fdt_setprop_cell(fdt, offset, "interrupts", irq_pin));
1383     }
1384 
1385     if (subsystem_id) {
1386         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id", subsystem_id));
1387     }
1388 
1389     if (subsystem_vendor_id) {
1390         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
1391                               subsystem_vendor_id));
1392     }
1393 
1394     _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size", cache_line_size));
1395 
1396 
1397     /* the following fdt cells are masked off the pci status register */
1398     _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
1399                           PCI_STATUS_DEVSEL_MASK & pci_status));
1400 
1401     if (pci_status & PCI_STATUS_FAST_BACK) {
1402         _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
1403     }
1404     if (pci_status & PCI_STATUS_66MHZ) {
1405         _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
1406     }
1407     if (pci_status & PCI_STATUS_UDF) {
1408         _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
1409     }
1410 
1411     loc_code = spapr_phb_get_loc_code(sphb, dev);
1412     _FDT(fdt_setprop_string(fdt, offset, "ibm,loc-code", loc_code));
1413     g_free(loc_code);
1414 
1415     if (drc) {
1416         _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index",
1417                               spapr_drc_index(drc)));
1418     }
1419 
1420     if (msi_present(dev)) {
1421         uint32_t max_msi = msi_nr_vectors_allocated(dev);
1422         if (max_msi) {
1423             _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
1424         }
1425     }
1426     if (msix_present(dev)) {
1427         uint32_t max_msix = dev->msix_entries_nr;
1428         if (max_msix) {
1429             _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
1430         }
1431     }
1432 
1433     populate_resource_props(dev, &rp);
1434     _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
1435 
1436     if (sphb->pcie_ecs && pci_is_express(dev)) {
1437         _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
1438     }
1439 
1440     if (!IS_PCI_BRIDGE(dev)) {
1441         /* Properties only for non-bridges */
1442         uint32_t min_grant = pci_default_read_config(dev, PCI_MIN_GNT, 1);
1443         uint32_t max_latency = pci_default_read_config(dev, PCI_MAX_LAT, 1);
1444         _FDT(fdt_setprop_cell(fdt, offset, "min-grant", min_grant));
1445         _FDT(fdt_setprop_cell(fdt, offset, "max-latency", max_latency));
1446         return offset;
1447     } else {
1448         PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
1449 
1450         return spapr_dt_pci_bus(sphb, sec_bus, fdt, offset);
1451     }
1452 }
1453 
1454 /* Callback to be called during DRC release. */
1455 void spapr_phb_remove_pci_device_cb(DeviceState *dev)
1456 {
1457     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
1458 
1459     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
1460     object_unparent(OBJECT(dev));
1461 }
1462 
1463 int spapr_pci_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
1464                           void *fdt, int *fdt_start_offset, Error **errp)
1465 {
1466     HotplugHandler *plug_handler = qdev_get_hotplug_handler(drc->dev);
1467     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(plug_handler);
1468     PCIDevice *pdev = PCI_DEVICE(drc->dev);
1469 
1470     *fdt_start_offset = spapr_dt_pci_device(sphb, pdev, fdt, 0);
1471     return 0;
1472 }
1473 
1474 static void spapr_pci_bridge_plug(SpaprPhbState *phb,
1475                                   PCIBridge *bridge)
1476 {
1477     PCIBus *bus = pci_bridge_get_sec_bus(bridge);
1478 
1479     add_drcs(phb, bus);
1480 }
1481 
1482 /* Returns non-zero if the value of "chassis_nr" is already in use */
1483 static int check_chassis_nr(Object *obj, void *opaque)
1484 {
1485     int new_chassis_nr =
1486         object_property_get_uint(opaque, "chassis_nr", &error_abort);
1487     int chassis_nr =
1488         object_property_get_uint(obj, "chassis_nr", NULL);
1489 
1490     if (!object_dynamic_cast(obj, TYPE_PCI_BRIDGE)) {
1491         return 0;
1492     }
1493 
1494     /* Skip unsupported bridge types */
1495     if (!chassis_nr) {
1496         return 0;
1497     }
1498 
1499     /* Skip self */
1500     if (obj == opaque) {
1501         return 0;
1502     }
1503 
1504     return chassis_nr == new_chassis_nr;
1505 }
1506 
1507 static bool bridge_has_valid_chassis_nr(Object *bridge, Error **errp)
1508 {
1509     int chassis_nr =
1510         object_property_get_uint(bridge, "chassis_nr", NULL);
1511 
1512     /*
1513      * slotid_cap_init() already ensures that "chassis_nr" isn't null for
1514      * standard PCI bridges, so this really tells if "chassis_nr" is present
1515      * or not.
1516      */
1517     if (!chassis_nr) {
1518         error_setg(errp, "PCI Bridge lacks a \"chassis_nr\" property");
1519         error_append_hint(errp, "Try -device pci-bridge instead.\n");
1520         return false;
1521     }
1522 
1523     /* We want unique values for "chassis_nr" */
1524     if (object_child_foreach_recursive(object_get_root(), check_chassis_nr,
1525                                        bridge)) {
1526         error_setg(errp, "Bridge chassis %d already in use", chassis_nr);
1527         return false;
1528     }
1529 
1530     return true;
1531 }
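/*
 * Illustrative example (names and values are arbitrary): a bridge that can
 * host hot plugged devices under a sPAPR PHB would typically be created as
 *
 *   -device pci-bridge,chassis_nr=1,id=bridge0
 *
 * with a "chassis_nr" that is unique among all bridges of the machine.
 */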
1532 
1533 static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
1534                                DeviceState *plugged_dev, Error **errp)
1535 {
1536     SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1537     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1538     SpaprDrc *drc = drc_from_dev(phb, pdev);
1539     PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1540     uint32_t slotnr = PCI_SLOT(pdev->devfn);
1541 
1542     if (IS_PCI_BRIDGE(plugged_dev)) {
1543         if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) {
1544             return;
1545         }
1546     }
1547 
1548     /* Following the QEMU convention used for PCIe multifunction
1549      * hotplug, we do not allow functions to be hotplugged to a
1550      * slot that already has function 0 present
1551      */
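    /*
     * For instance (hypothetical addresses): once function 0 of slot 3 is
     * occupied, a later "device_add ...,addr=0x3.0x1" is rejected here.
     * Non-zero functions must be plugged first; plugging function 0 last
     * is what triggers the hotplug notification for the whole slot (see
     * spapr_pci_plug() below).
     */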
1552     if (plugged_dev->hotplugged &&
1553         !pci_is_vf(pdev) &&
1554         bus->devices[PCI_DEVFN(slotnr, 0)] &&
1555         PCI_FUNC(pdev->devfn) != 0) {
1556         error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1557                    " additional functions can no longer be exposed to guest.",
1558                    slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name);
1559     }
1560 
1561     if (drc && drc->dev) {
1562         error_setg(errp, "PCI: slot %d already occupied by %s", slotnr,
1563                    pci_get_function_0(PCI_DEVICE(drc->dev))->name);
1564         return;
1565     }
1566 }
1567 
1568 static void spapr_pci_plug(HotplugHandler *plug_handler,
1569                            DeviceState *plugged_dev, Error **errp)
1570 {
1571     SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1572     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1573     SpaprDrc *drc = drc_from_dev(phb, pdev);
1574     uint32_t slotnr = PCI_SLOT(pdev->devfn);
1575 
1576     /*
1577      * If DR or the PCI device is disabled we don't need to do anything
1578      * in the case of hotplug or coldplug callbacks.
1579      */
1580     if (!pdev->enabled) {
1581         return;
1582     }
1583 
1584     g_assert(drc);
1585 
1586     if (IS_PCI_BRIDGE(plugged_dev)) {
1587         spapr_pci_bridge_plug(phb, PCI_BRIDGE(plugged_dev));
1588     }
1589 
1590     /* spapr_pci_pre_plug() already checked the DRC is attachable */
1591     spapr_drc_attach(drc, DEVICE(pdev));
1592 
1593     /* If this is function 0, signal hotplug for all the device functions.
1594      * Otherwise defer sending the hotplug event.
1595      */
1596     if (!spapr_drc_hotplugged(plugged_dev)) {
1597         spapr_drc_reset(drc);
1598     } else if (PCI_FUNC(pdev->devfn) == 0) {
1599         int i;
1600         uint8_t chassis = chassis_from_bus(pci_get_bus(pdev));
1601 
1602         for (i = 0; i < 8; i++) {
1603             SpaprDrc *func_drc;
1604             SpaprDrcClass *func_drck;
1605             SpaprDREntitySense state;
1606 
1607             func_drc = drc_from_devfn(phb, chassis, PCI_DEVFN(slotnr, i));
1608             func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1609             state = func_drck->dr_entity_sense(func_drc);
1610 
1611             if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1612                 spapr_hotplug_req_add_by_index(func_drc);
1613             }
1614         }
1615     }
1616 }
1617 
1618 static void spapr_pci_bridge_unplug(SpaprPhbState *phb,
1619                                     PCIBridge *bridge)
1620 {
1621     PCIBus *bus = pci_bridge_get_sec_bus(bridge);
1622 
1623     remove_drcs(phb, bus);
1624 }
1625 
1626 static void spapr_pci_unplug(HotplugHandler *plug_handler,
1627                              DeviceState *plugged_dev, Error **errp)
1628 {
1629     SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1630 
1631     /* Some guest versions do not wait for completion of device
1632      * cleanup (generally done asynchronously by the kernel) before
1633      * signaling to QEMU that the device can safely be removed, but
1634      * instead sleep for some 'safe' period of time. Unfortunately, on
1635      * a busy host this sleep isn't guaranteed to be long enough,
1636      * resulting in bad things like IRQ lines being left asserted
1637      * during final device removal. To deal with this we call reset
1638      * just prior to finalizing the device, which will put the device
1639      * back into an 'idle' state, as the device cleanup code expects.
1640      */
1641     pci_device_reset(PCI_DEVICE(plugged_dev));
1642 
1643     if (IS_PCI_BRIDGE(plugged_dev)) {
1644         spapr_pci_bridge_unplug(phb, PCI_BRIDGE(plugged_dev));
1645         return;
1646     }
1647 
1648     qdev_unrealize(plugged_dev);
1649 }
1650 
1651 static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
1652                                      DeviceState *plugged_dev, Error **errp)
1653 {
1654     SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1655     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1656     SpaprDrc *drc = drc_from_dev(phb, pdev);
1657 
1658     g_assert(drc);
1659 
1660     if (!drc->dev) {
1661         return;
1662     }
1663 
1664     g_assert(drc->dev == plugged_dev);
1665 
1666     if (!spapr_drc_unplug_requested(drc)) {
1667         uint32_t slotnr = PCI_SLOT(pdev->devfn);
1668         SpaprDrc *func_drc;
1669         SpaprDrcClass *func_drck;
1670         SpaprDREntitySense state;
1671         int i;
1672         uint8_t chassis = chassis_from_bus(pci_get_bus(pdev));
1673 
1674         if (IS_PCI_BRIDGE(plugged_dev)) {
1675             error_setg(errp, "PCI: Hot unplug of PCI bridges not supported");
1676             return;
1677         }
1678         if (object_property_get_uint(OBJECT(pdev), "nvlink2-tgt", NULL)) {
1679             error_setg(errp, "PCI: Cannot unplug NVLink2 devices");
1680             return;
1681         }
1682 
1683         /* ensure any other present functions are pending unplug */
1684         if (PCI_FUNC(pdev->devfn) == 0) {
1685             for (i = 1; i < 8; i++) {
1686                 func_drc = drc_from_devfn(phb, chassis, PCI_DEVFN(slotnr, i));
1687                 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1688                 state = func_drck->dr_entity_sense(func_drc);
1689                 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT
1690                     && !spapr_drc_unplug_requested(func_drc)) {
1691                     /*
1692                      * Attempting to remove function 0 of a multifunction
1693                      * device will cascade into removing all child
1694                      * functions, even if their unplug wasn't requested
1695                      * beforehand.
1696                      */
1697                     spapr_drc_unplug_request(func_drc);
1698                 }
1699             }
1700         }
1701 
1702         spapr_drc_unplug_request(drc);
1703 
1704         /* if this isn't func 0, defer unplug event. otherwise signal removal
1705          * for all present functions
1706          */
1707         if (PCI_FUNC(pdev->devfn) == 0) {
1708             for (i = 7; i >= 0; i--) {
1709                 func_drc = drc_from_devfn(phb, chassis, PCI_DEVFN(slotnr, i));
1710                 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1711                 state = func_drck->dr_entity_sense(func_drc);
1712                 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1713                     spapr_hotplug_req_remove_by_index(func_drc);
1714                 }
1715             }
1716         }
1717     } else {
1718         error_setg(errp,
1719                    "PCI device unplug already in progress for device %s",
1720                    drc->dev->id);
1721     }
1722 }
1723 
1724 static void spapr_phb_finalizefn(Object *obj)
1725 {
1726     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(obj);
1727 
1728     g_free(sphb->dtbusname);
1729     sphb->dtbusname = NULL;
1730 }
1731 
1732 static void spapr_phb_unrealize(DeviceState *dev)
1733 {
1734     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1735     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1736     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1737     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(phb);
1738     SpaprTceTable *tcet;
1739     int i;
1740     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
1741 
1742     if (sphb->msi) {
1743         g_hash_table_unref(sphb->msi);
1744         sphb->msi = NULL;
1745     }
1746 
1747     /*
1748      * Remove IO/MMIO subregions and aliases, rest should get cleaned
1749      * via PHB's unrealize->object_finalize
1750      */
1751     for (i = windows_supported - 1; i >= 0; i--) {
1752         tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]);
1753         if (tcet) {
1754             memory_region_del_subregion(&sphb->iommu_root,
1755                                         spapr_tce_get_iommu(tcet));
1756         }
1757     }
1758 
1759     remove_drcs(sphb, phb->bus);
1760 
1761     for (i = PCI_NUM_PINS - 1; i >= 0; i--) {
1762         if (sphb->lsi_table[i].irq) {
1763             spapr_irq_free(spapr, sphb->lsi_table[i].irq, 1);
1764             sphb->lsi_table[i].irq = 0;
1765         }
1766     }
1767 
1768     QLIST_REMOVE(sphb, list);
1769 
1770     memory_region_del_subregion(&sphb->iommu_root, &sphb->msiwindow);
1771 
1772     /*
1773      * An attached PCI device may have memory listeners, eg. VFIO PCI. We have
1774      * unmapped all sections. Remove the listeners now, before destroying the
1775      * address space.
1776      */
1777     address_space_remove_listeners(&sphb->iommu_as);
1778     address_space_destroy(&sphb->iommu_as);
1779 
1780     qbus_set_hotplug_handler(BUS(phb->bus), NULL);
1781     pci_unregister_root_bus(phb->bus);
1782 
1783     memory_region_del_subregion(get_system_memory(), &sphb->iowindow);
1784     if (sphb->mem64_win_pciaddr != (hwaddr)-1) {
1785         memory_region_del_subregion(get_system_memory(), &sphb->mem64window);
1786     }
1787     memory_region_del_subregion(get_system_memory(), &sphb->mem32window);
1788 }
1789 
1790 static void spapr_phb_destroy_msi(gpointer opaque)
1791 {
1792     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1793     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
1794     SpaprPciMsi *msi = opaque;
1795 
1796     if (!smc->legacy_irq_allocation) {
1797         spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
1798     }
1799     spapr_irq_free(spapr, msi->first_irq, msi->num);
1800     g_free(msi);
1801 }
1802 
1803 static void spapr_phb_realize(DeviceState *dev, Error **errp)
1804 {
1805     ERRP_GUARD();
1806     /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user
1807      * tries to add a sPAPR PHB to a non-pseries machine.
1808      */
1809     SpaprMachineState *spapr =
1810         (SpaprMachineState *) object_dynamic_cast(qdev_get_machine(),
1811                                                   TYPE_SPAPR_MACHINE);
1812     SpaprMachineClass *smc = spapr ? SPAPR_MACHINE_GET_CLASS(spapr) : NULL;
1813     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1814     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(sbd);
1815     PCIHostState *phb = PCI_HOST_BRIDGE(sbd);
1816     MachineState *ms = MACHINE(spapr);
1817     char *namebuf;
1818     int i;
1819     PCIBus *bus;
1820     uint64_t msi_window_size = 4096;
1821     SpaprTceTable *tcet;
1822     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
1823 
1824     if (!spapr) {
1825         error_setg(errp, TYPE_SPAPR_PCI_HOST_BRIDGE " needs a pseries machine");
1826         return;
1827     }
1828 
1829     assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */
1830 
1831     if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
1832         error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
1833                    " is too big (max 2 GiB)", sphb->mem_win_size);
1834         return;
1835     }
1836 
1837     /* 64-bit window defaults to identity mapping */
1838     sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
1839 
1840     if (spapr_pci_find_phb(spapr, sphb->buid)) {
1841         SpaprPhbState *s;
1842 
1843         error_setg(errp, "PCI host bridges must have unique indexes");
1844         error_append_hint(errp, "The following indexes are already in use:");
1845         QLIST_FOREACH(s, &spapr->phbs, list) {
1846             error_append_hint(errp, " %d", s->index);
1847         }
1848         error_append_hint(errp, "\nTry another value for the index property\n");
1849         return;
1850     }
1851 
1852     if (sphb->numa_node != -1 &&
1853         (sphb->numa_node >= MAX_NODES ||
1854          !ms->numa_state->nodes[sphb->numa_node].present)) {
1855         error_setg(errp, "Invalid NUMA node ID for PCI host bridge");
1856         return;
1857     }
1858 
1859     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1860 
1861     /* Initialize memory regions */
1862     namebuf = g_strdup_printf("%s.mmio", sphb->dtbusname);
1863     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1864     g_free(namebuf);
1865 
1866     namebuf = g_strdup_printf("%s.mmio32-alias", sphb->dtbusname);
1867     memory_region_init_alias(&sphb->mem32window, OBJECT(sphb),
1868                              namebuf, &sphb->memspace,
1869                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1870     g_free(namebuf);
1871     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1872                                 &sphb->mem32window);
1873 
1874     if (sphb->mem64_win_size != 0) {
1875         namebuf = g_strdup_printf("%s.mmio64-alias", sphb->dtbusname);
1876         memory_region_init_alias(&sphb->mem64window, OBJECT(sphb),
1877                                  namebuf, &sphb->memspace,
1878                                  sphb->mem64_win_pciaddr, sphb->mem64_win_size);
1879         g_free(namebuf);
1880 
1881         memory_region_add_subregion(get_system_memory(),
1882                                     sphb->mem64_win_addr,
1883                                     &sphb->mem64window);
1884     }
1885 
1886     /* Initialize IO regions */
1887     namebuf = g_strdup_printf("%s.io", sphb->dtbusname);
1888     memory_region_init(&sphb->iospace, OBJECT(sphb),
1889                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
1890     g_free(namebuf);
1891 
1892     namebuf = g_strdup_printf("%s.io-alias", sphb->dtbusname);
1893     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1894                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1895     g_free(namebuf);
1896     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1897                                 &sphb->iowindow);
1898 
1899     bus = pci_register_root_bus(dev, NULL,
1900                                 pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb,
1901                                 &sphb->memspace, &sphb->iospace,
1902                                 PCI_DEVFN(0, 0), PCI_NUM_PINS,
1903                                 TYPE_PCI_BUS);
1904 
1905     /*
1906      * Despite resembling a vanilla PCI bus in most ways, the PAPR
1907      * para-virtualized PCI bus *does* permit PCI-E extended config
1908      * space access
1909      */
1910     if (sphb->pcie_ecs) {
1911         bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
1912     }
1913     phb->bus = bus;
1914     qbus_set_hotplug_handler(BUS(phb->bus), OBJECT(sphb));
1915 
1916     /*
1917      * Initialize PHB address space.
1918      * By default there will be at least one subregion for the default
1919      * 32-bit DMA window.
1920      * Later the guest might create another DMA window, which will
1921      * become another memory subregion.
1922      */
1923     namebuf = g_strdup_printf("%s.iommu-root", sphb->dtbusname);
1924     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1925                        namebuf, UINT64_MAX);
1926     g_free(namebuf);
1927     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1928                        sphb->dtbusname);
1929 
1930     /*
1931      * As MSI/MSIX interrupts are triggered by writes to the MSI/MSIX
1932      * vectors, we need to allocate some memory to catch those writes
1933      * coming from msi_notify()/msix_notify().
1934      * As MSIMessage:addr is going to be the same and MSIMessage:data
1935      * is going to be a VIRQ number, only 4 bytes of the MSI MR will
1936      * be used.
1937      *
1938      * For KVM we want to ensure that this memory is a full page so that
1939      * our memory slot is of page size granularity.
1940      */
1941     if (kvm_enabled()) {
1942         msi_window_size = qemu_real_host_page_size();
1943     }
1944 
1945     memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,
1946                           "msi", msi_window_size);
1947     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1948                                 &sphb->msiwindow);
1949 
1950     pci_setup_iommu(bus, &spapr_iommu_ops, sphb);
1951 
1952     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1953 
1954     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1955 
1956     /* Initialize the LSI table */
1957     for (i = 0; i < PCI_NUM_PINS; i++) {
1958         int irq = SPAPR_IRQ_PCI_LSI + sphb->index * PCI_NUM_PINS + i;
1959 
1960         if (smc->legacy_irq_allocation) {
1961             irq = spapr_irq_findone(spapr, errp);
1962             if (irq < 0) {
1963                 error_prepend(errp, "can't allocate LSIs: ");
1964                 /*
1965                  * Older machines will never support PHB hotplug, ie, this is an
1966                  * Older machines will never support PHB hotplug, i.e. this is an
1967                  */
1968                 return;
1969             }
1970         }
1971 
1972         if (spapr_irq_claim(spapr, irq, true, errp) < 0) {
1973             error_prepend(errp, "can't allocate LSIs: ");
1974             goto unrealize;
1975         }
1976 
1977         sphb->lsi_table[i].irq = irq;
1978     }
1979 
1980     /* allocate connectors for child PCI devices */
1981     add_drcs(sphb, phb->bus);
1982 
1983     /* DMA setup */
1984     for (i = 0; i < windows_supported; ++i) {
1985         tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]);
1986         if (!tcet) {
1987             error_setg(errp, "Creating window#%d failed for %s",
1988                        i, sphb->dtbusname);
1989             goto unrealize;
1990         }
1991         memory_region_add_subregion(&sphb->iommu_root, 0,
1992                                     spapr_tce_get_iommu(tcet));
1993     }
1994 
1995     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free,
1996                                       spapr_phb_destroy_msi);
1997     return;
1998 
1999 unrealize:
2000     spapr_phb_unrealize(dev);
2001 }
2002 
2003 static int spapr_phb_children_reset(Object *child, void *opaque)
2004 {
2005     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
2006 
2007     if (dev) {
2008         device_cold_reset(dev);
2009     }
2010 
2011     return 0;
2012 }
2013 
2014 void spapr_phb_dma_reset(SpaprPhbState *sphb)
2015 {
2016     int i;
2017     SpaprTceTable *tcet;
2018 
2019     for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) {
2020         tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]);
2021 
2022         if (tcet && tcet->nb_table) {
2023             spapr_tce_table_disable(tcet);
2024         }
2025     }
2026 
2027     /* Register default 32bit DMA window */
2028     tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]);
2029     spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr,
2030                            sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT);
2031     tcet->def_win = true;
2032 }
2033 
2034 static void spapr_phb_reset(DeviceState *qdev)
2035 {
2036     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
2037 
2038     spapr_phb_dma_reset(sphb);
2039 
2040     /* Reset the IOMMU state */
2041     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
2042 
2043     if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) {
2044         spapr_phb_vfio_reset(qdev);
2045     }
2046 
2047     g_hash_table_remove_all(sphb->msi);
2048 }
2049 
2050 static const Property spapr_phb_properties[] = {
2051     DEFINE_PROP_UINT32("index", SpaprPhbState, index, -1),
2052     DEFINE_PROP_UINT64("mem_win_size", SpaprPhbState, mem_win_size,
2053                        SPAPR_PCI_MEM32_WIN_SIZE),
2054     DEFINE_PROP_UINT64("mem64_win_size", SpaprPhbState, mem64_win_size,
2055                        SPAPR_PCI_MEM64_WIN_SIZE),
2056     DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size,
2057                        SPAPR_PCI_IO_WIN_SIZE),
2058     /* Default DMA window is 0..1GB */
2059     DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0),
2060     DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000),
2061     DEFINE_PROP_UINT64("dma64_win_addr", SpaprPhbState, dma64_win_addr,
2062                        0x800000000000000ULL),
2063     DEFINE_PROP_BOOL("ddw", SpaprPhbState, ddw_enabled, true),
2064     DEFINE_PROP_UINT64("pgsz", SpaprPhbState, page_size_mask,
2065                        (1ULL << 12) | (1ULL << 16)
2066                        | (1ULL << 21) | (1ULL << 24)),
2067     DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1),
2068     DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
2069                      pcie_ecs, true),
2070     DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState,
2071                      pre_5_1_assoc, false),
2072 };
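/*
 * Illustrative example (values are arbitrary): a second PHB on NUMA node 1
 * with a 4 GiB 64-bit MMIO window could be requested with
 *
 *   -device spapr-pci-host-bridge,index=1,numa_node=1,mem64_win_size=0x100000000
 *
 * using the properties defined above.
 */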
2073 
2074 static const VMStateDescription vmstate_spapr_pci_lsi = {
2075     .name = "spapr_pci/lsi",
2076     .version_id = 1,
2077     .minimum_version_id = 1,
2078     .fields = (const VMStateField[]) {
2079         VMSTATE_UINT32_EQUAL(irq, SpaprPciLsi, NULL),
2080 
2081         VMSTATE_END_OF_LIST()
2082     },
2083 };
2084 
2085 static const VMStateDescription vmstate_spapr_pci_msi = {
2086     .name = "spapr_pci/msi",
2087     .version_id = 1,
2088     .minimum_version_id = 1,
2089     .fields = (const VMStateField []) {
2090         VMSTATE_UINT32(key, SpaprPciMsiMig),
2091         VMSTATE_UINT32(value.first_irq, SpaprPciMsiMig),
2092         VMSTATE_UINT32(value.num, SpaprPciMsiMig),
2093         VMSTATE_END_OF_LIST()
2094     },
2095 };
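/*
 * The per-device MSI configuration lives in the sphb->msi hash table, which
 * VMState cannot migrate directly: pre_save flattens it into the msi_devs
 * array, post_load rebuilds the hash table from that array, and post_save
 * frees the temporary array on the source side.
 */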
2096 
2097 static int spapr_pci_pre_save(void *opaque)
2098 {
2099     SpaprPhbState *sphb = opaque;
2100     GHashTableIter iter;
2101     gpointer key, value;
2102     int i;
2103 
2104     g_free(sphb->msi_devs);
2105     sphb->msi_devs = NULL;
2106     sphb->msi_devs_num = g_hash_table_size(sphb->msi);
2107     if (!sphb->msi_devs_num) {
2108         return 0;
2109     }
2110     sphb->msi_devs = g_new(SpaprPciMsiMig, sphb->msi_devs_num);
2111 
2112     g_hash_table_iter_init(&iter, sphb->msi);
2113     for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
2114         sphb->msi_devs[i].key = *(uint32_t *) key;
2115         sphb->msi_devs[i].value = *(SpaprPciMsi *) value;
2116     }
2117 
2118     return 0;
2119 }
2120 
2121 static int spapr_pci_post_save(void *opaque)
2122 {
2123     SpaprPhbState *sphb = opaque;
2124 
2125     g_free(sphb->msi_devs);
2126     sphb->msi_devs = NULL;
2127     sphb->msi_devs_num = 0;
2128     return 0;
2129 }
2130 
2131 static int spapr_pci_post_load(void *opaque, int version_id)
2132 {
2133     SpaprPhbState *sphb = opaque;
2134     gpointer key, value;
2135     int i;
2136 
2137     for (i = 0; i < sphb->msi_devs_num; ++i) {
2138         key = g_memdup2(&sphb->msi_devs[i].key, sizeof(sphb->msi_devs[i].key));
2139         value = g_memdup2(&sphb->msi_devs[i].value,
2140                           sizeof(sphb->msi_devs[i].value));
2141         g_hash_table_insert(sphb->msi, key, value);
2142     }
2143     g_free(sphb->msi_devs);
2144     sphb->msi_devs = NULL;
2145     sphb->msi_devs_num = 0;
2146 
2147     return 0;
2148 }
2149 
2150 static const VMStateDescription vmstate_spapr_pci = {
2151     .name = "spapr_pci",
2152     .version_id = 2,
2153     .minimum_version_id = 2,
2154     .pre_save = spapr_pci_pre_save,
2155     .post_save = spapr_pci_post_save,
2156     .post_load = spapr_pci_post_load,
2157     .fields = (const VMStateField[]) {
2158         VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL),
2159         VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0,
2160                              vmstate_spapr_pci_lsi, SpaprPciLsi),
2161         VMSTATE_INT32(msi_devs_num, SpaprPhbState),
2162         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, SpaprPhbState, msi_devs_num, 0,
2163                                     vmstate_spapr_pci_msi, SpaprPciMsiMig),
2164         VMSTATE_END_OF_LIST()
2165     },
2166 };
2167 
2168 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
2169                                            PCIBus *rootbus)
2170 {
2171     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
2172 
2173     return sphb->dtbusname;
2174 }
2175 
2176 static void spapr_phb_class_init(ObjectClass *klass, const void *data)
2177 {
2178     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
2179     DeviceClass *dc = DEVICE_CLASS(klass);
2180     HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
2181 
2182     hc->root_bus_path = spapr_phb_root_bus_path;
2183     dc->realize = spapr_phb_realize;
2184     dc->unrealize = spapr_phb_unrealize;
2185     device_class_set_props(dc, spapr_phb_properties);
2186     device_class_set_legacy_reset(dc, spapr_phb_reset);
2187     dc->vmsd = &vmstate_spapr_pci;
2188     /* Supported by TYPE_SPAPR_MACHINE */
2189     dc->user_creatable = true;
2190     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
2191     hp->pre_plug = spapr_pci_pre_plug;
2192     hp->plug = spapr_pci_plug;
2193     hp->unplug = spapr_pci_unplug;
2194     hp->unplug_request = spapr_pci_unplug_request;
2195 }
2196 
2197 static const TypeInfo spapr_phb_info = {
2198     .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
2199     .parent        = TYPE_PCI_HOST_BRIDGE,
2200     .instance_size = sizeof(SpaprPhbState),
2201     .instance_finalize = spapr_phb_finalizefn,
2202     .class_init    = spapr_phb_class_init,
2203     .interfaces    = (const InterfaceInfo[]) {
2204         { TYPE_HOTPLUG_HANDLER },
2205         { }
2206     }
2207 };
2208 
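/*
 * Recursively assign primary/secondary/subordinate bus numbers to every
 * bridge found under the PHB; the "qemu,phb-enumerated" property written
 * below tells firmware that QEMU has already done this enumeration.
 */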
2209 static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
2210                                            void *opaque)
2211 {
2212     unsigned int *bus_no = opaque;
2213     PCIBus *sec_bus = NULL;
2214 
2215     if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
2216          PCI_HEADER_TYPE_BRIDGE)) {
2217         return;
2218     }
2219 
2220     (*bus_no)++;
2221     pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
2222     pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1);
2223     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
2224 
2225     sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
2226     if (!sec_bus) {
2227         return;
2228     }
2229 
2230     pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_enumerate_bridge,
2231                                   bus_no);
2232     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
2233 }
2234 
2235 static void spapr_phb_pci_enumerate(SpaprPhbState *phb)
2236 {
2237     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
2238     unsigned int bus_no = 0;
2239 
2240     pci_for_each_device_under_bus(bus, spapr_phb_pci_enumerate_bridge,
2241                                   &bus_no);
2242 
2243 }
2244 
2245 int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
2246                  uint32_t intc_phandle, void *fdt, int *node_offset)
2247 {
2248     int bus_off, i, j, ret;
2249     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
2250     struct {
2251         uint32_t hi;
2252         uint64_t child;
2253         uint64_t parent;
2254         uint64_t size;
2255     } QEMU_PACKED ranges[] = {
2256         {
2257             cpu_to_be32(b_ss(1)), cpu_to_be64(0),
2258             cpu_to_be64(phb->io_win_addr),
2259             cpu_to_be64(memory_region_size(&phb->iospace)),
2260         },
2261         {
2262             cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
2263             cpu_to_be64(phb->mem_win_addr),
2264             cpu_to_be64(phb->mem_win_size),
2265         },
2266         {
2267             cpu_to_be32(b_ss(3)), cpu_to_be64(phb->mem64_win_pciaddr),
2268             cpu_to_be64(phb->mem64_win_addr),
2269             cpu_to_be64(phb->mem64_win_size),
2270         },
2271     };
2272     const unsigned sizeof_ranges =
2273         (phb->mem64_win_size ? 3 : 2) * sizeof(ranges[0]);
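    /*
     * The "ranges" entries above follow the PCI OF binding: the phys.hi
     * space code selects the I/O window (ss=1), the 32-bit MMIO window
     * (ss=2) and the 64-bit MMIO window (ss=3). sizeof_ranges drops the
     * last entry when no 64-bit window is configured.
     */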
2274     uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
2275     uint32_t interrupt_map_mask[] = {
2276         cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
2277     uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
2278     uint32_t ddw_applicable[] = {
2279         cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW),
2280         cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW),
2281         cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW)
2282     };
2283     uint32_t ddw_extensions[] = {
2284         cpu_to_be32(2),
2285         cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW),
2286         cpu_to_be32(1), /* 1: ibm,query-pe-dma-window 6 outputs, PAPR 2.8 */
2287     };
2288     SpaprTceTable *tcet;
2289     SpaprDrc *drc;
2290 
2291     /* Start populating the FDT */
2292     _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname));
2293     if (node_offset) {
2294         *node_offset = bus_off;
2295     }
2296 
2297     /* Write PHB properties */
2298     _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
2299     _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
2300     _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
2301     _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
2302     _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
2303     _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
2304     _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
2305     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
2306     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi",
2307                           spapr_irq_nr_msis(spapr)));
2308 
2309     /* Dynamic DMA window */
2310     if (phb->ddw_enabled) {
2311         _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable,
2312                          sizeof(ddw_applicable)));
2313         _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions",
2314                          &ddw_extensions, sizeof(ddw_extensions)));
2315     }
2316 
2317     /* Advertise NUMA via ibm,associativity */
2318     if (phb->numa_node != -1) {
2319         spapr_numa_write_associativity_dt(spapr, fdt, bus_off, phb->numa_node);
2320     }
2321 
2322     /* Build the interrupt-map; this must match what is done
2323      * in pci_swizzle_map_irq_fn
2324      */
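    /*
     * Each interrupt-map entry below is 7 cells: a 3-cell child unit
     * address (device/function in the first cell, two zero cells), the
     * child interrupt pin (1..4 for INTA..INTD), the interrupt controller
     * phandle, and a 2-cell parent interrupt specifier filled in by
     * spapr_dt_irq().
     */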
2325     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
2326                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
2327     for (i = 0; i < PCI_SLOT_MAX; i++) {
2328         for (j = 0; j < PCI_NUM_PINS; j++) {
2329             uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
2330             int lsi_num = pci_swizzle(i, j);
2331 
2332             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
2333             irqmap[1] = 0;
2334             irqmap[2] = 0;
2335             irqmap[3] = cpu_to_be32(j+1);
2336             irqmap[4] = cpu_to_be32(intc_phandle);
2337             spapr_dt_irq(&irqmap[5], phb->lsi_table[lsi_num].irq, true);
2338         }
2339     }
2340     /* Write interrupt map */
2341     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
2342                      sizeof(interrupt_map)));
2343 
2344     tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]);
2345     if (!tcet) {
2346         return -1;
2347     }
2348     spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
2349                  tcet->liobn, tcet->bus_offset,
2350                  tcet->nb_table << tcet->page_shift);
2351 
2352     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, phb->index);
2353     if (drc) {
2354         uint32_t drc_index = cpu_to_be32(spapr_drc_index(drc));
2355 
2356         _FDT(fdt_setprop(fdt, bus_off, "ibm,my-drc-index", &drc_index,
2357                          sizeof(drc_index)));
2358     }
2359 
2360     /* Walk the bridges and program the bus numbers */
2361     spapr_phb_pci_enumerate(phb);
2362     _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
2363 
2364     /* Walk the bridge and subordinate buses */
2365     ret = spapr_dt_pci_bus(phb, PCI_HOST_BRIDGE(phb)->bus, fdt, bus_off);
2366     if (ret < 0) {
2367         return ret;
2368     }
2369 
2370     return 0;
2371 }
2372 
2373 void spapr_pci_rtas_init(void)
2374 {
2375     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
2376                         rtas_read_pci_config);
2377     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
2378                         rtas_write_pci_config);
2379     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
2380                         rtas_ibm_read_pci_config);
2381     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
2382                         rtas_ibm_write_pci_config);
2383     if (msi_nonbroken) {
2384         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
2385                             "ibm,query-interrupt-source-number",
2386                             rtas_ibm_query_interrupt_source_number);
2387         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
2388                             rtas_ibm_change_msi);
2389     }
2390 
2391     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
2392                         "ibm,set-eeh-option",
2393                         rtas_ibm_set_eeh_option);
2394     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
2395                         "ibm,get-config-addr-info2",
2396                         rtas_ibm_get_config_addr_info2);
2397     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
2398                         "ibm,read-slot-reset-state2",
2399                         rtas_ibm_read_slot_reset_state2);
2400     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
2401                         "ibm,set-slot-reset",
2402                         rtas_ibm_set_slot_reset);
2403     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
2404                         "ibm,configure-pe",
2405                         rtas_ibm_configure_pe);
2406     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
2407                         "ibm,slot-error-detail",
2408                         rtas_ibm_slot_error_detail);
2409 }
2410 
2411 static void spapr_pci_register_types(void)
2412 {
2413     type_register_static(&spapr_phb_info);
2414 }
2415 
2416 type_init(spapr_pci_register_types)
2417 
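/*
 * qbus_walk_children() callback: set the "big-endian-framebuffer" property
 * on every recognized VGA-class display device to the requested endianness.
 */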
2418 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
2419 {
2420     bool be = *(bool *)opaque;
2421 
2422     if (object_dynamic_cast(OBJECT(dev), "VGA")
2423         || object_dynamic_cast(OBJECT(dev), "secondary-vga")
2424         || object_dynamic_cast(OBJECT(dev), "bochs-display")
2425         || object_dynamic_cast(OBJECT(dev), "virtio-vga")) {
2426         object_property_set_bool(OBJECT(dev), "big-endian-framebuffer", be,
2427                                  &error_abort);
2428     }
2429     return 0;
2430 }
2431 
2432 void spapr_pci_switch_vga(SpaprMachineState *spapr, bool big_endian)
2433 {
2434     SpaprPhbState *sphb;
2435 
2436     /*
2437      * For backward compatibility with existing guests, we switch
2438      * the endianness of the VGA controller when changing the guest
2439      * interrupt mode
2440      */
2441     QLIST_FOREACH(sphb, &spapr->phbs, list) {
2442         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
2443         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
2444                            &big_endian);
2445     }
2446 }
2447