Lines matching "freeze-bridge-controller"

1 // SPDX-License-Identifier: GPL-2.0-or-later
29 #include <asm/pci-bridge.h>
32 #include <asm/ppc-pci.h>
38 #include <asm/pnv-pci.h>
42 #include <misc/cxl-base.h>
66 if (pe->flags & PNV_IODA_PE_DEV) in pe_level_printk()
67 strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); in pe_level_printk()
68 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pe_level_printk()
70 pci_domain_nr(pe->pbus), pe->pbus->number); in pe_level_printk()
72 else if (pe->flags & PNV_IODA_PE_VF) in pe_level_printk()
74 pci_domain_nr(pe->parent_dev->bus), in pe_level_printk()
75 (pe->rid & 0xff00) >> 8, in pe_level_printk()
76 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); in pe_level_printk()
80 level, pfix, pe->pe_number, &vaf); in pe_level_printk()
91 return -EINVAL; in iommu_setup()
120 phb->ioda.pe_array[pe_no].phb = phb; in pnv_ioda_init_pe()
121 phb->ioda.pe_array[pe_no].pe_number = pe_no; in pnv_ioda_init_pe()
122 phb->ioda.pe_array[pe_no].dma_setup_done = false; in pnv_ioda_init_pe()
129 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, in pnv_ioda_init_pe()
132 pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n", in pnv_ioda_init_pe()
133 __func__, rc, phb->hose->global_number, pe_no); in pnv_ioda_init_pe()
135 return &phb->ioda.pe_array[pe_no]; in pnv_ioda_init_pe()
140 if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) { in pnv_ioda_reserve_pe()
142 __func__, pe_no, phb->hose->global_number); in pnv_ioda_reserve_pe()
146 mutex_lock(&phb->ioda.pe_alloc_mutex); in pnv_ioda_reserve_pe()
147 if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) in pnv_ioda_reserve_pe()
149 __func__, pe_no, phb->hose->global_number); in pnv_ioda_reserve_pe()
150 mutex_unlock(&phb->ioda.pe_alloc_mutex); in pnv_ioda_reserve_pe()
160 mutex_lock(&phb->ioda.pe_alloc_mutex); in pnv_ioda_alloc_pe()
163 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { in pnv_ioda_alloc_pe()
164 if (test_bit(pe, phb->ioda.pe_alloc)) { in pnv_ioda_alloc_pe()
177 set_bit(i, phb->ioda.pe_alloc); in pnv_ioda_alloc_pe()
180 ret = &phb->ioda.pe_array[pe]; in pnv_ioda_alloc_pe()
183 mutex_unlock(&phb->ioda.pe_alloc_mutex); in pnv_ioda_alloc_pe()
189 struct pnv_phb *phb = pe->phb; in pnv_ioda_free_pe()
190 unsigned int pe_num = pe->pe_number; in pnv_ioda_free_pe()
192 WARN_ON(pe->pdev); in pnv_ioda_free_pe()
195 mutex_lock(&phb->ioda.pe_alloc_mutex); in pnv_ioda_free_pe()
196 clear_bit(pe_num, phb->ioda.pe_alloc); in pnv_ioda_free_pe()
197 mutex_unlock(&phb->ioda.pe_alloc_mutex); in pnv_ioda_free_pe()
208 rc = opal_pci_set_phb_mem_window(phb->opal_id, in pnv_ioda2_init_m64()
210 phb->ioda.m64_bar_idx, in pnv_ioda2_init_m64()
211 phb->ioda.m64_base, in pnv_ioda2_init_m64()
213 phb->ioda.m64_size); in pnv_ioda2_init_m64()
220 rc = opal_pci_phb_mmio_enable(phb->opal_id, in pnv_ioda2_init_m64()
222 phb->ioda.m64_bar_idx, in pnv_ioda2_init_m64()
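	/* The trimming below strips the segments backing the reserved PE and
	 * the root bus PE out of the usable M64 range; they sit at the
	 * bottom or the top of the window depending on reserved_pe_idx,
	 * hence the two cases. */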
233 r = &phb->hose->mem_resources[1]; in pnv_ioda2_init_m64()
234 if (phb->ioda.reserved_pe_idx == 0) in pnv_ioda2_init_m64()
235 r->start += (2 * phb->ioda.m64_segsize); in pnv_ioda2_init_m64()
236 else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) in pnv_ioda2_init_m64()
237 r->end -= (2 * phb->ioda.m64_segsize); in pnv_ioda2_init_m64()
240 phb->ioda.reserved_pe_idx); in pnv_ioda2_init_m64()
246 rc, desc, phb->ioda.m64_bar_idx); in pnv_ioda2_init_m64()
247 opal_pci_phb_mmio_enable(phb->opal_id, in pnv_ioda2_init_m64()
249 phb->ioda.m64_bar_idx, in pnv_ioda2_init_m64()
251 return -EIO; in pnv_ioda2_init_m64()
257 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); in pnv_ioda_reserve_dev_m64_pe()
262 base = phb->ioda.m64_base; in pnv_ioda_reserve_dev_m64_pe()
263 sgsz = phb->ioda.m64_segsize; in pnv_ioda_reserve_dev_m64_pe()
265 r = &pdev->resource[i]; in pnv_ioda_reserve_dev_m64_pe()
266 if (!r->parent || !pnv_pci_is_m64(phb, r)) in pnv_ioda_reserve_dev_m64_pe()
269 start = ALIGN_DOWN(r->start - base, sgsz); in pnv_ioda_reserve_dev_m64_pe()
270 end = ALIGN(r->end - base, sgsz); in pnv_ioda_reserve_dev_m64_pe()
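/*
 * A worked sketch of the segment arithmetic above (hypothetical helper and
 * numbers, not from this file): a device BAR is rounded out to whole M64
 * segments, and segment N of the window belongs to PE#N.
 */
static void m64_segment_example(void)
{
	unsigned long long base = 0x0010000000000000ULL;	/* m64_base */
	unsigned long long sgsz = 0x10000000ULL;		/* 256MB segment */
	unsigned long long r_start = base + 0x38000000ULL;
	unsigned long long r_end   = base + 0x47ffffffULL;
	unsigned long long start = (r_start - base) & ~(sgsz - 1);	     /* ALIGN_DOWN */
	unsigned long long end = (r_end - base + sgsz - 1) & ~(sgsz - 1); /* ALIGN */

	/* start = 0x30000000, end = 0x50000000: segments (and hence PEs)
	 * 3 and 4 get reserved for this BAR. */
	(void)start; (void)end;
}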
286 list_for_each_entry(pdev, &bus->devices, bus_list) { in pnv_ioda_reserve_m64_pe()
289 if (all && pdev->subordinate) in pnv_ioda_reserve_m64_pe()
290 pnv_ioda_reserve_m64_pe(pdev->subordinate, in pnv_ioda_reserve_m64_pe()
307 size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long)); in pnv_ioda_pick_m64_pe()
323 if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) { in pnv_ioda_pick_m64_pe()
333 i = -1; in pnv_ioda_pick_m64_pe()
334 while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) < in pnv_ioda_pick_m64_pe()
335 phb->ioda.total_pe_num) { in pnv_ioda_pick_m64_pe()
336 pe = &phb->ioda.pe_array[i]; in pnv_ioda_pick_m64_pe()
338 phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number; in pnv_ioda_pick_m64_pe()
340 pe->flags |= PNV_IODA_PE_MASTER; in pnv_ioda_pick_m64_pe()
341 INIT_LIST_HEAD(&pe->slaves); in pnv_ioda_pick_m64_pe()
344 pe->flags |= PNV_IODA_PE_SLAVE; in pnv_ioda_pick_m64_pe()
345 pe->master = master_pe; in pnv_ioda_pick_m64_pe()
346 list_add_tail(&pe->list, &master_pe->slaves); in pnv_ioda_pick_m64_pe()
356 struct pci_controller *hose = phb->hose; in pnv_ioda_parse_m64_window()
357 struct device_node *dn = hose->dn; in pnv_ioda_parse_m64_window()
363 if (phb->type != PNV_PHB_IODA2) { in pnv_ioda_parse_m64_window()
373 r = of_get_property(dn, "ibm,opal-m64-window", NULL); in pnv_ioda_parse_m64_window()
375 pr_info(" No <ibm,opal-m64-window> on %pOF\n", in pnv_ioda_parse_m64_window()
382 * covering the whole 64-bit space. We support only one range. in pnv_ioda_parse_m64_window()
384 if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges", in pnv_ioda_parse_m64_window()
393 __func__, m64_range[1], phb->hose->global_number); in pnv_ioda_parse_m64_window()
399 __func__, phb->hose->global_number); in pnv_ioda_parse_m64_window()
404 res = &hose->mem_resources[1]; in pnv_ioda_parse_m64_window()
405 res->name = dn->full_name; in pnv_ioda_parse_m64_window()
406 res->start = of_translate_address(dn, r + 2); in pnv_ioda_parse_m64_window()
407 res->end = res->start + of_read_number(r + 4, 2) - 1; in pnv_ioda_parse_m64_window()
408 res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); in pnv_ioda_parse_m64_window()
410 hose->mem_offset[1] = res->start - pci_addr; in pnv_ioda_parse_m64_window()
412 phb->ioda.m64_size = resource_size(res); in pnv_ioda_parse_m64_window()
413 phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num; in pnv_ioda_parse_m64_window()
414 phb->ioda.m64_base = pci_addr; in pnv_ioda_parse_m64_window()
417 pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n", in pnv_ioda_parse_m64_window()
418 res->start, res->end, pci_addr, m64_range[0], in pnv_ioda_parse_m64_window()
419 m64_range[0] + m64_range[1] - 1); in pnv_ioda_parse_m64_window()
422 phb->ioda.m64_bar_alloc = (unsigned long)-1; in pnv_ioda_parse_m64_window()
425 m64_range[1]--; in pnv_ioda_parse_m64_window()
426 phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1]; in pnv_ioda_parse_m64_window()
428 pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx); in pnv_ioda_parse_m64_window()
432 clear_bit(i, &phb->ioda.m64_bar_alloc); in pnv_ioda_parse_m64_window()
438 phb->init_m64 = pnv_ioda2_init_m64; in pnv_ioda_parse_m64_window()
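/*
 * Worked numbers for the carve-up above (hypothetical sizes): a 64GB M64
 * window shared by 256 PEs gives 256MB segments, and segment N is owned by
 * PE#N by construction:
 *
 *   m64_segsize = 0x1000000000 / 256 = 0x10000000 (256MB)
 *   segment base for PE#N = m64_base + N * 0x10000000
 */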
443 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_freeze_pe()
448 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_freeze_pe()
449 pe = pe->master; in pnv_ioda_freeze_pe()
450 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) in pnv_ioda_freeze_pe()
453 pe_no = pe->pe_number; in pnv_ioda_freeze_pe()
456 /* Freeze master PE */ in pnv_ioda_freeze_pe()
457 rc = opal_pci_eeh_freeze_set(phb->opal_id, in pnv_ioda_freeze_pe()
461 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", in pnv_ioda_freeze_pe()
462 __func__, rc, phb->hose->global_number, pe_no); in pnv_ioda_freeze_pe()
466 /* Freeze slave PEs */ in pnv_ioda_freeze_pe()
467 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_freeze_pe()
470 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_freeze_pe()
471 rc = opal_pci_eeh_freeze_set(phb->opal_id, in pnv_ioda_freeze_pe()
472 slave->pe_number, in pnv_ioda_freeze_pe()
475 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", in pnv_ioda_freeze_pe()
476 __func__, rc, phb->hose->global_number, in pnv_ioda_freeze_pe()
477 slave->pe_number); in pnv_ioda_freeze_pe()
487 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_unfreeze_pe()
488 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_unfreeze_pe()
489 pe = pe->master; in pnv_ioda_unfreeze_pe()
490 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_unfreeze_pe()
491 pe_no = pe->pe_number; in pnv_ioda_unfreeze_pe()
495 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt); in pnv_ioda_unfreeze_pe()
497 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", in pnv_ioda_unfreeze_pe()
498 __func__, rc, opt, phb->hose->global_number, pe_no); in pnv_ioda_unfreeze_pe()
499 return -EIO; in pnv_ioda_unfreeze_pe()
502 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_unfreeze_pe()
506 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_unfreeze_pe()
507 rc = opal_pci_eeh_freeze_clear(phb->opal_id, in pnv_ioda_unfreeze_pe()
508 slave->pe_number, in pnv_ioda_unfreeze_pe()
511 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", in pnv_ioda_unfreeze_pe()
512 __func__, rc, opt, phb->hose->global_number, in pnv_ioda_unfreeze_pe()
513 slave->pe_number); in pnv_ioda_unfreeze_pe()
514 return -EIO; in pnv_ioda_unfreeze_pe()
529 if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num) in pnv_ioda_get_pe_state()
536 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_get_pe_state()
537 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_get_pe_state()
538 pe = pe->master; in pnv_ioda_get_pe_state()
539 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_get_pe_state()
540 pe_no = pe->pe_number; in pnv_ioda_get_pe_state()
544 rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, in pnv_ioda_get_pe_state()
548 "PHB#%x-PE#%x state\n", in pnv_ioda_get_pe_state()
550 phb->hose->global_number, pe_no); in pnv_ioda_get_pe_state()
555 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_get_pe_state()
558 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_get_pe_state()
559 rc = opal_pci_eeh_freeze_status(phb->opal_id, in pnv_ioda_get_pe_state()
560 slave->pe_number, in pnv_ioda_get_pe_state()
566 "PHB#%x-PE#%x state\n", in pnv_ioda_get_pe_state()
568 phb->hose->global_number, slave->pe_number); in pnv_ioda_get_pe_state()
585 int pe_number = phb->ioda.pe_rmap[bdfn]; in pnv_pci_bdfn_to_pe()
590 return &phb->ioda.pe_array[pe_number]; in pnv_pci_bdfn_to_pe()
595 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); in pnv_ioda_get_pe()
600 if (pdn->pe_number == IODA_INVALID_PE) in pnv_ioda_get_pe()
602 return &phb->ioda.pe_array[pdn->pe_number]; in pnv_ioda_get_pe()
617 rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, in pnv_ioda_set_one_peltv()
618 child->pe_number, op); in pnv_ioda_set_one_peltv()
622 return -ENXIO; in pnv_ioda_set_one_peltv()
625 if (!(child->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_set_one_peltv()
629 list_for_each_entry(slave, &child->slaves, list) { in pnv_ioda_set_one_peltv()
630 rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, in pnv_ioda_set_one_peltv()
631 slave->pe_number, op); in pnv_ioda_set_one_peltv()
635 return -ENXIO; in pnv_ioda_set_one_peltv()
655 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_set_peltv()
657 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
658 list_for_each_entry(slave, &pe->slaves, list) in pnv_ioda_set_peltv()
659 opal_pci_eeh_freeze_clear(phb->opal_id, in pnv_ioda_set_peltv()
660 slave->pe_number, in pnv_ioda_set_peltv()
667 * corresponding PELT-V as well. Otherwise, the error in pnv_ioda_set_peltv()
676 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
677 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_set_peltv()
684 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) in pnv_ioda_set_peltv()
685 pdev = pe->pbus->self; in pnv_ioda_set_peltv()
686 else if (pe->flags & PNV_IODA_PE_DEV) in pnv_ioda_set_peltv()
687 pdev = pe->pdev->bus->self; in pnv_ioda_set_peltv()
689 else if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_set_peltv()
690 pdev = pe->parent_dev; in pnv_ioda_set_peltv()
696 if (pdn && pdn->pe_number != IODA_INVALID_PE) { in pnv_ioda_set_peltv()
697 parent = &phb->ioda.pe_array[pdn->pe_number]; in pnv_ioda_set_peltv()
703 pdev = pdev->bus->self; in pnv_ioda_set_peltv()
718 if (pdn && pdn->pe_number != IODA_INVALID_PE) { in pnv_ioda_unset_peltv()
719 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, in pnv_ioda_unset_peltv()
720 pe->pe_number, in pnv_ioda_unset_peltv()
724 parent = parent->bus->self; in pnv_ioda_unset_peltv()
727 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_unset_peltv()
731 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, in pnv_ioda_unset_peltv()
732 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); in pnv_ioda_unset_peltv()
745 if (pe->pbus) { in pnv_ioda_deconfigure_pe()
750 parent = pe->pbus->self; in pnv_ioda_deconfigure_pe()
751 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_deconfigure_pe()
752 count = resource_size(&pe->pbus->busn_res); in pnv_ioda_deconfigure_pe()
764 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_deconfigure_pe()
769 rid_end = pe->rid + (count << 8); in pnv_ioda_deconfigure_pe()
772 if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_deconfigure_pe()
773 parent = pe->parent_dev; in pnv_ioda_deconfigure_pe()
776 parent = pe->pdev->bus->self; in pnv_ioda_deconfigure_pe()
780 rid_end = pe->rid + 1; in pnv_ioda_deconfigure_pe()
784 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_deconfigure_pe()
785 phb->ioda.pe_rmap[rid] = IODA_INVALID_PE; in pnv_ioda_deconfigure_pe()
788 * Release from all parents PELT-V. NPUs don't have a PELTV in pnv_ioda_deconfigure_pe()
791 if (phb->type != PNV_PHB_NPU_OCAPI) in pnv_ioda_deconfigure_pe()
794 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_deconfigure_pe()
799 pe->pbus = NULL; in pnv_ioda_deconfigure_pe()
800 pe->pdev = NULL; in pnv_ioda_deconfigure_pe()
802 pe->parent_dev = NULL; in pnv_ioda_deconfigure_pe()
814 if (pe->pbus) { in pnv_ioda_configure_pe()
819 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_configure_pe()
820 count = resource_size(&pe->pbus->busn_res); in pnv_ioda_configure_pe()
832 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_configure_pe()
837 rid_end = pe->rid + (count << 8); in pnv_ioda_configure_pe()
842 rid_end = pe->rid + 1; in pnv_ioda_configure_pe()
847 * corresponding PELT-V as well. Otherwise, the error in pnv_ioda_configure_pe()
851 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_configure_pe()
855 return -ENXIO; in pnv_ioda_configure_pe()
862 if (phb->type != PNV_PHB_NPU_OCAPI) in pnv_ioda_configure_pe()
866 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_configure_pe()
867 phb->ioda.pe_rmap[rid] = pe->pe_number; in pnv_ioda_configure_pe()
869 pe->mve_number = 0; in pnv_ioda_configure_pe()
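/*
 * A sketch of the RID arithmetic shared by the configure and deconfigure
 * paths above (hypothetical helper and values, not from this file):
 * pe->rid is (bus << 8) | devfn, so each bus contributes a 256-RID block.
 */
static inline unsigned int pe_rid_end_example(unsigned int rid, unsigned int bus_count)
{
	/* A PNV_IODA_PE_BUS_ALL PE rooted at bus 2 spanning buses 2..4 has
	 * count = 3 and maps RIDs 0x0200..0x04ff; a device PE maps one RID. */
	return rid + (bus_count << 8);
}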
876 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); in pnv_ioda_setup_dev_PE()
885 if (pdn->pe_number != IODA_INVALID_PE) in pnv_ioda_setup_dev_PE()
901 pdn->pe_number = pe->pe_number; in pnv_ioda_setup_dev_PE()
902 pe->flags = PNV_IODA_PE_DEV; in pnv_ioda_setup_dev_PE()
903 pe->pdev = dev; in pnv_ioda_setup_dev_PE()
904 pe->pbus = NULL; in pnv_ioda_setup_dev_PE()
905 pe->mve_number = -1; in pnv_ioda_setup_dev_PE()
906 pe->rid = dev->bus->number << 8 | pdn->devfn; in pnv_ioda_setup_dev_PE()
907 pe->device_count++; in pnv_ioda_setup_dev_PE()
914 pdn->pe_number = IODA_INVALID_PE; in pnv_ioda_setup_dev_PE()
915 pe->pdev = NULL; in pnv_ioda_setup_dev_PE()
920 mutex_lock(&phb->ioda.pe_list_mutex); in pnv_ioda_setup_dev_PE()
921 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_dev_PE()
922 mutex_unlock(&phb->ioda.pe_list_mutex); in pnv_ioda_setup_dev_PE()
930 * originated by PCIe-to-PCI bridge or PLX switch downstream ports.
942 pe_num = phb->ioda.pe_rmap[bus->number << 8]; in pnv_ioda_setup_bus_PE()
944 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_bus_PE()
950 pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; in pnv_ioda_setup_bus_PE()
962 __func__, pci_domain_nr(bus), bus->number); in pnv_ioda_setup_bus_PE()
966 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); in pnv_ioda_setup_bus_PE()
967 pe->pbus = bus; in pnv_ioda_setup_bus_PE()
968 pe->pdev = NULL; in pnv_ioda_setup_bus_PE()
969 pe->mve_number = -1; in pnv_ioda_setup_bus_PE()
970 pe->rid = bus->busn_res.start << 8; in pnv_ioda_setup_bus_PE()
974 &bus->busn_res.start, &bus->busn_res.end, in pnv_ioda_setup_bus_PE()
975 pe->pe_number); in pnv_ioda_setup_bus_PE()
978 &bus->busn_res.start, pe->pe_number); in pnv_ioda_setup_bus_PE()
983 pe->pbus = NULL; in pnv_ioda_setup_bus_PE()
988 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_bus_PE()
995 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); in pnv_pci_ioda_dma_dev_setup()
1002 /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */ in pnv_pci_ioda_dma_dev_setup()
1003 if (WARN_ON(pdev->is_virtfn)) in pnv_pci_ioda_dma_dev_setup()
1006 pnv_pci_configure_bus(pdev->bus); in pnv_pci_ioda_dma_dev_setup()
1008 pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff); in pnv_pci_ioda_dma_dev_setup()
1018 pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number); in pnv_pci_ioda_dma_dev_setup()
1023 * skip allocating a TCE table, etc. unless we get a non-bridge device. in pnv_pci_ioda_dma_dev_setup()
1025 if (!pe->dma_setup_done && !pci_is_bridge(pdev)) { in pnv_pci_ioda_dma_dev_setup()
1026 switch (phb->type) { in pnv_pci_ioda_dma_dev_setup()
1032 __func__, phb->hose->global_number, phb->type); in pnv_pci_ioda_dma_dev_setup()
1037 pdn->pe_number = pe->pe_number; in pnv_pci_ioda_dma_dev_setup()
1038 pe->device_count++; in pnv_pci_ioda_dma_dev_setup()
1040 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); in pnv_pci_ioda_dma_dev_setup()
1041 pdev->dev.archdata.dma_offset = pe->tce_bypass_base; in pnv_pci_ioda_dma_dev_setup()
1042 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); in pnv_pci_ioda_dma_dev_setup()
1045 if (pe->table_group.group) in pnv_pci_ioda_dma_dev_setup()
1046 iommu_add_device(&pe->table_group, &pdev->dev); in pnv_pci_ioda_dma_dev_setup()
1050 * Reconfigure TVE#0 to be usable as 64-bit DMA space.
1052 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
1056 * result are limited to the 4GB of virtual memory made available to 32-bit
1059 * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit
1061 * by 64-bit DMAs. This should only be used by devices that want more than
1062 * 4GB, and only on PEs that have no 32-bit devices.
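/*
 * A sketch of the per-PE DMA address map described above (the constants
 * appear elsewhere in this file; the drawing itself is illustrative):
 *
 *   DMA address 0 .. 4GB-1                      -> TVE#0
 *       default: 32-bit TCE table; retargeted here to a direct map
 *   DMA address (1ULL << 59) .. top of RAM      -> TVE#1
 *       64-bit bypass window (pe->tce_bypass_base)
 *
 * A device that sets bit 59 in its DMA addresses goes through the bypass
 * window; everything else lands in TVE#0, which this routine rewrites.
 */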
1085 table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL, in pnv_pci_ioda_dma_64bit_bypass()
1101 rc = opal_pci_map_pe_dma_window(pe->phb->opal_id, in pnv_pci_ioda_dma_64bit_bypass()
1102 pe->pe_number, in pnv_pci_ioda_dma_64bit_bypass()
1104 (pe->pe_number << 1) + 0, in pnv_pci_ioda_dma_64bit_bypass()
1110 pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n"); in pnv_pci_ioda_dma_64bit_bypass()
1114 pe_err(pe, "Error configuring 64-bit DMA bypass\n"); in pnv_pci_ioda_dma_64bit_bypass()
1115 return -EIO; in pnv_pci_ioda_dma_64bit_bypass()
1121 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); in pnv_pci_ioda_iommu_bypass_supported()
1125 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) in pnv_pci_ioda_iommu_bypass_supported()
1128 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_iommu_bypass_supported()
1129 if (pe->tce_bypass_enabled) { in pnv_pci_ioda_iommu_bypass_supported()
1130 u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; in pnv_pci_ioda_iommu_bypass_supported()
1138 * bypass the 32-bit region and be usable for 64-bit DMAs. in pnv_pci_ioda_iommu_bypass_supported()
1143 /* pe->pdev should be set if it's a single device, pe->pbus if not */ in pnv_pci_ioda_iommu_bypass_supported()
1144 (pe->device_count == 1 || !pe->pbus) && in pnv_pci_ioda_iommu_bypass_supported()
1145 phb->model == PNV_PHB_MODEL_PHB3) { in pnv_pci_ioda_iommu_bypass_supported()
1150 /* 4GB offset bypasses 32-bit space */ in pnv_pci_ioda_iommu_bypass_supported()
1151 pdev->dev.archdata.dma_offset = (1ULL << 32); in pnv_pci_ioda_iommu_bypass_supported()
1160 return phb->regs + 0x210; in pnv_ioda_get_inval_reg()
1178 /* 01xb - invalidate TCEs that match the specified PE# */ in pnv_pci_phb3_tce_invalidate_pe()
1179 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); in pnv_pci_phb3_tce_invalidate_pe()
1180 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); in pnv_pci_phb3_tce_invalidate_pe()
1190 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); in pnv_pci_phb3_tce_invalidate()
1195 start |= (pe->pe_number & 0xFF); in pnv_pci_phb3_tce_invalidate()
1200 end |= ((index + npages - 1) << shift); in pnv_pci_phb3_tce_invalidate()
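/*
 * Worked values for the PHB3 kill encodings above (hypothetical inputs;
 * the PHB3_TCE_KILL_* opcode constants are elided from this listing):
 * for PE#5, index = 0x40, npages = 4, shift = 12, the low byte of each
 * doorbell value carries the PE number (5) and the address field carries
 *
 *   start |= 0x40 << 12   (first page, 0x40000)
 *   end   |= 0x43 << 12   (last page,  0x43000)
 */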
1212 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate_pe()
1214 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) in pnv_pci_ioda2_tce_invalidate_pe()
1217 opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE, in pnv_pci_ioda2_tce_invalidate_pe()
1218 pe->pe_number, 0, 0, 0); in pnv_pci_ioda2_tce_invalidate_pe()
1226 list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) { in pnv_pci_ioda2_tce_invalidate()
1227 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_ioda2_tce_invalidate()
1229 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate()
1230 unsigned int shift = tbl->it_page_shift; in pnv_pci_ioda2_tce_invalidate()
1232 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) in pnv_pci_ioda2_tce_invalidate()
1236 opal_pci_tce_kill(phb->opal_id, in pnv_pci_ioda2_tce_invalidate()
1238 pe->pe_number, 1u << shift, in pnv_pci_ioda2_tce_invalidate()
1282 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_set_window()
1284 const unsigned long size = tbl->it_indirect_levels ? in pnv_pci_ioda2_set_window()
1285 tbl->it_level_size : tbl->it_size; in pnv_pci_ioda2_set_window()
1286 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; in pnv_pci_ioda2_set_window()
1287 const __u64 win_size = tbl->it_size << tbl->it_page_shift; in pnv_pci_ioda2_set_window()
1290 num, start_addr, start_addr + win_size - 1, in pnv_pci_ioda2_set_window()
1295 * shifted by 1 bit for 32-bit DMA space. in pnv_pci_ioda2_set_window()
1297 rc = opal_pci_map_pe_dma_window(phb->opal_id, in pnv_pci_ioda2_set_window()
1298 pe->pe_number, in pnv_pci_ioda2_set_window()
1299 (pe->pe_number << 1) + num, in pnv_pci_ioda2_set_window()
1300 tbl->it_indirect_levels + 1, in pnv_pci_ioda2_set_window()
1301 __pa(tbl->it_base), in pnv_pci_ioda2_set_window()
1309 pnv_pci_link_table_and_group(phb->hose->node, num, in pnv_pci_ioda2_set_window()
1310 tbl, &pe->table_group); in pnv_pci_ioda2_set_window()
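/*
 * The window-id encoding used in the OPAL call above, as a small sketch
 * (hypothetical helper name): every PE owns a pair of DMA windows.
 */
static inline unsigned int pe_dma_window_id_example(unsigned int pe_number,
						    unsigned int num)
{
	/* PE#5: window 0 (32-bit TCE table) -> id 10,
	 *       window 1 (64-bit bypass)    -> id 11 */
	return (pe_number << 1) + num;
}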
1318 uint16_t window_id = (pe->pe_number << 1) + 1; in pnv_pci_ioda2_set_bypass()
1321 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); in pnv_pci_ioda2_set_bypass()
1326 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
1327 pe->pe_number, in pnv_pci_ioda2_set_bypass()
1329 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
1332 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
1333 pe->pe_number, in pnv_pci_ioda2_set_bypass()
1335 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
1341 pe->tce_bypass_enabled = enable; in pnv_pci_ioda2_set_bypass()
1350 int nid = pe->phb->hose->node; in pnv_pci_ioda2_create_table()
1351 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; in pnv_pci_ioda2_create_table()
1357 return -ENOMEM; in pnv_pci_ioda2_create_table()
1359 tbl->it_ops = &pnv_ioda2_iommu_ops; in pnv_pci_ioda2_create_table()
1397 * end up being multilevel and with on-demand allocation in place, in pnv_pci_ioda2_setup_default_config()
1412 * order to save memory by relying on on-demand TCE level allocation. in pnv_pci_ioda2_setup_default_config()
1416 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT, in pnv_pci_ioda2_setup_default_config()
1419 pe_err(pe, "Failed to create 32-bit TCE table, err %ld", in pnv_pci_ioda2_setup_default_config()
1427 if (window_size > pe->phb->ioda.m32_pci_base) { in pnv_pci_ioda2_setup_default_config()
1428 res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift; in pnv_pci_ioda2_setup_default_config()
1429 res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; in pnv_pci_ioda2_setup_default_config()
1432 tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number; in pnv_pci_ioda2_setup_default_config()
1433 if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) in pnv_pci_ioda2_setup_default_config()
1434 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); in pnv_pci_ioda2_setup_default_config()
1436 rc = -ENOMEM; in pnv_pci_ioda2_setup_default_config()
1438 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc); in pnv_pci_ioda2_setup_default_config()
1450 if (pe->pdev) in pnv_pci_ioda2_setup_default_config()
1451 set_iommu_table_base(&pe->pdev->dev, tbl); in pnv_pci_ioda2_setup_default_config()
1461 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_unset_window()
1466 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, in pnv_pci_ioda2_unset_window()
1467 (pe->pe_number << 1) + num, in pnv_pci_ioda2_unset_window()
1475 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group); in pnv_pci_ioda2_unset_window()
1486 unsigned entries_shift = window_shift - page_shift; in pnv_pci_ioda2_get_table_size()
1496 entries_shift = (entries_shift + levels - 1) / levels; in pnv_pci_ioda2_get_table_size()
1501 for ( ; levels; --levels) { in pnv_pci_ioda2_get_table_size()
1522 (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size( in pnv_pci_ioda2_create_table_userspace()
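/*
 * Worked numbers for the sizing above (hypothetical window): a 1GB window
 * (window_shift = 30) of 4K IOMMU pages (page_shift = 12) needs
 * 2^(30 - 12) = 2^18 TCEs. Spread over levels = 2, each level resolves
 * (18 + 2 - 1) / 2 = 9 bits, i.e. 512-entry tables of 512 * 8 = 4KB each;
 * the elided loop body sums the bytes allocated per level into the total
 * that lands in it_allocated_size.
 */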
1531 list_for_each_entry(dev, &bus->devices, bus_list) { in pnv_ioda_setup_bus_dma()
1532 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); in pnv_ioda_setup_bus_dma()
1533 dev->dev.archdata.dma_offset = pe->tce_bypass_base; in pnv_ioda_setup_bus_dma()
1535 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) in pnv_ioda_setup_bus_dma()
1536 pnv_ioda_setup_bus_dma(pe, dev->subordinate); in pnv_ioda_setup_bus_dma()
1545 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_ioda2_take_ownership()
1555 pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_ioda2_take_ownership()
1556 if (pe->pbus) in pnv_ioda2_take_ownership()
1557 pnv_ioda_setup_bus_dma(pe, pe->pbus); in pnv_ioda2_take_ownership()
1558 else if (pe->pdev) in pnv_ioda2_take_ownership()
1559 set_iommu_table_base(&pe->pdev->dev, NULL); in pnv_ioda2_take_ownership()
1571 if (pe->table_group.tables[0]) in pnv_ioda2_release_ownership()
1574 if (pe->pbus) in pnv_ioda2_release_ownership()
1575 pnv_ioda_setup_bus_dma(pe, pe->pbus); in pnv_ioda2_release_ownership()
1594 pe->tce_bypass_base = 1ull << 59; in pnv_pci_ioda2_setup_dma_pe()
1596 /* The PE will reserve all possible 32-bits space */ in pnv_pci_ioda2_setup_dma_pe()
1597 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", in pnv_pci_ioda2_setup_dma_pe()
1598 phb->ioda.m32_pci_base); in pnv_pci_ioda2_setup_dma_pe()
1601 pe->table_group.tce32_start = 0; in pnv_pci_ioda2_setup_dma_pe()
1602 pe->table_group.tce32_size = phb->ioda.m32_pci_base; in pnv_pci_ioda2_setup_dma_pe()
1603 pe->table_group.max_dynamic_windows_supported = in pnv_pci_ioda2_setup_dma_pe()
1605 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; in pnv_pci_ioda2_setup_dma_pe()
1606 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); in pnv_pci_ioda2_setup_dma_pe()
1613 pe->table_group.ops = &pnv_pci_ioda2_ops; in pnv_pci_ioda2_setup_dma_pe()
1614 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda2_setup_dma_pe()
1615 pe->pe_number); in pnv_pci_ioda2_setup_dma_pe()
1617 pe->dma_setup_done = true; in pnv_pci_ioda2_setup_dma_pe()
1624 * The IRQ data is mapped in the PCI-MSI domain and the EOI OPAL call
1626 * numbers of the in-the-middle MSI domain are vector numbers and it's
1631 struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data); in pnv_opal_pci_msi_eoi()
1632 struct pnv_phb *phb = hose->private_data; in pnv_opal_pci_msi_eoi()
1634 return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq); in pnv_opal_pci_msi_eoi()
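/*
 * A sketch of the irq_data stack walked above (domain names taken from
 * pnv_msi_allocate_domains() further down; the drawing is illustrative):
 *
 *   d               PCI-MSI layer (hose->msi_domain)
 *   d->parent_data  "PNV-MSI" device domain (hose->dev_domain)
 *   ...             XICS/XIVE parent domain
 *
 * hence the chip data and the hwirq handed to OPAL are both taken from
 * d->parent_data rather than from d itself.
 */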
1645 struct pnv_phb *phb = hose->private_data; in pnv_ioda2_msi_eoi()
1647 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); in pnv_ioda2_msi_eoi()
1660 if (phb->model != PNV_PHB_MODEL_PHB3) in pnv_set_msi_irq_chip()
1663 if (!phb->ioda.irq_chip_init) { in pnv_set_msi_irq_chip()
1670 phb->ioda.irq_chip_init = 1; in pnv_set_msi_irq_chip()
1671 phb->ioda.irq_chip = *ichip; in pnv_set_msi_irq_chip()
1672 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; in pnv_set_msi_irq_chip()
1674 irq_set_chip(virq, &phb->ioda.irq_chip); in pnv_set_msi_irq_chip()
1675 irq_set_chip_data(virq, phb->hose); in pnv_set_msi_irq_chip()
1698 dev_dbg(&dev->dev, "%s: setup %s-bit MSI for vector #%d\n", __func__, in __pnv_pci_ioda_msi_setup()
1703 return -ENXIO; in __pnv_pci_ioda_msi_setup()
1706 if (pe->mve_number < 0) in __pnv_pci_ioda_msi_setup()
1707 return -ENXIO; in __pnv_pci_ioda_msi_setup()
1709 /* Force 32-bit MSI on some broken devices */ in __pnv_pci_ioda_msi_setup()
1710 if (dev->no_64bit_msi) in __pnv_pci_ioda_msi_setup()
1714 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); in __pnv_pci_ioda_msi_setup()
1718 return -EIO; in __pnv_pci_ioda_msi_setup()
1724 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, in __pnv_pci_ioda_msi_setup()
1727 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n", in __pnv_pci_ioda_msi_setup()
1729 return -EIO; in __pnv_pci_ioda_msi_setup()
1731 msg->address_hi = be64_to_cpu(addr64) >> 32; in __pnv_pci_ioda_msi_setup()
1732 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful; in __pnv_pci_ioda_msi_setup()
1736 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, in __pnv_pci_ioda_msi_setup()
1739 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n", in __pnv_pci_ioda_msi_setup()
1741 return -EIO; in __pnv_pci_ioda_msi_setup()
1743 msg->address_hi = 0; in __pnv_pci_ioda_msi_setup()
1744 msg->address_lo = be32_to_cpu(addr32); in __pnv_pci_ioda_msi_setup()
1746 msg->data = be32_to_cpu(data); in __pnv_pci_ioda_msi_setup()
1754 * controller.
1770 d = d->parent_data; in pnv_msi_shutdown()
1771 if (d->chip->irq_shutdown) in pnv_msi_shutdown()
1772 d->chip->irq_shutdown(d); in pnv_msi_shutdown()
1788 .name = "PNV-PCI-MSI",
1807 struct pnv_phb *phb = hose->private_data; in pnv_msi_compose_msg()
1810 rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq, in pnv_msi_compose_msg()
1811 entry->pci.msi_attrib.is_64, msg); in pnv_msi_compose_msg()
1813 dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n", in pnv_msi_compose_msg()
1814 entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc); in pnv_msi_compose_msg()
1824 struct pnv_phb *phb = hose->private_data; in pnv_msi_eoi()
1826 if (phb->model == PNV_PHB_MODEL_PHB3) { in pnv_msi_eoi()
1832 WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq)); in pnv_msi_eoi()
1839 .name = "PNV-MSI",
1854 parent_fwspec.fwnode = domain->parent->fwnode; in pnv_irq_parent_domain_alloc()
1869 struct pci_controller *hose = domain->host_data; in pnv_irq_domain_alloc()
1870 struct pnv_phb *phb = hose->private_data; in pnv_irq_domain_alloc()
1872 struct pci_dev *pdev = msi_desc_to_pci_dev(info->desc); in pnv_irq_domain_alloc()
1876 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, nr_irqs); in pnv_irq_domain_alloc()
1878 dev_warn(&pdev->dev, "failed to find a free MSI\n"); in pnv_irq_domain_alloc()
1879 return -ENOSPC; in pnv_irq_domain_alloc()
1882 dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__, in pnv_irq_domain_alloc()
1883 hose->dn, virq, hwirq, nr_irqs); in pnv_irq_domain_alloc()
1887 phb->msi_base + hwirq + i); in pnv_irq_domain_alloc()
1898 irq_domain_free_irqs_parent(domain, virq, i - 1); in pnv_irq_domain_alloc()
1899 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs); in pnv_irq_domain_alloc()
1908 struct pnv_phb *phb = hose->private_data; in pnv_irq_domain_free()
1910 pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn, in pnv_irq_domain_free()
1911 virq, d->hwirq, nr_irqs); in pnv_irq_domain_free()
1913 msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs); in pnv_irq_domain_free()
1914 /* XIVE domain is cleared through ->msi_free() */ in pnv_irq_domain_free()
1924 struct pnv_phb *phb = hose->private_data; in pnv_msi_allocate_domains()
1927 hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id); in pnv_msi_allocate_domains()
1928 if (!hose->fwnode) in pnv_msi_allocate_domains()
1929 return -ENOMEM; in pnv_msi_allocate_domains()
1931 hose->dev_domain = irq_domain_create_hierarchy(parent, 0, count, in pnv_msi_allocate_domains()
1932 hose->fwnode, in pnv_msi_allocate_domains()
1934 if (!hose->dev_domain) { in pnv_msi_allocate_domains()
1935 pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n", in pnv_msi_allocate_domains()
1936 hose->dn, hose->global_number); in pnv_msi_allocate_domains()
1937 irq_domain_free_fwnode(hose->fwnode); in pnv_msi_allocate_domains()
1938 return -ENOMEM; in pnv_msi_allocate_domains()
1941 hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), in pnv_msi_allocate_domains()
1943 hose->dev_domain); in pnv_msi_allocate_domains()
1944 if (!hose->msi_domain) { in pnv_msi_allocate_domains()
1945 pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n", in pnv_msi_allocate_domains()
1946 hose->dn, hose->global_number); in pnv_msi_allocate_domains()
1947 irq_domain_free_fwnode(hose->fwnode); in pnv_msi_allocate_domains()
1948 irq_domain_remove(hose->dev_domain); in pnv_msi_allocate_domains()
1949 return -ENOMEM; in pnv_msi_allocate_domains()
1958 const __be32 *prop = of_get_property(phb->hose->dn, in pnv_pci_init_ioda_msis()
1959 "ibm,opal-msi-ranges", NULL); in pnv_pci_init_ioda_msis()
1962 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL); in pnv_pci_init_ioda_msis()
1967 phb->msi_base = be32_to_cpup(prop); in pnv_pci_init_ioda_msis()
1969 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) { in pnv_pci_init_ioda_msis()
1971 phb->hose->global_number); in pnv_pci_init_ioda_msis()
1976 count, phb->msi_base); in pnv_pci_init_ioda_msis()
1978 pnv_msi_allocate_domains(phb->hose, count); in pnv_pci_init_ioda_msis()
1984 struct pnv_phb *phb = pe->phb; in pnv_ioda_setup_pe_res()
1989 if (!res || !res->flags || res->start > res->end || in pnv_ioda_setup_pe_res()
1990 res->flags & IORESOURCE_UNSET) in pnv_ioda_setup_pe_res()
1993 if (res->flags & IORESOURCE_IO) { in pnv_ioda_setup_pe_res()
1994 region.start = res->start - phb->ioda.io_pci_base; in pnv_ioda_setup_pe_res()
1995 region.end = res->end - phb->ioda.io_pci_base; in pnv_ioda_setup_pe_res()
1996 index = region.start / phb->ioda.io_segsize; in pnv_ioda_setup_pe_res()
1998 while (index < phb->ioda.total_pe_num && in pnv_ioda_setup_pe_res()
2000 phb->ioda.io_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_res()
2001 rc = opal_pci_map_pe_mmio_window(phb->opal_id, in pnv_ioda_setup_pe_res()
2002 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_res()
2005 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_res()
2009 region.start += phb->ioda.io_segsize; in pnv_ioda_setup_pe_res()
2012 } else if ((res->flags & IORESOURCE_MEM) && in pnv_ioda_setup_pe_res()
2014 region.start = res->start - in pnv_ioda_setup_pe_res()
2015 phb->hose->mem_offset[0] - in pnv_ioda_setup_pe_res()
2016 phb->ioda.m32_pci_base; in pnv_ioda_setup_pe_res()
2017 region.end = res->end - in pnv_ioda_setup_pe_res()
2018 phb->hose->mem_offset[0] - in pnv_ioda_setup_pe_res()
2019 phb->ioda.m32_pci_base; in pnv_ioda_setup_pe_res()
2020 index = region.start / phb->ioda.m32_segsize; in pnv_ioda_setup_pe_res()
2022 while (index < phb->ioda.total_pe_num && in pnv_ioda_setup_pe_res()
2024 phb->ioda.m32_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_res()
2025 rc = opal_pci_map_pe_mmio_window(phb->opal_id, in pnv_ioda_setup_pe_res()
2026 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_res()
2029 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_res()
2033 region.start += phb->ioda.m32_segsize; in pnv_ioda_setup_pe_res()
2054 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); in pnv_ioda_setup_pe_seg()
2056 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { in pnv_ioda_setup_pe_seg()
2058 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]); in pnv_ioda_setup_pe_seg()
2065 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev)) in pnv_ioda_setup_pe_seg()
2069 &pdev->resource[PCI_BRIDGE_RESOURCES + i]); in pnv_ioda_setup_pe_seg()
2080 ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, in pnv_pci_diag_data_set()
2081 phb->diag_data_size); in pnv_pci_diag_data_set()
2083 return -EIO; in pnv_pci_diag_data_set()
2086 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); in pnv_pci_diag_data_set()
2098 for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) { in pnv_pci_ioda_pe_dump()
2099 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num]; in pnv_pci_ioda_pe_dump()
2101 if (!test_bit(pe_num, phb->ioda.pe_alloc)) in pnv_pci_ioda_pe_dump()
2105 pe->rid, pe->device_count, in pnv_pci_ioda_pe_dump()
2106 (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "", in pnv_pci_ioda_pe_dump()
2107 (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "", in pnv_pci_ioda_pe_dump()
2108 (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "", in pnv_pci_ioda_pe_dump()
2109 (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "", in pnv_pci_ioda_pe_dump()
2110 (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "", in pnv_pci_ioda_pe_dump()
2111 (pe->flags & PNV_IODA_PE_VF) ? "vf " : ""); in pnv_pci_ioda_pe_dump()
2130 phb = hose->private_data; in pnv_pci_ioda_create_dbgfs()
2132 sprintf(name, "PCI%04x", hose->global_number); in pnv_pci_ioda_create_dbgfs()
2133 phb->dbgfs = debugfs_create_dir(name, arch_debugfs_dir); in pnv_pci_ioda_create_dbgfs()
2135 debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs, in pnv_pci_ioda_create_dbgfs()
2137 debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs, in pnv_pci_ioda_create_dbgfs()
2145 struct pci_dev *dev = bus->self; in pnv_pci_enable_bridge()
2149 if (list_empty(&bus->devices)) in pnv_pci_enable_bridge()
2153 * If there's a bridge associated with that bus, enable it. This works in pnv_pci_enable_bridge()
2161 pci_err(dev, "Error enabling bridge (%d)\n", rc); in pnv_pci_enable_bridge()
2166 list_for_each_entry(child, &bus->children, node) in pnv_pci_enable_bridge()
2175 pnv_pci_enable_bridge(hose->bus); in pnv_pci_enable_bridges()
2197 * create against the parent PCI bridge. For that case, we
2206 struct pci_dev *bridge; in pnv_pci_window_alignment() local
2208 bridge = bus->self; in pnv_pci_window_alignment()
2209 while (bridge) { in pnv_pci_window_alignment()
2210 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) { in pnv_pci_window_alignment()
2216 bridge = bridge->bus->self; in pnv_pci_window_alignment()
2221 * alignment for any 64-bit resource, PCIe doesn't care and in pnv_pci_window_alignment()
2222 * bridges only do 64-bit prefetchable anyway. in pnv_pci_window_alignment()
2224 if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type)) in pnv_pci_window_alignment()
2225 return phb->ioda.m64_segsize; in pnv_pci_window_alignment()
2227 return phb->ioda.m32_segsize; in pnv_pci_window_alignment()
2229 return phb->ioda.io_segsize; in pnv_pci_window_alignment()
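/*
 * Illustrative return values (hypothetical window sizes on a 256-PE PHB):
 * with a 64GB M64 window, a 4GB M32 window and 64KB of IO space, a 64-bit
 * prefetchable bridge window is aligned to 256MB (m64_segsize), a 32-bit
 * memory window to 16MB (m32_segsize), and an IO window to 256 bytes
 * (io_segsize).
 */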
2234 * bridge behind the root port with PHB's windows in order
2244 struct pnv_phb *phb = hose->private_data; in pnv_pci_fixup_bridge_resources()
2245 struct pci_dev *bridge = bus->self; in pnv_pci_fixup_bridge_resources() local
2250 /* Check if we need apply fixup to the bridge's windows */ in pnv_pci_fixup_bridge_resources()
2251 if (!pci_is_root_bus(bridge->bus) && in pnv_pci_fixup_bridge_resources()
2252 !pci_is_root_bus(bridge->bus->self->bus)) in pnv_pci_fixup_bridge_resources()
2257 r = &bridge->resource[PCI_BRIDGE_RESOURCES + i]; in pnv_pci_fixup_bridge_resources()
2258 if (!r->flags || !r->parent) in pnv_pci_fixup_bridge_resources()
2262 if (r->flags & type & IORESOURCE_IO) in pnv_pci_fixup_bridge_resources()
2263 w = &hose->io_resource; in pnv_pci_fixup_bridge_resources()
2266 phb->ioda.m64_segsize) in pnv_pci_fixup_bridge_resources()
2267 w = &hose->mem_resources[1]; in pnv_pci_fixup_bridge_resources()
2268 else if (r->flags & type & IORESOURCE_MEM) { in pnv_pci_fixup_bridge_resources()
2269 w = &hose->mem_resources[0]; in pnv_pci_fixup_bridge_resources()
2273 r->start = w->start; in pnv_pci_fixup_bridge_resources()
2274 r->end = w->end; in pnv_pci_fixup_bridge_resources()
2276 /* The 64KB 32-bit MSI region shouldn't be included in in pnv_pci_fixup_bridge_resources()
2277 * the 32-bit bridge window. Otherwise, we can see strange in pnv_pci_fixup_bridge_resources()
2281 * 32-bit bridge window. in pnv_pci_fixup_bridge_resources()
2284 r->end += 0x10000; in pnv_pci_fixup_bridge_resources()
2285 r->end -= 0x100000; in pnv_pci_fixup_bridge_resources()
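			/* Likely intent of the arithmetic above: w->end
			 * stops 64KB short of the real M32 top (firmware
			 * keeps that MSI region back, matching the
			 * m32_size += 0x10000 adjustment in
			 * pnv_pci_init_ioda_phb()), so add the 64KB back
			 * and then drop a full, 1MB-aligned megabyte so
			 * the bridge window cannot overlap the MSI hole. */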
2292 struct pci_dev *bridge = bus->self; in pnv_pci_configure_bus() local
2294 bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); in pnv_pci_configure_bus()
2296 dev_info(&bus->dev, "Configuring PE for bus\n"); in pnv_pci_configure_bus()
2299 if (WARN_ON(list_empty(&bus->devices))) in pnv_pci_configure_bus()
2330 if (!pdn || pdn->pe_number == IODA_INVALID_PE) { in pnv_pci_enable_device_hook()
2347 if (pdn->pe_number == IODA_INVALID_PE) { in pnv_ocapi_enable_device_hook()
2357 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_pe_dma()
2360 if (!pe->dma_setup_done) in pnv_pci_ioda2_release_pe_dma()
2363 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_pci_ioda2_release_pe_dma()
2368 if (pe->table_group.group) { in pnv_pci_ioda2_release_pe_dma()
2369 iommu_group_put(pe->table_group.group); in pnv_pci_ioda2_release_pe_dma()
2370 WARN_ON(pe->table_group.group); in pnv_pci_ioda2_release_pe_dma()
2380 struct pnv_phb *phb = pe->phb; in pnv_ioda_free_pe_seg()
2384 for (idx = 0; idx < phb->ioda.total_pe_num; idx++) { in pnv_ioda_free_pe_seg()
2385 if (map[idx] != pe->pe_number) in pnv_ioda_free_pe_seg()
2388 rc = opal_pci_map_pe_mmio_window(phb->opal_id, in pnv_ioda_free_pe_seg()
2389 phb->ioda.reserved_pe_idx, win, 0, idx); in pnv_ioda_free_pe_seg()
2401 struct pnv_phb *phb = pe->phb; in pnv_ioda_release_pe_seg()
2403 if (phb->type == PNV_PHB_IODA2) { in pnv_ioda_release_pe_seg()
2405 phb->ioda.m32_segmap); in pnv_ioda_release_pe_seg()
2411 struct pnv_phb *phb = pe->phb; in pnv_ioda_release_pe()
2416 mutex_lock(&phb->ioda.pe_list_mutex); in pnv_ioda_release_pe()
2417 list_del(&pe->list); in pnv_ioda_release_pe()
2418 mutex_unlock(&phb->ioda.pe_list_mutex); in pnv_ioda_release_pe()
2420 switch (phb->type) { in pnv_ioda_release_pe()
2431 pnv_ioda_deconfigure_pe(pe->phb, pe); in pnv_ioda_release_pe()
2434 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_release_pe()
2435 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) { in pnv_ioda_release_pe()
2436 list_del(&slave->list); in pnv_ioda_release_pe()
2447 if (phb->ioda.root_pe_idx == pe->pe_number) in pnv_ioda_release_pe()
2455 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); in pnv_pci_release_device()
2460 if (pdev->is_virtfn) in pnv_pci_release_device()
2463 if (!pdn || pdn->pe_number == IODA_INVALID_PE) in pnv_pci_release_device()
2472 if (pdev->is_physfn) in pnv_pci_release_device()
2473 kfree(pdev->dev.archdata.iov_data); in pnv_pci_release_device()
2484 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_release_device()
2485 pdn->pe_number = IODA_INVALID_PE; in pnv_pci_release_device()
2487 WARN_ON(--pe->device_count < 0); in pnv_pci_release_device()
2488 if (pe->device_count == 0) in pnv_pci_release_device()
2494 struct pnv_phb *phb = hose->private_data; in pnv_pci_ioda_shutdown()
2496 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE, in pnv_pci_ioda_shutdown()
2505 list_for_each_entry(pe, &phb->ioda.pe_list, list) { in pnv_pci_ioda_dma_bus_setup()
2506 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) in pnv_pci_ioda_dma_bus_setup()
2509 if (!pe->pbus) in pnv_pci_ioda_dma_bus_setup()
2512 if (bus->number == ((pe->rid >> 8) & 0xFF)) { in pnv_pci_ioda_dma_bus_setup()
2513 pe->pbus = bus; in pnv_pci_ioda_dma_bus_setup()
2523 struct pnv_phb *phb = hose->private_data; in pnv_pci_device_group()
2527 return ERR_PTR(-ENODEV); in pnv_pci_device_group()
2531 return ERR_PTR(-ENODEV); in pnv_pci_device_group()
2533 if (!pe->table_group.group) in pnv_pci_device_group()
2534 return ERR_PTR(-ENODEV); in pnv_pci_device_group()
2536 return iommu_group_ref_get(pe->table_group.group); in pnv_pci_device_group()
2584 prop64 = of_get_property(np, "ibm,opal-phbid", NULL); in pnv_pci_init_ioda_phb()
2586 pr_err(" Missing \"ibm,opal-phbid\" property !\n"); in pnv_pci_init_ioda_phb()
2590 pr_debug(" PHB-ID : 0x%016llx\n", phb_id); in pnv_pci_init_ioda_phb()
2597 /* Allocate PCI controller */ in pnv_pci_init_ioda_phb()
2598 phb->hose = hose = pcibios_alloc_controller(np); in pnv_pci_init_ioda_phb()
2599 if (!phb->hose) { in pnv_pci_init_ioda_phb()
2600 pr_err(" Can't allocate PCI controller for %pOF\n", in pnv_pci_init_ioda_phb()
2606 spin_lock_init(&phb->lock); in pnv_pci_init_ioda_phb()
2607 prop32 = of_get_property(np, "bus-range", &len); in pnv_pci_init_ioda_phb()
2609 hose->first_busno = be32_to_cpu(prop32[0]); in pnv_pci_init_ioda_phb()
2610 hose->last_busno = be32_to_cpu(prop32[1]); in pnv_pci_init_ioda_phb()
2612 pr_warn(" Broken <bus-range> on %pOF\n", np); in pnv_pci_init_ioda_phb()
2613 hose->first_busno = 0; in pnv_pci_init_ioda_phb()
2614 hose->last_busno = 0xff; in pnv_pci_init_ioda_phb()
2616 hose->private_data = phb; in pnv_pci_init_ioda_phb()
2617 phb->hub_id = hub_id; in pnv_pci_init_ioda_phb()
2618 phb->opal_id = phb_id; in pnv_pci_init_ioda_phb()
2619 phb->type = ioda_type; in pnv_pci_init_ioda_phb()
2620 mutex_init(&phb->ioda.pe_alloc_mutex); in pnv_pci_init_ioda_phb()
2623 if (of_device_is_compatible(np, "ibm,p7ioc-pciex")) in pnv_pci_init_ioda_phb()
2624 phb->model = PNV_PHB_MODEL_P7IOC; in pnv_pci_init_ioda_phb()
2625 else if (of_device_is_compatible(np, "ibm,power8-pciex")) in pnv_pci_init_ioda_phb()
2626 phb->model = PNV_PHB_MODEL_PHB3; in pnv_pci_init_ioda_phb()
2628 phb->model = PNV_PHB_MODEL_UNKNOWN; in pnv_pci_init_ioda_phb()
2631 prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL); in pnv_pci_init_ioda_phb()
2633 phb->diag_data_size = be32_to_cpup(prop32); in pnv_pci_init_ioda_phb()
2635 phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; in pnv_pci_init_ioda_phb()
2637 phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL); in pnv_pci_init_ioda_phb()
2638 if (!phb->diag_data) in pnv_pci_init_ioda_phb()
2640 phb->diag_data_size); in pnv_pci_init_ioda_phb()
2642 /* Parse 32-bit and IO ranges (if any) */ in pnv_pci_init_ioda_phb()
2643 pci_process_bridge_OF_ranges(hose, np, !hose->global_number); in pnv_pci_init_ioda_phb()
2647 phb->regs_phys = r.start; in pnv_pci_init_ioda_phb()
2648 phb->regs = ioremap(r.start, resource_size(&r)); in pnv_pci_init_ioda_phb()
2649 if (phb->regs == NULL) in pnv_pci_init_ioda_phb()
2654 phb->ioda.total_pe_num = 1; in pnv_pci_init_ioda_phb()
2655 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL); in pnv_pci_init_ioda_phb()
2657 phb->ioda.total_pe_num = be32_to_cpup(prop32); in pnv_pci_init_ioda_phb()
2658 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL); in pnv_pci_init_ioda_phb()
2660 phb->ioda.reserved_pe_idx = be32_to_cpup(prop32); in pnv_pci_init_ioda_phb()
2663 for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++) in pnv_pci_init_ioda_phb()
2664 phb->ioda.pe_rmap[segno] = IODA_INVALID_PE; in pnv_pci_init_ioda_phb()
2666 /* Parse 64-bit MMIO range */ in pnv_pci_init_ioda_phb()
2669 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); in pnv_pci_init_ioda_phb()
2671 phb->ioda.m32_size += 0x10000; in pnv_pci_init_ioda_phb()
2673 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num; in pnv_pci_init_ioda_phb()
2674 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0]; in pnv_pci_init_ioda_phb()
2675 phb->ioda.io_size = hose->pci_io_size; in pnv_pci_init_ioda_phb()
2676 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num; in pnv_pci_init_ioda_phb()
2677 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */ in pnv_pci_init_ioda_phb()
2680 size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8, in pnv_pci_init_ioda_phb()
2683 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]); in pnv_pci_init_ioda_phb()
2685 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]); in pnv_pci_init_ioda_phb()
2687 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); in pnv_pci_init_ioda_phb()
2692 phb->ioda.pe_alloc = aux; in pnv_pci_init_ioda_phb()
2693 phb->ioda.m64_segmap = aux + m64map_off; in pnv_pci_init_ioda_phb()
2694 phb->ioda.m32_segmap = aux + m32map_off; in pnv_pci_init_ioda_phb()
2695 for (segno = 0; segno < phb->ioda.total_pe_num; segno++) { in pnv_pci_init_ioda_phb()
2696 phb->ioda.m64_segmap[segno] = IODA_INVALID_PE; in pnv_pci_init_ioda_phb()
2697 phb->ioda.m32_segmap[segno] = IODA_INVALID_PE; in pnv_pci_init_ioda_phb()
2699 phb->ioda.pe_array = aux + pemap_off; in pnv_pci_init_ioda_phb()
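	/* Layout of the single allocation carved up above, with a
	 * hypothetical total_pe_num of 256:
	 *
	 *   aux + 0           pe_alloc   (256-bit bitmap, long-aligned)
	 *   aux + m64map_off  m64_segmap[256]
	 *   aux + m32map_off  m32_segmap[256]
	 *   aux + pemap_off   pe_array[256] (struct pnv_ioda_pe)
	 */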
2706 pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx); in pnv_pci_init_ioda_phb()
2707 if (phb->ioda.reserved_pe_idx == 0) { in pnv_pci_init_ioda_phb()
2708 phb->ioda.root_pe_idx = 1; in pnv_pci_init_ioda_phb()
2709 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); in pnv_pci_init_ioda_phb()
2710 } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) { in pnv_pci_init_ioda_phb()
2711 phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1; in pnv_pci_init_ioda_phb()
2712 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); in pnv_pci_init_ioda_phb()
2716 phb->ioda.root_pe_idx = root_pe->pe_number; in pnv_pci_init_ioda_phb()
2719 INIT_LIST_HEAD(&phb->ioda.pe_list); in pnv_pci_init_ioda_phb()
2720 mutex_init(&phb->ioda.pe_list_mutex); in pnv_pci_init_ioda_phb()
2723 rc = opal_pci_set_phb_mem_window(opal->phb_id, in pnv_pci_init_ioda_phb()
2732 phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx, in pnv_pci_init_ioda_phb()
2733 phb->ioda.m32_size, phb->ioda.m32_segsize); in pnv_pci_init_ioda_phb()
2734 if (phb->ioda.m64_size) in pnv_pci_init_ioda_phb()
2736 phb->ioda.m64_size, phb->ioda.m64_segsize); in pnv_pci_init_ioda_phb()
2737 if (phb->ioda.io_size) in pnv_pci_init_ioda_phb()
2739 phb->ioda.io_size, phb->ioda.io_segsize); in pnv_pci_init_ioda_phb()
2742 phb->hose->ops = &pnv_pci_ops; in pnv_pci_init_ioda_phb()
2743 phb->get_pe_state = pnv_ioda_get_pe_state; in pnv_pci_init_ioda_phb()
2744 phb->freeze_pe = pnv_ioda_freeze_pe; in pnv_pci_init_ioda_phb()
2745 phb->unfreeze_pe = pnv_ioda_unfreeze_pe; in pnv_pci_init_ioda_phb()
2754 * for the P2P bridge bars so that each PCI bus (excluding in pnv_pci_init_ioda_phb()
2759 switch (phb->type) { in pnv_pci_init_ioda_phb()
2761 hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops; in pnv_pci_init_ioda_phb()
2764 hose->controller_ops = pnv_pci_ioda_controller_ops; in pnv_pci_init_ioda_phb()
2800 if (!phb->init_m64 || phb->init_m64(phb)) in pnv_pci_init_ioda_phb()
2801 hose->mem_resources[1].flags = 0; in pnv_pci_init_ioda_phb()
2819 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); in pnv_npu2_opencapi_cfg_size_fixup()
2824 if (phb->type == PNV_PHB_NPU_OCAPI) in pnv_npu2_opencapi_cfg_size_fixup()
2825 dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; in pnv_npu2_opencapi_cfg_size_fixup()