Lines Matching +full:dte +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
14 #include <linux/pci-ats.h>
19 #include <linux/dma-map-ops.h>
20 #include <linux/dma-direct.h>
21 #include <linux/iommu-helper.h>
23 #include <linux/amd-iommu.h>
30 #include <linux/io-pgtable.h>
43 #include "../dma-iommu.h"
46 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
69 int amd_iommu_max_glx_val = -1;
90 return (pdom && (pdom->flags & PD_IOMMUV2_MASK)); in pdom_is_v2_pgtbl_mode()
100 return -ENODEV; in get_acpihid_device_id()
103 if (acpi_dev_hid_uid_match(adev, p->hid, in get_acpihid_device_id()
104 p->uid[0] ? p->uid : NULL)) { in get_acpihid_device_id()
107 return p->devid; in get_acpihid_device_id()
110 return -EINVAL; in get_acpihid_device_id()
128 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_dev_table()
131 dev_table = pci_seg->dev_table; in get_dev_table()
144 seg = pci_domain_nr(pdev->bus); in get_device_segment()
157 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in amd_iommu_set_rlookup_table()
159 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
167 if (pci_seg->id == seg) in __rlookup_amd_iommu()
168 return pci_seg->rlookup_table[devid]; in __rlookup_amd_iommu()
191 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in alloc_dev_data()
197 spin_lock_init(&dev_data->lock); in alloc_dev_data()
198 dev_data->devid = devid; in alloc_dev_data()
199 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
201 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
209 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in search_dev_data()
211 if (llist_empty(&pci_seg->dev_data_list)) in search_dev_data()
214 node = pci_seg->dev_data_list.first; in search_dev_data()
216 if (dev_data->devid == devid) in search_dev_data()
232 iommu = rlookup_amd_iommu(&pdev->dev); in clone_alias()
258 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); in clone_aliases()
266 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in setup_aliases()
277 ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)]; in setup_aliases()
279 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) in setup_aliases()
297 dev_data->defer_attach = true; in find_dev_data()
316 if ((devid == p->devid) && p->group) in acpihid_device_group()
317 entry->group = p->group; in acpihid_device_group()
320 if (!entry->group) in acpihid_device_group()
321 entry->group = generic_device_group(dev); in acpihid_device_group()
323 iommu_group_ref_get(entry->group); in acpihid_device_group()
325 return entry->group; in acpihid_device_group()
330 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP); in pdev_pasid_supported()
360 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_ats()
361 int ret = -EINVAL; in pdev_enable_cap_ats()
363 if (dev_data->ats_enabled) in pdev_enable_cap_ats()
367 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) { in pdev_enable_cap_ats()
370 dev_data->ats_enabled = 1; in pdev_enable_cap_ats()
371 dev_data->ats_qdep = pci_ats_queue_depth(pdev); in pdev_enable_cap_ats()
380 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_ats()
382 if (dev_data->ats_enabled) { in pdev_disable_cap_ats()
384 dev_data->ats_enabled = 0; in pdev_disable_cap_ats()
390 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_pdev_enable_cap_pri()
391 int ret = -EINVAL; in amd_iommu_pdev_enable_cap_pri()
393 if (dev_data->pri_enabled) in amd_iommu_pdev_enable_cap_pri()
396 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { in amd_iommu_pdev_enable_cap_pri()
402 dev_data->pri_enabled = 1; in amd_iommu_pdev_enable_cap_pri()
403 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in amd_iommu_pdev_enable_cap_pri()
414 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_pdev_disable_cap_pri()
416 if (dev_data->pri_enabled) { in amd_iommu_pdev_disable_cap_pri()
418 dev_data->pri_enabled = 0; in amd_iommu_pdev_disable_cap_pri()
424 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pasid()
425 int ret = -EINVAL; in pdev_enable_cap_pasid()
427 if (dev_data->pasid_enabled) in pdev_enable_cap_pasid()
430 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) { in pdev_enable_cap_pasid()
431 /* Only allow access to user-accessible pages */ in pdev_enable_cap_pasid()
434 dev_data->pasid_enabled = 1; in pdev_enable_cap_pasid()
442 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pasid()
444 if (dev_data->pasid_enabled) { in pdev_disable_cap_pasid()
446 dev_data->pasid_enabled = 0; in pdev_disable_cap_pasid()
488 pci_seg = iommu->pci_seg; in check_device()
489 if (devid > pci_seg->last_bdf) in check_device()
510 return -ENOMEM; in iommu_init_device()
512 dev_data->dev = dev; in iommu_init_device()
516 * By default we use passthrough mode for IOMMUv2 capable devices. in iommu_init_device()
519 * it'll be forced to go into translation mode. in iommu_init_device()
523 dev_data->flags = pdev_get_caps(to_pci_dev(dev)); in iommu_init_device()
533 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in iommu_ignore_device()
542 pci_seg->rlookup_table[devid] = NULL; in iommu_ignore_device()
556 if (dev_data->domain) in amd_iommu_uninit_device()
561 * device is re-plugged - not doing so would introduce a ton of races. in amd_iommu_uninit_device()
577 pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]); in dump_dte_entry()
586 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]); in dump_command()
601 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
604 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
607 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
613 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
634 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
637 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
640 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
646 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
667 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
670 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
680 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
683 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), in amd_iommu_report_page_fault()
688 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
689 &pdev->dev, address, in amd_iommu_report_page_fault()
696 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
702 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
713 struct device *dev = iommu->iommu.dev; in iommu_print_event()
746 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
753 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
758 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
771 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
776 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
789 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
811 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
812 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
815 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
819 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
826 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
829 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
830 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
837 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
850 /* Avoid memcpy function-call overhead */ in iommu_poll_ppr_log()
863 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ppr_log()
865 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
869 /* Refresh ring-buffer information */ in iommu_poll_ppr_log()
870 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
871 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
890 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
893 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
894 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
900 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
902 /* Avoid memcpy function-call overhead */ in iommu_poll_ga_log()
905 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ga_log()
907 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
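
The three pollers above (event log, PPR log, and GA log) consume their hardware ring buffers the same way: read the head and tail MMIO registers, process every entry between them, then write the new head back so the IOMMU can reuse the slots. A minimal sketch of that pattern, assuming hypothetical read_reg()/write_reg() accessors, a handle_entry() callback, and placeholder entry/ring sizes (none of these are the driver's real names or values):

#include <stdint.h>
#include <stddef.h>

#define LOG_ENTRY_SIZE   16u      /* hypothetical entry size in bytes */
#define LOG_BUFFER_SIZE  4096u    /* hypothetical ring size in bytes */

/* Hypothetical MMIO accessors and per-entry handler. */
extern uint32_t read_reg(void *mmio, size_t off);
extern void write_reg(void *mmio, size_t off, uint32_t val);
extern void handle_entry(uint8_t *entry);

/* Consume all pending entries between head and tail, then publish the
 * new head so the hardware can reuse the slots. */
static void poll_log(void *mmio, uint8_t *log, size_t head_off, size_t tail_off)
{
        uint32_t head = read_reg(mmio, head_off);
        uint32_t tail = read_reg(mmio, tail_off);

        while (head != tail) {
                handle_entry(log + head);
                head = (head + LOG_ENTRY_SIZE) % LOG_BUFFER_SIZE;
        }

        write_reg(mmio, head_off, head);
}
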
935 dev_set_msi_domain(dev, iommu->ir_domain); in amd_iommu_set_pci_msi_domain()
949 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
954 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
958 iommu->index, evt_type); in amd_iommu_handle_irq()
967 * When re-enabling interrupt (by writing 1 in amd_iommu_handle_irq()
976 * again and re-clear the bits in amd_iommu_handle_irq()
978 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
1035 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
1041 pr_alert("Completion-Wait loop timed out\n"); in wait_on_sem()
1042 return -EIO; in wait_on_sem()
1055 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
1056 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
1060 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
1063 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
1070 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1073 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; in build_completion_wait()
1074 cmd->data[1] = upper_32_bits(paddr); in build_completion_wait()
1075 cmd->data[2] = lower_32_bits(data); in build_completion_wait()
1076 cmd->data[3] = upper_32_bits(data); in build_completion_wait()
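
build_completion_wait() above packs a 64-bit semaphore physical address and a 64-bit payload into the four 32-bit command words (the command type itself is stamped separately via CMD_SET_TYPE). A standalone sketch of that packing; the struct and the CMD_COMPL_WAIT_STORE_MASK value here are placeholders, not the driver's definitions:

#include <stdint.h>

#define CMD_COMPL_WAIT_STORE_MASK  0x1u   /* placeholder bit: "store data on completion" */

struct cmd_words {                        /* stand-in for one 4 x 32-bit command entry */
        uint32_t data[4];
};

static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Pack a completion-wait command: words 0/1 carry the semaphore address
 * (plus the store flag), words 2/3 carry the value to be written there. */
static void build_completion_wait_sketch(struct cmd_words *cmd, uint64_t paddr, uint64_t data)
{
        cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
        cmd->data[1] = upper_32_bits(paddr);
        cmd->data[2] = lower_32_bits(data);
        cmd->data[3] = upper_32_bits(data);
}
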
1083 cmd->data[0] = devid; in build_inv_dte()
1100 end = address + size - 1; in build_inv_address()
1106 msb_diff = fls64(end ^ address) - 1; in build_inv_address()
1116 * The msb-bit must be clear on the address. Just set all the in build_inv_address()
1119 address |= (1ull << msb_diff) - 1; in build_inv_address()
1125 /* Set the size bit - we flush more than one 4kb page */ in build_inv_address()
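
build_inv_address() above encodes a range flush: a single aligned page is flushed exactly, while a larger range sets every address bit below the most significant bit that differs between start and end, clears the page-offset bits, and turns on the size bit so the IOMMU flushes a power-of-two region. A sketch of that encoding under assumed 4 KiB pages; SIZE_BIT and CMD_INV_ADDRESS_ALL are placeholder encodings, not the real command values:

#include <stdint.h>

#define CMD_INV_ADDRESS_ALL   (~0ull)          /* placeholder "flush everything" encoding */
#define SIZE_BIT              0x1ull           /* placeholder position of the "S" (size) bit */
#define PAGE_MASK_4K          (~0xfffull)

/* fls64()-style helper: index of the highest set bit (1-based), 0 if none. */
static int fls64_sketch(uint64_t v)
{
        return v ? 64 - __builtin_clzll(v) : 0;
}

/* Encode an invalidation address covering [address, address + size). */
static uint64_t build_inv_address_sketch(uint64_t address, uint64_t size)
{
        uint64_t end = address + size - 1;
        int msb_diff;

        if (size <= 0x1000 && (address & 0xfff) + size <= 0x1000)
                return address & PAGE_MASK_4K;   /* single page: exact flush, no size bit */

        /* Index of the most significant bit that differs between start and end. */
        msb_diff = fls64_sketch(end ^ address) - 1;

        if (msb_diff > 51)
                address = CMD_INV_ADDRESS_ALL;      /* too large: flush the whole space */
        else
                address |= (1ull << msb_diff) - 1;  /* set the lower bits to encode the order */

        address &= PAGE_MASK_4K;                    /* clear the page-offset bits */
        return address | SIZE_BIT;                  /* mark as a multi-page flush */
}
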
1137 cmd->data[1] |= domid; in build_inv_iommu_pages()
1138 cmd->data[2] = lower_32_bits(inv_address); in build_inv_iommu_pages()
1139 cmd->data[3] = upper_32_bits(inv_address); in build_inv_iommu_pages()
1140 /* PDE bit - we want to flush everything, not only the PTEs */ in build_inv_iommu_pages()
1141 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; in build_inv_iommu_pages()
1143 cmd->data[0] |= pasid; in build_inv_iommu_pages()
1144 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iommu_pages()
1157 cmd->data[0] = devid; in build_inv_iotlb_pages()
1158 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pages()
1159 cmd->data[1] = devid; in build_inv_iotlb_pages()
1160 cmd->data[2] = lower_32_bits(inv_address); in build_inv_iotlb_pages()
1161 cmd->data[3] = upper_32_bits(inv_address); in build_inv_iotlb_pages()
1163 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; in build_inv_iotlb_pages()
1164 cmd->data[1] |= (pasid & 0xff) << 16; in build_inv_iotlb_pages()
1165 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iotlb_pages()
1176 cmd->data[0] = devid; in build_complete_ppr()
1178 cmd->data[1] = pasid; in build_complete_ppr()
1179 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; in build_complete_ppr()
1181 cmd->data[3] = tag & 0x1ff; in build_complete_ppr()
1182 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT; in build_complete_ppr()
1196 cmd->data[0] = devid; in build_inv_irt()
1211 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1213 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1220 return -EIO; in __iommu_queue_command_sync()
1227 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1236 iommu->need_sync = sync; in __iommu_queue_command_sync()
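
__iommu_queue_command_sync() above computes the free space left in the circular command buffer from the cached head and the prospective tail; when the ring is (nearly) full it re-reads the hardware head register and retries. A tiny sketch of that wrap-around free-space arithmetic, with placeholder entry and ring sizes (both powers of two, so the unsigned modulo works out):

#include <stdint.h>
#include <stdbool.h>

#define CMD_SIZE         16u     /* placeholder: one command entry in bytes */
#define CMD_BUFFER_SIZE  8192u   /* placeholder: total ring size in bytes */

/* Returns true if one more command fits between head and tail.
 * next_tail and the "left" computation wrap modulo the ring size,
 * mirroring the arithmetic in __iommu_queue_command_sync(). */
static bool cmd_ring_has_room(uint32_t head, uint32_t tail, uint32_t *next_tail_out)
{
        uint32_t next_tail = (tail + CMD_SIZE) % CMD_BUFFER_SIZE;
        uint32_t left = (head - next_tail) % CMD_BUFFER_SIZE;

        *next_tail_out = next_tail;
        return left > CMD_SIZE;   /* keep a gap so head == tail still means "empty" */
}
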
1248 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1250 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1271 if (!iommu->need_sync) in iommu_completion_wait()
1274 data = atomic64_add_return(1, &iommu->cmd_sem_val); in iommu_completion_wait()
1277 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1286 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
1303 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_dte_all()
1318 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_tlb_all()
1363 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_irt_all()
1365 if (iommu->irtcachedis_enabled) in amd_iommu_flush_irt_all()
1386 * Command send function for flushing on-device TLB
1395 qdep = dev_data->ats_qdep; in device_flush_iotlb()
1396 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_iotlb()
1398 return -EINVAL; in device_flush_iotlb()
1400 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, in device_flush_iotlb()
1424 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_dte()
1426 return -EINVAL; in device_flush_dte()
1428 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1429 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1435 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1439 pci_seg = iommu->pci_seg; in device_flush_dte()
1440 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1441 if (alias != dev_data->devid) { in device_flush_dte()
1447 if (dev_data->ats_enabled) { in device_flush_dte()
1473 build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn); in __domain_flush_pages()
1476 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1486 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1488 if (!dev_data->ats_enabled) in __domain_flush_pages()
1526 * size is always non-zero, but address might be zero, causing in amd_iommu_domain_flush_pages()
1529 * of the address on x86-32, cast to long when doing the check. in amd_iommu_domain_flush_pages()
1540 size -= flush_size; in amd_iommu_domain_flush_pages()
1547 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1559 if (domain && !domain->dev_iommu[i]) in amd_iommu_domain_flush_complete()
1577 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1579 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1591 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1661 if (domain->glx == 2) in free_gcr3_table()
1662 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1663 else if (domain->glx == 1) in free_gcr3_table()
1664 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1666 BUG_ON(domain->glx != 0); in free_gcr3_table()
1668 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1672 * Number of GCR3 table levels required. Level must be 4-Kbyte
1679 if (pasids == -1) in get_gcr3_levels()
1684 return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels; in get_gcr3_levels()
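
get_gcr3_levels() above derives how many 4-Kbyte GCR3 table levels a PASID count needs: take the count order of the PASID space and divide by 9 (each level resolves 9 PASID bits), minus the one level that always exists. A sketch of that arithmetic; the pasids == -1 "use the maximum supported" special case from the excerpt is omitted here:

/* Number of extra GCR3 table levels needed for a given PASID count:
 * each level resolves 9 PASID bits, and one level is implicit, hence
 * the "- 1". Mirrors DIV_ROUND_UP(levels, 9) - 1 in the excerpt above. */
static int gcr3_levels_sketch(int pasids)
{
        int order = 0;

        if (pasids <= 1)
                return 0;

        /* get_count_order(): smallest order with (1 << order) >= pasids */
        while ((1 << order) < pasids)
                order++;

        return (order + 8) / 9 - 1;   /* DIV_ROUND_UP(order, 9) - 1 */
}
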
1687 /* Note: This function expects iommu_domain->lock to be held prior to calling the function. */
1693 return -EINVAL; in setup_gcr3_table()
1695 domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC); in setup_gcr3_table()
1696 if (domain->gcr3_tbl == NULL) in setup_gcr3_table()
1697 return -ENOMEM; in setup_gcr3_table()
1699 domain->glx = levels; in setup_gcr3_table()
1700 domain->flags |= PD_IOMMUV2_MASK; in setup_gcr3_table()
1715 if (domain->iop.mode != PAGE_MODE_NONE) in set_dte_entry()
1716 pte_root = iommu_virt_to_phys(domain->iop.root); in set_dte_entry()
1718 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1727 if (!amd_iommu_snp_en || (domain->id != 0)) in set_dte_entry()
1738 if (domain->dirty_tracking) in set_dte_entry()
1741 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1742 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1743 u64 glx = domain->glx; in set_dte_entry()
1756 /* Encode GCR3 table into DTE */ in set_dte_entry()
1771 if (domain->flags & PD_GIOV_MASK) in set_dte_entry()
1776 flags |= domain->id; in set_dte_entry()
1784 * the previous kernel--if so, it needs to flush the translation cache in set_dte_entry()
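
set_dte_entry() above builds the low word of a Device Table Entry from the host page-table root, the paging mode, and the valid/translation flags, then merges in GCR3 and GIOV bits for v2 (PASID-capable) domains. A much-simplified sketch of just the root-plus-mode packing; every shift, mask, and flag value below is a placeholder, not the real DTE layout:

#include <stdint.h>

#define DTE_FLAG_V           (1ull << 0)   /* placeholder: entry valid */
#define DTE_FLAG_TV          (1ull << 1)   /* placeholder: translation information valid */
#define DEV_ENTRY_MODE_MASK  0x7ull        /* placeholder: 3-bit paging mode field */
#define DEV_ENTRY_MODE_SHIFT 9             /* placeholder: position of the mode field */

/* Build the low 64 bits of a DTE: page-table root (4K-aligned physical
 * address), paging mode, and the valid/translation-valid flags. */
static uint64_t make_dte_lo(uint64_t pgtable_root_phys, unsigned int mode)
{
        uint64_t pte_root = pgtable_root_phys & ~0xfffull;

        pte_root |= ((uint64_t)mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT;
        pte_root |= DTE_FLAG_V | DTE_FLAG_TV;

        return pte_root;
}
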
1813 iommu = rlookup_amd_iommu(dev_data->dev); in do_attach()
1816 ats = dev_data->ats_enabled; in do_attach()
1819 dev_data->domain = domain; in do_attach()
1820 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1823 if (domain->nid == NUMA_NO_NODE) in do_attach()
1824 domain->nid = dev_to_node(dev_data->dev); in do_attach()
1827 domain->dev_iommu[iommu->index] += 1; in do_attach()
1828 domain->dev_cnt += 1; in do_attach()
1831 set_dte_entry(iommu, dev_data->devid, domain, in do_attach()
1832 ats, dev_data->ppr); in do_attach()
1833 clone_aliases(iommu, dev_data->dev); in do_attach()
1840 struct protection_domain *domain = dev_data->domain; in do_detach()
1843 iommu = rlookup_amd_iommu(dev_data->dev); in do_detach()
1848 dev_data->domain = NULL; in do_detach()
1849 list_del(&dev_data->list); in do_detach()
1850 clear_dte_entry(iommu, dev_data->devid); in do_detach()
1851 clone_aliases(iommu, dev_data->dev); in do_detach()
1853 /* Flush the DTE entry */ in do_detach()
1859 /* decrease reference counters - needs to happen after the flushes */ in do_detach()
1860 domain->dev_iommu[iommu->index] -= 1; in do_detach()
1861 domain->dev_cnt -= 1; in do_detach()
1875 spin_lock_irqsave(&domain->lock, flags); in attach_device()
1879 spin_lock(&dev_data->lock); in attach_device()
1881 if (dev_data->domain != NULL) { in attach_device()
1882 ret = -EBUSY; in attach_device()
1892 spin_unlock(&dev_data->lock); in attach_device()
1894 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
1909 domain = dev_data->domain; in detach_device()
1911 spin_lock_irqsave(&domain->lock, flags); in detach_device()
1913 spin_lock(&dev_data->lock); in detach_device()
1921 if (WARN_ON(!dev_data->domain)) in detach_device()
1930 spin_unlock(&dev_data->lock); in detach_device()
1932 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
1942 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
1946 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
1949 if (!iommu->iommu.ops) in amd_iommu_probe_device()
1950 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
1953 return &iommu->iommu; in amd_iommu_probe_device()
1957 if (ret != -ENOTSUPP) in amd_iommu_probe_device()
1958 dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); in amd_iommu_probe_device()
1963 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
1973 /* Domains are initialized for this device - have a look at what we ended up with */ in amd_iommu_probe_finalize()
2011 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2012 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in update_device_table()
2016 set_dte_entry(iommu, dev_data->devid, domain, in update_device_table()
2017 dev_data->ats_enabled, dev_data->ppr); in update_device_table()
2018 clone_aliases(iommu, dev_data->dev); in update_device_table()
2051 lockdep_assert_held(&domain->lock); in cleanup_domain()
2053 if (!domain->dev_cnt) in cleanup_domain()
2056 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
2057 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
2059 BUG_ON(!entry->domain); in cleanup_domain()
2062 WARN_ON(domain->dev_cnt != 0); in cleanup_domain()
2070 if (domain->iop.pgtbl_cfg.tlb) in protection_domain_free()
2071 free_io_pgtable_ops(&domain->iop.iop.ops); in protection_domain_free()
2073 if (domain->flags & PD_IOMMUV2_MASK) in protection_domain_free()
2076 if (domain->iop.root) in protection_domain_free()
2077 free_page((unsigned long)domain->iop.root); in protection_domain_free()
2079 if (domain->id) in protection_domain_free()
2080 domain_id_free(domain->id); in protection_domain_free()
2085 static int protection_domain_init_v1(struct protection_domain *domain, int mode) in protection_domain_init_v1() argument
2089 BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL); in protection_domain_init_v1()
2091 if (mode != PAGE_MODE_NONE) { in protection_domain_init_v1()
2094 return -ENOMEM; in protection_domain_init_v1()
2097 amd_iommu_domain_set_pgtable(domain, pt_root, mode); in protection_domain_init_v1()
2104 domain->flags |= PD_GIOV_MASK; in protection_domain_init_v2()
2106 domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; in protection_domain_init_v2()
2109 return -ENOMEM; in protection_domain_init_v2()
2125 domain->id = domain_id_alloc(); in protection_domain_alloc()
2126 if (!domain->id) in protection_domain_alloc()
2129 spin_lock_init(&domain->lock); in protection_domain_alloc()
2130 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_alloc()
2131 domain->nid = NUMA_NO_NODE; in protection_domain_alloc()
2134 /* No need to allocate io pgtable ops in passthrough mode */ in protection_domain_alloc()
2142 * domain for pass-through devices. in protection_domain_alloc()
2159 ret = -EINVAL; in protection_domain_alloc()
2166 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); in protection_domain_alloc()
2182 return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); in dma_max_address()
2187 return iommu && (iommu->features & FEATURE_HDSUP); in amd_iommu_hd_support()
2200 return ERR_PTR(-ENODEV); in do_iommu_domain_alloc()
2204 * Since DTE[Mode]=0 is prohibited on SNP-enabled system, in do_iommu_domain_alloc()
2208 return ERR_PTR(-EINVAL); in do_iommu_domain_alloc()
2211 return ERR_PTR(-EOPNOTSUPP); in do_iommu_domain_alloc()
2215 return ERR_PTR(-ENOMEM); in do_iommu_domain_alloc()
2217 domain->domain.geometry.aperture_start = 0; in do_iommu_domain_alloc()
2218 domain->domain.geometry.aperture_end = dma_max_address(); in do_iommu_domain_alloc()
2219 domain->domain.geometry.force_aperture = true; in do_iommu_domain_alloc()
2222 domain->domain.type = type; in do_iommu_domain_alloc()
2223 domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; in do_iommu_domain_alloc()
2224 domain->domain.ops = iommu->iommu.ops->default_domain_ops; in do_iommu_domain_alloc()
2227 domain->domain.dirty_ops = &amd_dirty_ops; in do_iommu_domain_alloc()
2230 return &domain->domain; in do_iommu_domain_alloc()
2253 return ERR_PTR(-EOPNOTSUPP); in amd_iommu_domain_alloc_user()
2268 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_free()
2272 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_free()
2289 if (dev_data->domain == domain) in amd_iommu_attach_device()
2292 dev_data->defer_attach = false; in amd_iommu_attach_device()
2298 if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) in amd_iommu_attach_device()
2299 return -EINVAL; in amd_iommu_attach_device()
2301 if (dev_data->domain) in amd_iommu_attach_device()
2308 if (dom->type == IOMMU_DOMAIN_UNMANAGED) in amd_iommu_attach_device()
2309 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2311 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2324 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iotlb_sync_map()
2326 if (ops->map_pages) in amd_iommu_iotlb_sync_map()
2336 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_map_pages()
2338 int ret = -EINVAL; in amd_iommu_map_pages()
2341 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_map_pages()
2342 return -EINVAL; in amd_iommu_map_pages()
2349 if (ops->map_pages) { in amd_iommu_map_pages()
2350 ret = ops->map_pages(ops, iova, paddr, pgsize, in amd_iommu_map_pages()
2364 * to whether "non-present cache" is on, it is probably best to prefer in amd_iommu_iotlb_gather_add_page()
2368 * the guest, and the trade-off is different: unnecessary TLB flushes in amd_iommu_iotlb_gather_add_page()
2383 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_unmap_pages()
2387 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_unmap_pages()
2390 r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0; in amd_iommu_unmap_pages()
2402 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iova_to_phys()
2404 return ops->iova_to_phys(ops, iova); in amd_iommu_iova_to_phys()
2443 spin_lock_irqsave(&pdomain->lock, flags); in amd_iommu_set_dirty_tracking()
2444 if (!(pdomain->dirty_tracking ^ enable)) { in amd_iommu_set_dirty_tracking()
2445 spin_unlock_irqrestore(&pdomain->lock, flags); in amd_iommu_set_dirty_tracking()
2449 list_for_each_entry(dev_data, &pdomain->dev_list, list) { in amd_iommu_set_dirty_tracking()
2450 iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_set_dirty_tracking()
2455 pte_root = dev_table[dev_data->devid].data[0]; in amd_iommu_set_dirty_tracking()
2460 /* Flush device DTE */ in amd_iommu_set_dirty_tracking()
2461 dev_table[dev_data->devid].data[0] = pte_root; in amd_iommu_set_dirty_tracking()
2470 pdomain->dirty_tracking = enable; in amd_iommu_set_dirty_tracking()
2471 spin_unlock_irqrestore(&pdomain->lock, flags); in amd_iommu_set_dirty_tracking()
2482 struct io_pgtable_ops *ops = &pdomain->iop.iop.ops; in amd_iommu_read_and_clear_dirty()
2485 if (!ops || !ops->read_and_clear_dirty) in amd_iommu_read_and_clear_dirty()
2486 return -EOPNOTSUPP; in amd_iommu_read_and_clear_dirty()
2488 spin_lock_irqsave(&pdomain->lock, lflags); in amd_iommu_read_and_clear_dirty()
2489 if (!pdomain->dirty_tracking && dirty->bitmap) { in amd_iommu_read_and_clear_dirty()
2490 spin_unlock_irqrestore(&pdomain->lock, lflags); in amd_iommu_read_and_clear_dirty()
2491 return -EINVAL; in amd_iommu_read_and_clear_dirty()
2493 spin_unlock_irqrestore(&pdomain->lock, lflags); in amd_iommu_read_and_clear_dirty()
2495 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); in amd_iommu_read_and_clear_dirty()
2515 pci_seg = iommu->pci_seg; in amd_iommu_get_resv_regions()
2517 list_for_each_entry(entry, &pci_seg->unity_map, list) { in amd_iommu_get_resv_regions()
2521 if (devid < entry->devid_start || devid > entry->devid_end) in amd_iommu_get_resv_regions()
2525 length = entry->address_end - entry->address_start; in amd_iommu_get_resv_regions()
2526 if (entry->prot & IOMMU_PROT_IR) in amd_iommu_get_resv_regions()
2528 if (entry->prot & IOMMU_PROT_IW) in amd_iommu_get_resv_regions()
2530 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) in amd_iommu_get_resv_regions()
2534 region = iommu_alloc_resv_region(entry->address_start, in amd_iommu_get_resv_regions()
2538 dev_err(dev, "Out of memory allocating dm-regions\n"); in amd_iommu_get_resv_regions()
2541 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2545 MSI_RANGE_END - MSI_RANGE_START + 1, in amd_iommu_get_resv_regions()
2549 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2552 HT_RANGE_END - HT_RANGE_START + 1, in amd_iommu_get_resv_regions()
2556 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2563 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2571 spin_lock_irqsave(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2573 spin_unlock_irqrestore(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2582 spin_lock_irqsave(&dom->lock, flags); in amd_iommu_iotlb_sync()
2583 amd_iommu_domain_flush_pages(dom, gather->start, in amd_iommu_iotlb_sync()
2584 gather->end - gather->start + 1); in amd_iommu_iotlb_sync()
2585 spin_unlock_irqrestore(&dom->lock, flags); in amd_iommu_iotlb_sync()
2598 * - memory encryption is active, because some of those devices in amd_iommu_def_domain_type()
2599 * (AMD GPUs) don't have the encryption bit in their DMA-mask in amd_iommu_def_domain_type()
2601 * - SNP is enabled, because it prohibits DTE[Mode]=0. in amd_iommu_def_domain_type()
2655 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
2656 return -EINVAL; in __flush_pasid()
2658 build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true); in __flush_pasid()
2665 if (domain->dev_iommu[i] == 0) in __flush_pasid()
2677 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2682 There might be non-IOMMUv2 capable devices in an IOMMUv2 in __flush_pasid()
2685 if (!dev_data->ats_enabled) in __flush_pasid()
2688 qdep = dev_data->ats_qdep; in __flush_pasid()
2689 iommu = rlookup_amd_iommu(dev_data->dev); in __flush_pasid()
2692 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, in __flush_pasid()
2723 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
2725 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
2741 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
2743 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
2774 level -= 1; in __get_gcr3_pte()
2785 if (domain->iop.mode != PAGE_MODE_NONE) in __set_gcr3()
2786 return -EINVAL; in __set_gcr3()
2788 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
2790 return -ENOMEM; in __set_gcr3()
2801 if (domain->iop.mode != PAGE_MODE_NONE) in __clear_gcr3()
2802 return -EINVAL; in __clear_gcr3()
2804 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
2820 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
2822 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
2833 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
2835 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
2847 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
2848 iommu = rlookup_amd_iommu(&pdev->dev); in amd_iommu_complete_ppr()
2850 return -ENODEV; in amd_iommu_complete_ppr()
2852 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
2853 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
2876 if (iommu->irtcachedis_enabled) in iommu_flush_irt_and_complete()
2880 data = atomic64_add_return(1, &iommu->cmd_sem_val); in iommu_flush_irt_and_complete()
2883 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2892 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2898 u64 dte; in set_dte_irq_entry() local
2901 dte = dev_table[devid].data[2]; in set_dte_irq_entry()
2902 dte &= ~DTE_IRQ_PHYS_ADDR_MASK; in set_dte_irq_entry()
2903 dte |= iommu_virt_to_phys(table->table); in set_dte_irq_entry()
2904 dte |= DTE_IRQ_REMAP_INTCTL; in set_dte_irq_entry()
2905 dte |= DTE_INTTABLEN; in set_dte_irq_entry()
2906 dte |= DTE_IRQ_REMAP_ENABLE; in set_dte_irq_entry()
2908 dev_table[devid].data[2] = dte; in set_dte_irq_entry()
2914 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_irq_table()
2916 if (WARN_ONCE(!pci_seg->rlookup_table[devid], in get_irq_table()
2918 __func__, pci_seg->id, devid)) in get_irq_table()
2921 table = pci_seg->irq_lookup_table[devid]; in get_irq_table()
2923 __func__, pci_seg->id, devid)) in get_irq_table()
2937 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL); in __alloc_irq_table()
2938 if (!table->table) { in __alloc_irq_table()
2942 raw_spin_lock_init(&table->lock); in __alloc_irq_table()
2945 memset(table->table, 0, in __alloc_irq_table()
2948 memset(table->table, 0, in __alloc_irq_table()
2956 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in set_remap_table_entry()
2958 pci_seg->irq_lookup_table[devid] = table; in set_remap_table_entry()
2968 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); in set_remap_table_entry_alias()
2971 return -EINVAL; in set_remap_table_entry_alias()
2973 pci_seg = iommu->pci_seg; in set_remap_table_entry_alias()
2974 pci_seg->irq_lookup_table[alias] = table; in set_remap_table_entry_alias()
2976 iommu_flush_dte(pci_seg->rlookup_table[alias], alias); in set_remap_table_entry_alias()
2992 pci_seg = iommu->pci_seg; in alloc_irq_table()
2993 table = pci_seg->irq_lookup_table[devid]; in alloc_irq_table()
2997 alias = pci_seg->alias_table[devid]; in alloc_irq_table()
2998 table = pci_seg->irq_lookup_table[alias]; in alloc_irq_table()
3012 table = pci_seg->irq_lookup_table[devid]; in alloc_irq_table()
3016 table = pci_seg->irq_lookup_table[alias]; in alloc_irq_table()
3041 kmem_cache_free(amd_iommu_irq_cache, new_table->table); in alloc_irq_table()
3056 return -ENODEV; in alloc_irq_index()
3061 raw_spin_lock_irqsave(&table->lock, flags); in alloc_irq_index()
3064 for (index = ALIGN(table->min_index, alignment), c = 0; in alloc_irq_index()
3066 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3075 for (; c != 0; --c) in alloc_irq_index()
3076 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3078 index -= count - 1; in alloc_irq_index()
3085 index = -ENOSPC; in alloc_irq_index()
3088 raw_spin_unlock_irqrestore(&table->lock, flags); in alloc_irq_index()
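
alloc_irq_index() above scans the per-device interrupt-remapping table under its lock for `count` consecutive free slots, restarting the run at the next aligned index whenever it hits an allocated entry (multi-MSI blocks must start aligned). A toy, lock-free version of that first-fit scan where the table is just an array of allocated flags:

#include <stdbool.h>
#include <stddef.h>

/* Find `count` consecutive free slots whose run starts at a multiple of
 * `alignment`; returns the first index or -1 if nothing fits. Mirrors
 * the scan-and-mark loop above, minus locking and real IRTE state. */
static int alloc_irq_index_sketch(bool *allocated, size_t table_size,
                                  size_t count, size_t alignment)
{
        size_t index, c;

        if (!count || !alignment)
                return -1;

        for (index = 0, c = 0; index < table_size; index++) {
                if (!allocated[index]) {
                        c++;
                } else {
                        c = 0;
                        /* restart the run at the next aligned position */
                        index = ((index + alignment) / alignment) * alignment - 1;
                        continue;
                }

                if (c == count) {
                        size_t start = index - count + 1;

                        for (size_t i = start; i <= index; i++)
                                allocated[i] = true;
                        return (int)start;
                }
        }
        return -1;
}
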
3103 return -ENOMEM; in __modify_irte_ga()
3105 raw_spin_lock_irqsave(&table->lock, flags); in __modify_irte_ga()
3107 entry = (struct irte_ga *)table->table; in __modify_irte_ga()
3111 * We use cmpxchg16 to atomically update the 128-bit IRTE, in __modify_irte_ga()
3116 old = entry->irte; in __modify_irte_ga()
3117 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); in __modify_irte_ga()
3119 raw_spin_unlock_irqrestore(&table->lock, flags); in __modify_irte_ga()
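
__modify_irte_ga() above replaces a 128-bit guest-APIC IRTE with a single try_cmpxchg128() so the entry is never visible to the hardware in a half-written state. A user-space-flavored sketch of the same idea using the compiler's 16-byte compare-and-swap builtin (needs e.g. x86-64 built with -mcx16, or libatomic); unlike the kernel code, which holds the table lock and only warns on failure, this version simply retries:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the 128-bit guest-APIC IRTE: two 64-bit halves,
 * 16-byte aligned so the 128-bit compare-and-swap is legal. */
struct irte_ga_sketch {
        _Alignas(16) uint64_t lo;
        uint64_t hi;
};

/* Replace *slot with new_irte in one 128-bit atomic operation, retrying
 * if the entry changed between the read and the swap. */
static void write_irte128(struct irte_ga_sketch *slot, struct irte_ga_sketch new_irte)
{
        unsigned __int128 old, desired;

        memcpy(&desired, &new_irte, sizeof(desired));
        memcpy(&old, slot, sizeof(old));

        while (!__atomic_compare_exchange_n((unsigned __int128 *)slot, &old, desired,
                                            false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                ;   /* `old` was refreshed with the current value; try again */
}
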
3146 return -ENOMEM; in modify_irte()
3148 raw_spin_lock_irqsave(&table->lock, flags); in modify_irte()
3149 table->table[index] = irte->val; in modify_irte()
3150 raw_spin_unlock_irqrestore(&table->lock, flags); in modify_irte()
3166 raw_spin_lock_irqsave(&table->lock, flags); in free_irte()
3167 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3168 raw_spin_unlock_irqrestore(&table->lock, flags); in free_irte()
3179 irte->val = 0; in irte_prepare()
3180 irte->fields.vector = vector; in irte_prepare()
3181 irte->fields.int_type = delivery_mode; in irte_prepare()
3182 irte->fields.destination = dest_apicid; in irte_prepare()
3183 irte->fields.dm = dest_mode; in irte_prepare()
3184 irte->fields.valid = 1; in irte_prepare()
3193 irte->lo.val = 0; in irte_ga_prepare()
3194 irte->hi.val = 0; in irte_ga_prepare()
3195 irte->lo.fields_remap.int_type = delivery_mode; in irte_ga_prepare()
3196 irte->lo.fields_remap.dm = dest_mode; in irte_ga_prepare()
3197 irte->hi.fields.vector = vector; in irte_ga_prepare()
3198 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); in irte_ga_prepare()
3199 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); in irte_ga_prepare()
3200 irte->lo.fields_remap.valid = 1; in irte_ga_prepare()
3207 irte->fields.valid = 1; in irte_activate()
3215 irte->lo.fields_remap.valid = 1; in irte_ga_activate()
3223 irte->fields.valid = 0; in irte_deactivate()
3231 irte->lo.fields_remap.valid = 0; in irte_ga_deactivate()
3240 irte->fields.vector = vector; in irte_set_affinity()
3241 irte->fields.destination = dest_apicid; in irte_set_affinity()
3250 if (!irte->lo.fields_remap.guest_mode) { in irte_ga_set_affinity()
3251 irte->hi.fields.vector = vector; in irte_ga_set_affinity()
3252 irte->lo.fields_remap.destination = in irte_ga_set_affinity()
3254 irte->hi.fields.destination = in irte_ga_set_affinity()
3263 table->table[index] = IRTE_ALLOCATED; in irte_set_allocated()
3268 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_set_allocated()
3271 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3272 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3273 irte->hi.fields.vector = 0xff; in irte_ga_set_allocated()
3278 union irte *ptr = (union irte *)table->table; in irte_is_allocated()
3281 return irte->val != 0; in irte_is_allocated()
3286 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_is_allocated()
3289 return irte->hi.fields.vector != 0; in irte_ga_is_allocated()
3294 table->table[index] = 0; in irte_clear_allocated()
3299 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_clear_allocated()
3302 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3303 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3308 switch (info->type) { in get_devid()
3310 return get_ioapic_devid(info->devid); in get_devid()
3312 return get_hpet_devid(info->devid); in get_devid()
3315 return get_device_sbdf_id(msi_desc_to_dev(info->desc)); in get_devid()
3318 return -1; in get_devid()
3332 msg->data = index; in fill_msi_msg()
3333 msg->address_lo = 0; in fill_msi_msg()
3334 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW; in fill_msi_msg()
3335 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; in fill_msi_msg()
3343 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_prepare_irte()
3344 struct amd_iommu *iommu = data->iommu; in irq_remapping_prepare_irte()
3349 data->irq_2_irte.devid = devid; in irq_remapping_prepare_irte()
3350 data->irq_2_irte.index = index + sub_handle; in irq_remapping_prepare_irte()
3351 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, in irq_remapping_prepare_irte()
3352 apic->dest_mode_logical, irq_cfg->vector, in irq_remapping_prepare_irte()
3353 irq_cfg->dest_apicid, devid); in irq_remapping_prepare_irte()
3355 switch (info->type) { in irq_remapping_prepare_irte()
3360 fill_msi_msg(&data->msi_entry, irte_info->index); in irq_remapping_prepare_irte()
3401 return -EINVAL; in irq_remapping_alloc()
3402 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI) in irq_remapping_alloc()
3403 return -EINVAL; in irq_remapping_alloc()
3407 return -EINVAL; in irq_remapping_alloc()
3413 return -EINVAL; in irq_remapping_alloc()
3419 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { in irq_remapping_alloc()
3424 if (!table->min_index) { in irq_remapping_alloc()
3429 table->min_index = 32; in irq_remapping_alloc()
3431 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3433 WARN_ON(table->min_index != 32); in irq_remapping_alloc()
3434 index = info->ioapic.pin; in irq_remapping_alloc()
3436 index = -ENOMEM; in irq_remapping_alloc()
3438 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || in irq_remapping_alloc()
3439 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { in irq_remapping_alloc()
3440 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); in irq_remapping_alloc()
3443 msi_desc_to_pci_dev(info->desc)); in irq_remapping_alloc()
3458 ret = -EINVAL; in irq_remapping_alloc()
3462 ret = -ENOMEM; in irq_remapping_alloc()
3468 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); in irq_remapping_alloc()
3470 data->entry = kzalloc(sizeof(struct irte_ga), in irq_remapping_alloc()
3472 if (!data->entry) { in irq_remapping_alloc()
3477 data->iommu = iommu; in irq_remapping_alloc()
3478 irq_data->hwirq = (devid << 16) + i; in irq_remapping_alloc()
3479 irq_data->chip_data = data; in irq_remapping_alloc()
3480 irq_data->chip = &amd_ir_chip; in irq_remapping_alloc()
3488 for (i--; i >= 0; i--) { in irq_remapping_alloc()
3491 kfree(irq_data->chip_data); in irq_remapping_alloc()
3510 if (irq_data && irq_data->chip_data) { in irq_remapping_free()
3511 data = irq_data->chip_data; in irq_remapping_free()
3512 irte_info = &data->irq_2_irte; in irq_remapping_free()
3513 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3514 kfree(data->entry); in irq_remapping_free()
3529 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_activate()
3530 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_activate()
3531 struct amd_iommu *iommu = data->iommu; in irq_remapping_activate()
3537 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3538 irte_info->index); in irq_remapping_activate()
3546 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_deactivate()
3547 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_deactivate()
3548 struct amd_iommu *iommu = data->iommu; in irq_remapping_deactivate()
3551 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3552 irte_info->index); in irq_remapping_deactivate()
3559 int devid = -1; in irq_remapping_select()
3565 devid = get_ioapic_devid(fwspec->param[0]); in irq_remapping_select()
3567 devid = get_hpet_devid(fwspec->param[0]); in irq_remapping_select()
3573 return iommu && iommu->ir_domain == d; in irq_remapping_select()
3587 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_activate_guest_mode()
3593 valid = entry->lo.fields_vapic.valid; in amd_iommu_activate_guest_mode()
3595 entry->lo.val = 0; in amd_iommu_activate_guest_mode()
3596 entry->hi.val = 0; in amd_iommu_activate_guest_mode()
3598 entry->lo.fields_vapic.valid = valid; in amd_iommu_activate_guest_mode()
3599 entry->lo.fields_vapic.guest_mode = 1; in amd_iommu_activate_guest_mode()
3600 entry->lo.fields_vapic.ga_log_intr = 1; in amd_iommu_activate_guest_mode()
3601 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; in amd_iommu_activate_guest_mode()
3602 entry->hi.fields.vector = ir_data->ga_vector; in amd_iommu_activate_guest_mode()
3603 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; in amd_iommu_activate_guest_mode()
3605 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3606 ir_data->irq_2_irte.index, entry); in amd_iommu_activate_guest_mode()
3613 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_deactivate_guest_mode()
3614 struct irq_cfg *cfg = ir_data->cfg; in amd_iommu_deactivate_guest_mode()
3618 !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_deactivate_guest_mode()
3621 valid = entry->lo.fields_remap.valid; in amd_iommu_deactivate_guest_mode()
3623 entry->lo.val = 0; in amd_iommu_deactivate_guest_mode()
3624 entry->hi.val = 0; in amd_iommu_deactivate_guest_mode()
3626 entry->lo.fields_remap.valid = valid; in amd_iommu_deactivate_guest_mode()
3627 entry->lo.fields_remap.dm = apic->dest_mode_logical; in amd_iommu_deactivate_guest_mode()
3628 entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED; in amd_iommu_deactivate_guest_mode()
3629 entry->hi.fields.vector = cfg->vector; in amd_iommu_deactivate_guest_mode()
3630 entry->lo.fields_remap.destination = in amd_iommu_deactivate_guest_mode()
3631 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3632 entry->hi.fields.destination = in amd_iommu_deactivate_guest_mode()
3633 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3635 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3636 ir_data->irq_2_irte.index, entry); in amd_iommu_deactivate_guest_mode()
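
amd_iommu_activate_guest_mode() and amd_iommu_deactivate_guest_mode() above both save the entry's valid bit, zero the entry, rebuild it for the new mode, and then restore the saved bit, so a disabled entry stays disabled across the switch. A toy sketch of that save/clear/restore pattern on a plain struct (the field layout is illustrative only, and this does not go through the atomic 128-bit update shown earlier):

#include <stdint.h>
#include <stdbool.h>

/* Toy remap entry: only the fields needed to show the pattern. */
struct remap_entry {
        uint64_t valid      : 1;
        uint64_t guest_mode : 1;
        uint64_t vector     : 8;
        uint64_t reserved   : 54;
};

/* Switch an entry into "guest mode" while keeping its valid bit:
 * save valid, zero the entry, rebuild it, restore valid. */
static void switch_to_guest_mode(struct remap_entry *e, uint8_t ga_vector)
{
        bool valid = e->valid;

        *e = (struct remap_entry){ 0 };   /* clear every field */
        e->guest_mode = 1;
        e->vector = ga_vector;
        e->valid = valid;                 /* re-enable only if it was enabled before */
}
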
3644 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; in amd_ir_set_vcpu_affinity()
3645 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_vcpu_affinity()
3646 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_vcpu_affinity()
3649 if (ir_data->iommu == NULL) in amd_ir_set_vcpu_affinity()
3650 return -EINVAL; in amd_ir_set_vcpu_affinity()
3652 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3655 * This device has never been set up for guest mode. in amd_ir_set_vcpu_affinity()
3658 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
3661 ir_data->cfg = irqd_cfg(data); in amd_ir_set_vcpu_affinity()
3662 pi_data->ir_data = ir_data; in amd_ir_set_vcpu_affinity()
3665 * SVM tries to set up for VAPIC mode, but we are in in amd_ir_set_vcpu_affinity()
3666 * legacy mode. So, we force legacy mode instead. in amd_ir_set_vcpu_affinity()
3671 pi_data->is_guest_mode = false; in amd_ir_set_vcpu_affinity()
3674 pi_data->prev_ga_tag = ir_data->cached_ga_tag; in amd_ir_set_vcpu_affinity()
3675 if (pi_data->is_guest_mode) { in amd_ir_set_vcpu_affinity()
3676 ir_data->ga_root_ptr = (pi_data->base >> 12); in amd_ir_set_vcpu_affinity()
3677 ir_data->ga_vector = vcpu_pi_info->vector; in amd_ir_set_vcpu_affinity()
3678 ir_data->ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
3681 ir_data->cached_ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
3690 ir_data->cached_ga_tag = 0; in amd_ir_set_vcpu_affinity()
3707 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3708 irte_info->index, cfg->vector, in amd_ir_update_irte()
3709 cfg->dest_apicid); in amd_ir_update_irte()
3715 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_affinity()
3716 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_affinity()
3718 struct irq_data *parent = data->parent_data; in amd_ir_set_affinity()
3719 struct amd_iommu *iommu = ir_data->iommu; in amd_ir_set_affinity()
3723 return -ENODEV; in amd_ir_set_affinity()
3725 ret = parent->chip->irq_set_affinity(parent, mask, force); in amd_ir_set_affinity()
3742 struct amd_ir_data *ir_data = irq_data->chip_data; in ir_compose_msi_msg()
3744 *msg = ir_data->msi_entry; in ir_compose_msi_msg()
3748 .name = "AMD-IR",
3759 .prefix = "IR-",
3766 .prefix = "vIR-",
3774 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3776 return -ENOMEM; in amd_iommu_create_irq_domain()
3777 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, in amd_iommu_create_irq_domain()
3779 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3781 return -ENOMEM; in amd_iommu_create_irq_domain()
3784 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); in amd_iommu_create_irq_domain()
3785 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | in amd_iommu_create_irq_domain()
3789 iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
3791 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
3799 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_update_ga()
3802 !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_update_ga()
3805 if (!ir_data->iommu) in amd_iommu_update_ga()
3806 return -ENODEV; in amd_iommu_update_ga()
3809 entry->lo.fields_vapic.destination = in amd_iommu_update_ga()
3811 entry->hi.fields.destination = in amd_iommu_update_ga()
3814 entry->lo.fields_vapic.is_run = is_run; in amd_iommu_update_ga()
3816 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_update_ga()
3817 ir_data->irq_2_irte.index, entry); in amd_iommu_update_ga()