Lines Matching full:iommu
22 #include <linux/iommu-helper.h>
24 #include <linux/amd-iommu.h>
38 #include <asm/iommu.h>
44 #include "../dma-iommu.h"
46 #include "../iommu-pages.h"
66 * general struct to manage commands sent to an IOMMU
73 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
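The bitmap mentioned in the comment above backs the allocation of the 2^16 protection-domain IDs. A minimal userspace sketch of that allocation pattern, assuming a flat uint64_t bitmap rather than the driver's own helpers and locking:

```c
#include <stdint.h>

#define MAX_DOMAIN_ID	(1u << 16)	/* 2^16 protection domains */

static uint64_t pd_bitmap[MAX_DOMAIN_ID / 64];

/* Find the first clear bit, mark it used, and return it; -1 if exhausted. */
static int pd_alloc_id(void)
{
	for (unsigned int id = 0; id < MAX_DOMAIN_ID; id++) {
		if (!(pd_bitmap[id / 64] & (1ull << (id % 64)))) {
			pd_bitmap[id / 64] |= 1ull << (id % 64);
			return (int)id;
		}
	}
	return -1;	/* no free protection domain ID */
}

static void pd_free_id(unsigned int id)
{
	pd_bitmap[id / 64] &= ~(1ull << (id % 64));
}
```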
81 static void set_dte_entry(struct amd_iommu *iommu,
84 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
86 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
130 * IOMMU reads the entire Device Table entry in a single 256-bit transaction
139 static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, in update_dte256() argument
143 struct dev_table_entry *dev_table = get_dev_table(iommu); in update_dte256()
152 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
157 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
165 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
174 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
187 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
192 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
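update_dte256() exists because, as the comment at line 130 notes, the IOMMU fetches a Device Table entry in a single 256-bit transaction, so the four 64-bit qwords cannot be rewritten independently. The repeated iommu_flush_dte_sync() calls above show the real function interleaving DTE invalidations with the qword updates; the following is only a rough sketch of the basic ordering idea (update the qwords that do not carry the valid bit first, publish the remaining qword, then flush), with hypothetical helpers standing in for the driver's primitives:

```c
#include <stdint.h>

struct dev_table_entry {
	uint64_t data[4];	/* one 256-bit Device Table entry */
};

/* Hypothetical stand-ins for the driver's primitives. */
void write_once_u64(volatile uint64_t *p, uint64_t val);
void dte_flush_and_wait(uint16_t devid);

static void dte_update(volatile struct dev_table_entry *dte,
		       const struct dev_table_entry *new_dte, uint16_t devid)
{
	/* Update the upper qwords first; the entry is still interpreted
	 * with the old qword 0, which carries the valid bit. */
	write_once_u64(&dte->data[1], new_dte->data[1]);
	write_once_u64(&dte->data[2], new_dte->data[2]);
	write_once_u64(&dte->data[3], new_dte->data[3]);

	/* Publish qword 0 last, then invalidate the cached DTE so the
	 * IOMMU re-reads the whole 256-bit entry. */
	write_once_u64(&dte->data[0], new_dte->data[0]);
	dte_flush_and_wait(devid);
}
```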
205 static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, in get_dte256() argument
210 struct dev_table_entry *dev_table = get_dev_table(iommu); in get_dte256()
272 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu) in get_dev_table() argument
275 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_dev_table()
301 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
302 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) in amd_iommu_set_rlookup_table() argument
304 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in amd_iommu_set_rlookup_table()
306 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
330 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) in alloc_dev_data() argument
333 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in alloc_dev_data()
348 struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) in search_dev_data() argument
352 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in search_dev_data()
369 struct amd_iommu *iommu; in clone_alias() local
377 iommu = rlookup_amd_iommu(&pdev->dev); in clone_alias()
378 if (!iommu) in clone_alias()
388 get_dte256(iommu, dev_data, &new); in clone_alias()
391 alias_data = find_dev_data(iommu, alias); in clone_alias()
397 update_dte256(iommu, alias_data, &new); in clone_alias()
399 amd_iommu_set_rlookup_table(iommu, alias); in clone_alias()
404 static void clone_aliases(struct amd_iommu *iommu, struct device *dev) in clone_aliases() argument
417 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); in clone_aliases()
422 static void setup_aliases(struct amd_iommu *iommu, struct device *dev) in setup_aliases() argument
425 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in setup_aliases()
441 clone_aliases(iommu, dev); in setup_aliases()
444 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) in find_dev_data() argument
448 dev_data = search_dev_data(iommu, devid); in find_dev_data()
451 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
455 if (translation_pre_enabled(iommu)) in find_dev_data()
463 * Find or create an IOMMU group for an acpihid device.
633 struct amd_iommu *iommu; in check_device() local
644 iommu = rlookup_amd_iommu(dev); in check_device()
645 if (!iommu) in check_device()
649 pci_seg = iommu->pci_seg; in check_device()
656 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) in iommu_init_device() argument
669 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
680 setup_aliases(iommu, dev); in iommu_init_device()
696 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) in iommu_ignore_device() argument
698 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in iommu_ignore_device()
699 struct dev_table_entry *dev_table = get_dev_table(iommu); in iommu_ignore_device()
710 setup_aliases(iommu, dev); in iommu_ignore_device()
720 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) in dump_dte_entry() argument
724 struct iommu_dev_data *dev_data = find_dev_data(iommu, devid); in dump_dte_entry()
726 get_dte256(iommu, dev_data, &dte); in dump_dte_entry()
741 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_hw_error() argument
753 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
765 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
773 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_fault() argument
786 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
798 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
812 static void amd_iommu_report_page_fault(struct amd_iommu *iommu, in amd_iommu_report_page_fault() argument
819 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
835 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), in amd_iommu_report_page_fault()
854 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
863 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
865 struct device *dev = iommu->iommu.dev; in iommu_print_event()
879 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_print_event()
892 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); in iommu_print_event()
899 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
902 dump_dte_entry(iommu, devid); in iommu_print_event()
907 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
912 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
925 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
930 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
934 amd_iommu_report_rmp_fault(iommu, event); in iommu_print_event()
937 amd_iommu_report_rmp_hw_error(iommu, event); in iommu_print_event()
943 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
961 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
965 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
966 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
969 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
973 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
989 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
993 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
996 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
997 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
1003 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
1010 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
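iommu_poll_events() and iommu_poll_ga_log() follow the same MMIO ring protocol: read the head and tail registers, consume entries from head to tail, then write the updated head back so the hardware can reuse the space. A compact model of that loop, with mmio_read()/mmio_write() and the entry size as stand-ins for the real readl()/writel() accessors and log formats:

```c
#include <stddef.h>
#include <stdint.h>

#define LOG_ENTRY_SIZE	16		/* assumed entry size for this sketch */
#define LOG_BUF_SIZE	(512 * LOG_ENTRY_SIZE)

/* Stand-ins for readl()/writel() on the IOMMU MMIO registers. */
uint32_t mmio_read(size_t reg);
void mmio_write(uint32_t val, size_t reg);
void handle_entry(const uint8_t *entry);

static void poll_log(uint8_t *log_buf, size_t head_reg, size_t tail_reg)
{
	uint32_t head = mmio_read(head_reg);
	uint32_t tail = mmio_read(tail_reg);

	while (head != tail) {
		handle_entry(log_buf + head);
		head = (head + LOG_ENTRY_SIZE) % LOG_BUF_SIZE;
	}

	/* Tell the hardware how far the log has been consumed. */
	mmio_write(head, head_reg);
}
```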
1032 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
1038 dev_set_msi_domain(dev, iommu->ir_domain); in amd_iommu_set_pci_msi_domain()
1043 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
1051 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_handle_irq() local
1052 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
1057 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
1060 pr_devel("Processing IOMMU (ivhd%d) %s Log\n", in amd_iommu_handle_irq()
1061 iommu->index, evt_type); in amd_iommu_handle_irq()
1062 int_handler(iommu); in amd_iommu_handle_irq()
1066 overflow_handler(iommu); in amd_iommu_handle_irq()
1076 * Workaround: The IOMMU driver should read back the in amd_iommu_handle_irq()
1081 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
1130 * IOMMU command queuing functions
1134 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
1138 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
1151 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
1158 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
1159 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
1163 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
1165 /* Tell the IOMMU about it */ in copy_cmd_to_buffer()
1166 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
1170 struct amd_iommu *iommu, in build_completion_wait() argument
1173 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1307 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1314 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1316 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1330 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1336 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1339 iommu->need_sync = sync; in __iommu_queue_command_sync()
1344 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1351 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1352 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1353 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1358 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1360 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
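__iommu_queue_command_sync() computes the free space in the command ring as (head - next_tail) % CMD_BUFFER_SIZE, which only works because the buffer size is a power of two, so the unsigned wrap-around cancels out. A small standalone check of that arithmetic (sizes are illustrative):

```c
#include <assert.h>
#include <stdint.h>

#define CMD_BUFFER_SIZE	8192u	/* must be a power of two */
#define CMD_ENTRY_SIZE	16u

/* Free bytes in the ring, given consumer head and producer tail. */
static uint32_t ring_space(uint32_t head, uint32_t tail)
{
	uint32_t next_tail = (tail + CMD_ENTRY_SIZE) % CMD_BUFFER_SIZE;

	return (head - next_tail) % CMD_BUFFER_SIZE;
}

int main(void)
{
	/* Empty ring: everything but one slot is available. */
	assert(ring_space(0, 0) == CMD_BUFFER_SIZE - CMD_ENTRY_SIZE);
	/* Wrapped ring: tail about to wrap past head. */
	assert(ring_space(32, CMD_BUFFER_SIZE - CMD_ENTRY_SIZE) == 32);
	return 0;
}
```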
1365 * buffer of an IOMMU
1367 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1374 if (!iommu->need_sync) in iommu_completion_wait()
1377 data = atomic64_inc_return(&iommu->cmd_sem_val); in iommu_completion_wait()
1378 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1380 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1382 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1386 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1389 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
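iommu_completion_wait() pairs build_completion_wait() with wait_on_sem(): a COMPLETION_WAIT command asks the IOMMU to store a caller-chosen sequence value to a semaphore address once all earlier commands have completed, and the CPU polls that address. A hedged model of the handshake, where queue_completion_wait_cmd() is a placeholder for building and queuing the real command:

```c
#include <stdatomic.h>
#include <stdint.h>

#define LOOP_TIMEOUT	1000000UL	/* illustrative poll bound */

/* Placeholder: queue a COMPLETION_WAIT that stores 'data' to *sem. */
void queue_completion_wait_cmd(volatile uint64_t *sem, uint64_t data);

static _Atomic uint64_t cmd_sem_val;
static volatile uint64_t cmd_sem;

static int completion_wait(void)
{
	/* Each wait uses a fresh sequence value, as cmd_sem_val does above. */
	uint64_t data = atomic_fetch_add(&cmd_sem_val, 1) + 1;

	queue_completion_wait_cmd(&cmd_sem, data);

	for (unsigned long i = 0; i < LOOP_TIMEOUT; i++) {
		if (cmd_sem == data)
			return 0;	/* IOMMU reached the wait command */
	}
	return -1;			/* timed out */
}
```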
1402 * Devices of this domain are behind this IOMMU in domain_flush_complete()
1406 iommu_completion_wait(pdom_iommu_info->iommu); in domain_flush_complete()
1409 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1415 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1418 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte_sync() argument
1422 ret = iommu_flush_dte(iommu, devid); in iommu_flush_dte_sync()
1424 iommu_completion_wait(iommu); in iommu_flush_dte_sync()
1427 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1430 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_dte_all()
1433 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1435 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1442 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1445 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_tlb_all()
1451 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1454 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1457 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1463 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1465 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1468 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1474 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1475 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1478 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1484 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1487 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1490 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_irt_all()
1492 if (iommu->irtcachedis_enabled) in amd_iommu_flush_irt_all()
1496 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1498 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1501 void amd_iommu_flush_all_caches(struct amd_iommu *iommu) in amd_iommu_flush_all_caches() argument
1504 amd_iommu_flush_all(iommu); in amd_iommu_flush_all_caches()
1506 amd_iommu_flush_dte_all(iommu); in amd_iommu_flush_all_caches()
1507 amd_iommu_flush_irt_all(iommu); in amd_iommu_flush_all_caches()
1508 amd_iommu_flush_tlb_all(iommu); in amd_iommu_flush_all_caches()
1518 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_iotlb() local
1525 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1530 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1532 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1540 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_dte() local
1551 device_flush_dte_alias, iommu); in device_flush_dte()
1553 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1557 pci_seg = iommu->pci_seg; in device_flush_dte()
1560 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
1583 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in domain_flush_pages_v2() local
1589 ret |= iommu_queue_command(iommu, &cmd); in domain_flush_pages_v2()
1610 * Devices of this domain are behind this IOMMU in domain_flush_pages_v1()
1613 ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd); in domain_flush_pages_v1()
1659 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ in amd_iommu_domain_flush_pages()
1699 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ in amd_iommu_domain_flush_pages()
1714 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in amd_iommu_dev_flush_pasid_pages() local
1718 iommu_queue_command(iommu, &cmd); in amd_iommu_dev_flush_pasid_pages()
1723 iommu_completion_wait(iommu); in amd_iommu_dev_flush_pasid_pages()
1757 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_update_and_flush_device_table() local
1759 set_dte_entry(iommu, dev_data); in amd_iommu_update_and_flush_device_table()
1760 clone_aliases(iommu, dev_data->dev); in amd_iommu_update_and_flush_device_table()
1772 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
1776 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_complete_ppr()
1781 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
1787 * allocated for every IOMMU as the default domain. If device isolation
1869 struct amd_iommu *iommu, int pasids) in setup_gcr3_table() argument
1872 int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; in setup_gcr3_table()
1995 static void set_dte_gcr3_table(struct amd_iommu *iommu, in set_dte_gcr3_table() argument
2027 static void set_dte_entry(struct amd_iommu *iommu, in set_dte_entry() argument
2036 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in set_dte_entry()
2077 initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid); in set_dte_entry()
2083 set_dte_gcr3_table(iommu, dev_data, &new); in set_dte_entry()
2085 update_dte256(iommu, dev_data, &new); in set_dte_entry()
2093 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
2100 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) in clear_dte_entry() argument
2103 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in clear_dte_entry()
2106 update_dte256(iommu, dev_data, &new); in clear_dte_entry()
2112 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in dev_update_dte() local
2115 set_dte_entry(iommu, dev_data); in dev_update_dte()
2117 clear_dte_entry(iommu, dev_data); in dev_update_dte()
2119 clone_aliases(iommu, dev_data->dev); in dev_update_dte()
2121 iommu_completion_wait(iommu); in dev_update_dte()
2131 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in init_gcr3_table() local
2144 * supported by the device/IOMMU. in init_gcr3_table()
2146 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, in init_gcr3_table()
2176 static int pdom_attach_iommu(struct amd_iommu *iommu, in pdom_attach_iommu() argument
2185 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); in pdom_attach_iommu()
2197 pdom_iommu_info->iommu = iommu; in pdom_attach_iommu()
2200 curr = xa_cmpxchg(&pdom->iommu_array, iommu->index, in pdom_attach_iommu()
2213 static void pdom_detach_iommu(struct amd_iommu *iommu, in pdom_detach_iommu() argument
2221 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); in pdom_detach_iommu()
2229 xa_erase(&pdom->iommu_array, iommu->index); in pdom_detach_iommu()
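pdom_attach_iommu() and pdom_detach_iommu() keep one refcounted entry per IOMMU in the domain's iommu_array, keyed by iommu->index, so domain flushes only visit IOMMUs that actually have devices attached. A simplified model of that bookkeeping using a plain array in place of the xarray (purely illustrative):

```c
#include <stdlib.h>

#define MAX_IOMMUS	32		/* illustrative bound */

struct pdom_iommu_info {
	unsigned int refcnt;		/* devices behind this IOMMU */
};

static struct pdom_iommu_info *iommu_array[MAX_IOMMUS];

static int pdom_attach(unsigned int index)
{
	struct pdom_iommu_info *info = iommu_array[index];

	if (info) {			/* already tracked: take another ref */
		info->refcnt++;
		return 0;
	}

	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;
	info->refcnt = 1;
	iommu_array[index] = info;
	return 0;
}

static void pdom_detach(unsigned int index)
{
	struct pdom_iommu_info *info = iommu_array[index];

	if (!info)
		return;
	if (--info->refcnt == 0) {	/* last device gone: drop the entry */
		iommu_array[index] = NULL;
		free(info);
	}
}
```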
2244 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in attach_device() local
2257 ret = pdom_attach_iommu(iommu, domain); in attach_device()
2265 pdom_detach_iommu(iommu, domain); in attach_device()
2279 if (amd_iommu_iopf_add_device(iommu, dev_data)) in attach_device()
2306 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in detach_device() local
2324 amd_iommu_iopf_remove_device(iommu, dev_data); in detach_device()
2347 pdom_detach_iommu(iommu, domain); in detach_device()
2356 struct amd_iommu *iommu; in amd_iommu_probe_device() local
2363 iommu = rlookup_amd_iommu(dev); in amd_iommu_probe_device()
2364 if (!iommu) in amd_iommu_probe_device()
2368 if (!iommu->iommu.ops) in amd_iommu_probe_device()
2372 return &iommu->iommu; in amd_iommu_probe_device()
2374 ret = iommu_init_device(iommu, dev); in amd_iommu_probe_device()
2378 iommu_ignore_device(iommu, dev); in amd_iommu_probe_device()
2382 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
2383 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
2386 * If the IOMMU and device support PASID then it will contain max in amd_iommu_probe_device()
2392 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, in amd_iommu_probe_device()
2398 iommu_completion_wait(iommu); in amd_iommu_probe_device()
2433 * The following functions belong to the exported interface of AMD IOMMU
2435 * This interface allows access to lower level functions of the IOMMU
2502 static bool amd_iommu_hd_support(struct amd_iommu *iommu) in amd_iommu_hd_support() argument
2504 return iommu && (iommu->features & FEATURE_HDSUP); in amd_iommu_hd_support()
2512 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in do_iommu_domain_alloc() local
2534 domain->domain.ops = iommu->iommu.ops->default_domain_ops; in do_iommu_domain_alloc()
2547 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_domain_alloc_paging_flags() local
2557 if (!amd_iommu_hd_support(iommu)) in amd_iommu_domain_alloc_paging_flags()
2561 /* Allocate domain with v2 page table if IOMMU supports PASID. */ in amd_iommu_domain_alloc_paging_flags()
2649 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_attach_device() local
2662 * Restrict to devices with compatible IOMMU hardware support in amd_iommu_attach_device()
2665 if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) in amd_iommu_attach_device()
2727 * AMD's IOMMU can flush as many pages as necessary in a single flush. in amd_iommu_iotlb_gather_add_page()
2732 * hypervisor needs to synchronize the host IOMMU PTEs with those of in amd_iommu_iotlb_gather_add_page()
2786 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_capable() local
2788 return amd_iommu_hd_support(iommu); in amd_iommu_capable()
2804 struct amd_iommu *iommu; in amd_iommu_set_dirty_tracking() local
2816 iommu = get_amd_iommu_from_dev_data(dev_data); in amd_iommu_set_dirty_tracking()
2817 dte = &get_dev_table(iommu)[dev_data->devid]; in amd_iommu_set_dirty_tracking()
2865 struct amd_iommu *iommu; in amd_iommu_get_resv_regions() local
2874 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_get_resv_regions()
2875 pci_seg = iommu->pci_seg; in amd_iommu_get_resv_regions()
3059 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt_and_complete() argument
3066 if (iommu->irtcachedis_enabled) in iommu_flush_irt_and_complete()
3070 data = atomic64_inc_return(&iommu->cmd_sem_val); in iommu_flush_irt_and_complete()
3071 build_completion_wait(&cmd2, iommu, data); in iommu_flush_irt_and_complete()
3073 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_flush_irt_and_complete()
3074 ret = __iommu_queue_command_sync(iommu, &cmd, true); in iommu_flush_irt_and_complete()
3077 ret = __iommu_queue_command_sync(iommu, &cmd2, false); in iommu_flush_irt_and_complete()
3080 wait_on_sem(iommu, data); in iommu_flush_irt_and_complete()
3082 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_flush_irt_and_complete()
3092 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, in set_dte_irq_entry() argument
3096 struct dev_table_entry *dte = &get_dev_table(iommu)[devid]; in set_dte_irq_entry()
3097 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid); in set_dte_irq_entry()
3114 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) in get_irq_table() argument
3117 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_irq_table()
3120 "%s: no iommu for devid %x:%x\n", in get_irq_table()
3150 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
3153 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in set_remap_table_entry()
3156 set_dte_irq_entry(iommu, devid, table); in set_remap_table_entry()
3157 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
3165 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); in set_remap_table_entry_alias() local
3167 if (!iommu) in set_remap_table_entry_alias()
3170 pci_seg = iommu->pci_seg; in set_remap_table_entry_alias()
3172 set_dte_irq_entry(iommu, alias, table); in set_remap_table_entry_alias()
3186 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, in alloc_irq_table() argument
3195 int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; in alloc_irq_table()
3200 pci_seg = iommu->pci_seg; in alloc_irq_table()
3208 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3226 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3237 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3240 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
3243 iommu_completion_wait(iommu); in alloc_irq_table()
3255 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, in alloc_irq_index() argument
3263 table = alloc_irq_table(iommu, devid, pdev, max_irqs); in alloc_irq_index()
3275 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3285 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
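alloc_irq_index() scans the per-device interrupt-remapping table for count consecutive unallocated entries and marks them allocated via the irte_ops callbacks seen above. A sketch of that first-fit scan over a boolean occupancy array (table size and names are illustrative):

```c
#include <stdbool.h>

#define MAX_IRQS_PER_TABLE	256	/* illustrative table size */

static bool allocated[MAX_IRQS_PER_TABLE];

/* Return the first index of a run of 'count' free slots, or -1. */
static int alloc_irq_index(int count)
{
	int run = 0;

	for (int index = 0; index < MAX_IRQS_PER_TABLE; index++) {
		if (allocated[index]) {
			run = 0;
			continue;
		}
		if (++run == count) {
			int first = index - count + 1;

			for (int i = first; i <= index; i++)
				allocated[i] = true;
			return first;
		}
	}
	return -1;			/* no run of 'count' free entries */
}
```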
3302 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in __modify_irte_ga() argument
3310 table = get_irq_table(iommu, devid); in __modify_irte_ga()
3333 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in modify_irte_ga() argument
3338 ret = __modify_irte_ga(iommu, devid, index, irte); in modify_irte_ga()
3342 iommu_flush_irt_and_complete(iommu, devid); in modify_irte_ga()
3347 static int modify_irte(struct amd_iommu *iommu, in modify_irte() argument
3353 table = get_irq_table(iommu, devid); in modify_irte()
3361 iommu_flush_irt_and_complete(iommu, devid); in modify_irte()
3366 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) in free_irte() argument
3371 table = get_irq_table(iommu, devid); in free_irte()
3376 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3379 iommu_flush_irt_and_complete(iommu, devid); in free_irte()
3412 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_activate() argument
3417 modify_irte(iommu, devid, index, irte); in irte_activate()
3420 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_activate() argument
3425 modify_irte_ga(iommu, devid, index, irte); in irte_ga_activate()
3428 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_deactivate() argument
3433 modify_irte(iommu, devid, index, irte); in irte_deactivate()
3436 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_deactivate() argument
3441 modify_irte_ga(iommu, devid, index, irte); in irte_ga_deactivate()
3444 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_set_affinity() argument
3451 modify_irte(iommu, devid, index, irte); in irte_set_affinity()
3454 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_ga_set_affinity() argument
3465 modify_irte_ga(iommu, devid, index, irte); in irte_ga_set_affinity()
3561 struct amd_iommu *iommu = data->iommu; in irq_remapping_prepare_irte() local
3563 if (!iommu) in irq_remapping_prepare_irte()
3568 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, in irq_remapping_prepare_irte()
3612 struct amd_iommu *iommu; in irq_remapping_alloc() local
3630 iommu = __rlookup_amd_iommu(seg, devid); in irq_remapping_alloc()
3631 if (!iommu) in irq_remapping_alloc()
3634 dev_data = search_dev_data(iommu, devid); in irq_remapping_alloc()
3644 table = alloc_irq_table(iommu, devid, NULL, max_irqs); in irq_remapping_alloc()
3653 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3664 index = alloc_irq_index(iommu, devid, nr_irqs, align, in irq_remapping_alloc()
3668 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL, in irq_remapping_alloc()
3701 data->iommu = iommu; in irq_remapping_alloc()
3717 free_irte(iommu, devid, index + i); in irq_remapping_alloc()
3736 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3744 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3754 struct amd_iommu *iommu = data->iommu; in irq_remapping_activate() local
3757 if (!iommu) in irq_remapping_activate()
3760 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3762 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3771 struct amd_iommu *iommu = data->iommu; in irq_remapping_deactivate() local
3773 if (iommu) in irq_remapping_deactivate()
3774 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3781 struct amd_iommu *iommu; in irq_remapping_select() local
3794 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
3796 return iommu && iommu->ir_domain == d; in irq_remapping_select()
3828 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3858 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3875 if (ir_data->iommu == NULL) in amd_ir_set_vcpu_affinity()
3878 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3913 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3923 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3935 struct amd_iommu *iommu = ir_data->iommu; in amd_ir_set_affinity() local
3938 if (!iommu) in amd_ir_set_affinity()
3945 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3977 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3981 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3984 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, in amd_iommu_create_irq_domain()
3985 fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3986 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3991 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); in amd_iommu_create_irq_domain()
3992 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | in amd_iommu_create_irq_domain()
3994 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
4008 if (!ir_data->iommu) in amd_iommu_update_ga()
4019 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_update_ga()