Lines Matching full:iommu

19 #include <linux/amd-iommu.h>
25 #include <asm/iommu.h>
38 #include "../iommu-pages.h"
98 * structure describing one IOMMU in the ACPI table. Typically followed by one
118 * A device entry describing which devices a specific IOMMU translates and
136 * An AMD IOMMU memory definition structure. It defines things like exclusion
227 bool translation_pre_enabled(struct amd_iommu *iommu) in translation_pre_enabled() argument
229 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
232 static void clear_translation_pre_enabled(struct amd_iommu *iommu) in clear_translation_pre_enabled() argument
234 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
237 static void init_translation_status(struct amd_iommu *iommu) in init_translation_status() argument
241 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
243 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
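
A minimal, self-contained sketch of what init_translation_status() boils down to: read the control register and record whether firmware or a previous kernel already enabled translation. The bit position and flag value below are placeholders and the MMIO readq() is replaced by a plain variable; this illustrates the pattern only, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define CTRL_IOMMU_EN_BIT       0               /* assumed bit position, illustration only */
#define FLAG_TRANS_PRE_ENABLED  (1u << 0)       /* assumed flag value, illustration only */

struct fake_iommu {
        uint64_t mmio_control;                  /* stands in for readq(mmio_base + MMIO_CONTROL_OFFSET) */
        uint32_t flags;
};

/* Record whether translation was left enabled before this kernel took over. */
static void init_translation_status(struct fake_iommu *iommu)
{
        uint64_t ctrl = iommu->mmio_control;

        if (ctrl & (1ULL << CTRL_IOMMU_EN_BIT))
                iommu->flags |= FLAG_TRANS_PRE_ENABLED;
}

int main(void)
{
        struct fake_iommu iommu = { .mmio_control = 1ULL << CTRL_IOMMU_EN_BIT };

        init_translation_status(&iommu);
        printf("pre-enabled: %s\n",
               (iommu.flags & FLAG_TRANS_PRE_ENABLED) ? "yes" : "no");
        return 0;
}
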
265 struct amd_iommu *iommu; in get_global_efr() local
267 for_each_iommu(iommu) { in get_global_efr()
268 u64 tmp = iommu->features; in get_global_efr()
269 u64 tmp2 = iommu->features2; in get_global_efr()
271 if (list_is_first(&iommu->list, &amd_iommu_list)) { in get_global_efr()
282 …"Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", in get_global_efr()
284 iommu->index, iommu->pci_seg->id, in get_global_efr()
285 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), in get_global_efr()
286 PCI_FUNC(iommu->devid)); in get_global_efr()
300 static void __init early_iommu_features_init(struct amd_iommu *iommu, in early_iommu_features_init() argument
304 iommu->features = h->efr_reg; in early_iommu_features_init()
305 iommu->features2 = h->efr_reg2; in early_iommu_features_init()
313 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) in iommu_read_l1() argument
317 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
318 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
322 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) in iommu_write_l1() argument
324 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); in iommu_write_l1()
325 pci_write_config_dword(iommu->dev, 0xfc, val); in iommu_write_l1()
326 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_write_l1()
329 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) in iommu_read_l2() argument
333 pci_write_config_dword(iommu->dev, 0xf0, address); in iommu_read_l2()
334 pci_read_config_dword(iommu->dev, 0xf4, &val); in iommu_read_l2()
338 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) in iommu_write_l2() argument
340 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); in iommu_write_l2()
341 pci_write_config_dword(iommu->dev, 0xf4, val); in iommu_write_l2()
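
The four accessors above use an index/data pair in PCI config space (0xf8/0xfc for L1 registers, 0xf0/0xf4 for L2), with a write-enable bit set in the index word when storing. A userspace sketch of that indirect-access idiom follows; the simulated register file and config-space behavior are assumptions for illustration only, not a model of the real hardware.

#include <stdint.h>
#include <stdio.h>

static uint32_t l2_regs[256];                   /* simulated L2 register file, indexed by address */
static uint32_t cfg_f0, cfg_f4;                 /* stand-ins for config offsets 0xf0/0xf4 */

/* Stand-in for pci_write_config_dword(): commit data when write-enable is set. */
static void cfg_write(uint32_t off, uint32_t val)
{
        if (off == 0xf0) {
                cfg_f0 = val;
        } else if (off == 0xf4) {
                cfg_f4 = val;
                if (cfg_f0 & (1u << 8))         /* write-enable bit in the index word */
                        l2_regs[cfg_f0 & 0xff] = val;
        }
}

/* Stand-in for pci_read_config_dword(): data register reflects the indexed register. */
static uint32_t cfg_read(uint32_t off)
{
        return (off == 0xf4) ? l2_regs[cfg_f0 & 0xff] : 0;
}

static void iommu_write_l2(uint8_t address, uint32_t val)
{
        cfg_write(0xf0, address | (1u << 8));   /* index + write-enable */
        cfg_write(0xf4, val);                   /* data */
}

static uint32_t iommu_read_l2(uint8_t address)
{
        cfg_write(0xf0, address);               /* index only, no write-enable */
        return cfg_read(0xf4);
}

int main(void)
{
        iommu_write_l2(0x47, 0x1);
        printf("L2 reg 0x47 = %#x\n", iommu_read_l2(0x47));
        return 0;
}
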
346 * AMD IOMMU MMIO register space handling functions
348 * These functions are used to program the IOMMU device registers in
354 * This function sets the exclusion range in the IOMMU. DMA accesses to the
357 static void iommu_set_exclusion_range(struct amd_iommu *iommu) in iommu_set_exclusion_range() argument
359 u64 start = iommu->exclusion_start & PAGE_MASK; in iommu_set_exclusion_range()
360 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; in iommu_set_exclusion_range()
363 if (!iommu->exclusion_start) in iommu_set_exclusion_range()
367 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_exclusion_range()
371 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_exclusion_range()
375 static void iommu_set_cwwb_range(struct amd_iommu *iommu) in iommu_set_cwwb_range() argument
377 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); in iommu_set_cwwb_range()
387 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_cwwb_range()
394 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_cwwb_range()
398 /* Programs the physical address of the device table into the IOMMU hardware */
399 static void iommu_set_device_table(struct amd_iommu *iommu) in iommu_set_device_table() argument
402 u32 dev_table_size = iommu->pci_seg->dev_table_size; in iommu_set_device_table()
403 void *dev_table = (void *)get_dev_table(iommu); in iommu_set_device_table()
405 BUG_ON(iommu->mmio_base == NULL); in iommu_set_device_table()
409 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, in iommu_set_device_table()
413 static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift) in iommu_feature_set() argument
417 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_set()
421 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_set()
424 /* Generic functions to enable/disable certain features of the IOMMU. */
425 void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) in iommu_feature_enable() argument
427 iommu_feature_set(iommu, 1ULL, 1ULL, bit); in iommu_feature_enable()
430 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) in iommu_feature_disable() argument
432 iommu_feature_set(iommu, 0ULL, 1ULL, bit); in iommu_feature_disable()
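
iommu_feature_set() is a read-modify-write of a field in the 64-bit control register, and the enable/disable wrappers are the 1-bit case of it. Below is a self-contained sketch of the same pattern with readq()/writeq() replaced by a local variable; the masking details are inferred from the function's signature rather than copied from the kernel source.

#include <stdint.h>
#include <stdio.h>

static uint64_t control_reg;                    /* stands in for the MMIO control register */

/* Read-modify-write a field of 'mask' bits at 'shift', as iommu_feature_set() does. */
static void feature_set(uint64_t val, uint64_t mask, uint8_t shift)
{
        uint64_t ctrl = control_reg;            /* readq(mmio_base + MMIO_CONTROL_OFFSET) */

        mask <<= shift;
        ctrl &= ~mask;
        ctrl |= (val << shift) & mask;
        control_reg = ctrl;                     /* writeq(ctrl, mmio_base + MMIO_CONTROL_OFFSET) */
}

static void feature_enable(uint8_t bit)  { feature_set(1ULL, 1ULL, bit); }
static void feature_disable(uint8_t bit) { feature_set(0ULL, 1ULL, bit); }

int main(void)
{
        feature_enable(0);                      /* e.g. an IOMMU-enable style control bit */
        printf("control = %#llx\n", (unsigned long long)control_reg);
        feature_disable(0);
        printf("control = %#llx\n", (unsigned long long)control_reg);
        return 0;
}
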
436 static void iommu_enable(struct amd_iommu *iommu) in iommu_enable() argument
438 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); in iommu_enable()
441 static void iommu_disable(struct amd_iommu *iommu) in iommu_disable() argument
443 if (!iommu->mmio_base) in iommu_disable()
447 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable()
450 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); in iommu_disable()
451 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable()
453 /* Disable IOMMU GA_LOG */ in iommu_disable()
454 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in iommu_disable()
455 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in iommu_disable()
457 /* Disable IOMMU PPR logging */ in iommu_disable()
458 iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); in iommu_disable()
459 iommu_feature_disable(iommu, CONTROL_PPRINT_EN); in iommu_disable()
461 /* Disable IOMMU hardware itself */ in iommu_disable()
462 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); in iommu_disable()
465 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); in iommu_disable()
469 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
484 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) in iommu_unmap_mmio_space() argument
486 if (iommu->mmio_base) in iommu_unmap_mmio_space()
487 iounmap(iommu->mmio_base); in iommu_unmap_mmio_space()
488 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); in iommu_unmap_mmio_space()
509 * The functions below belong to the first pass of AMD IOMMU ACPI table
533 * After reading the highest device id from the IOMMU PCI capability header
628 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
652 /* Allocate per PCI segment IOMMU rlookup table. */
716 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
717 * write commands to that buffer later and the IOMMU will execute them
720 static int __init alloc_command_buffer(struct amd_iommu *iommu) in alloc_command_buffer() argument
722 iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL, in alloc_command_buffer()
725 return iommu->cmd_buf ? 0 : -ENOMEM; in alloc_command_buffer()
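
The command buffer is used as a ring: software queues commands at the tail and the IOMMU fetches them from the head (the head/tail reset in amd_iommu_reset_cmd_buffer() below shows the two indices). A minimal userspace sketch of that producer/consumer shape, with made-up entry and ring sizes and no claim about the real command format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_SIZE      16                        /* placeholder entry size */
#define CMD_BUF_SIZE  (CMD_SIZE * 8)            /* placeholder ring size */

static uint8_t  cmd_buf[CMD_BUF_SIZE];
static uint32_t cmd_head, cmd_tail;             /* what the MMIO head/tail registers track */

/* Queue one command at the tail, as the driver side of the ring would. */
static int queue_command(const void *cmd)
{
        uint32_t next = (cmd_tail + CMD_SIZE) % CMD_BUF_SIZE;

        if (next == cmd_head)                   /* ring full: hardware has not caught up */
                return -1;

        memcpy(&cmd_buf[cmd_tail], cmd, CMD_SIZE);
        cmd_tail = next;                        /* hardware would observe the new tail via MMIO */
        return 0;
}

/* Consume one command from the head, standing in for the IOMMU fetching it. */
static int fetch_command(void *cmd)
{
        if (cmd_head == cmd_tail)               /* ring empty */
                return -1;

        memcpy(cmd, &cmd_buf[cmd_head], CMD_SIZE);
        cmd_head = (cmd_head + CMD_SIZE) % CMD_BUF_SIZE;
        return 0;
}

int main(void)
{
        uint8_t cmd[CMD_SIZE] = { 0x01 };

        queue_command(cmd);
        fetch_command(cmd);
        printf("head=%u tail=%u\n", cmd_head, cmd_tail);
        return 0;
}
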
732 void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, in amd_iommu_restart_log() argument
738 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_restart_log()
742 pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); in amd_iommu_restart_log()
744 iommu_feature_disable(iommu, cntrl_log); in amd_iommu_restart_log()
745 iommu_feature_disable(iommu, cntrl_intr); in amd_iommu_restart_log()
747 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_restart_log()
749 iommu_feature_enable(iommu, cntrl_intr); in amd_iommu_restart_log()
750 iommu_feature_enable(iommu, cntrl_log); in amd_iommu_restart_log()
754 * This function restarts event logging in case the IOMMU experienced
757 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) in amd_iommu_restart_event_logging() argument
759 amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, in amd_iommu_restart_event_logging()
765 * This function restarts GA logging in case the IOMMU experienced
768 void amd_iommu_restart_ga_log(struct amd_iommu *iommu) in amd_iommu_restart_ga_log() argument
770 amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, in amd_iommu_restart_ga_log()
776 * This function resets the command buffer if the IOMMU stopped fetching
779 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) in amd_iommu_reset_cmd_buffer() argument
781 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
783 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in amd_iommu_reset_cmd_buffer()
784 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in amd_iommu_reset_cmd_buffer()
785 iommu->cmd_buf_head = 0; in amd_iommu_reset_cmd_buffer()
786 iommu->cmd_buf_tail = 0; in amd_iommu_reset_cmd_buffer()
788 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
795 static void iommu_enable_command_buffer(struct amd_iommu *iommu) in iommu_enable_command_buffer() argument
799 BUG_ON(iommu->cmd_buf == NULL); in iommu_enable_command_buffer()
801 entry = iommu_virt_to_phys(iommu->cmd_buf); in iommu_enable_command_buffer()
804 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, in iommu_enable_command_buffer()
807 amd_iommu_reset_cmd_buffer(iommu); in iommu_enable_command_buffer()
813 static void iommu_disable_command_buffer(struct amd_iommu *iommu) in iommu_disable_command_buffer() argument
815 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable_command_buffer()
818 static void __init free_command_buffer(struct amd_iommu *iommu) in free_command_buffer() argument
820 iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); in free_command_buffer()
823 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, in iommu_alloc_4k_pages() argument
839 /* Allocates the memory where the IOMMU will log its events */
840 static int __init alloc_event_buffer(struct amd_iommu *iommu) in alloc_event_buffer() argument
842 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL, in alloc_event_buffer()
845 return iommu->evt_buf ? 0 : -ENOMEM; in alloc_event_buffer()
848 static void iommu_enable_event_buffer(struct amd_iommu *iommu) in iommu_enable_event_buffer() argument
852 BUG_ON(iommu->evt_buf == NULL); in iommu_enable_event_buffer()
854 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; in iommu_enable_event_buffer()
856 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, in iommu_enable_event_buffer()
860 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_enable_event_buffer()
861 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_enable_event_buffer()
863 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in iommu_enable_event_buffer()
869 static void iommu_disable_event_buffer(struct amd_iommu *iommu) in iommu_disable_event_buffer() argument
871 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable_event_buffer()
874 static void __init free_event_buffer(struct amd_iommu *iommu) in free_event_buffer() argument
876 iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); in free_event_buffer()
879 static void free_ga_log(struct amd_iommu *iommu) in free_ga_log() argument
882 iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE)); in free_ga_log()
883 iommu_free_pages(iommu->ga_log_tail, get_order(8)); in free_ga_log()
888 static int iommu_ga_log_enable(struct amd_iommu *iommu) in iommu_ga_log_enable() argument
893 if (!iommu->ga_log) in iommu_ga_log_enable()
896 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; in iommu_ga_log_enable()
897 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, in iommu_ga_log_enable()
899 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & in iommu_ga_log_enable()
901 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, in iommu_ga_log_enable()
903 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_ga_log_enable()
904 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_ga_log_enable()
907 iommu_feature_enable(iommu, CONTROL_GAINT_EN); in iommu_ga_log_enable()
908 iommu_feature_enable(iommu, CONTROL_GALOG_EN); in iommu_ga_log_enable()
911 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_ga_log_enable()
923 static int iommu_init_ga_log(struct amd_iommu *iommu) in iommu_init_ga_log() argument
928 iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE)); in iommu_init_ga_log()
929 if (!iommu->ga_log) in iommu_init_ga_log()
932 iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8)); in iommu_init_ga_log()
933 if (!iommu->ga_log_tail) in iommu_init_ga_log()
938 free_ga_log(iommu); in iommu_init_ga_log()
943 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) in alloc_cwwb_sem() argument
945 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1); in alloc_cwwb_sem()
947 return iommu->cmd_sem ? 0 : -ENOMEM; in alloc_cwwb_sem()
950 static void __init free_cwwb_sem(struct amd_iommu *iommu) in free_cwwb_sem() argument
952 if (iommu->cmd_sem) in free_cwwb_sem()
953 iommu_free_page((void *)iommu->cmd_sem); in free_cwwb_sem()
956 static void iommu_enable_xt(struct amd_iommu *iommu) in iommu_enable_xt() argument
965 iommu_feature_enable(iommu, CONTROL_XT_EN); in iommu_enable_xt()
969 static void iommu_enable_gt(struct amd_iommu *iommu) in iommu_enable_gt() argument
974 iommu_feature_enable(iommu, CONTROL_GT_EN); in iommu_enable_gt()
986 static bool __copy_device_table(struct amd_iommu *iommu) in __copy_device_table() argument
989 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in __copy_device_table()
996 /* Each IOMMU uses a separate device table with the same size */ in __copy_device_table()
997 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); in __copy_device_table()
998 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); in __copy_device_table()
1003 pr_err("The device table size of IOMMU:%d is not expected!\n", in __copy_device_table()
1004 iommu->index); in __copy_device_table()
1080 struct amd_iommu *iommu; in copy_device_table() local
1093 for_each_iommu(iommu) { in copy_device_table()
1094 if (pci_seg->id != iommu->pci_seg->id) in copy_device_table()
1096 if (!__copy_device_table(iommu)) in copy_device_table()
1147 set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last, in set_dev_entry_from_acpi_range() argument
1157 if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last)) in set_dev_entry_from_acpi_range()
1186 d->segid = iommu->pci_seg->id; in set_dev_entry_from_acpi_range()
1194 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dev_entry_from_acpi_range()
1198 amd_iommu_set_rlookup_table(iommu, i); in set_dev_entry_from_acpi_range()
1202 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, in set_dev_entry_from_acpi() argument
1205 set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags); in set_dev_entry_from_acpi()
1316 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1319 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, in init_iommu_from_acpi() argument
1328 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in init_iommu_from_acpi()
1342 iommu->acpi_flags = h->flags; in init_iommu_from_acpi()
1366 set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0); in init_iommu_from_acpi()
1377 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1405 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1406 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); in init_iommu_from_acpi()
1435 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1463 set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); in init_iommu_from_acpi()
1464 set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); in init_iommu_from_acpi()
1499 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1566 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1648 static void __init free_sysfs(struct amd_iommu *iommu) in free_sysfs() argument
1650 if (iommu->iommu.dev) { in free_sysfs()
1651 iommu_device_unregister(&iommu->iommu); in free_sysfs()
1652 iommu_device_sysfs_remove(&iommu->iommu); in free_sysfs()
1656 static void __init free_iommu_one(struct amd_iommu *iommu) in free_iommu_one() argument
1658 free_sysfs(iommu); in free_iommu_one()
1659 free_cwwb_sem(iommu); in free_iommu_one()
1660 free_command_buffer(iommu); in free_iommu_one()
1661 free_event_buffer(iommu); in free_iommu_one()
1662 amd_iommu_free_ppr_log(iommu); in free_iommu_one()
1663 free_ga_log(iommu); in free_iommu_one()
1664 iommu_unmap_mmio_space(iommu); in free_iommu_one()
1665 amd_iommu_iopf_uninit(iommu); in free_iommu_one()
1670 struct amd_iommu *iommu, *next; in free_iommu_all() local
1672 for_each_iommu_safe(iommu, next) { in free_iommu_all()
1673 list_del(&iommu->list); in free_iommu_all()
1674 free_iommu_one(iommu); in free_iommu_all()
1675 kfree(iommu); in free_iommu_all()
1680 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1685 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) in amd_iommu_erratum_746_workaround() argument
1694 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1695 pci_read_config_dword(iommu->dev, 0xf4, &value); in amd_iommu_erratum_746_workaround()
1701 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
1703 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); in amd_iommu_erratum_746_workaround()
1704 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); in amd_iommu_erratum_746_workaround()
1707 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1711 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1716 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) in amd_iommu_ats_write_check_workaround() argument
1726 value = iommu_read_l2(iommu, 0x47); in amd_iommu_ats_write_check_workaround()
1732 iommu_write_l2(iommu, 0x47, value | BIT(0)); in amd_iommu_ats_write_check_workaround()
1734 pci_info(iommu->dev, "Applying ATS write check workaround\n"); in amd_iommu_ats_write_check_workaround()
1738 * This function glues the initialization function for one IOMMU
1740 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1742 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, in init_iommu_one() argument
1750 iommu->pci_seg = pci_seg; in init_iommu_one()
1752 raw_spin_lock_init(&iommu->lock); in init_iommu_one()
1753 atomic64_set(&iommu->cmd_sem_val, 0); in init_iommu_one()
1755 /* Add IOMMU to internal data structures */ in init_iommu_one()
1756 list_add_tail(&iommu->list, &amd_iommu_list); in init_iommu_one()
1757 iommu->index = amd_iommus_present++; in init_iommu_one()
1759 if (unlikely(iommu->index >= MAX_IOMMUS)) { in init_iommu_one()
1765 * Copy data from ACPI table entry to the iommu struct in init_iommu_one()
1767 iommu->devid = h->devid; in init_iommu_one()
1768 iommu->cap_ptr = h->cap_ptr; in init_iommu_one()
1769 iommu->mmio_phys = h->mmio_phys; in init_iommu_one()
1777 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1779 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1788 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1790 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1801 early_iommu_features_init(iommu, h); in init_iommu_one()
1808 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, in init_iommu_one()
1809 iommu->mmio_phys_end); in init_iommu_one()
1810 if (!iommu->mmio_base) in init_iommu_one()
1813 return init_iommu_from_acpi(iommu, h); in init_iommu_one()
1816 static int __init init_iommu_one_late(struct amd_iommu *iommu) in init_iommu_one_late() argument
1820 if (alloc_cwwb_sem(iommu)) in init_iommu_one_late()
1823 if (alloc_command_buffer(iommu)) in init_iommu_one_late()
1826 if (alloc_event_buffer(iommu)) in init_iommu_one_late()
1829 iommu->int_enabled = false; in init_iommu_one_late()
1831 init_translation_status(iommu); in init_iommu_one_late()
1832 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_iommu_one_late()
1833 iommu_disable(iommu); in init_iommu_one_late()
1834 clear_translation_pre_enabled(iommu); in init_iommu_one_late()
1835 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", in init_iommu_one_late()
1836 iommu->index); in init_iommu_one_late()
1839 amd_iommu_pre_enabled = translation_pre_enabled(iommu); in init_iommu_one_late()
1842 ret = amd_iommu_create_irq_domain(iommu); in init_iommu_one_late()
1848 * Make sure IOMMU is not considered to translate itself. The IVRS in init_iommu_one_late()
1851 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; in init_iommu_one_late()
1883 * Iterates over all IOMMU entries in the ACPI table, allocates the
1884 * IOMMU structure and initializes it with init_iommu_one()
1890 struct amd_iommu *iommu; in init_iommu_all() local
1909 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); in init_iommu_all()
1910 if (iommu == NULL) in init_iommu_all()
1913 ret = init_iommu_one(iommu, h, table); in init_iommu_all()
1925 /* Phase 3 : Enabling IOMMU features */ in init_iommu_all()
1926 for_each_iommu(iommu) { in init_iommu_all()
1927 ret = init_iommu_one_late(iommu); in init_iommu_all()
1935 static void init_iommu_perf_ctr(struct amd_iommu *iommu) in init_iommu_perf_ctr() argument
1938 struct pci_dev *pdev = iommu->dev; in init_iommu_perf_ctr()
1945 pci_info(pdev, "IOMMU performance counters supported\n"); in init_iommu_perf_ctr()
1947 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); in init_iommu_perf_ctr()
1948 iommu->max_banks = (u8) ((val >> 12) & 0x3f); in init_iommu_perf_ctr()
1949 iommu->max_counters = (u8) ((val >> 7) & 0xf); in init_iommu_perf_ctr()
1958 struct amd_iommu *iommu = dev_to_amd_iommu(dev); in amd_iommu_show_cap() local
1959 return sysfs_emit(buf, "%x\n", iommu->cap); in amd_iommu_show_cap()
1978 .name = "amd-iommu",
1989 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1992 static void __init late_iommu_features_init(struct amd_iommu *iommu) in late_iommu_features_init() argument
1996 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) in late_iommu_features_init()
2000 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); in late_iommu_features_init()
2001 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); in late_iommu_features_init()
2022 static int __init iommu_init_pci(struct amd_iommu *iommu) in iommu_init_pci() argument
2024 int cap_ptr = iommu->cap_ptr; in iommu_init_pci()
2027 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2028 PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
2029 iommu->devid & 0xff); in iommu_init_pci()
2030 if (!iommu->dev) in iommu_init_pci()
2033 /* Prevent binding other PCI device drivers to IOMMU devices */ in iommu_init_pci()
2034 iommu->dev->match_driver = false; in iommu_init_pci()
2036 /* ACPI _PRT won't have an IRQ for IOMMU */ in iommu_init_pci()
2037 iommu->dev->irq_managed = 1; in iommu_init_pci()
2039 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, in iommu_init_pci()
2040 &iommu->cap); in iommu_init_pci()
2042 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) in iommu_init_pci()
2045 late_iommu_features_init(iommu); in iommu_init_pci()
2052 iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; in iommu_init_pci()
2054 BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); in iommu_init_pci()
2063 iommu_enable_gt(iommu); in iommu_init_pci()
2066 if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu)) in iommu_init_pci()
2069 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { in iommu_init_pci()
2075 init_iommu_perf_ctr(iommu); in iommu_init_pci()
2077 if (is_rd890_iommu(iommu->dev)) { in iommu_init_pci()
2080 iommu->root_pdev = in iommu_init_pci()
2081 pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2082 iommu->dev->bus->number, in iommu_init_pci()
2090 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_init_pci()
2091 &iommu->stored_addr_lo); in iommu_init_pci()
2092 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_init_pci()
2093 &iommu->stored_addr_hi); in iommu_init_pci()
2096 iommu->stored_addr_lo &= ~1; in iommu_init_pci()
2100 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); in iommu_init_pci()
2103 iommu->stored_l2[i] = iommu_read_l2(iommu, i); in iommu_init_pci()
2106 amd_iommu_erratum_746_workaround(iommu); in iommu_init_pci()
2107 amd_iommu_ats_write_check_workaround(iommu); in iommu_init_pci()
2109 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, in iommu_init_pci()
2110 amd_iommu_groups, "ivhd%d", iommu->index); in iommu_init_pci()
2115 * Allocate per IOMMU IOPF queue here so that in attach device path, in iommu_init_pci()
2119 ret = amd_iommu_iopf_init(iommu); in iommu_init_pci()
2124 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); in iommu_init_pci()
2126 return pci_enable_device(iommu->dev); in iommu_init_pci()
2167 struct amd_iommu *iommu; in amd_iommu_init_pci() local
2171 /* Init global identity domain before registering IOMMU */ in amd_iommu_init_pci()
2174 for_each_iommu(iommu) { in amd_iommu_init_pci()
2175 ret = iommu_init_pci(iommu); in amd_iommu_init_pci()
2177 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", in amd_iommu_init_pci()
2178 iommu->index, ret); in amd_iommu_init_pci()
2182 iommu_set_cwwb_range(iommu); in amd_iommu_init_pci()
2198 for_each_iommu(iommu) in amd_iommu_init_pci()
2199 amd_iommu_flush_all_caches(iommu); in amd_iommu_init_pci()
2216 static int iommu_setup_msi(struct amd_iommu *iommu) in iommu_setup_msi() argument
2220 r = pci_enable_msi(iommu->dev); in iommu_setup_msi()
2224 r = request_threaded_irq(iommu->dev->irq, in iommu_setup_msi()
2228 iommu); in iommu_setup_msi()
2231 pci_disable_msi(iommu->dev); in iommu_setup_msi()
2300 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_unmask_irq() local
2310 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); in intcapxt_unmask_irq()
2315 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_mask_irq() local
2317 writeq(0, iommu->mmio_base + irqd->hwirq); in intcapxt_mask_irq()
2339 .name = "IOMMU-MSI",
2380 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, in __iommu_setup_intcapxt() argument
2386 int node = dev_to_node(&iommu->dev->dev); in __iommu_setup_intcapxt()
2394 info.data = iommu; in __iommu_setup_intcapxt()
2404 thread_fn, 0, devname, iommu); in __iommu_setup_intcapxt()
2414 static int iommu_setup_intcapxt(struct amd_iommu *iommu) in iommu_setup_intcapxt() argument
2418 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), in iommu_setup_intcapxt()
2419 "AMD-Vi%d-Evt", iommu->index); in iommu_setup_intcapxt()
2420 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, in iommu_setup_intcapxt()
2426 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), in iommu_setup_intcapxt()
2427 "AMD-Vi%d-PPR", iommu->index); in iommu_setup_intcapxt()
2428 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, in iommu_setup_intcapxt()
2435 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), in iommu_setup_intcapxt()
2436 "AMD-Vi%d-GA", iommu->index); in iommu_setup_intcapxt()
2437 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, in iommu_setup_intcapxt()
2445 static int iommu_init_irq(struct amd_iommu *iommu) in iommu_init_irq() argument
2449 if (iommu->int_enabled) in iommu_init_irq()
2453 ret = iommu_setup_intcapxt(iommu); in iommu_init_irq()
2454 else if (iommu->dev->msi_cap) in iommu_init_irq()
2455 ret = iommu_setup_msi(iommu); in iommu_init_irq()
2462 iommu->int_enabled = true; in iommu_init_irq()
2466 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); in iommu_init_irq()
2468 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); in iommu_init_irq()
2623 static void iommu_init_flags(struct amd_iommu *iommu) in iommu_init_flags() argument
2625 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? in iommu_init_flags()
2626 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : in iommu_init_flags()
2627 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); in iommu_init_flags()
2629 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? in iommu_init_flags()
2630 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : in iommu_init_flags()
2631 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); in iommu_init_flags()
2633 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? in iommu_init_flags()
2634 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : in iommu_init_flags()
2635 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); in iommu_init_flags()
2637 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? in iommu_init_flags()
2638 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : in iommu_init_flags()
2639 iommu_feature_disable(iommu, CONTROL_ISOC_EN); in iommu_init_flags()
2642 * make IOMMU memory accesses cache coherent in iommu_init_flags()
2644 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); in iommu_init_flags()
2647 iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT); in iommu_init_flags()
2651 iommu_feature_enable(iommu, CONTROL_EPH_EN); in iommu_init_flags()
2654 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) in iommu_apply_resume_quirks() argument
2658 struct pci_dev *pdev = iommu->root_pdev; in iommu_apply_resume_quirks()
2660 /* RD890 BIOSes may not have completely reconfigured the iommu */ in iommu_apply_resume_quirks()
2661 if (!is_rd890_iommu(iommu->dev) || !pdev) in iommu_apply_resume_quirks()
2665 * First, we need to ensure that the iommu is enabled. This is in iommu_apply_resume_quirks()
2673 /* Enable the iommu */ in iommu_apply_resume_quirks()
2677 /* Restore the iommu BAR */ in iommu_apply_resume_quirks()
2678 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2679 iommu->stored_addr_lo); in iommu_apply_resume_quirks()
2680 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_apply_resume_quirks()
2681 iommu->stored_addr_hi); in iommu_apply_resume_quirks()
2686 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); in iommu_apply_resume_quirks()
2690 iommu_write_l2(iommu, i, iommu->stored_l2[i]); in iommu_apply_resume_quirks()
2693 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2694 iommu->stored_addr_lo | 1); in iommu_apply_resume_quirks()
2697 static void iommu_enable_ga(struct amd_iommu *iommu) in iommu_enable_ga() argument
2703 iommu_feature_enable(iommu, CONTROL_GA_EN); in iommu_enable_ga()
2704 iommu->irte_ops = &irte_128_ops; in iommu_enable_ga()
2707 iommu->irte_ops = &irte_32_ops; in iommu_enable_ga()
2713 static void iommu_disable_irtcachedis(struct amd_iommu *iommu) in iommu_disable_irtcachedis() argument
2715 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); in iommu_disable_irtcachedis()
2718 static void iommu_enable_irtcachedis(struct amd_iommu *iommu) in iommu_enable_irtcachedis() argument
2730 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS); in iommu_enable_irtcachedis()
2731 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_enable_irtcachedis()
2734 iommu->irtcachedis_enabled = true; in iommu_enable_irtcachedis()
2735 pr_info("iommu%d (%#06x) : IRT cache is %s\n", in iommu_enable_irtcachedis()
2736 iommu->index, iommu->devid, in iommu_enable_irtcachedis()
2737 iommu->irtcachedis_enabled ? "disabled" : "enabled"); in iommu_enable_irtcachedis()
2740 static void iommu_enable_2k_int(struct amd_iommu *iommu) in iommu_enable_2k_int() argument
2745 iommu_feature_set(iommu, in iommu_enable_2k_int()
2751 static void early_enable_iommu(struct amd_iommu *iommu) in early_enable_iommu() argument
2753 iommu_disable(iommu); in early_enable_iommu()
2754 iommu_init_flags(iommu); in early_enable_iommu()
2755 iommu_set_device_table(iommu); in early_enable_iommu()
2756 iommu_enable_command_buffer(iommu); in early_enable_iommu()
2757 iommu_enable_event_buffer(iommu); in early_enable_iommu()
2758 iommu_set_exclusion_range(iommu); in early_enable_iommu()
2759 iommu_enable_gt(iommu); in early_enable_iommu()
2760 iommu_enable_ga(iommu); in early_enable_iommu()
2761 iommu_enable_xt(iommu); in early_enable_iommu()
2762 iommu_enable_irtcachedis(iommu); in early_enable_iommu()
2763 iommu_enable_2k_int(iommu); in early_enable_iommu()
2764 iommu_enable(iommu); in early_enable_iommu()
2765 amd_iommu_flush_all_caches(iommu); in early_enable_iommu()
2778 struct amd_iommu *iommu; in early_enable_iommus() local
2798 for_each_iommu(iommu) { in early_enable_iommus()
2799 clear_translation_pre_enabled(iommu); in early_enable_iommus()
2800 early_enable_iommu(iommu); in early_enable_iommus()
2811 for_each_iommu(iommu) { in early_enable_iommus()
2812 iommu_disable_command_buffer(iommu); in early_enable_iommus()
2813 iommu_disable_event_buffer(iommu); in early_enable_iommus()
2814 iommu_disable_irtcachedis(iommu); in early_enable_iommus()
2815 iommu_enable_command_buffer(iommu); in early_enable_iommus()
2816 iommu_enable_event_buffer(iommu); in early_enable_iommus()
2817 iommu_enable_ga(iommu); in early_enable_iommus()
2818 iommu_enable_xt(iommu); in early_enable_iommus()
2819 iommu_enable_irtcachedis(iommu); in early_enable_iommus()
2820 iommu_enable_2k_int(iommu); in early_enable_iommus()
2821 iommu_set_device_table(iommu); in early_enable_iommus()
2822 amd_iommu_flush_all_caches(iommu); in early_enable_iommus()
2829 struct amd_iommu *iommu; in enable_iommus_ppr() local
2834 for_each_iommu(iommu) in enable_iommus_ppr()
2835 amd_iommu_enable_ppr_log(iommu); in enable_iommus_ppr()
2842 struct amd_iommu *iommu; in enable_iommus_vapic() local
2844 for_each_iommu(iommu) { in enable_iommus_vapic()
2849 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2853 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in enable_iommus_vapic()
2854 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in enable_iommus_vapic()
2861 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2885 for_each_iommu(iommu) { in enable_iommus_vapic()
2886 if (iommu_init_ga_log(iommu) || in enable_iommus_vapic()
2887 iommu_ga_log_enable(iommu)) in enable_iommus_vapic()
2890 iommu_feature_enable(iommu, CONTROL_GAM_EN); in enable_iommus_vapic()
2892 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); in enable_iommus_vapic()
2902 struct amd_iommu *iommu; in disable_iommus() local
2904 for_each_iommu(iommu) in disable_iommus()
2905 iommu_disable(iommu); in disable_iommus()
2920 struct amd_iommu *iommu; in amd_iommu_resume() local
2922 for_each_iommu(iommu) in amd_iommu_resume()
2923 iommu_apply_resume_quirks(iommu); in amd_iommu_resume()
2926 for_each_iommu(iommu) in amd_iommu_resume()
2927 early_enable_iommu(iommu); in amd_iommu_resume()
3016 * This is the hardware init function for AMD IOMMU in the system.
3020 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3130 struct amd_iommu *iommu; in amd_iommu_enable_interrupts() local
3133 for_each_iommu(iommu) { in amd_iommu_enable_interrupts()
3134 ret = iommu_init_irq(iommu); in amd_iommu_enable_interrupts()
3170 /* Don't use IOMMU if there is Stoney Ridge graphics */ in detect_ivrs()
3176 pr_info("Disable IOMMU on Stoney Ridge\n"); in detect_ivrs()
3194 * The SNP support requires that the IOMMU be enabled, and is in iommu_snp_enable()
3198 pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n"); in iommu_snp_enable()
3203 pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n"); in iommu_snp_enable()
3209 pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n"); in iommu_snp_enable()
3214 * Enable host SNP support once SNP support is checked on IOMMU. in iommu_snp_enable()
3221 pr_info("IOMMU SNP support enabled.\n"); in iommu_snp_enable()
3231 * AMD IOMMU Initialization State Machine
3295 struct amd_iommu *iommu; in state_next() local
3301 for_each_iommu(iommu) in state_next()
3302 amd_iommu_flush_all_caches(iommu); in state_next()
3384 * This is the core init function for AMD IOMMU hardware in the system.
3390 struct amd_iommu *iommu; in amd_iommu_init() local
3397 * We failed to initialize the AMD IOMMU - try fallback in amd_iommu_init()
3404 for_each_iommu(iommu) in amd_iommu_init()
3405 amd_iommu_debugfs_setup(iommu); in amd_iommu_init()
3424 pr_notice("IOMMU not currently supported when SME is active\n"); in amd_iommu_sme_check()
3431 * Early detect code. This code runs at IOMMU detection time in the DMA
3452 x86_init.iommu.iommu_init = amd_iommu_init; in amd_iommu_detect()
3462 * Parsing functions for the AMD IOMMU specific kernel command line
3496 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); in parse_amd_iommu_options()
3693 /* CPU page table size should match IOMMU guest page table size */ in amd_iommu_pasid_supported()
3709 struct amd_iommu *iommu; in get_amd_iommu() local
3711 for_each_iommu(iommu) in get_amd_iommu()
3713 return iommu; in get_amd_iommu()
3719 * IOMMU EFR Performance Counter support functionality. This code allows
3720 * access to the IOMMU PC functionality.
3726 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_banks() local
3728 if (iommu) in amd_iommu_pc_get_max_banks()
3729 return iommu->max_banks; in amd_iommu_pc_get_max_banks()
3741 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_counters() local
3743 if (iommu) in amd_iommu_pc_get_max_counters()
3744 return iommu->max_counters; in amd_iommu_pc_get_max_counters()
3749 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, in iommu_pc_get_set_reg() argument
3755 /* Make sure the IOMMU PC resource is available */ in iommu_pc_get_set_reg()
3759 /* Check for valid iommu and pc register indexing */ in iommu_pc_get_set_reg()
3760 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) in iommu_pc_get_set_reg()
3766 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | in iommu_pc_get_set_reg()
3767 (iommu->max_counters << 8) | 0x28); in iommu_pc_get_set_reg()
3775 writel((u32)val, iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3776 writel((val >> 32), iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3778 *value = readl(iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3780 *value |= readl(iommu->mmio_base + offset); in iommu_pc_get_set_reg()
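
The max_offset_lim computation above suggests how a performance-counter register offset is composed from the bank, counter and function indices. A small sketch of that composition; the formula is an assumption derived from the visible bounds check, not a documented register layout.

#include <stdint.h>
#include <stdio.h>

/* Compose a perf-counter MMIO offset from bank/counter/function indices (assumed layout). */
static uint32_t pc_reg_offset(uint8_t bank, uint8_t cntr, uint8_t fxn)
{
        return (uint32_t)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
}

int main(void)
{
        uint8_t max_banks = 2, max_counters = 4;
        /* Mirrors the max_offset_lim computation in iommu_pc_get_set_reg(). */
        uint32_t limit = (uint32_t)(((0x40 | max_banks) << 12) |
                                    (max_counters << 8) | 0x28);
        uint32_t off = pc_reg_offset(0, 1, 0x10);

        printf("offset=%#x limit=%#x in_range=%d\n", off, limit, off <= limit);
        return 0;
}
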
3787 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_get_reg() argument
3789 if (!iommu) in amd_iommu_pc_get_reg()
3792 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); in amd_iommu_pc_get_reg()
3795 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_set_reg() argument
3797 if (!iommu) in amd_iommu_pc_set_reg()
3800 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); in amd_iommu_pc_set_reg()
3818 pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret); in iommu_page_make_shared()
3823 pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn); in iommu_page_make_shared()
3832 pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n", in iommu_page_make_shared()
3861 struct amd_iommu *iommu; in amd_iommu_snp_disable() local
3867 for_each_iommu(iommu) { in amd_iommu_snp_disable()
3868 ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE); in amd_iommu_snp_disable()
3872 ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE); in amd_iommu_snp_disable()
3876 ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE); in amd_iommu_snp_disable()