Lines Matching full:iommu

27 #include <linux/amd-iommu.h>
30 #include <asm/iommu.h>
82 * structure describing one IOMMU in the ACPI table. Typically followed by one
98 * A device entry describing which devices a specific IOMMU translates and
109 * An AMD IOMMU memory definition structure. It defines things like exclusion
172 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
178 * The rlookup table is used to find the IOMMU which is responsible
184 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
195 * the IOMMU used by this driver.
197 extern void iommu_flush_all_caches(struct amd_iommu *iommu);
215 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
219 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
220 pci_read_config_dword(iommu->dev, 0xfc, &val);
224 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
226 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
227 pci_write_config_dword(iommu->dev, 0xfc, val);
228 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
231 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
235 pci_write_config_dword(iommu->dev, 0xf0, address);
236 pci_read_config_dword(iommu->dev, 0xf4, &val);
240 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
242 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
243 pci_write_config_dword(iommu->dev, 0xf4, val);
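Only the lines matching "iommu" are shown above, but the access protocol is recoverable: both register files sit behind an index/data pair in the IOMMU's PCI config space (0xf8/0xfc for L1, 0xf0/0xf4 for L2), with bit 31 (L1) or bit 8 (L2) of the index word turning the cycle into a write. A minimal sketch of the complete L1 read helper; only the local variable and the return are added:

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        /* select the L1 bank (l1 << 16) and register offset */
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        /* the companion data register now holds the selected value */
        pci_read_config_dword(iommu->dev, 0xfc, &val);

        return val;
}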
248 * AMD IOMMU MMIO register space handling functions
250 * These functions are used to program the IOMMU device registers in
256 * This function sets the exclusion range in the IOMMU. DMA accesses to the
259 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
261 u64 start = iommu->exclusion_start & PAGE_MASK;
262 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
265 if (!iommu->exclusion_start)
269 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
273 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
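The elided lines build the 64-bit values written to the exclusion base and limit registers; the base register also carries the enable bit. A sketch of the likely body, where MMIO_EXCL_ENABLE_MASK is an assumed name for that bit:

static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        /* no exclusion range configured for this IOMMU */
        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;  /* assumed constant */
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}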
277 /* Programs the physical address of the device table into the IOMMU hardware */
278 static void iommu_set_device_table(struct amd_iommu *iommu)
282 BUG_ON(iommu->mmio_base == NULL);
286 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
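The device table base register takes a physical address with the table size encoded in the low bits (size in 4 KiB units, minus one). A sketch, assuming the driver's globals are named amd_iommu_dev_table and dev_table_size:

static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry  = virt_to_phys(amd_iommu_dev_table);     /* assumed global */
        entry |= (dev_table_size >> 12) - 1;            /* size field; assumed global */
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}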
290 /* Generic functions to enable/disable certain features of the IOMMU. */
291 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
295 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
297 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
300 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
304 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
306 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
309 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
313 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
316 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
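All three helpers are read-modify-write cycles on the same control register; only the modify step is elided. A sketch of the enable case (disable is identical with ctrl &= ~(1 << bit), and the timeout helper masks out the old timeout field before OR-ing in the new encoded value):

static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl  = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);             /* set the feature's control bit */
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}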
320 static void iommu_enable(struct amd_iommu *iommu)
328 printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
329 dev_name(&iommu->dev->dev), iommu->cap_ptr);
331 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
334 if (iommu_feature(iommu, (1ULL << i)))
339 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
342 static void iommu_disable(struct amd_iommu *iommu)
345 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
348 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
349 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
351 /* Disable IOMMU hardware itself */
352 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
356 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
379 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
381 if (iommu->mmio_base)
382 iounmap(iommu->mmio_base);
383 release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
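Only the unmap side shows up in the listing; its counterpart presumably reserves the physical window and maps it uncached, along these lines (a sketch, not the verbatim source):

static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        /* claim the MMIO window so nothing else can map over it */
        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
                return NULL;

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (!ret)
                release_mem_region(address, MMIO_REGION_LENGTH);

        return ret;
}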
388 * The functions below belong to the first pass of AMD IOMMU ACPI table
404 * This function reads the last device id the IOMMU has to handle from the PCI
405 * capability header for this IOMMU
418 * After reading the highest device id from the IOMMU PCI capability header
500 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
507 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
508 * write commands to that buffer later and the IOMMU will execute them
511 static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
519 iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
525 * This function resets the command buffer if the IOMMU stopped fetching
528 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
530 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
532 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
533 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
535 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
542 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
546 BUG_ON(iommu->cmd_buf == NULL);
548 entry = (u64)virt_to_phys(iommu->cmd_buf);
551 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
554 amd_iommu_reset_cmd_buffer(iommu);
555 iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
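Between computing entry and writing it out, the elided line ORs the buffer-size encoding into the base address; MMIO_CMD_SIZE_512 is an assumed name for the 512-entry encoding:

        entry  = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;             /* assumed size-field constant */
        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

Note that amd_iommu_reset_cmd_buffer() clears the head and tail pointers only while CONTROL_CMDBUF_EN is disabled, which is why the enable path funnels through it.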
558 static void __init free_command_buffer(struct amd_iommu *iommu)
560 free_pages((unsigned long)iommu->cmd_buf,
561 get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
564 /* allocates the memory where the IOMMU will log its events to */
565 static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
567 iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
570 if (iommu->evt_buf == NULL)
573 iommu->evt_buf_size = EVT_BUFFER_SIZE;
575 return iommu->evt_buf;
578 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
582 BUG_ON(iommu->evt_buf == NULL);
584 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
586 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
590 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
591 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
593 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
596 static void __init free_event_buffer(struct amd_iommu *iommu)
598 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
601 /* allocates the memory where the IOMMU will log peripheral page requests to */
602 static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
604 iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
607 if (iommu->ppr_log == NULL)
610 return iommu->ppr_log;
613 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
617 if (iommu->ppr_log == NULL)
620 entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
622 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
626 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
627 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
629 iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
630 iommu_feature_enable(iommu, CONTROL_PPR_EN);
633 static void __init free_ppr_log(struct amd_iommu *iommu)
635 if (iommu->ppr_log == NULL)
638 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
641 static void iommu_enable_gt(struct amd_iommu *iommu)
643 if (!iommu_feature(iommu, FEATURE_GT))
646 iommu_feature_enable(iommu, CONTROL_GT_EN);
678 /* Writes the specific IOMMU for a device into the rlookup table */
679 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
681 amd_iommu_rlookup_table[devid] = iommu;
688 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
708 set_iommu_for_device(iommu, devid);
712 * Reads the device exclusion range from ACPI and initializes the IOMMU with
717 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
722 if (iommu) {
724 * We can only configure exclusion ranges per IOMMU, not
729 iommu->exclusion_start = m->range_start;
730 iommu->exclusion_length = m->range_length;
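Besides recording the range on the IOMMU itself, the body most likely also marks the device's entry in the device table as excluded. A sketch, where set_dev_entry_bit() and DEV_ENTRY_EX are assumed driver-internal names:

        if (iommu) {
                /*
                 * We can only configure exclusion ranges per IOMMU, not
                 * per device, so mark the device table entry as excluded.
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);      /* assumed helper */
                iommu->exclusion_start  = m->range_start;
                iommu->exclusion_length = m->range_length;
        }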
735 * This function reads some important data from the IOMMU PCI space and
739 static void __init init_iommu_from_pci(struct amd_iommu *iommu)
741 int cap_ptr = iommu->cap_ptr;
745 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
746 &iommu->cap);
747 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
749 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
752 iommu->first_device = calc_devid(MMIO_GET_BUS(range),
754 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
756 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
758 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
762 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
763 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
765 iommu->features = ((u64)high << 32) | low;
767 if (iommu_feature(iommu, FEATURE_GT)) {
772 shift = iommu->features & FEATURE_PASID_MASK;
778 glxval = iommu->features & FEATURE_GLXVAL_MASK;
787 if (iommu_feature(iommu, FEATURE_GT) &&
788 iommu_feature(iommu, FEATURE_PPR)) {
789 iommu->is_iommu_v2 = true;
793 if (!is_rd890_iommu(iommu->dev))
802 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
803 &iommu->stored_addr_lo);
804 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
805 &iommu->stored_addr_hi);
808 iommu->stored_addr_lo &= ~1;
812 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
815 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
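The extended feature register is read back as two 32-bit halves and then carved into bitfields; the elided lines shift each masked field into place and turn the PASID field into a count. A sketch of that extraction, with FEATURE_PASID_SHIFT and amd_iommu_max_pasids as assumed names:

        if (iommu_feature(iommu, FEATURE_GT)) {
                u32 pasids;
                u64 shift;

                /* the field encodes log2 of the supported PASID count */
                shift   = iommu->features & FEATURE_PASID_MASK;
                shift >>= FEATURE_PASID_SHIFT;          /* assumed constant */
                pasids  = (1 << shift);

                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
        }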
819 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
822 static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
835 iommu->acpi_flags = h->flags;
851 PCI_BUS(iommu->first_device),
852 PCI_SLOT(iommu->first_device),
853 PCI_FUNC(iommu->first_device),
854 PCI_BUS(iommu->last_device),
855 PCI_SLOT(iommu->last_device),
856 PCI_FUNC(iommu->last_device),
859 for (dev_i = iommu->first_device;
860 dev_i <= iommu->last_device; ++dev_i)
861 set_dev_entry_from_acpi(iommu, dev_i,
874 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
904 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
905 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
937 set_dev_entry_from_acpi(iommu, devid, e->flags,
965 set_dev_entry_from_acpi(iommu,
968 set_dev_entry_from_acpi(iommu, dev_i,
980 /* Initializes the device->iommu mapping for the driver */
981 static int __init init_iommu_devices(struct amd_iommu *iommu)
985 for (i = iommu->first_device; i <= iommu->last_device; ++i)
986 set_iommu_for_device(iommu, i);
991 static void __init free_iommu_one(struct amd_iommu *iommu)
993 free_command_buffer(iommu);
994 free_event_buffer(iommu);
995 free_ppr_log(iommu);
996 iommu_unmap_mmio_space(iommu);
1001 struct amd_iommu *iommu, *next;
1003 for_each_iommu_safe(iommu, next) {
1004 list_del(&iommu->list);
1005 free_iommu_one(iommu);
1006 kfree(iommu);
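Since the loop body frees the current entry, for_each_iommu_safe() has to be the deletion-safe flavor of the list walk, presumably a thin wrapper like:

/* iterate over amd_iommu_list; safe against removal of the current entry */
#define for_each_iommu_safe(iommu, next) \
        list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)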
1011 * This function glues the initialization function for one IOMMU
1013 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1015 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1017 spin_lock_init(&iommu->lock);
1019 /* Add IOMMU to internal data structures */
1020 list_add_tail(&iommu->list, &amd_iommu_list);
1021 iommu->index = amd_iommus_present++;
1023 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1028 /* Index is fine - add IOMMU to the array */
1029 amd_iommus[iommu->index] = iommu;
1032 * Copy data from ACPI table entry to the iommu struct
1034 iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
1035 if (!iommu->dev)
1038 iommu->cap_ptr = h->cap_ptr;
1039 iommu->pci_seg = h->pci_seg;
1040 iommu->mmio_phys = h->mmio_phys;
1041 iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
1042 if (!iommu->mmio_base)
1045 iommu->cmd_buf = alloc_command_buffer(iommu);
1046 if (!iommu->cmd_buf)
1049 iommu->evt_buf = alloc_event_buffer(iommu);
1050 if (!iommu->evt_buf)
1053 iommu->int_enabled = false;
1055 init_iommu_from_pci(iommu);
1056 init_iommu_from_acpi(iommu, h);
1057 init_iommu_devices(iommu);
1059 if (iommu_feature(iommu, FEATURE_PPR)) {
1060 iommu->ppr_log = alloc_ppr_log(iommu);
1061 if (!iommu->ppr_log)
1065 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1068 return pci_enable_device(iommu->dev);
1072 * Iterates over all IOMMU entries in the ACPI table, allocates the
1073 * IOMMU structure and initializes it with init_iommu_one()
1079 struct amd_iommu *iommu;
1098 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1099 if (iommu == NULL) {
1104 ret = init_iommu_one(iommu, h);
1130 static int iommu_setup_msi(struct amd_iommu *iommu)
1134 if (pci_enable_msi(iommu->dev))
1137 r = request_threaded_irq(iommu->dev->irq,
1141 iommu->dev);
1144 pci_disable_msi(iommu->dev);
1148 iommu->int_enabled = true;
1149 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1151 if (iommu->ppr_log != NULL)
1152 iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
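The elided arguments to request_threaded_irq() are the hard-IRQ and threaded handlers plus the flags and name; a sketch with amd_iommu_int_handler and amd_iommu_int_thread as assumed handler names:

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,         /* assumed hard-IRQ handler */
                                 amd_iommu_int_thread,          /* assumed threaded handler */
                                 0, "AMD-Vi",
                                 iommu->dev);
        if (r) {
                /* no interrupt for this IOMMU; undo the MSI enable */
                pci_disable_msi(iommu->dev);
                return r;
        }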
1157 static int iommu_init_msi(struct amd_iommu *iommu)
1159 if (iommu->int_enabled)
1162 if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
1163 return iommu_setup_msi(iommu);
1291 static void iommu_init_flags(struct amd_iommu *iommu)
1293 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1294 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1295 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1297 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1298 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1299 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1301 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1302 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1303 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1305 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1306 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1307 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1310 * make IOMMU memory accesses cache coherent
1312 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1315 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
1318 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1324 /* RD890 BIOSes may not have completely reconfigured the iommu */
1325 if (!is_rd890_iommu(iommu->dev))
1329 * First, we need to ensure that the iommu is enabled. This is
1332 pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
1341 /* Enable the iommu */
1347 /* Restore the iommu BAR */
1348 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1349 iommu->stored_addr_lo);
1350 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
1351 iommu->stored_addr_hi);
1356 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
1360 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
1363 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1364 iommu->stored_addr_lo | 1);
1373 struct amd_iommu *iommu;
1375 for_each_iommu(iommu) {
1376 iommu_disable(iommu);
1377 iommu_init_flags(iommu);
1378 iommu_set_device_table(iommu);
1379 iommu_enable_command_buffer(iommu);
1380 iommu_enable_event_buffer(iommu);
1381 iommu_enable_ppr_log(iommu);
1382 iommu_enable_gt(iommu);
1383 iommu_set_exclusion_range(iommu);
1384 iommu_init_msi(iommu);
1385 iommu_enable(iommu);
1386 iommu_flush_all_caches(iommu);
1392 struct amd_iommu *iommu;
1394 for_each_iommu(iommu)
1395 iommu_disable(iommu);
1405 struct amd_iommu *iommu;
1407 for_each_iommu(iommu)
1408 iommu_apply_resume_quirks(iommu);
1428 * This is the core init function for AMD IOMMU hardware in the system.
1432 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
1485 * IOMMU see for that device
1492 /* IOMMU rlookup table - find the IOMMU for a specific device */
1599 * We failed to initialize the AMD IOMMU - try fallback to GART
1611 * Early detect code. This code runs at IOMMU detection time in the DMA
1632 x86_init.iommu.iommu_init = amd_iommu_init;
1643 * Parsing functions for the AMD IOMMU specific kernel command line