Lines Matching full:iommu
27 #include "iommu.h"
28 #include "../dma-iommu.h"
30 #include "../iommu-sva.h"
103 * 2. It maps to each iommu if successful.
104 * 3. Each iommu maps to this domain if successful.
130 struct intel_iommu *iommu; /* the corresponding iommu */ member
161 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
163 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
166 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
168 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
171 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
175 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
177 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
188 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
192 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
197 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); in intel_iommu_setup()
200 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n"); in intel_iommu_setup()
212 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
256 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
260 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) in __iommu_calculate_sagaw() argument
264 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
265 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
268 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
272 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
278 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
283 sagaw = __iommu_calculate_sagaw(iommu); in __iommu_calculate_agaw()
293 * Calculate max SAGAW for each iommu.
295 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
297 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
301 * calculate agaw for each iommu.
305 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
307 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
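The SAGAW bits above describe which page-table depths the hardware supports: bit 1 means a 39-bit/3-level table, bit 2 a 48-bit/4-level table, and bit 3 a 57-bit/5-level table, which is why __iommu_calculate_sagaw() starts from BIT(2) for first-level translation. The AGAW index and the address width are related by a 9-bit-per-level stride; a minimal sketch of that arithmetic (helper names here are illustrative, not the driver's exact ones, and the simplification drops the driver's 64-bit cap):

/* Illustrative: AGAW index <-> address width, 9 translation bits per level. */
#define EXAMPLE_LEVEL_STRIDE	9
#define EXAMPLE_MIN_WIDTH	30

static inline int example_agaw_to_width(int agaw)
{
	return EXAMPLE_MIN_WIDTH + agaw * EXAMPLE_LEVEL_STRIDE;	/* 0->30, 2->48, 3->57 */
}

static inline int example_width_to_agaw(int width)
{
	return (width - EXAMPLE_MIN_WIDTH) / EXAMPLE_LEVEL_STRIDE;	/* 48 -> 2 */
}

__iommu_calculate_agaw() then walks the AGAW index downward from the requested maximum width and returns the first index whose bit is set in the supported-SAGAW mask.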
310 static bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
312 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
313 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
320 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
327 if (!iommu_paging_structure_coherency(info->iommu)) { in domain_update_iommu_coherency()
337 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
338 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
350 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
358 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
359 if (iommu != skip) { in domain_update_iommu_superpage()
361 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
364 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
445 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
448 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
456 if (!alloc && context_copied(iommu, bus, devfn)) in iommu_context_addr()
460 if (sm_supported(iommu)) { in iommu_context_addr()
474 context = alloc_pgtable_page(iommu->node, GFP_ATOMIC); in iommu_context_addr()
478 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
481 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
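iommu_context_addr() turns a (bus, devfn) pair into a pointer to its context entry: the root entry selected by the bus number points at a context table, and in scalable mode the root entry is split into lower and upper halves (devfn below/above 0x80) with context entries twice the legacy size. A sketch of just the index arithmetic, assuming the standard VT-d layout; the helper is illustrative and skips the on-demand allocation and cache flush shown above:

/* Illustrative: which half of the root entry and which slot to use. */
static unsigned int example_context_index(u8 devfn, bool scalable, bool *use_hi)
{
	unsigned int index = devfn;

	*use_hi = false;
	if (scalable) {
		if (devfn >= 0x80) {		/* upper context table, root->hi */
			index -= 0x80;
			*use_hi = true;
		}
		index *= 2;			/* 256-bit scalable entries take two slots */
	}
	return index;				/* legacy mode: plain devfn index */
}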
519 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
520 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
521 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
532 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
543 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
545 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
564 struct intel_iommu *iommu; in device_lookup_iommu() local
578 * the PF instead to find the IOMMU. */ in device_lookup_iommu()
586 for_each_iommu(iommu, drhd) { in device_lookup_iommu()
594 * which we used for the IOMMU lookup. Strictly speaking in device_lookup_iommu()
620 iommu = NULL; in device_lookup_iommu()
622 if (iommu_is_dummy(iommu, dev)) in device_lookup_iommu()
623 iommu = NULL; in device_lookup_iommu()
627 return iommu; in device_lookup_iommu()
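device_lookup_iommu() decides which hardware unit owns a device by walking the DRHD descriptors reported in the ACPI DMAR table: a unit matches when the device (or, for an SR-IOV virtual function, its parent PF) is in the same PCI segment and listed in the unit's device scope, and a segment's catch-all unit (include_all) picks up everything not explicitly claimed. A rough sketch of that walk, with the scope test abbreviated and the VF/bridge handling omitted:

/* Rough sketch only; the real lookup also walks upstream bridges and
 * substitutes the PF for a VF before matching the device scope. */
static struct intel_iommu *example_lookup(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (pci_domain_nr(pdev->bus) != drhd->segment)
			continue;
		if (drhd->include_all)		/* catch-all unit for this segment */
			return iommu;
		/* otherwise: return iommu if pdev appears in drhd->devices[] */
	}
	return NULL;
}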
637 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
642 if (!iommu->root_entry) in free_context_table()
646 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
650 if (!sm_supported(iommu)) in free_context_table()
653 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
658 free_pgtable_page(iommu->root_entry); in free_context_table()
659 iommu->root_entry = NULL; in free_context_table()
663 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, in pgtable_walk() argument
687 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, in dmar_fault_dump_ptes() argument
699 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
702 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
708 if (sm_supported(iommu)) in dmar_fault_dump_ptes()
715 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
725 if (!sm_supported(iommu)) { in dmar_fault_dump_ptes()
765 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level); in dmar_fault_dump_ptes()
778 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
1020 /* We can't just free the pages because the IOMMU may still be walking
1042 /* iommu handling */
1043 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1047 root = alloc_pgtable_page(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
1050 iommu->name); in iommu_alloc_root_entry()
1054 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1055 iommu->root_entry = root; in iommu_alloc_root_entry()
1060 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1066 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1067 if (sm_supported(iommu)) in iommu_set_root_entry()
1070 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1071 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1073 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1076 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1079 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1085 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1088 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1089 if (sm_supported(iommu)) in iommu_set_root_entry()
1090 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1091 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
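iommu_set_root_entry() follows the usual DMAR command protocol: program the new root-table address into DMAR_RTADDR_REG, raise the Set Root Table Pointer command in DMAR_GCMD_REG, and spin on DMAR_GSTS_REG until the hardware reports completion; IOMMU_WAIT_OP() hides that poll (and a timeout warning) in the driver. A stripped-down sketch of the handshake, without the register lock or the cache flushes that follow it:

/* Stripped-down illustration of the write-command-then-poll pattern. */
static void example_set_root_table(void __iomem *reg, u64 root_phys, u32 gcmd)
{
	u32 sts;

	dmar_writeq(reg + DMAR_RTADDR_REG, root_phys);
	writel(gcmd | DMA_GCMD_SRTP, reg + DMAR_GCMD_REG);

	do {
		cpu_relax();
		sts = readl(reg + DMAR_GSTS_REG);
	} while (!(sts & DMA_GSTS_RTPS));	/* root table pointer accepted */
}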
1094 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1099 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1102 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1103 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1106 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1109 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1113 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1133 iommu->name, type); in __iommu_flush_context()
1138 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1139 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1142 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1145 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1149 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1152 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1171 iommu->name, type); in __iommu_flush_iotlb()
1175 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1178 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1181 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1182 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1185 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1188 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1201 struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_lookup_dev_info() argument
1208 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1318 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in __iommu_flush_dev_iotlb()
1343 qi_flush_dev_iotlb_pasid(info->iommu, in iommu_flush_dev_iotlb()
1352 static void domain_flush_pasid_iotlb(struct intel_iommu *iommu, in domain_flush_pasid_iotlb() argument
1356 u16 did = domain_id_iommu(domain, iommu); in domain_flush_pasid_iotlb()
1362 qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih); in domain_flush_pasid_iotlb()
1365 qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih); in domain_flush_pasid_iotlb()
1369 static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb_psi() argument
1401 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) in __iommu_flush_iotlb_psi()
1402 iommu->flush.flush_iotlb(iommu, did, 0, 0, in __iommu_flush_iotlb_psi()
1405 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in __iommu_flush_iotlb_psi()
1409 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1417 u16 did = domain_id_iommu(domain, iommu); in iommu_flush_iotlb_psi()
1426 domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1428 __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih); in iommu_flush_iotlb_psi()
1434 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
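The page-selective invalidation (*_psi) paths describe the flush range as an aligned power-of-two block: the page count is rounded up to a power of two, encoded as an address-mask order, and the base address is aligned down to that order, so the hardware may invalidate a somewhat larger block than requested. A worked sketch of the arithmetic (the real __iommu_flush_iotlb_psi() additionally widens the mask when the pfn is not naturally aligned):

/* Illustrative: 5 pages at pfn 0x1234 round up to 8 pages (order 3),
 * and the base is aligned down to 0x1230 before being programmed. */
static void example_psi_range(unsigned long pfn, unsigned int pages,
			      u64 *addr, unsigned int *mask)
{
	unsigned long aligned_pages = roundup_pow_of_two(pages);	/* 5 -> 8 */

	*mask = ilog2(aligned_pages);					/* 8 -> 3 */
	*addr = (u64)(pfn & ~(aligned_pages - 1)) << VTD_PAGE_SHIFT;
}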
1439 static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain, in __mapping_notify_one() argument
1446 if (cap_caching_mode(iommu->cap) && !domain->use_first_level) in __mapping_notify_one()
1447 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1449 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1470 __iommu_flush_iotlb_psi(info->iommu, info->did, in parent_domain_flush()
1499 struct intel_iommu *iommu = info->iommu; in intel_flush_iotlb_all() local
1500 u16 did = domain_id_iommu(dmar_domain, iommu); in intel_flush_iotlb_all()
1503 domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1505 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1508 if (!cap_caching_mode(iommu->cap)) in intel_flush_iotlb_all()
1516 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1521 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1524 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1525 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1527 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1530 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1533 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1536 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1541 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1542 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1543 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1546 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1549 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1552 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1557 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1558 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1561 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1562 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1563 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1566 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1569 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1572 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1576 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1578 iommu->name, ndomains); in iommu_init_domains()
1580 spin_lock_init(&iommu->lock); in iommu_init_domains()
1582 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1583 if (!iommu->domain_ids) in iommu_init_domains()
1592 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1601 if (sm_supported(iommu)) in iommu_init_domains()
1602 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1607 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1609 if (!iommu->domain_ids) in disable_dmar_iommu()
1613 * All iommu domains must have been detached from the devices, in disable_dmar_iommu()
1616 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1620 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1621 iommu_disable_translation(iommu); in disable_dmar_iommu()
1624 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1626 if (iommu->domain_ids) { in free_dmar_iommu()
1627 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1628 iommu->domain_ids = NULL; in free_dmar_iommu()
1631 if (iommu->copied_tables) { in free_dmar_iommu()
1632 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1633 iommu->copied_tables = NULL; in free_dmar_iommu()
1637 free_context_table(iommu); in free_dmar_iommu()
1640 if (pasid_supported(iommu)) { in free_dmar_iommu()
1641 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1642 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1685 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) in domain_attach_iommu() argument
1695 spin_lock(&iommu->lock); in domain_attach_iommu()
1696 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1699 spin_unlock(&iommu->lock); in domain_attach_iommu()
1704 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1705 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1707 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1711 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1714 info->iommu = iommu; in domain_attach_iommu()
1715 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1723 spin_unlock(&iommu->lock); in domain_attach_iommu()
1727 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1729 spin_unlock(&iommu->lock); in domain_attach_iommu()
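domain_attach_iommu() gives a domain a per-IOMMU domain ID the first time it is attached behind that unit: under iommu->lock it either bumps the refcount of an existing entry in domain->iommu_array or claims the first free bit in iommu->domain_ids (IDs 0 and, in scalable mode, FLPT_DEFAULT_DID were reserved by iommu_init_domains()). A condensed sketch of that allocate-or-reuse pattern, with the error paths and xarray insertion trimmed:

/* Condensed sketch, not the exact driver flow. */
spin_lock(&iommu->lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (info) {
	info->refcnt++;					/* already bound once */
} else {
	num = find_first_zero_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	if (num < cap_ndoms(iommu->cap)) {
		set_bit(num, iommu->domain_ids);	/* claim the domain ID */
		/* record { did = num, refcnt = 1, iommu } in domain->iommu_array */
	}
}
spin_unlock(&iommu->lock);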
1734 void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) in domain_detach_iommu() argument
1738 spin_lock(&iommu->lock); in domain_detach_iommu()
1739 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1741 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1742 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1747 spin_unlock(&iommu->lock); in domain_detach_iommu()
1797 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1802 domain_lookup_dev_info(domain, iommu, bus, devfn); in domain_context_mapping_one()
1803 u16 did = domain_id_iommu(domain, iommu); in domain_context_mapping_one()
1814 spin_lock(&iommu->lock); in domain_context_mapping_one()
1816 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1821 if (context_present(context) && !context_copied(iommu, bus, devfn)) in domain_context_mapping_one()
1833 if (context_copied(iommu, bus, devfn)) { in domain_context_mapping_one()
1836 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
1837 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
1841 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
1845 clear_context_copied(iommu, bus, devfn); in domain_context_mapping_one()
1850 if (sm_supported(iommu)) { in domain_context_mapping_one()
1879 * Skip top levels of page tables for iommu which has in domain_context_mapping_one()
1882 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
1902 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
1910 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
1919 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
1920 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
1924 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
1926 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
1932 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1939 struct intel_iommu *iommu; member
1948 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
1958 struct intel_iommu *iommu = info->iommu; in domain_context_mapping() local
1965 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
1969 data.iommu = iommu; in domain_context_mapping()
2034 iommu_flush_iotlb_psi(info->iommu, domain, in switch_to_super_page()
2159 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
2163 if (!iommu) in domain_context_clear_one()
2166 spin_lock(&iommu->lock); in domain_context_clear_one()
2167 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2169 spin_unlock(&iommu->lock); in domain_context_clear_one()
2173 if (sm_supported(iommu)) { in domain_context_clear_one()
2177 did_old = domain_id_iommu(info->domain, iommu); in domain_context_clear_one()
2183 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2184 spin_unlock(&iommu->lock); in domain_context_clear_one()
2185 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2191 if (sm_supported(iommu)) in domain_context_clear_one()
2192 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2194 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2203 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2213 * Skip top levels of page tables for iommu which has in domain_setup_first_level()
2216 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2232 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2233 domain_id_iommu(domain, iommu), in domain_setup_first_level()
2321 struct intel_iommu *iommu = info->iommu; in dmar_domain_attach_device() local
2325 ret = domain_attach_iommu(domain, iommu); in dmar_domain_attach_device()
2334 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in dmar_domain_attach_device()
2337 ret = intel_pasid_setup_pass_through(iommu, in dmar_domain_attach_device()
2340 ret = domain_setup_first_level(iommu, domain, dev, in dmar_domain_attach_device()
2343 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_domain_attach_device()
2359 if (sm_supported(info->iommu) || !domain_type_is_si(info->domain)) in dmar_domain_attach_device()
2420 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2423 * Start from a sane iommu hardware state. in intel_iommu_init_qi()
2428 if (!iommu->qi) { in intel_iommu_init_qi()
2432 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2437 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2440 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2444 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2445 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2447 iommu->name); in intel_iommu_init_qi()
2449 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2450 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2451 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2455 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2477 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
2507 new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL); in copy_context_table()
2521 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2522 set_bit(did, iommu->domain_ids); in copy_context_table()
2524 set_context_copied(iommu, bus, devfn); in copy_context_table()
2530 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
2539 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
2549 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2551 new_ext = !!sm_supported(iommu); in copy_translation_tables()
2562 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2563 if (!iommu->copied_tables) in copy_translation_tables()
2582 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
2586 iommu->name, bus); in copy_translation_tables()
2591 spin_lock(&iommu->lock); in copy_translation_tables()
2600 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2607 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2610 spin_unlock(&iommu->lock); in copy_translation_tables()
2614 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
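copy_translation_tables() handles the kdump case where the previous kernel left translation enabled: it reads the old root-table address straight out of DMAR_RTADDR_REG, maps the old tables, copies each bus's context table into freshly allocated pages, marks the domain IDs found there as used, and flags the copied entries so they are torn down before new attachments reuse them. A rough sketch of the flow, assuming a memremap-style mapping of the old root table:

/* Rough flow sketch only; the real function also distinguishes legacy
 * and scalable root formats and does per-entry bookkeeping. */
old_rt_phys = dmar_readq(iommu->reg + DMAR_RTADDR_REG) & VTD_PAGE_MASK;
old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);

for (bus = 0; bus < 256; bus++) {
	/* copy_context_table(): duplicate this bus's old context table,
	 * set the used bits in iommu->domain_ids and the context-copied
	 * bits so later attachments flush the stale entries first. */
}

/* then, under iommu->lock, point iommu->root_entry[bus] at the copies
 * and flush the cache before reprogramming DMAR_RTADDR_REG */
memunmap(old_rt);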
2627 struct intel_iommu *iommu; in init_dmars() local
2634 for_each_iommu(iommu, drhd) { in init_dmars()
2636 iommu_disable_translation(iommu); in init_dmars()
2641 * Find the max pasid size of all IOMMUs in the system. in init_dmars()
2645 if (pasid_supported(iommu)) { in init_dmars()
2646 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2652 intel_iommu_init_qi(iommu); in init_dmars()
2654 ret = iommu_init_domains(iommu); in init_dmars()
2658 init_translation_status(iommu); in init_dmars()
2660 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
2661 iommu_disable_translation(iommu); in init_dmars()
2662 clear_translation_pre_enabled(iommu); in init_dmars()
2664 iommu->name); in init_dmars()
2670 * among all IOMMUs. Need to split it later. in init_dmars()
2672 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2676 if (translation_pre_enabled(iommu)) { in init_dmars()
2679 ret = copy_translation_tables(iommu); in init_dmars()
2682 * We found the IOMMU with translation in init_dmars()
2691 iommu->name); in init_dmars()
2692 iommu_disable_translation(iommu); in init_dmars()
2693 clear_translation_pre_enabled(iommu); in init_dmars()
2696 iommu->name); in init_dmars()
2700 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2702 intel_svm_check(iommu); in init_dmars()
2710 for_each_active_iommu(iommu, drhd) { in init_dmars()
2711 iommu_flush_write_buffer(iommu); in init_dmars()
2712 iommu_set_root_entry(iommu); in init_dmars()
2735 for_each_iommu(iommu, drhd) { in init_dmars()
2742 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2746 iommu_flush_write_buffer(iommu); in init_dmars()
2749 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
2755 ret = intel_svm_enable_prq(iommu); in init_dmars()
2761 ret = dmar_set_interrupt(iommu); in init_dmars()
2769 for_each_active_iommu(iommu, drhd) { in init_dmars()
2770 disable_dmar_iommu(iommu); in init_dmars()
2771 free_dmar_iommu(iommu); in init_dmars()
2809 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
2821 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
2824 for_each_active_iommu(iommu, drhd) { in init_iommu_hw()
2825 if (iommu->qi) { in init_iommu_hw()
2826 ret = dmar_reenable_qi(iommu); in init_iommu_hw()
2832 for_each_iommu(iommu, drhd) { in init_iommu_hw()
2839 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2843 iommu_flush_write_buffer(iommu); in init_iommu_hw()
2844 iommu_set_root_entry(iommu); in init_iommu_hw()
2845 iommu_enable_translation(iommu); in init_iommu_hw()
2846 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2855 struct intel_iommu *iommu; in iommu_flush_all() local
2857 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
2858 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2860 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2868 struct intel_iommu *iommu = NULL; in iommu_suspend() local
2873 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
2874 iommu_disable_translation(iommu); in iommu_suspend()
2876 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
2878 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
2879 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
2880 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
2881 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
2882 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
2883 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
2884 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
2885 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
2887 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
2895 struct intel_iommu *iommu = NULL; in iommu_resume() local
2900 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
2902 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
2906 for_each_active_iommu(iommu, drhd) { in iommu_resume()
2908 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
2910 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
2911 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
2912 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
2913 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
2914 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
2915 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
2916 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
2917 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
2919 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3145 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3147 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); in intel_iommu_add()
3151 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3153 iommu->name); in intel_iommu_add()
3157 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
3158 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3160 iommu->name); in intel_iommu_add()
3167 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3168 iommu_disable_translation(iommu); in intel_iommu_add()
3170 ret = iommu_init_domains(iommu); in intel_iommu_add()
3172 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3176 intel_svm_check(iommu); in intel_iommu_add()
3183 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3187 intel_iommu_init_qi(iommu); in intel_iommu_add()
3188 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3191 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
3192 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
3197 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
3201 iommu_set_root_entry(iommu); in intel_iommu_add()
3202 iommu_enable_translation(iommu); in intel_iommu_add()
3204 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3208 disable_dmar_iommu(iommu); in intel_iommu_add()
3210 free_dmar_iommu(iommu); in intel_iommu_add()
3217 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
3221 if (iommu == NULL) in dmar_iommu_hotplug()
3227 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
3228 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
3281 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) in dmar_ats_supported() argument
3296 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
3301 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
3429 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
3435 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
3436 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
3455 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
3458 for_each_iommu(iommu, drhd) in intel_disable_iommus()
3459 iommu_disable_translation(iommu); in intel_disable_iommus()
3465 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
3473 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
3474 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
3486 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
3492 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
3493 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
3502 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
3503 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
3510 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
3511 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
3518 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
3519 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
3526 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
3527 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
3534 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
3536 bitmap_weight(iommu->domain_ids, in domains_used_show()
3537 cap_ndoms(iommu->cap))); in domains_used_show()
3552 .name = "intel-iommu",
3580 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
3583 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3599 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
3603 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
3636 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3648 struct intel_iommu *iommu; in intel_iommu_init() local
3651 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
3685 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
3686 * mempool aren't set up, which means that the IOMMU's PMRs in intel_iommu_init()
3693 for_each_iommu(iommu, drhd) in intel_iommu_init()
3694 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3729 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
3735 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3737 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3739 pr_info_once("IOMMU batching disallowed due to virtualization\n"); in intel_iommu_init()
3742 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3744 "%s", iommu->name); in intel_iommu_init()
3745 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3747 iommu_pmu_register(iommu); in intel_iommu_init()
3759 for_each_iommu(iommu, drhd) { in intel_iommu_init()
3760 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3761 iommu_enable_translation(iommu); in intel_iommu_init()
3763 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3788 * NB - intel-iommu lacks any sort of reference counting for the users of
3806 struct intel_iommu *iommu = info->iommu; in dmar_remove_one_dev_info() local
3810 if (dev_is_pci(info->dev) && sm_supported(iommu)) in dmar_remove_one_dev_info()
3811 intel_pasid_tear_down_entry(iommu, info->dev, in dmar_remove_one_dev_info()
3822 domain_detach_iommu(domain, iommu); in dmar_remove_one_dev_info()
3834 struct intel_iommu *iommu = info->iommu; in device_block_translation() local
3839 if (sm_supported(iommu)) in device_block_translation()
3840 intel_pasid_tear_down_entry(iommu, dev, in device_block_translation()
3853 domain_detach_iommu(info->domain, iommu); in device_block_translation()
3937 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_user() local
3943 if (!nested_supported(iommu) || flags) in intel_iommu_domain_alloc_user()
3951 if (nested_parent && !nested_supported(iommu)) in intel_iommu_domain_alloc_user()
3953 if (user_data || (dirty_tracking && !ssads_supported(iommu))) in intel_iommu_domain_alloc_user()
3998 struct intel_iommu *iommu = info->iommu; in prepare_domain_attach_device() local
4001 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in prepare_domain_attach_device()
4004 if (domain->dirty_ops && !ssads_supported(iommu)) in prepare_domain_attach_device()
4007 /* check if this iommu agaw is sufficient for max mapped address */ in prepare_domain_attach_device()
4008 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
4009 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
4010 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
4019 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
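prepare_domain_attach_device() clamps the attachment to what this IOMMU can actually address: the usable width is the IOMMU's AGAW width capped by CAP.MGAW, the attach is refused if the domain already maps something wider, and otherwise surplus top page-table levels are peeled off in the loop above until the domain's depth matches the IOMMU's. Illustrative arithmetic for the clamp, reusing the width helper sketched earlier:

/* Illustrative only: the address width this attachment may use. */
int addr_width = example_agaw_to_width(iommu->agaw);	/* e.g. 48 */

if (addr_width > cap_mgaw(iommu->cap))
	addr_width = cap_mgaw(iommu->cap);		/* hardware MGAW limit */

/* reject if the domain's highest mapped address needs more than
 * addr_width bits; otherwise drop top levels while
 * iommu->agaw < dmar_domain->agaw */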
4071 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
4168 iommu_flush_iotlb_psi(info->iommu, dmar_domain, in intel_iommu_tlb_sync()
4203 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
4227 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
4264 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
4266 return ssads_supported(info->iommu); in intel_iommu_capable()
4276 struct intel_iommu *iommu; in intel_iommu_probe_device() local
4280 iommu = device_lookup_iommu(dev, &bus, &devfn); in intel_iommu_probe_device()
4281 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
4295 info->segment = iommu->segment; in intel_iommu_probe_device()
4299 info->iommu = iommu; in intel_iommu_probe_device()
4301 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
4303 dmar_ats_supported(pdev, iommu)) { in intel_iommu_probe_device()
4308 * For an IOMMU that supports device IOTLB throttling in intel_iommu_probe_device()
4310 * of a VF such that IOMMU HW can gauge queue depth in intel_iommu_probe_device()
4314 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
4318 if (sm_supported(iommu)) { in intel_iommu_probe_device()
4319 if (pasid_supported(iommu)) { in intel_iommu_probe_device()
4326 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
4334 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in intel_iommu_probe_device()
4345 return &iommu->iommu; in intel_iommu_probe_device()
4434 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
4439 iommu = info->iommu; in intel_iommu_enable_sva()
4440 if (!iommu) in intel_iommu_enable_sva()
4443 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
4451 * support PCI/PRI. The IOMMU side has no means to check the in intel_iommu_enable_sva()
4452 * capability of device-specific IOPF. Therefore, IOMMU can only in intel_iommu_enable_sva()
4470 struct intel_iommu *iommu; in intel_iommu_enable_iopf() local
4479 iommu = info->iommu; in intel_iommu_enable_iopf()
4480 if (!iommu) in intel_iommu_enable_iopf()
4491 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
4509 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
4517 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf() local
4539 WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev)); in intel_iommu_disable_iopf()
4578 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4584 * thus not be able to bypass the IOMMU restrictions.
4590 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
4608 __mapping_notify_one(info->iommu, dmar_domain, pfn, pages); in intel_iommu_iotlb_sync_map()
4616 struct intel_iommu *iommu = info->iommu; in intel_iommu_remove_dev_pasid() local
4627 * notification. Before consolidating that code into iommu core, let in intel_iommu_remove_dev_pasid()
4647 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_remove_dev_pasid()
4651 intel_pasid_tear_down_entry(iommu, dev, pasid, false); in intel_iommu_remove_dev_pasid()
4660 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid() local
4665 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) in intel_iommu_set_dev_pasid()
4671 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4682 ret = domain_attach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4687 ret = intel_pasid_setup_pass_through(iommu, dev, pasid); in intel_iommu_set_dev_pasid()
4689 ret = domain_setup_first_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4692 ret = intel_pasid_setup_second_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4708 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4717 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info() local
4725 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4726 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4742 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, in device_set_dirty_tracking()
4895 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
4976 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
5005 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()
5099 * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
5116 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
5119 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
5138 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob) in ecmd_submit_sync() argument
5144 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
5147 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
5149 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
5162 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
5163 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
5165 IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq, in ecmd_submit_sync()
5175 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()
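ecmd_submit_sync() drives the VT-d enhanced command interface: operand B is written to DMAR_ECEO_REG, the command plus operand A to DMAR_ECMD_REG, and completion is detected by polling DMAR_ECRSP_REG until the in-progress bit clears, after which the response's status field says whether the command succeeded. A bare-bones sketch of the handshake, without the register lock or the initial busy check the driver performs; constant names follow the driver's header and the sketch is illustrative:

/* Bare-bones ECMD handshake (illustrative, no locking, no timeout). */
static u64 example_ecmd(void __iomem *reg, u64 ecmd, u64 oa, u64 ob)
{
	u64 res;

	dmar_writeq(reg + DMAR_ECEO_REG, ob);			/* operand B */
	dmar_writeq(reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));

	do {
		cpu_relax();
		res = dmar_readq(reg + DMAR_ECRSP_REG);
	} while (res & DMA_ECMD_ECRSP_IP);	/* IP clears when the command retires */

	return res;	/* caller decodes the status-code field of the response */
}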