Lines Matching full:iommu

38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
46 #include <asm/iommu.h>
84 * to the IOMMU core, which will then use this information to split
88 * Traditionally the IOMMU core just handed us the mappings directly,
95 * If at some point we'd like to utilize the IOMMU core's new behavior,
165 /* global iommu list, set NULL for ignored DMAR units */
342 * 2. It maps to each iommu if successful.
343 * 3. Each iommu maps to this domain if successful.
375 int iommu_coherency;/* indicate coherency of iommu access */
377 int iommu_count; /* reference count of iommu */
381 spinlock_t iommu_lock; /* protect iommu set in domain */
393 struct intel_iommu *iommu; /* IOMMU used by this device */ member
451 printk(KERN_INFO "Intel-IOMMU: enabled\n"); in intel_iommu_setup()
454 printk(KERN_INFO "Intel-IOMMU: disabled\n"); in intel_iommu_setup()
458 "Intel-IOMMU: disable GFX device mapping\n"); in intel_iommu_setup()
461 "Intel-IOMMU: Forcing DAC for PCI devices\n"); in intel_iommu_setup()
465 "Intel-IOMMU: disable batched IOTLB flush\n"); in intel_iommu_setup()
469 "Intel-IOMMU: disable supported super page\n"); in intel_iommu_setup()
532 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
537 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
548 * Calculate max SAGAW for each iommu.
550 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
552 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
556 * calculate agaw for each iommu.
560 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
562 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
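The __iommu_calculate_agaw() lines above pick the highest adjusted guest address width (AGAW) that the hardware advertises in its SAGAW capability bits without exceeding the requested width. A simplified user-space sketch of that selection, assuming the usual VT-d encoding where AGAW level n covers 30 + 9*n bits; the constants and helpers here are illustrative stand-ins, not the kernel's:

/* agaw_sketch.c: pick the best supported AGAW for a requested width */
#include <stdio.h>

#define LEVEL_STRIDE  9
#define LEVEL_OFFSET  30

static int width_to_agaw(int width)
{
        return (width - LEVEL_OFFSET) / LEVEL_STRIDE;
}

/* highest SAGAW bit that does not exceed max_gaw, or -1 if none */
static int calculate_agaw(unsigned long sagaw_bits, int max_gaw)
{
        int agaw;

        for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
                if (sagaw_bits & (1UL << agaw))
                        return agaw;
        return -1;
}

int main(void)
{
        /* e.g. hardware reporting 3- and 4-level (39/48-bit) support */
        unsigned long sagaw = (1UL << 1) | (1UL << 2);

        printf("agaw for a 48-bit domain: %d\n", calculate_agaw(sagaw, 48));
        return 0;
}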
565 /* This function only returns a single iommu in a domain */
612 struct intel_iommu *iommu = NULL; in domain_update_iommu_superpage() local
621 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
622 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
653 return drhd->iommu; in device_to_iommu()
658 return drhd->iommu; in device_to_iommu()
662 return drhd->iommu; in device_to_iommu()
676 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, in device_to_context_entry() argument
684 spin_lock_irqsave(&iommu->lock, flags); in device_to_context_entry()
685 root = &iommu->root_entry[bus]; in device_to_context_entry()
689 alloc_pgtable_page(iommu->node); in device_to_context_entry()
691 spin_unlock_irqrestore(&iommu->lock, flags); in device_to_context_entry()
694 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in device_to_context_entry()
698 __iommu_flush_cache(iommu, root, sizeof(*root)); in device_to_context_entry()
700 spin_unlock_irqrestore(&iommu->lock, flags); in device_to_context_entry()
704 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
711 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
712 root = &iommu->root_entry[bus]; in device_context_mapped()
720 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
724 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_table() argument
730 spin_lock_irqsave(&iommu->lock, flags); in clear_context_table()
731 root = &iommu->root_entry[bus]; in clear_context_table()
735 __iommu_flush_cache(iommu, &context[devfn], \ in clear_context_table()
738 spin_unlock_irqrestore(&iommu->lock, flags); in clear_context_table()
741 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
748 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
749 if (!iommu->root_entry) { in free_context_table()
753 root = &iommu->root_entry[i]; in free_context_table()
758 free_pgtable_page(iommu->root_entry); in free_context_table()
759 iommu->root_entry = NULL; in free_context_table()
761 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
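The context-table helpers above (device_to_context_entry(), device_context_mapped(), clear_context_table(), free_context_table()) all walk the same two-level structure: a 256-entry root table indexed by PCI bus number, where each present root entry points to a per-bus context table of 256 entries indexed by devfn. A minimal sketch of that walk; the struct layouts are illustrative, not the hardware format from include/linux/intel-iommu.h:

/* context_walk_sketch.c: simplified root-table -> context-table lookup */
#include <stdint.h>
#include <stdlib.h>

struct context_entry { uint64_t lo, hi; };            /* 256 per bus   */
struct root_entry    { struct context_entry *ctx; };  /* 256 per IOMMU */

static struct context_entry *
lookup_context(struct root_entry *root_table, uint8_t bus, uint8_t devfn,
               int alloc)
{
        struct root_entry *root = &root_table[bus];

        if (!root->ctx) {
                if (!alloc)
                        return NULL;          /* device_context_mapped() case */
                /* one page's worth of context entries for this bus */
                root->ctx = calloc(256, sizeof(struct context_entry));
                if (!root->ctx)
                        return NULL;
        }
        return &root->ctx[devfn];
}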
938 /* iommu handling */
939 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
944 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
948 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
950 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
951 iommu->root_entry = root; in iommu_alloc_root_entry()
952 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
957 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
963 addr = iommu->root_entry; in iommu_set_root_entry()
965 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
966 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); in iommu_set_root_entry()
968 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
971 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
974 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
977 static void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
982 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
985 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
986 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
989 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
992 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
996 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1019 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1020 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1023 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1026 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1030 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1033 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1059 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1062 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1065 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1068 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1069 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1072 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1075 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1079 printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); in __iommu_flush_iotlb()
1081 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", in __iommu_flush_iotlb()
1092 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn); in iommu_support_dev_iotlb() local
1094 if (!ecap_dev_iotlb_support(iommu->ecap)) in iommu_support_dev_iotlb()
1097 if (!iommu->qi) in iommu_support_dev_iotlb()
1117 info->iommu = iommu; in iommu_support_dev_iotlb()
1152 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); in iommu_flush_dev_iotlb()
1157 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, in iommu_flush_iotlb_psi() argument
1171 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1172 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1175 iommu->flush.flush_iotlb(iommu, did, addr, mask, in iommu_flush_iotlb_psi()
1182 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1183 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); in iommu_flush_iotlb_psi()
1186 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1191 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1192 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1194 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1197 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1200 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1203 static int iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1208 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1209 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1210 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1213 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1216 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1220 static int iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1225 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1226 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1227 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1230 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1233 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1238 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1243 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1244 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id, in iommu_init_domains()
1248 spin_lock_init(&iommu->lock); in iommu_init_domains()
1253 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1254 if (!iommu->domain_ids) { in iommu_init_domains()
1258 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), in iommu_init_domains()
1260 if (!iommu->domains) { in iommu_init_domains()
1269 if (cap_caching_mode(iommu->cap)) in iommu_init_domains()
1270 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1278 void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1284 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1285 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { in free_dmar_iommu()
1286 domain = iommu->domains[i]; in free_dmar_iommu()
1287 clear_bit(i, iommu->domain_ids); in free_dmar_iommu()
1300 if (iommu->gcmd & DMA_GCMD_TE) in free_dmar_iommu()
1301 iommu_disable_translation(iommu); in free_dmar_iommu()
1303 if (iommu->irq) { in free_dmar_iommu()
1304 irq_set_handler_data(iommu->irq, NULL); in free_dmar_iommu()
1306 free_irq(iommu->irq, iommu); in free_dmar_iommu()
1307 destroy_irq(iommu->irq); in free_dmar_iommu()
1310 kfree(iommu->domains); in free_dmar_iommu()
1311 kfree(iommu->domain_ids); in free_dmar_iommu()
1313 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1325 free_context_table(iommu); in free_dmar_iommu()
1344 struct intel_iommu *iommu) in iommu_attach_domain() argument
1350 ndomains = cap_ndoms(iommu->cap); in iommu_attach_domain()
1352 spin_lock_irqsave(&iommu->lock, flags); in iommu_attach_domain()
1354 num = find_first_zero_bit(iommu->domain_ids, ndomains); in iommu_attach_domain()
1356 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_attach_domain()
1357 printk(KERN_ERR "IOMMU: no free domain ids\n"); in iommu_attach_domain()
1362 set_bit(num, iommu->domain_ids); in iommu_attach_domain()
1363 set_bit(iommu->seq_id, &domain->iommu_bmp); in iommu_attach_domain()
1364 iommu->domains[num] = domain; in iommu_attach_domain()
1365 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_attach_domain()
1371 struct intel_iommu *iommu) in iommu_detach_domain() argument
1377 spin_lock_irqsave(&iommu->lock, flags); in iommu_detach_domain()
1378 ndomains = cap_ndoms(iommu->cap); in iommu_detach_domain()
1379 for_each_set_bit(num, iommu->domain_ids, ndomains) { in iommu_detach_domain()
1380 if (iommu->domains[num] == domain) { in iommu_detach_domain()
1387 clear_bit(num, iommu->domain_ids); in iommu_detach_domain()
1388 clear_bit(iommu->seq_id, &domain->iommu_bmp); in iommu_detach_domain()
1389 iommu->domains[num] = NULL; in iommu_detach_domain()
1391 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_detach_domain()
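iommu_attach_domain() and iommu_detach_domain() above hand out domain numbers from a per-IOMMU bitmap (find_first_zero_bit()/set_bit()/clear_bit()) and keep a matching pointer array so an ID can be mapped back to its domain. A plain-C sketch of that bookkeeping, with a fixed-size bitmap standing in for cap_ndoms():

/* domain_id_sketch.c: bitmap-based domain-id allocation */
#include <stdint.h>

#define NDOMAINS 64                        /* cap_ndoms(iommu->cap) in the driver */

static uint64_t domain_ids;                /* one bit per allocated ID */
static void *domains[NDOMAINS];            /* ID -> domain back-pointer */

static int attach_domain(void *domain)
{
        int num;

        for (num = 0; num < NDOMAINS; num++)     /* find_first_zero_bit() */
                if (!(domain_ids & (1ULL << num)))
                        break;
        if (num == NDOMAINS)
                return -1;                       /* "no free domain ids" */

        domain_ids |= 1ULL << num;               /* set_bit()   */
        domains[num] = domain;
        return num;
}

static void detach_domain(int num)
{
        domain_ids &= ~(1ULL << num);            /* clear_bit() */
        domains[num] = NULL;
}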
1457 struct intel_iommu *iommu; in domain_init() local
1467 iommu = domain_get_iommu(domain); in domain_init()
1468 if (guest_width > cap_mgaw(iommu->cap)) in domain_init()
1469 guest_width = cap_mgaw(iommu->cap); in domain_init()
1473 sagaw = cap_sagaw(iommu->cap); in domain_init()
1476 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw); in domain_init()
1484 if (ecap_coherent(iommu->ecap)) in domain_init()
1489 if (ecap_sc_support(iommu->ecap)) in domain_init()
1494 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1496 domain->nid = iommu->node; in domain_init()
1502 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1509 struct intel_iommu *iommu; in domain_exit() local
1529 for_each_active_iommu(iommu, drhd) in domain_exit()
1530 if (test_bit(iommu->seq_id, &domain->iommu_bmp)) in domain_exit()
1531 iommu_detach_domain(domain, iommu); in domain_exit()
1541 struct intel_iommu *iommu; in domain_context_mapping_one() local
1556 iommu = device_to_iommu(segment, bus, devfn); in domain_context_mapping_one()
1557 if (!iommu) in domain_context_mapping_one()
1560 context = device_to_context_entry(iommu, bus, devfn); in domain_context_mapping_one()
1563 spin_lock_irqsave(&iommu->lock, flags); in domain_context_mapping_one()
1565 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1576 /* find an available domain id for this device in iommu */ in domain_context_mapping_one()
1577 ndomains = cap_ndoms(iommu->cap); in domain_context_mapping_one()
1578 for_each_set_bit(num, iommu->domain_ids, ndomains) { in domain_context_mapping_one()
1579 if (iommu->domains[num] == domain) { in domain_context_mapping_one()
1587 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_context_mapping_one()
1589 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1590 printk(KERN_ERR "IOMMU: no free domain ids\n"); in domain_context_mapping_one()
1594 set_bit(num, iommu->domain_ids); in domain_context_mapping_one()
1595 iommu->domains[num] = domain; in domain_context_mapping_one()
1600 * iommu which has less agaw than default. in domain_context_mapping_one()
1604 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1607 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1626 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
1629 context_set_address_width(context, iommu->agaw); in domain_context_mapping_one()
1643 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
1644 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
1648 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
1650 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
1653 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1656 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { in domain_context_mapping_one()
1659 domain->nid = iommu->node; in domain_context_mapping_one()
1711 struct intel_iommu *iommu; in domain_context_mapped() local
1713 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, in domain_context_mapped()
1715 if (!iommu) in domain_context_mapped()
1718 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); in domain_context_mapped()
1728 ret = device_context_mapped(iommu, parent->bus->number, in domain_context_mapped()
1735 return device_context_mapped(iommu, tmp->subordinate->number, in domain_context_mapped()
1738 return device_context_mapped(iommu, tmp->bus->number, in domain_context_mapped()
1892 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) in iommu_detach_dev() argument
1894 if (!iommu) in iommu_detach_dev()
1897 clear_context_table(iommu, bus, devfn); in iommu_detach_dev()
1898 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_detach_dev()
1900 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_detach_dev()
1907 struct intel_iommu *iommu; in domain_remove_dev_info() local
1916 info->dev->dev.archdata.iommu = NULL; in domain_remove_dev_info()
1920 iommu = device_to_iommu(info->segment, info->bus, info->devfn); in domain_remove_dev_info()
1921 iommu_detach_dev(iommu, info->bus, info->devfn); in domain_remove_dev_info()
1931 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1939 info = pdev->dev.archdata.iommu; in find_domain()
1949 struct intel_iommu *iommu; in get_domain_for_dev() local
1996 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", in get_domain_for_dev()
2000 iommu = drhd->iommu; in get_domain_for_dev()
2002 ret = iommu_attach_domain(domain, iommu); in get_domain_for_dev()
2073 pdev->dev.archdata.iommu = info; in get_domain_for_dev()
2095 printk(KERN_ERR "IOMMU: reserve iova failed\n"); in iommu_domain_identity_map()
2134 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", in iommu_prepare_identity_map()
2177 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) in iommu_prepare_rmrr_dev()
2193 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); in iommu_prepare_isa()
2197 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " in iommu_prepare_isa()
2213 struct intel_iommu *iommu; in si_domain_init() local
2222 for_each_active_iommu(iommu, drhd) { in si_domain_init()
2223 ret = iommu_attach_domain(si_domain, iommu); in si_domain_init()
2264 info = pdev->dev.archdata.iommu; in identity_mapping()
2298 pdev->dev.archdata.iommu = info; in domain_add_dev_info()
2376 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", in iommu_prepare_static_identity_mapping()
2395 struct intel_iommu *iommu; in init_dmars() local
2416 printk(KERN_ERR "Allocating global iommu array failed\n"); in init_dmars()
2432 iommu = drhd->iommu; in init_dmars()
2433 g_iommus[iommu->seq_id] = iommu; in init_dmars()
2435 ret = iommu_init_domains(iommu); in init_dmars()
2442 * among all IOMMUs. Need to split it later. in init_dmars()
2444 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2446 printk(KERN_ERR "IOMMU: allocate root entry failed\n"); in init_dmars()
2449 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2454 * Start from a sane iommu hardware state. in init_dmars()
2460 iommu = drhd->iommu; in init_dmars()
2467 if (iommu->qi) in init_dmars()
2473 dmar_fault(-1, iommu); in init_dmars()
2478 dmar_disable_qi(iommu); in init_dmars()
2485 iommu = drhd->iommu; in init_dmars()
2487 if (dmar_enable_qi(iommu)) { in init_dmars()
2492 iommu->flush.flush_context = __iommu_flush_context; in init_dmars()
2493 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in init_dmars()
2494 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " in init_dmars()
2496 iommu->seq_id, in init_dmars()
2499 iommu->flush.flush_context = qi_flush_context; in init_dmars()
2500 iommu->flush.flush_iotlb = qi_flush_iotlb; in init_dmars()
2501 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " in init_dmars()
2503 iommu->seq_id, in init_dmars()
2525 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); in init_dmars()
2543 printk(KERN_INFO "IOMMU: Setting RMRR:\n"); in init_dmars()
2556 "IOMMU: mapping reserved region failed\n"); in init_dmars()
2576 iommu_disable_protect_mem_regions(drhd->iommu); in init_dmars()
2579 iommu = drhd->iommu; in init_dmars()
2581 iommu_flush_write_buffer(iommu); in init_dmars()
2583 ret = dmar_set_interrupt(iommu); in init_dmars()
2587 iommu_set_root_entry(iommu); in init_dmars()
2589 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in init_dmars()
2590 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_dmars()
2592 ret = iommu_enable_translation(iommu); in init_dmars()
2596 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2604 iommu = drhd->iommu; in init_dmars()
2605 free_iommu(iommu); in init_dmars()
2619 /* Restrict dma_mask to the width that the iommu can handle */ in intel_alloc_iova()
2676 info = dev->dev.archdata.iommu; in get_valid_domain_for_dev()
2685 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; in iommu_dummy()
2749 struct intel_iommu *iommu; in __intel_map_single() local
2761 iommu = domain_get_iommu(domain); in __intel_map_single()
2773 !cap_zlr(iommu->cap)) in __intel_map_single()
2789 if (cap_caching_mode(iommu->cap)) in __intel_map_single()
2790 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); in __intel_map_single()
2792 iommu_flush_write_buffer(iommu); in __intel_map_single()
2823 struct intel_iommu *iommu = g_iommus[i]; in flush_unmaps() local
2824 if (!iommu) in flush_unmaps()
2831 if (!cap_caching_mode(iommu->cap)) in flush_unmaps()
2832 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in flush_unmaps()
2840 if (cap_caching_mode(iommu->cap)) in flush_unmaps()
2841 iommu_flush_iotlb_psi(iommu, domain->id, in flush_unmaps()
2869 struct intel_iommu *iommu; in add_unmap() local
2875 iommu = domain_get_iommu(dom); in add_unmap()
2876 iommu_id = iommu->seq_id; in add_unmap()
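add_unmap() and flush_unmaps() above batch IOTLB invalidations: freed IOVA ranges are queued per IOMMU and flushed together once the table fills (or a timer fires), instead of invalidating on every unmap. A minimal sketch of that batching idea; the types and the stubbed flush are illustrative only:

/* deferred_flush_sketch.c: queue unmaps, flush once per batch */
#define HIGH_WATER_MARK 250

struct deferred_entry {
        unsigned long pfn_lo;
        unsigned long pfn_hi;
};

static struct deferred_entry deferred[HIGH_WATER_MARK];
static int deferred_count;

static void flush_all_deferred(void)
{
        /* the driver does iommu->flush.flush_iotlb(..., DMA_TLB_GLOBAL_FLUSH)
         * here and then frees the queued IOVAs */
        deferred_count = 0;
}

static void queue_unmap(unsigned long pfn_lo, unsigned long pfn_hi)
{
        deferred[deferred_count].pfn_lo = pfn_lo;
        deferred[deferred_count].pfn_hi = pfn_hi;
        if (++deferred_count == HIGH_WATER_MARK)
                flush_all_deferred();       /* one flush per batch, not per page */
}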
2899 struct intel_iommu *iommu; in intel_unmap_page() local
2907 iommu = domain_get_iommu(domain); in intel_unmap_page()
2927 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap_page()
2992 struct intel_iommu *iommu; in intel_unmap_sg() local
3000 iommu = domain_get_iommu(domain); in intel_unmap_sg()
3017 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap_sg()
3056 struct intel_iommu *iommu; in intel_map_sg() local
3066 iommu = domain_get_iommu(domain); in intel_map_sg()
3083 !cap_zlr(iommu->cap)) in intel_map_sg()
3104 if (cap_caching_mode(iommu->cap)) in intel_map_sg()
3105 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); in intel_map_sg()
3107 iommu_flush_write_buffer(iommu); in intel_map_sg()
3215 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
3216 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
3217 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
3228 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
3233 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in quirk_ioat_snb_local_iommu()
3266 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
3275 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in init_no_remapping_devices()
3285 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3287 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3288 if (iommu->qi) in init_iommu_hw()
3289 dmar_reenable_qi(iommu); in init_iommu_hw()
3291 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3298 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3302 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3304 iommu_set_root_entry(iommu); in init_iommu_hw()
3306 iommu->flush.flush_context(iommu, 0, 0, 0, in init_iommu_hw()
3308 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in init_iommu_hw()
3310 if (iommu_enable_translation(iommu)) in init_iommu_hw()
3312 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3321 struct intel_iommu *iommu; in iommu_flush_all() local
3323 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3324 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3326 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3334 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3337 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3338 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, in iommu_suspend()
3340 if (!iommu->iommu_state) in iommu_suspend()
3346 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3347 iommu_disable_translation(iommu); in iommu_suspend()
3349 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3351 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3352 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3353 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3354 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3355 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3356 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3357 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3358 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3360 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3365 for_each_active_iommu(iommu, drhd) in iommu_suspend()
3366 kfree(iommu->iommu_state); in iommu_suspend()
3374 struct intel_iommu *iommu = NULL; in iommu_resume() local
3379 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3381 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3385 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3387 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3389 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3390 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3391 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3392 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3393 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3394 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3395 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3396 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3398 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3401 for_each_active_iommu(iommu, drhd) in iommu_resume()
3402 kfree(iommu->iommu_state); in iommu_resume()
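iommu_suspend() and iommu_resume() above save the four fault-event registers into a per-IOMMU iommu_state array before the machine sleeps and write them back before translation is re-enabled. A sketch of that save/restore pair; the offsets follow the VT-d fault-event registers, but treat them and the MMIO accessors as illustrative:

/* fault_reg_sr_sketch.c: stash and restore the fault-event registers */
#include <stdint.h>

enum { SR_FECTL, SR_FEDATA, SR_FEADDR, SR_FEUADDR, MAX_SR_REGS };

static const unsigned int fe_reg_off[MAX_SR_REGS] = {
        0x38,   /* FECTL   */
        0x3c,   /* FEDATA  */
        0x40,   /* FEADDR  */
        0x44,   /* FEUADDR */
};

static uint32_t mmio_read(volatile uint8_t *base, unsigned int off)
{
        return *(volatile uint32_t *)(base + off);      /* readl()  */
}

static void mmio_write(volatile uint8_t *base, unsigned int off, uint32_t v)
{
        *(volatile uint32_t *)(base + off) = v;         /* writel() */
}

static void save_fault_regs(volatile uint8_t *reg, uint32_t state[MAX_SR_REGS])
{
        for (int i = 0; i < MAX_SR_REGS; i++)
                state[i] = mmio_read(reg, fe_reg_off[i]);
}

static void restore_fault_regs(volatile uint8_t *reg,
                               const uint32_t state[MAX_SR_REGS])
{
        for (int i = 0; i < MAX_SR_REGS; i++)
                mmio_write(reg, fe_reg_off[i], state[i]);
}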
3624 panic("tboot: Failed to initialize iommu memory\n"); in intel_iommu_init()
3636 panic("tboot: Failed to reserve iommu ranges\n"); in intel_iommu_init()
3646 printk(KERN_ERR "IOMMU: dmar init failed\n"); in intel_iommu_init()
3671 static void iommu_detach_dependent_devices(struct intel_iommu *iommu, in iommu_detach_dependent_devices() argument
3676 if (!iommu || !pdev) in iommu_detach_dependent_devices()
3685 iommu_detach_dev(iommu, parent->bus->number, in iommu_detach_dependent_devices()
3690 iommu_detach_dev(iommu, in iommu_detach_dependent_devices()
3693 iommu_detach_dev(iommu, tmp->bus->number, in iommu_detach_dependent_devices()
3702 struct intel_iommu *iommu; in domain_remove_one_dev_info() local
3707 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, in domain_remove_one_dev_info()
3709 if (!iommu) in domain_remove_one_dev_info()
3721 info->dev->dev.archdata.iommu = NULL; in domain_remove_one_dev_info()
3725 iommu_detach_dev(iommu, info->bus, info->devfn); in domain_remove_one_dev_info()
3726 iommu_detach_dependent_devices(iommu, pdev); in domain_remove_one_dev_info()
3737 /* if there are no other devices under the same iommu in domain_remove_one_dev_info()
3738 * owned by this domain, clear this iommu in iommu_bmp in domain_remove_one_dev_info()
3739 * update iommu count and coherency in domain_remove_one_dev_info()
3741 if (iommu == device_to_iommu(info->segment, info->bus, in domain_remove_one_dev_info()
3751 clear_bit(iommu->seq_id, &domain->iommu_bmp); in domain_remove_one_dev_info()
3758 spin_lock_irqsave(&iommu->lock, tmp_flags); in domain_remove_one_dev_info()
3759 clear_bit(domain->id, iommu->domain_ids); in domain_remove_one_dev_info()
3760 iommu->domains[domain->id] = NULL; in domain_remove_one_dev_info()
3761 spin_unlock_irqrestore(&iommu->lock, tmp_flags); in domain_remove_one_dev_info()
3769 struct intel_iommu *iommu; in vm_domain_remove_all_dev_info() local
3779 info->dev->dev.archdata.iommu = NULL; in vm_domain_remove_all_dev_info()
3784 iommu = device_to_iommu(info->segment, info->bus, info->devfn); in vm_domain_remove_all_dev_info()
3785 iommu_detach_dev(iommu, info->bus, info->devfn); in vm_domain_remove_all_dev_info()
3786 iommu_detach_dependent_devices(iommu, info->dev); in vm_domain_remove_all_dev_info()
3788 /* clear this iommu in iommu_bmp, update iommu count in vm_domain_remove_all_dev_info()
3792 if (test_and_clear_bit(iommu->seq_id, in vm_domain_remove_all_dev_info()
3859 struct intel_iommu *iommu; in iommu_free_vm_domain() local
3866 iommu = drhd->iommu; in iommu_free_vm_domain()
3868 ndomains = cap_ndoms(iommu->cap); in iommu_free_vm_domain()
3869 for_each_set_bit(i, iommu->domain_ids, ndomains) { in iommu_free_vm_domain()
3870 if (iommu->domains[i] == domain) { in iommu_free_vm_domain()
3871 spin_lock_irqsave(&iommu->lock, flags); in iommu_free_vm_domain()
3872 clear_bit(i, iommu->domain_ids); in iommu_free_vm_domain()
3873 iommu->domains[i] = NULL; in iommu_free_vm_domain()
3874 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_free_vm_domain()
3936 struct intel_iommu *iommu; in intel_iommu_attach_device() local
3953 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, in intel_iommu_attach_device()
3955 if (!iommu) in intel_iommu_attach_device()
3958 /* check if this iommu agaw is sufficient for max mapped address */ in intel_iommu_attach_device()
3959 addr_width = agaw_to_width(iommu->agaw); in intel_iommu_attach_device()
3960 if (addr_width > cap_mgaw(iommu->cap)) in intel_iommu_attach_device()
3961 addr_width = cap_mgaw(iommu->cap); in intel_iommu_attach_device()
3964 printk(KERN_ERR "%s: iommu width (%d) is not " in intel_iommu_attach_device()
3974 while (iommu->agaw < dmar_domain->agaw) { in intel_iommu_attach_device()
4021 printk(KERN_ERR "%s: iommu width (%d) is not " in intel_iommu_map()
4080 * indicate the iommu cannot differentiate between them. To avoid
4150 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_rwbf()
4175 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()