Lines Matching +full:ats +full:- +full:supported

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2006-2014 Intel Corporation.
17 #include <linux/dma-direct.h>
21 #include <linux/pci-ats.h>
28 #include "../dma-iommu.h"
30 #include "../iommu-pages.h"
37 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
38 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
39 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
40 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
48 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
49 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
54 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
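
For orientation, a minimal user-space sketch (not part of the file) of what these limit macros evaluate to for a 48-bit guest address width, assuming VTD_PAGE_SHIFT is 12, i.e. 4KiB pages:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins mirroring the macros above; VTD_PAGE_SHIFT assumed to be 12. */
#define SKETCH_VTD_PAGE_SHIFT	12
#define SKETCH_MAX_PFN(gaw)	((((uint64_t)1) << ((gaw) - SKETCH_VTD_PAGE_SHIFT)) - 1)
#define SKETCH_MAX_ADDR(gaw)	((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	/* For a 48-bit guest address width: 2^48 - 1 bytes, 2^36 - 1 page frames. */
	printf("max addr: %#llx\n", (unsigned long long)SKETCH_MAX_ADDR(48));
	printf("max pfn:  %#llx\n", (unsigned long long)SKETCH_MAX_PFN(48));
	return 0;
}
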
 61  * set to 1 to panic the kernel if VT-d can't be successfully enabled
76 if (!(re->lo & 1)) in root_entry_lctp()
79 return re->lo & VTD_PAGE_MASK; in root_entry_lctp()
88 if (!(re->hi & 1)) in root_entry_uctp()
91 return re->hi & VTD_PAGE_MASK; in root_entry_uctp()
100 if (*rid_lhs < PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
101 return -1; in device_rid_cmp_key()
103 if (*rid_lhs > PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
113 u16 key = PCI_DEVID(info->bus, info->devfn); in device_rid_cmp()
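
The comparators above order devices by their 16-bit PCI source ID. A minimal user-space sketch of that key, assuming the standard PCI_DEVID packing (bus in the upper byte, devfn in the lower byte):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the source-ID ("RID") key used by the rbtree comparators. */
static uint16_t sketch_pci_devid(uint8_t bus, uint8_t devfn)
{
	return (uint16_t)(bus << 8) | devfn;
}

int main(void)
{
	/* Bus 0x3a, device 2, function 0 -> devfn 0x10 -> RID 0x3a10. */
	uint8_t devfn = (2 << 3) | 0;

	printf("%#x\n", (unsigned int)sketch_pci_devid(0x3a, devfn));
	return 0;
}
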
119 * Looks up an IOMMU-probed device using its source ID.
135 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
136 node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key); in device_rbtree_find()
139 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
141 return info ? info->dev : NULL; in device_rbtree_find()
150 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
151 curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp); in device_rbtree_insert()
152 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
154 return -EEXIST; in device_rbtree_insert()
161 struct intel_iommu *iommu = info->iommu; in device_rbtree_remove()
164 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
165 rb_erase(&info->node, &iommu->device_rbtree); in device_rbtree_remove()
166 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
192 u8 atc_required:1; /* ATS is required */
222 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
227 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
234 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
236 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
242 return -EINVAL; in intel_iommu_setup()
262 pr_info("Disable supported super page\n"); in intel_iommu_setup()
271 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
274 pr_notice("Unknown option - '%s'\n", str); in intel_iommu_setup()
288 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
294 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
295 * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
302 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
303 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
306 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
310 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
322 for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { in __iommu_calculate_agaw()
 341  * fall back to a smaller supported agaw for IOMMUs that don't support the default agaw.
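
A rough user-space sketch of this fallback (the width/agaw arithmetic is an assumption based on the driver's 9-bit-per-level layout, where agaw 1/2/3 correspond to 39/48/57-bit widths):

#include <stdio.h>

/* Assumed mirrors of width_to_agaw()/agaw_to_width(): 9 bits per level. */
static int sketch_width_to_agaw(int width) { return (width - 30 + 8) / 9; }
static int sketch_agaw_to_width(int agaw) { return 30 + agaw * 9; }

/* Walk down from the requested width until a SAGAW-supported agaw is found. */
static int sketch_calculate_agaw(unsigned int sagaw, int max_gaw)
{
	int agaw;

	for (agaw = sketch_width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1u << agaw))
			break;
	return agaw;
}

int main(void)
{
	/* A unit reporting only 4-level support (SAGAW bit 2) but asked for
	 * 57 bits falls back to agaw 2, i.e. a 48-bit address width. */
	int agaw = sketch_calculate_agaw(1u << 2, 57);

	printf("agaw %d -> %d-bit width\n", agaw, sketch_agaw_to_width(agaw));
	return 0;
}
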
351 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
354 /* Return the super pagesize bitmap if supported. */
360 * 1-level super page supports page size of 2MiB, 2-level super page in domain_super_pgsize_bitmap()
363 if (domain->iommu_superpage == 1) in domain_super_pgsize_bitmap()
365 else if (domain->iommu_superpage == 2) in domain_super_pgsize_bitmap()
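
Combined with the base 4KiB size set later in paging_domain_alloc() (see the pgsize_bitmap lines further down), the selection above yields one of three bitmaps. A small sketch, with SZ_* values mirroring include/linux/sizes.h:

#include <stdio.h>

#define SK_SZ_4K	0x00001000UL
#define SK_SZ_2M	0x00200000UL
#define SK_SZ_1G	0x40000000UL

/* Mirror of the 2MiB / 1GiB selection shown above. */
static unsigned long sketch_super_pgsize_bitmap(int iommu_superpage)
{
	if (iommu_superpage == 1)
		return SK_SZ_2M;		/* 1-level super pages: 2MiB */
	else if (iommu_superpage == 2)
		return SK_SZ_2M | SK_SZ_1G;	/* 2-level: 2MiB and 1GiB */
	return 0;				/* 4KiB pages only */
}

int main(void)
{
	printf("%#lx\n", SK_SZ_4K | sketch_super_pgsize_bitmap(2));
	return 0;
}
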
374 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
385 entry = &root->lo; in iommu_context_addr()
388 devfn -= 0x80; in iommu_context_addr()
389 entry = &root->hi; in iommu_context_addr()
400 context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_context_addr()
413 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
414 * sub-hierarchy of a candidate PCI-PCI bridge
415 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
416 * @bridge: the candidate PCI-PCI bridge
418 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
431 if (pbridge->subordinate && in is_downstream_to_pci_bridge()
432 pbridge->subordinate->number <= pdev->bus->number && in is_downstream_to_pci_bridge()
433 pbridge->subordinate->busn_res.end >= pdev->bus->number) in is_downstream_to_pci_bridge()
450 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); in quirk_ioat_snb_local_iommu()
453 dev_info(&pdev->dev, "failed to run vt-d quirk\n"); in quirk_ioat_snb_local_iommu()
460 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { in quirk_ioat_snb_local_iommu()
 461 		pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"); in quirk_ioat_snb_local_iommu()
471 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
477 if (pdev->vendor == PCI_VENDOR_ID_INTEL && in iommu_is_dummy()
478 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB && in iommu_is_dummy()
506 dev = &pf_pdev->dev; in device_lookup_iommu()
507 segment = pci_domain_nr(pdev->bus); in device_lookup_iommu()
509 dev = &ACPI_COMPANION(dev)->dev; in device_lookup_iommu()
513 if (pdev && segment != drhd->segment) in device_lookup_iommu()
516 for_each_active_dev_scope(drhd->devices, in device_lookup_iommu()
517 drhd->devices_cnt, i, tmp) { in device_lookup_iommu()
523 if (pdev && pdev->is_virtfn) in device_lookup_iommu()
527 *bus = drhd->devices[i].bus; in device_lookup_iommu()
528 *devfn = drhd->devices[i].devfn; in device_lookup_iommu()
537 if (pdev && drhd->include_all) { in device_lookup_iommu()
540 *bus = pdev->bus->number; in device_lookup_iommu()
541 *devfn = pdev->devfn; in device_lookup_iommu()
559 if (!domain->iommu_coherency) in domain_flush_cache()
568 if (!iommu->root_entry) in free_context_table()
584 iommu_free_page(iommu->root_entry); in free_context_table()
585 iommu->root_entry = NULL; in free_context_table()
599 pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); in pgtable_walk()
602 pr_info("page table not present at level %d\n", level - 1); in pgtable_walk()
610 level--; in pgtable_walk()
626 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
629 if (!iommu->root_entry) { in dmar_fault_dump_ptes()
633 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
637 rt_entry->hi, rt_entry->lo); in dmar_fault_dump_ptes()
639 pr_info("root entry: 0x%016llx", rt_entry->lo); in dmar_fault_dump_ptes()
649 ctx_entry->hi, ctx_entry->lo); in dmar_fault_dump_ptes()
657 level = agaw_to_level(ctx_entry->hi & 7); in dmar_fault_dump_ptes()
658 pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
668 dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
670 /* For request-without-pasid, get the pasid from context entry */ in dmar_fault_dump_ptes()
676 pr_info("pasid dir entry: 0x%016llx\n", pde->val); in dmar_fault_dump_ptes()
686 for (i = 0; i < ARRAY_SIZE(pte->val); i++) in dmar_fault_dump_ptes()
687 pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); in dmar_fault_dump_ptes()
695 level = pte->val[2] & BIT_ULL(2) ? 5 : 4; in dmar_fault_dump_ptes()
696 pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
698 level = agaw_to_level((pte->val[0] >> 2) & 0x7); in dmar_fault_dump_ptes()
699 pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
712 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
719 parent = domain->pgd; in pfn_to_dma_pte()
734 tmp_page = iommu_alloc_page_node(domain->nid, gfp); in pfn_to_dma_pte()
742 if (domain->use_first_level) in pfn_to_dma_pte()
746 if (!try_cmpxchg64(&pte->val, &tmp, pteval)) in pfn_to_dma_pte()
756 level--; in pfn_to_dma_pte()
771 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
774 parent = domain->pgd; in dma_pfn_level_pte()
792 total--; in dma_pfn_level_pte()
824 (void *)pte - (void *)first_pte); in dma_pte_clear_range()
848 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
858 last_pfn < level_pfn + level_size(level) - 1)) { in dma_pte_free_level()
880 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
881 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
884 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
885 iommu_free_page(domain->pgd); in dma_pte_free_pagetable()
886 domain->pgd = NULL; in dma_pte_free_pagetable()
893 know the hardware page-walk will no longer touch them.
903 list_add_tail(&pg->lru, freelist); in dma_pte_list_pagetables()
911 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_list_pagetables()
934 last_pfn >= level_pfn + level_size(level) - 1) { in dma_pte_clear_level()
938 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
946 dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
957 (void *)++last_pte - (void *)first_pte); in dma_pte_clear_level()
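
For scale, a hedged sketch of the page counts behind level_size() as used above, assuming the driver's 9-bit stride per page-table level:

#include <stdio.h>

/* Assumed mirror of level_size(): number of 4KiB pages covered per level. */
static unsigned long sketch_level_size(int level)
{
	return 1UL << ((level - 1) * 9);
}

int main(void)
{
	/* level 1: 1 page (4KiB), level 2: 512 pages (2MiB), level 3: 262144 pages (1GiB). */
	for (int level = 1; level <= 3; level++)
		printf("level %d covers %lu pages\n", level, sketch_level_size(level));
	return 0;
}
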
971 dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
972 domain->pgd, 0, start_pfn, last_pfn, freelist); in domain_unmap()
975 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
976 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
977 list_add_tail(&pgd_page->lru, freelist); in domain_unmap()
978 domain->pgd = NULL; in domain_unmap()
987 root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
990 iommu->name); in iommu_alloc_root_entry()
991 return -ENOMEM; in iommu_alloc_root_entry()
995 iommu->root_entry = root; in iommu_alloc_root_entry()
1006 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1010 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1011 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1013 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1019 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1025 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1028 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1031 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1039 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1042 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1043 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1049 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1072 pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n", in __iommu_flush_context()
1073 iommu->name, type); in __iommu_flush_context()
1078 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1079 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1085 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1091 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1110 iommu->name, type); in __iommu_flush_iotlb()
1114 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1117 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1120 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1121 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1127 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1145 spin_lock_irqsave(&domain->lock, flags); in domain_lookup_dev_info()
1146 list_for_each_entry(info, &domain->devices, link) { in domain_lookup_dev_info()
1147 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1148 info->devfn == devfn) { in domain_lookup_dev_info()
1149 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1153 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1161 * check because it applies only to the built-in QAT devices and it doesn't
1167 if (pdev->vendor != PCI_VENDOR_ID_INTEL) in dev_needs_extra_dtlb_flush()
1170 if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK) in dev_needs_extra_dtlb_flush()
1180 if (!info->ats_supported) in iommu_enable_pci_ats()
1183 pdev = to_pci_dev(info->dev); in iommu_enable_pci_ats()
1188 info->ats_enabled = 1; in iommu_enable_pci_ats()
1193 if (!info->ats_enabled) in iommu_disable_pci_ats()
1196 pci_disable_ats(to_pci_dev(info->dev)); in iommu_disable_pci_ats()
1197 info->ats_enabled = 0; in iommu_disable_pci_ats()
1204 if (!info->ats_enabled || !info->pri_supported) in iommu_enable_pci_pri()
1207 pdev = to_pci_dev(info->dev); in iommu_enable_pci_pri()
1209 if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev)) in iommu_enable_pci_pri()
1216 info->pri_enabled = 1; in iommu_enable_pci_pri()
1221 if (!info->pri_enabled) in iommu_disable_pci_pri()
1224 if (WARN_ON(info->iopf_refcount)) in iommu_disable_pci_pri()
1225 iopf_queue_remove_device(info->iommu->iopf_queue, info->dev); in iommu_disable_pci_pri()
1227 pci_disable_pri(to_pci_dev(info->dev)); in iommu_disable_pci_pri()
1228 info->pri_enabled = 0; in iommu_disable_pci_pri()
1241 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1244 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1245 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1247 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1253 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1261 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1262 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1263 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1269 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1277 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1278 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1281 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1282 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1283 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1289 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1296 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1297 pr_debug("%s: Number of Domains supported <%d>\n", in iommu_init_domains()
1298 iommu->name, ndomains); in iommu_init_domains()
1300 spin_lock_init(&iommu->lock); in iommu_init_domains()
1302 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1303 if (!iommu->domain_ids) in iommu_init_domains()
1304 return -ENOMEM; in iommu_init_domains()
1308 * with domain-id 0, hence we need to pre-allocate it. We also in iommu_init_domains()
1309 * use domain-id 0 as a marker for non-allocated domain-id, so in iommu_init_domains()
1312 set_bit(0, iommu->domain_ids); in iommu_init_domains()
 1315 	 * VT-d spec rev3.0 (section 6.2.3.1) requires that each pasid in iommu_init_domains()
1316 * entry for first-level or pass-through translation modes should in iommu_init_domains()
1318 * second-level or nested translation. We reserve a domain id for in iommu_init_domains()
1322 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1329 if (!iommu->domain_ids) in disable_dmar_iommu()
1336 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1340 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1346 if (iommu->domain_ids) { in free_dmar_iommu()
1347 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1348 iommu->domain_ids = NULL; in free_dmar_iommu()
1351 if (iommu->copied_tables) { in free_dmar_iommu()
1352 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1353 iommu->copied_tables = NULL; in free_dmar_iommu()
1359 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1374 if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap)) in first_level_by_default()
1375 return ecap_flts(iommu->ecap); in first_level_by_default()
1384 int num, ret = -ENOSPC; in domain_attach_iommu()
1386 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_attach_iommu()
1391 return -ENOMEM; in domain_attach_iommu()
1393 spin_lock(&iommu->lock); in domain_attach_iommu()
1394 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1396 curr->refcnt++; in domain_attach_iommu()
1397 spin_unlock(&iommu->lock); in domain_attach_iommu()
1402 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1403 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1405 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1409 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1410 info->refcnt = 1; in domain_attach_iommu()
1411 info->did = num; in domain_attach_iommu()
1412 info->iommu = iommu; in domain_attach_iommu()
1413 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1416 ret = xa_err(curr) ? : -EBUSY; in domain_attach_iommu()
1420 spin_unlock(&iommu->lock); in domain_attach_iommu()
1424 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1426 spin_unlock(&iommu->lock); in domain_attach_iommu()
1435 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_detach_iommu()
1438 spin_lock(&iommu->lock); in domain_detach_iommu()
1439 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1440 if (--info->refcnt == 0) { in domain_detach_iommu()
1441 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1442 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1443 domain->nid = NUMA_NO_NODE; in domain_detach_iommu()
1446 spin_unlock(&iommu->lock); in domain_detach_iommu()
1451 if (domain->pgd) { in domain_exit()
1454 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); in domain_exit()
1458 if (WARN_ON(!list_empty(&domain->devices))) in domain_exit()
1461 kfree(domain->qi_batch); in domain_exit()
1467 * in-flight DMA and copied pgtable, but there is no unmapping
1469 * the newly-mapped device. For kdump, at this point, the device
1471 * in-flight DMA will exist, and we don't need to worry anymore
1483 assert_spin_locked(&iommu->lock); in copied_context_tear_down()
1488 if (did_old < cap_ndoms(iommu->cap)) { in copied_context_tear_down()
1489 iommu->flush.flush_context(iommu, did_old, in copied_context_tear_down()
1493 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in copied_context_tear_down()
1501 * It's a non-present to present mapping. If hardware doesn't cache
 1502  * non-present entries we only need to flush the write-buffer. If it in context_present_cache_flush()
1503 * _does_ cache non-present entries, then it does so in the special
1509 if (cap_caching_mode(iommu->cap)) { in context_present_cache_flush()
1510 iommu->flush.flush_context(iommu, 0, in context_present_cache_flush()
1514 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in context_present_cache_flush()
1528 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
1535 spin_lock(&iommu->lock); in domain_context_mapping_one()
1536 ret = -ENOMEM; in domain_context_mapping_one()
1549 if (info && info->ats_supported) in domain_context_mapping_one()
1555 context_set_address_width(context, domain->agaw); in domain_context_mapping_one()
1559 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
1565 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1573 struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev); in domain_context_mapping_cb()
1574 struct intel_iommu *iommu = info->iommu; in domain_context_mapping_cb()
1585 struct intel_iommu *iommu = info->iommu; in domain_context_mapping()
1586 u8 bus = info->bus, devfn = info->devfn; in domain_context_mapping()
1609 support = domain->iommu_superpage; in hardware_largepage_caps()
1623 support--; in hardware_largepage_caps()
1647 start_pfn + lvl_pages - 1, in switch_to_super_page()
1672 if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1))) in __domain_mapping()
1673 return -EINVAL; in __domain_mapping()
1676 return -EINVAL; in __domain_mapping()
1678 if (!(prot & DMA_PTE_WRITE) && domain->nested_parent) { in __domain_mapping()
1679 …pr_err_ratelimited("Read-only mapping is disallowed on the domain which serves as the parent in a … in __domain_mapping()
1680 return -EINVAL; in __domain_mapping()
1685 if (domain->use_first_level) { in __domain_mapping()
1691 domain->has_mappings = true; in __domain_mapping()
1705 return -ENOMEM; in __domain_mapping()
1718 end_pfn = iov_pfn + pages_to_remove - 1; in __domain_mapping()
1729 if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) { in __domain_mapping()
1734 dumps--; in __domain_mapping()
1740 nr_pages -= lvl_pages; in __domain_mapping()
1761 (void *)pte - (void *)first_pte); in __domain_mapping()
1771 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one()
1775 spin_lock(&iommu->lock); in domain_context_clear_one()
1778 spin_unlock(&iommu->lock); in domain_context_clear_one()
1785 spin_unlock(&iommu->lock); in domain_context_clear_one()
1831 struct dma_pte *pgd = domain->pgd; in domain_setup_first_level()
1834 level = agaw_to_level(domain->agaw); in domain_setup_first_level()
1836 return -EINVAL; in domain_setup_first_level()
1841 if (domain->force_snooping) in domain_setup_first_level()
1853 struct intel_iommu *iommu = info->iommu; in dmar_domain_attach_device()
1861 info->domain = domain; in dmar_domain_attach_device()
1862 spin_lock_irqsave(&domain->lock, flags); in dmar_domain_attach_device()
1863 list_add(&info->link, &domain->devices); in dmar_domain_attach_device()
1864 spin_unlock_irqrestore(&domain->lock, flags); in dmar_domain_attach_device()
1871 else if (domain->use_first_level) in dmar_domain_attach_device()
1893 * device_rmrr_is_relaxable - Test whether the RMRR of this device
1924 struct intel_iommu *iommu = info->iommu; in device_def_domain_type()
1930 if (!ecap_pass_through(iommu->ecap)) in device_def_domain_type()
1948 * (for example, while enabling interrupt-remapping) then in intel_iommu_init_qi()
1951 if (!iommu->qi) { in intel_iommu_init_qi()
1955 dmar_fault(-1, iommu); in intel_iommu_init_qi()
1957 * Disable queued invalidation if supported and already enabled in intel_iommu_init_qi()
1967 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
1968 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
1970 iommu->name); in intel_iommu_init_qi()
1972 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
1973 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
1974 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2024 ret = -ENOMEM; in copy_context_table()
2030 new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL); in copy_context_table()
2044 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2045 set_bit(did, iommu->domain_ids); in copy_context_table()
2072 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2083 return -EINVAL; in copy_translation_tables()
2085 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2086 if (!iommu->copied_tables) in copy_translation_tables()
2087 return -ENOMEM; in copy_translation_tables()
2091 return -EINVAL; in copy_translation_tables()
2095 return -ENOMEM; in copy_translation_tables()
2097 /* This is too big for the stack - allocate it from slab */ in copy_translation_tables()
2099 ret = -ENOMEM; in copy_translation_tables()
2109 iommu->name, bus); in copy_translation_tables()
2114 spin_lock(&iommu->lock); in copy_translation_tables()
2123 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2130 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2133 spin_unlock(&iommu->lock); in copy_translation_tables()
2137 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2154 if (drhd->ignored) { in init_dmars()
2162 * than the smallest supported. in init_dmars()
2165 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2183 iommu->name); in init_dmars()
2196 pr_info("Translation already enabled - trying to copy translation structures\n"); in init_dmars()
2202 * enabled - but failed to copy over the in init_dmars()
2203 * old root-entry table. Try to proceed in init_dmars()
2205 * allocating a clean root-entry table. in init_dmars()
2210 iommu->name); in init_dmars()
2215 iommu->name); in init_dmars()
2242 if (drhd->ignored) { in init_dmars()
2254 if (ecap_prs(iommu->ecap)) { in init_dmars()
2289 if (!drhd->include_all) { in init_no_remapping_devices()
2290 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2291 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2294 if (i == drhd->devices_cnt) in init_no_remapping_devices()
2295 drhd->ignored = 1; in init_no_remapping_devices()
2300 if (drhd->include_all) in init_no_remapping_devices()
2303 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2304 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2307 if (i < drhd->devices_cnt) in init_no_remapping_devices()
2312 drhd->gfx_dedicated = 1; in init_no_remapping_devices()
2314 drhd->ignored = 1; in init_no_remapping_devices()
2326 if (iommu->qi) { in init_iommu_hw()
2334 if (drhd->ignored) { in init_iommu_hw()
2359 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2361 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2377 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
2379 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
2380 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
2381 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
2382 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
2383 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
2384 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
2385 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
2386 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
2388 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
2409 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
2411 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
2412 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
2413 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
2414 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
2415 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
2416 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
2417 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
2418 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
2420 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
2440 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) || in rmrr_sanity_check()
2441 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) || in rmrr_sanity_check()
2442 rmrr->end_address <= rmrr->base_address || in rmrr_sanity_check()
2444 return -EINVAL; in rmrr_sanity_check()
2457 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n" in dmar_parse_one_rmrr()
2459 rmrr->base_address, rmrr->end_address, in dmar_parse_one_rmrr()
2470 rmrru->hdr = header; in dmar_parse_one_rmrr()
2472 rmrru->base_address = rmrr->base_address; in dmar_parse_one_rmrr()
2473 rmrru->end_address = rmrr->end_address; in dmar_parse_one_rmrr()
2475 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), in dmar_parse_one_rmrr()
2476 ((void *)rmrr) + rmrr->header.length, in dmar_parse_one_rmrr()
2477 &rmrru->devices_cnt); in dmar_parse_one_rmrr()
2478 if (rmrru->devices_cnt && rmrru->devices == NULL) in dmar_parse_one_rmrr()
2481 list_add(&rmrru->list, &dmar_rmrr_units); in dmar_parse_one_rmrr()
2487 return -ENOMEM; in dmar_parse_one_rmrr()
2497 tmp = (struct acpi_dmar_atsr *)atsru->hdr; in dmar_find_atsr()
2498 if (atsr->segment != tmp->segment) in dmar_find_atsr()
2500 if (atsr->header.length != tmp->header.length) in dmar_find_atsr()
2502 if (memcmp(atsr, tmp, atsr->header.length) == 0) in dmar_find_atsr()
2522 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); in dmar_parse_one_atsr()
2524 return -ENOMEM; in dmar_parse_one_atsr()
2531 atsru->hdr = (void *)(atsru + 1); in dmar_parse_one_atsr()
2532 memcpy(atsru->hdr, hdr, hdr->length); in dmar_parse_one_atsr()
2533 atsru->include_all = atsr->flags & 0x1; in dmar_parse_one_atsr()
2534 if (!atsru->include_all) { in dmar_parse_one_atsr()
2535 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), in dmar_parse_one_atsr()
2536 (void *)atsr + atsr->header.length, in dmar_parse_one_atsr()
2537 &atsru->devices_cnt); in dmar_parse_one_atsr()
2538 if (atsru->devices_cnt && atsru->devices == NULL) { in dmar_parse_one_atsr()
2540 return -ENOMEM; in dmar_parse_one_atsr()
2544 list_add_rcu(&atsru->list, &dmar_atsr_units); in dmar_parse_one_atsr()
2551 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); in intel_iommu_free_atsr()
2563 list_del_rcu(&atsru->list); in dmar_release_one_atsr()
2583 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { in dmar_check_one_atsr()
2584 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, in dmar_check_one_atsr()
2586 return -EBUSY; in dmar_check_one_atsr()
2599 tmp = (struct acpi_dmar_satc *)satcu->hdr; in dmar_find_satc()
2600 if (satc->segment != tmp->segment) in dmar_find_satc()
2602 if (satc->header.length != tmp->header.length) in dmar_find_satc()
2604 if (memcmp(satc, tmp, satc->header.length) == 0) in dmar_find_satc()
2624 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL); in dmar_parse_one_satc()
2626 return -ENOMEM; in dmar_parse_one_satc()
2628 satcu->hdr = (void *)(satcu + 1); in dmar_parse_one_satc()
2629 memcpy(satcu->hdr, hdr, hdr->length); in dmar_parse_one_satc()
2630 satcu->atc_required = satc->flags & 0x1; in dmar_parse_one_satc()
2631 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1), in dmar_parse_one_satc()
2632 (void *)satc + satc->header.length, in dmar_parse_one_satc()
2633 &satcu->devices_cnt); in dmar_parse_one_satc()
2634 if (satcu->devices_cnt && !satcu->devices) { in dmar_parse_one_satc()
2636 return -ENOMEM; in dmar_parse_one_satc()
2638 list_add_rcu(&satcu->list, &dmar_satc_units); in dmar_parse_one_satc()
2645 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add()
2651 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
2662 if (dmaru->ignored) { in intel_iommu_add()
2674 if (ecap_prs(iommu->ecap)) { in intel_iommu_add()
2700 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug()
2705 return -EINVAL; in dmar_iommu_hotplug()
2724 list_del(&rmrru->list); in intel_iommu_free_dmars()
2725 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); in intel_iommu_free_dmars()
2730 list_del(&atsru->list); in intel_iommu_free_dmars()
2734 list_del(&satcu->list); in intel_iommu_free_dmars()
2735 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt); in intel_iommu_free_dmars()
2751 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_find_matched_satc_unit()
2752 if (satc->segment != pci_domain_nr(dev->bus)) in dmar_find_matched_satc_unit()
2754 for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp) in dmar_find_matched_satc_unit()
 2778 		 * This device supports ATS, as it is listed in the SATC table. in dmar_ats_supported()
2779 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
2781 * ATS, hence OS should not enable this device ATS in dmar_ats_supported()
2784 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
2786 for (bus = dev->bus; bus; bus = bus->parent) { in dmar_ats_supported()
2787 bridge = bus->self; in dmar_ats_supported()
2788 /* If it's an integrated device, allow ATS */ in dmar_ats_supported()
2791 /* Connected via non-PCIe: no ATS */ in dmar_ats_supported()
2802 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_ats_supported()
2803 if (atsr->segment != pci_domain_nr(dev->bus)) in dmar_ats_supported()
2806 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) in dmar_ats_supported()
2807 if (tmp == &bridge->dev) in dmar_ats_supported()
2810 if (atsru->include_all) in dmar_ats_supported()
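
The SATC early return above boils down to a two-input predicate; a hedged sketch of that decision (function and parameter names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/*
 * ATS is allowed unless the SATC entry marks ATC as required while the
 * IOMMU runs in legacy (non-scalable) mode, mirroring the early return above.
 */
static bool sketch_satc_allows_ats(bool atc_required, bool scalable_mode)
{
	return !(atc_required && !scalable_mode);
}

int main(void)
{
	/* Only "ATC required + legacy mode" denies ATS; prints: 1 1 0 1 */
	printf("%d %d %d %d\n",
	       sketch_satc_allows_ats(false, false),
	       sketch_satc_allows_ats(false, true),
	       sketch_satc_allows_ats(true, false),
	       sketch_satc_allows_ats(true, true));
	return 0;
}
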
2834 rmrr = container_of(rmrru->hdr, in dmar_iommu_notify_scope_dev()
2836 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2838 ((void *)rmrr) + rmrr->header.length, in dmar_iommu_notify_scope_dev()
2839 rmrr->segment, rmrru->devices, in dmar_iommu_notify_scope_dev()
2840 rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2843 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2844 dmar_remove_dev_scope(info, rmrr->segment, in dmar_iommu_notify_scope_dev()
2845 rmrru->devices, rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2850 if (atsru->include_all) in dmar_iommu_notify_scope_dev()
2853 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_iommu_notify_scope_dev()
2854 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2856 (void *)atsr + atsr->header.length, in dmar_iommu_notify_scope_dev()
2857 atsr->segment, atsru->devices, in dmar_iommu_notify_scope_dev()
2858 atsru->devices_cnt); in dmar_iommu_notify_scope_dev()
2863 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2864 if (dmar_remove_dev_scope(info, atsr->segment, in dmar_iommu_notify_scope_dev()
2865 atsru->devices, atsru->devices_cnt)) in dmar_iommu_notify_scope_dev()
2870 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_iommu_notify_scope_dev()
2871 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2873 (void *)satc + satc->header.length, in dmar_iommu_notify_scope_dev()
2874 satc->segment, satcu->devices, in dmar_iommu_notify_scope_dev()
2875 satcu->devices_cnt); in dmar_iommu_notify_scope_dev()
2880 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2881 if (dmar_remove_dev_scope(info, satc->segment, in dmar_iommu_notify_scope_dev()
2882 satcu->devices, satcu->devices_cnt)) in dmar_iommu_notify_scope_dev()
2912 iommu = drhd->iommu; in intel_iommu_shutdown()
2933 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
2943 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
2951 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
2959 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
2967 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
2976 bitmap_weight(iommu->domain_ids, in domains_used_show()
2977 cap_ndoms(iommu->cap))); in domains_used_show()
2992 .name = "intel-iommu",
3006 if (pdev->external_facing) { in has_external_pci()
3020 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
3023 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3038 /* To avoid a -Wunused-but-set-variable warning. */ in probe_acpi_namespace_devices()
3044 for_each_active_dev_scope(drhd->devices, in probe_acpi_namespace_devices()
3045 drhd->devices_cnt, i, dev) { in probe_acpi_namespace_devices()
3049 if (dev->bus != &acpi_bus_type) in probe_acpi_namespace_devices()
3054 mutex_lock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3056 &adev->physical_node_list, node) { in probe_acpi_namespace_devices()
3057 ret = iommu_probe_device(pn->dev); in probe_acpi_namespace_devices()
3061 mutex_unlock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3078 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3088 int ret = -ENODEV; in intel_iommu_init()
3174 * page-selective invalidations that are required for efficient in intel_iommu_init()
3177 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3179 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3184 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3186 "%s", iommu->name); in intel_iommu_init()
3193 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3204 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3232 * NB - intel-iommu lacks any sort of reference counting for the users of
3239 if (!dev_is_pci(info->dev)) { in domain_context_clear()
3240 domain_context_clear_one(info, info->bus, info->devfn); in domain_context_clear()
3244 pci_for_each_dma_alias(to_pci_dev(info->dev), in domain_context_clear()
3257 struct intel_iommu *iommu = info->iommu; in device_block_translation()
3260 if (info->domain) in device_block_translation()
3261 cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); in device_block_translation()
3271 if (!info->domain) in device_block_translation()
3274 spin_lock_irqsave(&info->domain->lock, flags); in device_block_translation()
3275 list_del(&info->link); in device_block_translation()
3276 spin_unlock_irqrestore(&info->domain->lock, flags); in device_block_translation()
3278 domain_detach_iommu(info->domain, iommu); in device_block_translation()
3279 info->domain = NULL; in device_block_translation()
3307 return cap_fl1gp_support(iommu->cap) ? 2 : 1; in iommu_superpage_capability()
3309 return fls(cap_super_page_val(iommu->cap)); in iommu_superpage_capability()
3315 struct intel_iommu *iommu = info->iommu; in paging_domain_alloc()
3321 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3323 INIT_LIST_HEAD(&domain->devices); in paging_domain_alloc()
3324 INIT_LIST_HEAD(&domain->dev_pasids); in paging_domain_alloc()
3325 INIT_LIST_HEAD(&domain->cache_tags); in paging_domain_alloc()
3326 spin_lock_init(&domain->lock); in paging_domain_alloc()
3327 spin_lock_init(&domain->cache_lock); in paging_domain_alloc()
3328 xa_init(&domain->iommu_array); in paging_domain_alloc()
3330 domain->nid = dev_to_node(dev); in paging_domain_alloc()
3331 domain->use_first_level = first_stage; in paging_domain_alloc()
3334 addr_width = agaw_to_width(iommu->agaw); in paging_domain_alloc()
3335 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_alloc()
3336 addr_width = cap_mgaw(iommu->cap); in paging_domain_alloc()
3337 domain->gaw = addr_width; in paging_domain_alloc()
3338 domain->agaw = iommu->agaw; in paging_domain_alloc()
3339 domain->max_addr = __DOMAIN_MAX_ADDR(addr_width); in paging_domain_alloc()
3342 domain->iommu_coherency = iommu_paging_structure_coherency(iommu); in paging_domain_alloc()
3345 domain->domain.pgsize_bitmap = SZ_4K; in paging_domain_alloc()
3346 domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage); in paging_domain_alloc()
3347 domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); in paging_domain_alloc()
3350 * IOVA aperture: First-level translation restricts the input-address in paging_domain_alloc()
3352 * as address bit [N-1], where N is 48-bits with 4-level paging and in paging_domain_alloc()
3353 * 57-bits with 5-level paging). Hence, skip bit [N-1]. in paging_domain_alloc()
3355 domain->domain.geometry.force_aperture = true; in paging_domain_alloc()
3356 domain->domain.geometry.aperture_start = 0; in paging_domain_alloc()
3358 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); in paging_domain_alloc()
3360 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); in paging_domain_alloc()
3363 domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL); in paging_domain_alloc()
3364 if (!domain->pgd) { in paging_domain_alloc()
3366 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3368 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in paging_domain_alloc()
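
A tiny worked example of the two aperture ends set above, for a 48-bit gaw (first-stage skips bit N-1, second-stage does not):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	/* gaw = 48: first-stage aperture ends at 2^47 - 1, second-stage at 2^48 - 1. */
	printf("first-stage:  %#llx\n", (unsigned long long)SKETCH_MAX_ADDR(48 - 1));
	printf("second-stage: %#llx\n", (unsigned long long)SKETCH_MAX_ADDR(48));
	return 0;
}
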
3380 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_paging_flags()
3388 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3390 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3392 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3400 if (!sm_supported(iommu) || !ecap_slts(iommu->ecap)) in intel_iommu_domain_alloc_paging_flags()
3401 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3410 domain = &dmar_domain->domain; in intel_iommu_domain_alloc_paging_flags()
3411 domain->type = IOMMU_DOMAIN_UNMANAGED; in intel_iommu_domain_alloc_paging_flags()
3412 domain->owner = &intel_iommu_ops; in intel_iommu_domain_alloc_paging_flags()
3413 domain->ops = intel_iommu_ops.default_domain_ops; in intel_iommu_domain_alloc_paging_flags()
3416 dmar_domain->nested_parent = true; in intel_iommu_domain_alloc_paging_flags()
3417 INIT_LIST_HEAD(&dmar_domain->s1_domains); in intel_iommu_domain_alloc_paging_flags()
3418 spin_lock_init(&dmar_domain->s1_lock); in intel_iommu_domain_alloc_paging_flags()
3422 if (dmar_domain->use_first_level) { in intel_iommu_domain_alloc_paging_flags()
3424 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3426 domain->dirty_ops = &intel_dirty_ops; in intel_iommu_domain_alloc_paging_flags()
3436 WARN_ON(dmar_domain->nested_parent && in intel_iommu_domain_free()
3437 !list_empty(&dmar_domain->s1_domains)); in intel_iommu_domain_free()
3445 struct intel_iommu *iommu = info->iommu; in paging_domain_compatible()
3448 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in paging_domain_compatible()
3449 return -EPERM; in paging_domain_compatible()
3451 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in paging_domain_compatible()
3452 return -EINVAL; in paging_domain_compatible()
3454 if (domain->dirty_ops && !ssads_supported(iommu)) in paging_domain_compatible()
3455 return -EINVAL; in paging_domain_compatible()
3457 if (dmar_domain->iommu_coherency != in paging_domain_compatible()
3459 return -EINVAL; in paging_domain_compatible()
3461 if (dmar_domain->iommu_superpage != in paging_domain_compatible()
3462 iommu_superpage_capability(iommu, dmar_domain->use_first_level)) in paging_domain_compatible()
3463 return -EINVAL; in paging_domain_compatible()
3465 if (dmar_domain->use_first_level && in paging_domain_compatible()
3466 (!sm_supported(iommu) || !ecap_flts(iommu->ecap))) in paging_domain_compatible()
3467 return -EINVAL; in paging_domain_compatible()
3470 addr_width = agaw_to_width(iommu->agaw); in paging_domain_compatible()
3471 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_compatible()
3472 addr_width = cap_mgaw(iommu->cap); in paging_domain_compatible()
3474 if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw) in paging_domain_compatible()
3475 return -EINVAL; in paging_domain_compatible()
3478 context_copied(iommu, info->bus, info->devfn)) in paging_domain_compatible()
3510 if (dmar_domain->set_pte_snp) in intel_iommu_map()
3514 if (dmar_domain->max_addr < max_addr) { in intel_iommu_map()
3518 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; in intel_iommu_map()
3522 __func__, dmar_domain->gaw, max_addr); in intel_iommu_map()
3523 return -EFAULT; in intel_iommu_map()
3525 dmar_domain->max_addr = max_addr; in intel_iommu_map()
3544 return -EINVAL; in intel_iommu_map_pages()
3547 return -EINVAL; in intel_iommu_map_pages()
3565 size argument if it happens to be a large-page mapping. */ in intel_iommu_unmap()
3574 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; in intel_iommu_unmap()
3576 domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist); in intel_iommu_unmap()
3578 if (dmar_domain->max_addr == iova + size) in intel_iommu_unmap()
3579 dmar_domain->max_addr = iova; in intel_iommu_unmap()
3582 * We do not use page-selective IOTLB invalidation in flush queue, in intel_iommu_unmap()
3605 cache_tag_flush_range(to_dmar_domain(domain), gather->start, in intel_iommu_tlb_sync()
3606 gather->end, list_empty(&gather->freelist)); in intel_iommu_tlb_sync()
3607 iommu_put_pages_list(&gather->freelist); in intel_iommu_tlb_sync()
3623 VTD_PAGE_SHIFT) - 1)); in intel_iommu_iova_to_phys()
3633 assert_spin_locked(&domain->lock); in domain_support_force_snooping()
3634 list_for_each_entry(info, &domain->devices, link) { in domain_support_force_snooping()
3635 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
3648 assert_spin_locked(&domain->lock); in domain_set_force_snooping()
3650 * Second level page table supports per-PTE snoop control. The in domain_set_force_snooping()
3653 if (!domain->use_first_level) { in domain_set_force_snooping()
3654 domain->set_pte_snp = true; in domain_set_force_snooping()
3658 list_for_each_entry(info, &domain->devices, link) in domain_set_force_snooping()
3659 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
3668 if (dmar_domain->force_snooping) in intel_iommu_enforce_cache_coherency()
3671 spin_lock_irqsave(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3673 (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { in intel_iommu_enforce_cache_coherency()
3674 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3679 dmar_domain->force_snooping = true; in intel_iommu_enforce_cache_coherency()
3680 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3696 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
3698 return ssads_supported(info->iommu); in intel_iommu_capable()
3713 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
3714 return ERR_PTR(-ENODEV); in intel_iommu_probe_device()
3718 return ERR_PTR(-ENOMEM); in intel_iommu_probe_device()
3721 info->bus = pdev->bus->number; in intel_iommu_probe_device()
3722 info->devfn = pdev->devfn; in intel_iommu_probe_device()
3723 info->segment = pci_domain_nr(pdev->bus); in intel_iommu_probe_device()
3725 info->bus = bus; in intel_iommu_probe_device()
3726 info->devfn = devfn; in intel_iommu_probe_device()
3727 info->segment = iommu->segment; in intel_iommu_probe_device()
3730 info->dev = dev; in intel_iommu_probe_device()
3731 info->iommu = iommu; in intel_iommu_probe_device()
3733 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
3736 info->ats_supported = 1; in intel_iommu_probe_device()
3737 info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev); in intel_iommu_probe_device()
3746 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
3747 info->pfsid = pci_dev_id(pci_physfn(pdev)); in intel_iommu_probe_device()
3748 info->ats_qdep = pci_ats_queue_depth(pdev); in intel_iommu_probe_device()
3755 info->pasid_supported = features | 1; in intel_iommu_probe_device()
3758 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
3760 info->pri_supported = 1; in intel_iommu_probe_device()
3779 if (!context_copied(iommu, info->bus, info->devfn)) { in intel_iommu_probe_device()
3788 return &iommu->iommu; in intel_iommu_probe_device()
3802 struct intel_iommu *iommu = info->iommu; in intel_iommu_probe_finalize()
3806 * device is undefined if you enable PASID support after ATS support. in intel_iommu_probe_finalize()
3810 if (info->pasid_supported && in intel_iommu_probe_finalize()
3811 !pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1)) in intel_iommu_probe_finalize()
3812 info->pasid_enabled = 1; in intel_iommu_probe_finalize()
3822 struct intel_iommu *iommu = info->iommu; in intel_iommu_release_device()
3827 if (info->pasid_enabled) { in intel_iommu_release_device()
3829 info->pasid_enabled = 0; in intel_iommu_release_device()
3832 mutex_lock(&iommu->iopf_lock); in intel_iommu_release_device()
3835 mutex_unlock(&iommu->iopf_lock); in intel_iommu_release_device()
3838 !context_copied(iommu, info->bus, info->devfn)) in intel_iommu_release_device()
3857 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, in intel_iommu_get_resv_regions()
3867 length = rmrr->end_address - rmrr->base_address + 1; in intel_iommu_get_resv_regions()
3872 resv = iommu_alloc_resv_region(rmrr->base_address, in intel_iommu_get_resv_regions()
3878 list_add_tail(&resv->list, head); in intel_iommu_get_resv_regions()
3887 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { in intel_iommu_get_resv_regions()
3892 list_add_tail(&reg->list, head); in intel_iommu_get_resv_regions()
3898 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, in intel_iommu_get_resv_regions()
3902 list_add_tail(&reg->list, head); in intel_iommu_get_resv_regions()
3915 struct intel_iommu *iommu = info->iommu; in intel_iommu_enable_iopf()
3918 if (!info->pri_enabled) in intel_iommu_enable_iopf()
3919 return -ENODEV; in intel_iommu_enable_iopf()
3921 if (info->iopf_refcount) { in intel_iommu_enable_iopf()
3922 info->iopf_refcount++; in intel_iommu_enable_iopf()
3926 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3930 info->iopf_refcount = 1; in intel_iommu_enable_iopf()
3938 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf()
3940 if (WARN_ON(!info->pri_enabled || !info->iopf_refcount)) in intel_iommu_disable_iopf()
3943 if (--info->iopf_refcount) in intel_iommu_disable_iopf()
3946 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_iopf()
3960 return -ENODEV; in intel_iommu_dev_enable_feat()
3976 return -ENODEV; in intel_iommu_dev_disable_feat()
3984 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
3994 if (pdev->untrusted) { in risky_device()
3997 pdev->vendor, pdev->device); in risky_device()
4007 cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1); in intel_iommu_iotlb_sync_map()
4017 struct intel_iommu *iommu = info->iommu; in domain_remove_dev_pasid()
4025 if (domain->type == IOMMU_DOMAIN_IDENTITY) in domain_remove_dev_pasid()
4029 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4030 list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { in domain_remove_dev_pasid()
4031 if (curr->dev == dev && curr->pasid == pasid) { in domain_remove_dev_pasid()
4032 list_del(&curr->link_domain); in domain_remove_dev_pasid()
4037 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4053 intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); in blocking_domain_set_dev_pasid()
4065 struct intel_iommu *iommu = info->iommu; in domain_add_dev_pasid()
4072 return ERR_PTR(-ENOMEM); in domain_add_dev_pasid()
4082 dev_pasid->dev = dev; in domain_add_dev_pasid()
4083 dev_pasid->pasid = pasid; in domain_add_dev_pasid()
4084 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4085 list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); in domain_add_dev_pasid()
4086 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4102 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid()
4106 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in intel_iommu_set_dev_pasid()
4107 return -EINVAL; in intel_iommu_set_dev_pasid()
4110 return -EOPNOTSUPP; in intel_iommu_set_dev_pasid()
4112 if (domain->dirty_ops) in intel_iommu_set_dev_pasid()
4113 return -EINVAL; in intel_iommu_set_dev_pasid()
4115 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4116 return -EBUSY; in intel_iommu_set_dev_pasid()
4126 if (dmar_domain->use_first_level) in intel_iommu_set_dev_pasid()
4149 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info()
4154 return ERR_PTR(-ENOMEM); in intel_iommu_hw_info()
4156 vtd->flags = IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17; in intel_iommu_hw_info()
4157 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4158 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4166 * hold the domain->lock when calling it.
4174 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, in device_set_dirty_tracking()
4190 spin_lock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4191 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4192 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4193 ret = device_set_dirty_tracking(&s1_domain->devices, enable); in parent_domain_set_dirty_tracking()
4194 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4198 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4202 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4203 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4204 device_set_dirty_tracking(&s1_domain->devices, in parent_domain_set_dirty_tracking()
4205 domain->dirty_tracking); in parent_domain_set_dirty_tracking()
4206 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4208 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4218 spin_lock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4219 if (dmar_domain->dirty_tracking == enable) in intel_iommu_set_dirty_tracking()
4222 ret = device_set_dirty_tracking(&dmar_domain->devices, enable); in intel_iommu_set_dirty_tracking()
4226 if (dmar_domain->nested_parent) { in intel_iommu_set_dirty_tracking()
4232 dmar_domain->dirty_tracking = enable; in intel_iommu_set_dirty_tracking()
4234 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4239 device_set_dirty_tracking(&dmar_domain->devices, in intel_iommu_set_dirty_tracking()
4240 dmar_domain->dirty_tracking); in intel_iommu_set_dirty_tracking()
4241 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4251 unsigned long end = iova + size - 1; in intel_iommu_read_and_clear_dirty()
4260 if (!dmar_domain->dirty_tracking && dirty->bitmap) in intel_iommu_read_and_clear_dirty()
4261 return -EINVAL; in intel_iommu_read_and_clear_dirty()
4291 struct intel_iommu *iommu = info->iommu; in context_setup_pass_through()
4294 spin_lock(&iommu->lock); in context_setup_pass_through()
4297 spin_unlock(&iommu->lock); in context_setup_pass_through()
4298 return -ENOMEM; in context_setup_pass_through()
4302 spin_unlock(&iommu->lock); in context_setup_pass_through()
4312 * AGAW value supported by hardware. And ASR is ignored by hardware. in context_setup_pass_through()
4314 context_set_address_width(context, iommu->msagaw); in context_setup_pass_through()
4318 if (!ecap_coherent(iommu->ecap)) in context_setup_pass_through()
4321 spin_unlock(&iommu->lock); in context_setup_pass_through()
4338 return context_setup_pass_through(dev, info->bus, info->devfn); in device_setup_pass_through()
4347 struct intel_iommu *iommu = info->iommu; in identity_domain_attach_dev()
4368 struct intel_iommu *iommu = info->iommu; in identity_domain_set_dev_pasid()
4372 return -EOPNOTSUPP; in identity_domain_set_dev_pasid()
4480 pci_info(dev, "Forcing write-buffer flush capability\n"); in quirk_iommu_rwbf()
4532 ver = (dev->device >> 8) & 0xff; in quirk_igfx_skip_te_disable()
4551 message if VT-d is actually disabled.
4572 known-broken BIOSes _don't_ actually hide it, so far. */ in check_tylersburg_isoch()
4589 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ in check_tylersburg_isoch()
 4616  * Here we deal with a device TLB defect where a device may inadvertently issue ATS
4647 if (likely(!info->dtlb_extra_inval)) in quirk_extra_dev_tlb_flush()
4650 sid = PCI_DEVID(info->bus, info->devfn); in quirk_extra_dev_tlb_flush()
4652 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4655 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4665 * VT-d spec. The VT-d hardware implementation may support some but not
4670 * - 0: Command successful without any error;
4671 * - Negative: software error value;
4672 * - Nonzero positive: failure status code defined in Table 48.
4680 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
4681 return -ENODEV; in ecmd_submit_sync()
4683 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
4685 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
4687 ret = -EBUSY; in ecmd_submit_sync()
4693 * - There is no side effect if an ecmd doesn't require an in ecmd_submit_sync()
4695 * - It's not invoked in any critical path. The extra MMIO in ecmd_submit_sync()
4698 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
4699 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
4705 ret = -ETIMEDOUT; in ecmd_submit_sync()
4711 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()
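
Finally, a small sketch (illustrative only, not from the file) of how a caller might classify the three return-value classes documented above ecmd_submit_sync():

#include <stdio.h>

/* Illustrative classification of the documented return-value classes. */
static const char *sketch_ecmd_status(int ret)
{
	if (ret == 0)
		return "success";
	if (ret < 0)
		return "software error (negative errno)";
	return "nonzero positive: failure status code (VT-d spec Table 48)";
}

int main(void)
{
	int samples[] = { 0, -16 /* -EBUSY */, 2 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%d: %s\n", samples[i], sketch_ecmd_status(samples[i]));
	return 0;
}
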