Lines Matching full:domain

219  * 8-23: domain id
340 * This domain is a static identity mapping domain.
341 * 1. This domain creates a static 1:1 mapping to all usable memory.
343 * 3. Each iommu maps to this domain if successful.
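
The comment above describes the si_domain used for identity mapping. As a minimal sketch of what "static 1:1 mapping" means in practice, the fragment below maps a range of physical pfns to the identical IOVA pfns via domain_pfn_mapping(), which is quoted later in this listing; the helper name sketch_identity_map_range and the DMA_PTE_READ/DMA_PTE_WRITE permission bits are illustrative assumptions, not quoted code.

    /* Hedged sketch: identity-map one usable physical range into a domain.
     * IOVA pfn == physical pfn is what makes it a static 1:1 mapping. */
    static int sketch_identity_map_range(struct dmar_domain *domain,
                                         unsigned long start_pfn,
                                         unsigned long last_pfn)
    {
            return domain_pfn_mapping(domain, start_pfn, start_pfn,
                                      last_pfn - start_pfn + 1,
                                      DMA_PTE_READ | DMA_PTE_WRITE);
    }
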
348 /* devices under the same p2p bridge are owned in one domain */
351 /* domain represents a virtual machine, more than one device
352 * across iommus may be owned in one domain, e.g. kvm guest.
360 int id; /* domain id */
362 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
365 struct iova_domain iovad; /* iova's that belong to this domain */
373 int flags; /* flags to find out type of domain */
381 spinlock_t iommu_lock; /* protect iommu set in domain */
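
Collected in one place, the dmar_domain members quoted above and elsewhere in this listing (pgd, agaw/gaw, nid, iommu_count, the coherency/snooping/superpage capability bits, max_addr, the devices list) give roughly the following shape. This is a hedged reconstruction: member order, the exact types of the capability fields, and anything not literally quoted are assumptions.

    /* Hedged reconstruction of struct dmar_domain from the fields quoted in
     * this listing; ordering and unquoted details are assumptions. */
    struct dmar_domain {
            int id;                         /* domain id */
            int nid;                        /* NUMA node, -1 if unknown */
            unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */
            struct list_head devices;       /* list of device_domain_info */
            struct iova_domain iovad;       /* iova's that belong to this domain */
            struct dma_pte *pgd;            /* page-table root */
            int gaw;                        /* guest address width */
            int agaw;                       /* adjusted guest address width */
            int flags;                      /* DOMAIN_FLAG_* type bits */
            int iommu_coherency;            /* page-table walk is cache coherent */
            int iommu_snooping;             /* snoop control supported */
            int iommu_superpage;            /* max superpage level supported */
            int iommu_count;                /* iommus this domain is attached to */
            u64 max_addr;                   /* highest mapped address (vm domains) */
            spinlock_t iommu_lock;          /* protect iommu set in domain */
    };
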
385 /* PCI domain-device relationship */
387 struct list_head link; /* link to domain siblings */
389 int segment; /* PCI domain */
394 struct dmar_domain *domain; /* pointer to domain */ member
405 struct dmar_domain *domain[HIGH_WATER_MARK]; member
419 static void domain_remove_dev_info(struct dmar_domain *domain);
565 /* This function only returns a single iommu in a domain */
566 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) in domain_get_iommu() argument
570 /* si_domain and vm domain should not get here. */ in domain_get_iommu()
571 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); in domain_get_iommu()
572 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); in domain_get_iommu()
574 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); in domain_get_iommu()
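
From the two BUG_ON lines above, domain_get_iommu() is only meaningful for domains bound to exactly one iommu, so it can simply return the iommu for the first (and only) bit set in iommu_bmp. A sketch, assuming a global g_iommus[] table indexed by seq_id; only g_num_of_iommus is actually visible in the listing, so that table name is an assumption.

    /* Hedged sketch of domain_get_iommu(); g_iommus[] is an assumed global
     * lookup table keyed by iommu seq_id. */
    static struct intel_iommu *sketch_domain_get_iommu(struct dmar_domain *domain)
    {
            int iommu_id;

            /* si_domain and vm domains may span several iommus */
            BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
            BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

            iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
            if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                    return NULL;

            return g_iommus[iommu_id];
    }
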
581 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency() argument
585 domain->iommu_coherency = 1; in domain_update_iommu_coherency()
587 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { in domain_update_iommu_coherency()
589 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
595 static void domain_update_iommu_snooping(struct dmar_domain *domain) in domain_update_iommu_snooping() argument
599 domain->iommu_snooping = 1; in domain_update_iommu_snooping()
601 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { in domain_update_iommu_snooping()
603 domain->iommu_snooping = 0; in domain_update_iommu_snooping()
609 static void domain_update_iommu_superpage(struct dmar_domain *domain) in domain_update_iommu_superpage() argument
616 domain->iommu_superpage = 0; in domain_update_iommu_superpage()
627 domain->iommu_superpage = fls(mask); in domain_update_iommu_superpage()
631 static void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap() argument
633 domain_update_iommu_coherency(domain); in domain_update_iommu_cap()
634 domain_update_iommu_snooping(domain); in domain_update_iommu_cap()
635 domain_update_iommu_superpage(domain); in domain_update_iommu_cap()
668 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache() argument
671 if (!domain->iommu_coherency) in domain_flush_cache()
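
The single condition above is the whole point of domain_flush_cache(): page-table updates only need an explicit CPU cache flush when the iommu's page-table walker is not cache coherent. A sketch, assuming the x86 clflush_cache_range() helper (not shown in the listing) does the actual flushing:

    /* Hedged sketch of domain_flush_cache(); clflush_cache_range() is the
     * assumed flush primitive for non-coherent iommus. */
    static void sketch_domain_flush_cache(struct dmar_domain *domain,
                                          void *addr, int size)
    {
            if (!domain->iommu_coherency)
                    clflush_cache_range(addr, size);
    }
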
764 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte() argument
767 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in pfn_to_dma_pte()
769 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
772 BUG_ON(!domain->pgd); in pfn_to_dma_pte()
774 parent = domain->pgd; in pfn_to_dma_pte()
789 tmp_page = alloc_pgtable_page(domain->nid); in pfn_to_dma_pte()
794 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); in pfn_to_dma_pte()
801 domain_flush_cache(domain, pte, sizeof(*pte)); in pfn_to_dma_pte()
813 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte() argument
818 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
821 parent = domain->pgd; in dma_pfn_level_pte()
845 static int dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range() argument
849 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in dma_pte_clear_range()
861 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range()
872 domain_flush_cache(domain, first_pte, in dma_pte_clear_range()
882 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable() argument
886 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in dma_pte_free_pagetable()
888 int total = agaw_to_level(domain->agaw); in dma_pte_free_pagetable()
908 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page); in dma_pte_free_pagetable()
925 domain_flush_cache(domain, first_pte, in dma_pte_free_pagetable()
932 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
933 free_pgtable_page(domain->pgd); in dma_pte_free_pagetable()
934 domain->pgd = NULL; in dma_pte_free_pagetable()
1087 struct dmar_domain *domain, int segment, u8 bus, u8 devfn) in iommu_support_dev_iotlb() argument
1101 list_for_each_entry(info, &domain->devices, link) in iommu_support_dev_iotlb()
1138 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, in iommu_flush_dev_iotlb() argument
1146 list_for_each_entry(info, &domain->devices, link) { in iommu_flush_dev_iotlb()
1166  * Fall back to domain selective flush if no PSI support or the size is in iommu_flush_iotlb_psi()
1255 printk(KERN_ERR "Allocating domain id array failed\n"); in iommu_init_domains()
1261 printk(KERN_ERR "Allocating domain array failed\n"); in iommu_init_domains()
1275 static void domain_exit(struct dmar_domain *domain);
1276 static void vm_domain_exit(struct dmar_domain *domain);
1280 struct dmar_domain *domain; in free_dmar_iommu() local
1286 domain = iommu->domains[i]; in free_dmar_iommu()
1289 spin_lock_irqsave(&domain->iommu_lock, flags); in free_dmar_iommu()
1290 if (--domain->iommu_count == 0) { in free_dmar_iommu()
1291 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) in free_dmar_iommu()
1292 vm_domain_exit(domain); in free_dmar_iommu()
1294 domain_exit(domain); in free_dmar_iommu()
1296 spin_unlock_irqrestore(&domain->iommu_lock, flags); in free_dmar_iommu()
1330 struct dmar_domain *domain; in alloc_domain() local
1332 domain = alloc_domain_mem(); in alloc_domain()
1333 if (!domain) in alloc_domain()
1336 domain->nid = -1; in alloc_domain()
1337 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); in alloc_domain()
1338 domain->flags = 0; in alloc_domain()
1340 return domain; in alloc_domain()
1343 static int iommu_attach_domain(struct dmar_domain *domain, in iommu_attach_domain() argument
1357 printk(KERN_ERR "IOMMU: no free domain ids\n"); in iommu_attach_domain()
1361 domain->id = num; in iommu_attach_domain()
1363 set_bit(iommu->seq_id, &domain->iommu_bmp); in iommu_attach_domain()
1364 iommu->domains[num] = domain; in iommu_attach_domain()
1370 static void iommu_detach_domain(struct dmar_domain *domain, in iommu_detach_domain() argument
1380 if (iommu->domains[num] == domain) { in iommu_detach_domain()
1388 clear_bit(iommu->seq_id, &domain->iommu_bmp); in iommu_detach_domain()
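
iommu_attach_domain() and iommu_detach_domain() above keep the two-way bookkeeping consistent: a per-iommu domain number stored in iommu->domains[] and a per-domain bit set in iommu_bmp. A sketch of the attach side, assuming the free id is found by scanning iommu->domain_ids (quoted near the end of this listing) with find_first_zero_bit and that the id count comes from cap_ndoms(); locking is omitted and both of those details are assumptions.

    /* Hedged sketch of the id allocation inside iommu_attach_domain();
     * domain_ids/cap_ndoms() usage is assumed and locking is omitted. */
    static int sketch_attach_domain(struct dmar_domain *domain,
                                    struct intel_iommu *iommu)
    {
            unsigned long ndomains = cap_ndoms(iommu->cap);  /* assumed */
            int num;

            num = find_first_zero_bit(iommu->domain_ids, ndomains);
            if (num >= ndomains) {
                    printk(KERN_ERR "IOMMU: no free domain ids\n");
                    return -ENOMEM;
            }

            set_bit(num, iommu->domain_ids);                /* reserve the id */
            domain->id = num;
            set_bit(iommu->seq_id, &domain->iommu_bmp);     /* domain -> iommu */
            iommu->domains[num] = domain;                   /* iommu -> domain */
            return 0;
    }
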
1436 static void domain_reserve_special_ranges(struct dmar_domain *domain) in domain_reserve_special_ranges() argument
1438 copy_reserved_iova(&reserved_iova_list, &domain->iovad); in domain_reserve_special_ranges()
1455 static int domain_init(struct dmar_domain *domain, int guest_width) in domain_init() argument
1461 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); in domain_init()
1462 spin_lock_init(&domain->iommu_lock); in domain_init()
1464 domain_reserve_special_ranges(domain); in domain_init()
1467 iommu = domain_get_iommu(domain); in domain_init()
1470 domain->gaw = guest_width; in domain_init()
1481 domain->agaw = agaw; in domain_init()
1482 INIT_LIST_HEAD(&domain->devices); in domain_init()
1485 domain->iommu_coherency = 1; in domain_init()
1487 domain->iommu_coherency = 0; in domain_init()
1490 domain->iommu_snooping = 1; in domain_init()
1492 domain->iommu_snooping = 0; in domain_init()
1494 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1495 domain->iommu_count = 1; in domain_init()
1496 domain->nid = iommu->node; in domain_init()
1499 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in domain_init()
1500 if (!domain->pgd) in domain_init()
1502 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1506 static void domain_exit(struct dmar_domain *domain) in domain_exit() argument
1511  /* Domain 0 is reserved, so don't process it */ in domain_exit()
1512 if (!domain) in domain_exit()
1515 /* Flush any lazy unmaps that may reference this domain */ in domain_exit()
1519 domain_remove_dev_info(domain); in domain_exit()
1521 put_iova_domain(&domain->iovad); in domain_exit()
1524 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in domain_exit()
1527 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in domain_exit()
1530 if (test_bit(iommu->seq_id, &domain->iommu_bmp)) in domain_exit()
1531 iommu_detach_domain(domain, iommu); in domain_exit()
1533 free_domain_mem(domain); in domain_exit()
1536 static int domain_context_mapping_one(struct dmar_domain *domain, int segment, in domain_context_mapping_one() argument
1552 BUG_ON(!domain->pgd); in domain_context_mapping_one()
1569 id = domain->id; in domain_context_mapping_one()
1570 pgd = domain->pgd; in domain_context_mapping_one()
1572 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || in domain_context_mapping_one()
1573 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { in domain_context_mapping_one()
1576 /* find an available domain id for this device in iommu */ in domain_context_mapping_one()
1579 if (iommu->domains[num] == domain) { in domain_context_mapping_one()
1590 printk(KERN_ERR "IOMMU: no free domain ids\n"); in domain_context_mapping_one()
1595 iommu->domains[num] = domain; in domain_context_mapping_one()
1604 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1617 info = iommu_support_dev_iotlb(domain, segment, bus, devfn); in domain_context_mapping_one()
1635 domain_flush_cache(domain, context, sizeof(*context)); in domain_context_mapping_one()
1641 * domain #0, which we have to flush: in domain_context_mapping_one()
1648 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
1655 spin_lock_irqsave(&domain->iommu_lock, flags); in domain_context_mapping_one()
1656 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { in domain_context_mapping_one()
1657 domain->iommu_count++; in domain_context_mapping_one()
1658 if (domain->iommu_count == 1) in domain_context_mapping_one()
1659 domain->nid = iommu->node; in domain_context_mapping_one()
1660 domain_update_iommu_cap(domain); in domain_context_mapping_one()
1662 spin_unlock_irqrestore(&domain->iommu_lock, flags); in domain_context_mapping_one()
1667 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, in domain_context_mapping() argument
1673 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), in domain_context_mapping()
1686 ret = domain_context_mapping_one(domain, in domain_context_mapping()
1695 return domain_context_mapping_one(domain, in domain_context_mapping()
1700 return domain_context_mapping_one(domain, in domain_context_mapping()
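
The repeated domain_context_mapping_one() calls above are the per-(bus, devfn) context-entry programming; the comments elsewhere in this listing ("pcie-to-pci bridge already has a domain, use it") imply the caller also has to cover bridges between a legacy PCI device and the root. A very rough sketch of that walk, assuming pci_is_pcie() is the test used and deliberately ignoring the special secondary-bus handling for PCIe-to-PCI bridges:

    /* Hedged sketch of domain_context_mapping(): program the device itself,
     * then every upstream bridge for conventional PCI.  The real driver also
     * special-cases PCIe-to-PCI bridges (secondary bus, devfn 0), which is
     * not reproduced here. */
    static int sketch_context_mapping(struct dmar_domain *domain,
                                      struct pci_dev *pdev, int translation)
    {
            struct pci_dev *parent;
            int ret;

            ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
                                             pdev->bus->number, pdev->devfn,
                                             translation);
            if (ret || pci_is_pcie(pdev))
                    return ret;

            for (parent = pdev->bus->self; parent; parent = parent->bus->self) {
                    ret = domain_context_mapping_one(domain,
                                                     pci_domain_nr(parent->bus),
                                                     parent->bus->number,
                                                     parent->devfn, translation);
                    if (ret)
                            return ret;
            }
            return 0;
    }
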
1751 static inline int hardware_largepage_caps(struct dmar_domain *domain, in hardware_largepage_caps() argument
1759 support = domain->iommu_superpage; in hardware_largepage_caps()
1778 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping() argument
1784 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in __domain_mapping()
1815 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); in __domain_mapping()
1817 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl); in __domain_mapping()
1867 domain_flush_cache(domain, first_pte, in __domain_mapping()
1878 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_sg_mapping() argument
1882 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); in domain_sg_mapping()
1885 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_pfn_mapping() argument
1889 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); in domain_pfn_mapping()
1903 static void domain_remove_dev_info(struct dmar_domain *domain) in domain_remove_dev_info() argument
1910 while (!list_empty(&domain->devices)) { in domain_remove_dev_info()
1911 info = list_entry(domain->devices.next, in domain_remove_dev_info()
1938 /* No lock here, assumes no domain exit in normal case */ in find_domain()
1941 return info->domain; in find_domain()
1945 /* domain is initialized */
1948 struct dmar_domain *domain, *found = NULL; in get_domain_for_dev() local
1958 domain = find_domain(pdev); in get_domain_for_dev()
1959 if (domain) in get_domain_for_dev()
1960 return domain; in get_domain_for_dev()
1977 found = info->domain; in get_domain_for_dev()
1982  /* pcie-pci bridge already has a domain, use it */ in get_domain_for_dev()
1984 domain = found; in get_domain_for_dev()
1989 domain = alloc_domain(); in get_domain_for_dev()
1990 if (!domain) in get_domain_for_dev()
1993 /* Allocate new domain for the device */ in get_domain_for_dev()
2002 ret = iommu_attach_domain(domain, iommu); in get_domain_for_dev()
2004 free_domain_mem(domain); in get_domain_for_dev()
2008 if (domain_init(domain, gaw)) { in get_domain_for_dev()
2009 domain_exit(domain); in get_domain_for_dev()
2017 domain_exit(domain); in get_domain_for_dev()
2024 info->domain = domain; in get_domain_for_dev()
2025 /* This domain is shared by devices under p2p bridge */ in get_domain_for_dev()
2026 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; in get_domain_for_dev()
2028  /* pcie-to-pci bridge already has a domain, use it */ in get_domain_for_dev()
2034 found = tmp->domain; in get_domain_for_dev()
2041 domain_exit(domain); in get_domain_for_dev()
2042 domain = found; in get_domain_for_dev()
2044 list_add(&info->link, &domain->devices); in get_domain_for_dev()
2058 info->domain = domain; in get_domain_for_dev()
2064 if (found != domain) { in get_domain_for_dev()
2065 domain_exit(domain); in get_domain_for_dev()
2066 domain = found; in get_domain_for_dev()
2069 return domain; in get_domain_for_dev()
2071 list_add(&info->link, &domain->devices); in get_domain_for_dev()
2075 return domain; in get_domain_for_dev()
2086 static int iommu_domain_identity_map(struct dmar_domain *domain, in iommu_domain_identity_map() argument
2093 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), in iommu_domain_identity_map()
2099 pr_debug("Mapping reserved region %llx-%llx for domain %d\n", in iommu_domain_identity_map()
2100 start, end, domain->id); in iommu_domain_identity_map()
2105 dma_pte_clear_range(domain, first_vpfn, last_vpfn); in iommu_domain_identity_map()
2107 return domain_pfn_mapping(domain, first_vpfn, first_vpfn, in iommu_domain_identity_map()
2116 struct dmar_domain *domain; in iommu_prepare_identity_map() local
2119 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_prepare_identity_map()
2120 if (!domain) in iommu_prepare_identity_map()
2127 if (domain == si_domain && hw_pass_through) { in iommu_prepare_identity_map()
2147 if (end >> agaw_to_width(domain->agaw)) { in iommu_prepare_identity_map()
2150 agaw_to_width(domain->agaw), in iommu_prepare_identity_map()
2158 ret = iommu_domain_identity_map(domain, start, end); in iommu_prepare_identity_map()
2163 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); in iommu_prepare_identity_map()
2170 domain_exit(domain); in iommu_prepare_identity_map()
2208 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2220 pr_debug("Identity mapping domain is domain %d\n", si_domain->id); in si_domain_init()
2255 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2266 return (info->domain == si_domain); in identity_mapping()
2271 static int domain_add_dev_info(struct dmar_domain *domain, in domain_add_dev_info() argument
2283 ret = domain_context_mapping(domain, pdev, translation); in domain_add_dev_info()
2293 info->domain = domain; in domain_add_dev_info()
2296 list_add(&info->link, &domain->devices); in domain_add_dev_info()
2316 * We want to start off with all devices in the 1:1 domain, and in iommu_should_identity_map()
2329 * the 1:1 domain, just in _case_ one of their siblings turns out in iommu_should_identity_map()
2343 * take them out of the 1:1 domain later. in iommu_should_identity_map()
2533 * locate drhd for dev, alloc domain for dev in init_dmars()
2534 * allocate free domain in init_dmars()
2539 * init context with domain, translation etc in init_dmars()
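
The init_dmars() comment fragments above summarize the boot-time flow: for each device, find or allocate a domain, then program its context entry. A compressed sketch of that per-device step using helpers quoted in this listing; the error handling and the choice of CONTEXT_TT_MULTI_LEVEL for every device are assumptions made for illustration.

    /* Hedged sketch of the per-device step described by the init_dmars()
     * comment: allocate/look up a domain, then "init context with domain,
     * translation etc".  Details are assumed. */
    static int sketch_setup_device(struct pci_dev *pdev)
    {
            struct dmar_domain *domain;

            domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
            if (!domain)
                    return -ENOMEM;

            return domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
    }
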
2613 struct dmar_domain *domain, in intel_alloc_iova() argument
2620 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); in intel_alloc_iova()
2628 iova = alloc_iova(&domain->iovad, nrpages, in intel_alloc_iova()
2633 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); in intel_alloc_iova()
2645 struct dmar_domain *domain; in __get_valid_domain_for_dev() local
2648 domain = get_domain_for_dev(pdev, in __get_valid_domain_for_dev()
2650 if (!domain) { in __get_valid_domain_for_dev()
2652 "Allocating domain for %s failed", pci_name(pdev)); in __get_valid_domain_for_dev()
2658 ret = domain_context_mapping(domain, pdev, in __get_valid_domain_for_dev()
2662 "Domain context map for %s failed", in __get_valid_domain_for_dev()
2668 return domain; in __get_valid_domain_for_dev()
2675 /* No lock here, assumes no domain exit in normal case */ in get_valid_domain_for_dev()
2678 return info->domain; in get_valid_domain_for_dev()
2744 struct dmar_domain *domain; in __intel_map_single() local
2757 domain = get_valid_domain_for_dev(pdev); in __intel_map_single()
2758 if (!domain) in __intel_map_single()
2761 iommu = domain_get_iommu(domain); in __intel_map_single()
2764 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); in __intel_map_single()
2783 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), in __intel_map_single()
2790 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); in __intel_map_single()
2800 __free_iova(&domain->iovad, iova); in __intel_map_single()
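
The __intel_map_single() fragments above are the heart of the streaming-DMA map path: find a valid domain for the device, allocate an IOVA range from its allocator, populate the page tables, then do a page-selective IOTLB flush, freeing the IOVA again on failure. A condensed sketch of that order of operations built from the calls visible in the listing; the prot computation, alignment handling and the page-offset math in the return value are simplified assumptions.

    /* Hedged sketch of the __intel_map_single() flow: domain lookup ->
     * IOVA allocation -> pfn mapping -> IOTLB flush.  Error paths and the
     * page-offset handling are simplified. */
    static dma_addr_t sketch_map_single(struct device *hwdev, phys_addr_t paddr,
                                        unsigned long nrpages, u64 dma_mask,
                                        int prot)
    {
            struct pci_dev *pdev = to_pci_dev(hwdev);
            struct dmar_domain *domain;
            struct intel_iommu *iommu;
            struct iova *iova;

            domain = get_valid_domain_for_dev(pdev);
            if (!domain)
                    return 0;
            iommu = domain_get_iommu(domain);

            iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(nrpages),
                                    dma_mask);
            if (!iova)
                    return 0;

            if (domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
                                   paddr >> VTD_PAGE_SHIFT, nrpages, prot)) {
                    __free_iova(&domain->iovad, iova);
                    return 0;
            }

            /* page-selective invalidation of the freshly mapped range */
            iommu_flush_iotlb_psi(iommu, domain->id,
                                  mm_to_dma_pfn(iova->pfn_lo), nrpages, 1);

            return (dma_addr_t)iova->pfn_lo << VTD_PAGE_SHIFT;
    }
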
2837 struct dmar_domain *domain = deferred_flush[i].domain[j]; in flush_unmaps() local
2841 iommu_flush_iotlb_psi(iommu, domain->id, in flush_unmaps()
2845 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], in flush_unmaps()
2848 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); in flush_unmaps()
2879 deferred_flush[iommu_id].domain[next] = dom; in add_unmap()
2896 struct dmar_domain *domain; in intel_unmap_page() local
2904 domain = find_domain(pdev); in intel_unmap_page()
2905 BUG_ON(!domain); in intel_unmap_page()
2907 iommu = domain_get_iommu(domain); in intel_unmap_page()
2909 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); in intel_unmap_page()
2921 dma_pte_clear_range(domain, start_pfn, last_pfn); in intel_unmap_page()
2924 dma_pte_free_pagetable(domain, start_pfn, last_pfn); in intel_unmap_page()
2927 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap_page()
2930 __free_iova(&domain->iovad, iova); in intel_unmap_page()
2932 add_unmap(domain, iova); in intel_unmap_page()
2989 struct dmar_domain *domain; in intel_unmap_sg() local
2997 domain = find_domain(pdev); in intel_unmap_sg()
2998 BUG_ON(!domain); in intel_unmap_sg()
3000 iommu = domain_get_iommu(domain); in intel_unmap_sg()
3002 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); in intel_unmap_sg()
3011 dma_pte_clear_range(domain, start_pfn, last_pfn); in intel_unmap_sg()
3014 dma_pte_free_pagetable(domain, start_pfn, last_pfn); in intel_unmap_sg()
3017 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap_sg()
3020 __free_iova(&domain->iovad, iova); in intel_unmap_sg()
3022 add_unmap(domain, iova); in intel_unmap_sg()
3049 struct dmar_domain *domain; in intel_map_sg() local
3062 domain = get_valid_domain_for_dev(pdev); in intel_map_sg()
3063 if (!domain) in intel_map_sg()
3066 iommu = domain_get_iommu(domain); in intel_map_sg()
3071 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), in intel_map_sg()
3090 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); in intel_map_sg()
3093 dma_pte_clear_range(domain, start_vpfn, in intel_map_sg()
3096 dma_pte_free_pagetable(domain, start_vpfn, in intel_map_sg()
3099 __free_iova(&domain->iovad, iova); in intel_map_sg()
3105 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); in intel_map_sg()
3567 * Added device is not attached to its DMAR domain here yet. That will happen
3575 struct dmar_domain *domain; in device_notifier() local
3580 domain = find_domain(pdev); in device_notifier()
3581 if (!domain) in device_notifier()
3585 domain_remove_one_dev_info(domain, pdev); in device_notifier()
3587 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && in device_notifier()
3588 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && in device_notifier()
3589 list_empty(&domain->devices)) in device_notifier()
3590 domain_exit(domain); in device_notifier()
3698 static void domain_remove_one_dev_info(struct dmar_domain *domain, in domain_remove_one_dev_info() argument
3713 list_for_each_safe(entry, tmp, &domain->devices) { in domain_remove_one_dev_info()
3738 * owned by this domain, clear this iommu in iommu_bmp in domain_remove_one_dev_info()
3750 spin_lock_irqsave(&domain->iommu_lock, tmp_flags); in domain_remove_one_dev_info()
3751 clear_bit(iommu->seq_id, &domain->iommu_bmp); in domain_remove_one_dev_info()
3752 domain->iommu_count--; in domain_remove_one_dev_info()
3753 domain_update_iommu_cap(domain); in domain_remove_one_dev_info()
3754 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); in domain_remove_one_dev_info()
3756 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && in domain_remove_one_dev_info()
3757 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) { in domain_remove_one_dev_info()
3759 clear_bit(domain->id, iommu->domain_ids); in domain_remove_one_dev_info()
3760 iommu->domains[domain->id] = NULL; in domain_remove_one_dev_info()
3766 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) in vm_domain_remove_all_dev_info() argument
3773 while (!list_empty(&domain->devices)) { in vm_domain_remove_all_dev_info()
3774 info = list_entry(domain->devices.next, in vm_domain_remove_all_dev_info()
3791 spin_lock_irqsave(&domain->iommu_lock, flags2); in vm_domain_remove_all_dev_info()
3793 &domain->iommu_bmp)) { in vm_domain_remove_all_dev_info()
3794 domain->iommu_count--; in vm_domain_remove_all_dev_info()
3795 domain_update_iommu_cap(domain); in vm_domain_remove_all_dev_info()
3797 spin_unlock_irqrestore(&domain->iommu_lock, flags2); in vm_domain_remove_all_dev_info()
3805 /* domain id for virtual machine, it won't be set in context */
3810 struct dmar_domain *domain; in iommu_alloc_vm_domain() local
3812 domain = alloc_domain_mem(); in iommu_alloc_vm_domain()
3813 if (!domain) in iommu_alloc_vm_domain()
3816 domain->id = vm_domid++; in iommu_alloc_vm_domain()
3817 domain->nid = -1; in iommu_alloc_vm_domain()
3818 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); in iommu_alloc_vm_domain()
3819 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; in iommu_alloc_vm_domain()
3821 return domain; in iommu_alloc_vm_domain()
3824 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init() argument
3828 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); in md_domain_init()
3829 spin_lock_init(&domain->iommu_lock); in md_domain_init()
3831 domain_reserve_special_ranges(domain); in md_domain_init()
3834 domain->gaw = guest_width; in md_domain_init()
3836 domain->agaw = width_to_agaw(adjust_width); in md_domain_init()
3838 INIT_LIST_HEAD(&domain->devices); in md_domain_init()
3840 domain->iommu_count = 0; in md_domain_init()
3841 domain->iommu_coherency = 0; in md_domain_init()
3842 domain->iommu_snooping = 0; in md_domain_init()
3843 domain->iommu_superpage = 0; in md_domain_init()
3844 domain->max_addr = 0; in md_domain_init()
3845 domain->nid = -1; in md_domain_init()
3848 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in md_domain_init()
3849 if (!domain->pgd) in md_domain_init()
3851 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in md_domain_init()
3855 static void iommu_free_vm_domain(struct dmar_domain *domain) in iommu_free_vm_domain() argument
3870 if (iommu->domains[i] == domain) { in iommu_free_vm_domain()
3881 static void vm_domain_exit(struct dmar_domain *domain) in vm_domain_exit() argument
3883  /* Domain 0 is reserved, so don't process it */ in vm_domain_exit()
3884 if (!domain) in vm_domain_exit()
3887 vm_domain_remove_all_dev_info(domain); in vm_domain_exit()
3889 put_iova_domain(&domain->iovad); in vm_domain_exit()
3892 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in vm_domain_exit()
3895 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in vm_domain_exit()
3897 iommu_free_vm_domain(domain); in vm_domain_exit()
3898 free_domain_mem(domain); in vm_domain_exit()
3901 static int intel_iommu_domain_init(struct iommu_domain *domain) in intel_iommu_domain_init() argument
3918 domain->priv = dmar_domain; in intel_iommu_domain_init()
3923 static void intel_iommu_domain_destroy(struct iommu_domain *domain) in intel_iommu_domain_destroy() argument
3925 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_domain_destroy()
3927 domain->priv = NULL; in intel_iommu_domain_destroy()
3931 static int intel_iommu_attach_device(struct iommu_domain *domain, in intel_iommu_attach_device() argument
3934 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_attach_device()
3989 static void intel_iommu_detach_device(struct iommu_domain *domain, in intel_iommu_detach_device() argument
3992 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_detach_device()
3998 static int intel_iommu_map(struct iommu_domain *domain, in intel_iommu_map() argument
4002 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_map()
4036 static size_t intel_iommu_unmap(struct iommu_domain *domain, in intel_iommu_unmap() argument
4039 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_unmap()
4051 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, in intel_iommu_iova_to_phys() argument
4054 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_iova_to_phys()
4065 static int intel_iommu_domain_has_cap(struct iommu_domain *domain, in intel_iommu_domain_has_cap() argument
4068 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_domain_has_cap()
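
The intel_iommu_domain_init/destroy/attach/detach/map/unmap/iova_to_phys/domain_has_cap callbacks quoted above are the glue to the generic IOMMU API. For a kernel of this vintage they would be collected into a struct iommu_ops and registered for the PCI bus; the field names below follow that era's API, while the registration call and the pgsize_bitmap value are assumptions not shown in the listing.

    /* Hedged sketch of how the callbacks above plug into the generic IOMMU
     * API; the pgsize_bitmap value and bus_set_iommu() registration are
     * assumptions. */
    static struct iommu_ops sketch_intel_iommu_ops = {
            .domain_init    = intel_iommu_domain_init,
            .domain_destroy = intel_iommu_domain_destroy,
            .attach_dev     = intel_iommu_attach_device,
            .detach_dev     = intel_iommu_detach_device,
            .map            = intel_iommu_map,
            .unmap          = intel_iommu_unmap,
            .iova_to_phys   = intel_iommu_iova_to_phys,
            .domain_has_cap = intel_iommu_domain_has_cap,
            .pgsize_bitmap  = VTD_PAGE_SIZE,        /* assumed: 4KiB granule only */
    };

    /* registration, assumed: bus_set_iommu(&pci_bus_type, &sketch_intel_iommu_ops); */
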