Lines Matching full:domain
76 * Domain for untranslated devices - only allocated
93 static void update_domain(struct protection_domain *domain);
204 * find the domain for a specific device
472 "domain=0x%04x address=0x%016llx flags=0x%04x]\n", in iommu_print_event()
484 "domain=0x%04x address=0x%016llx flags=0x%04x]\n", in iommu_print_event()
689 * TLB entries for this domain in build_inv_iommu_pages()
720 * TLB entries for this domain in build_inv_iotlb_pages()
973 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
980 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
983 if (!domain->dev_iommu[i]) in __domain_flush_pages()
987 * Devices of this domain are behind this IOMMU in __domain_flush_pages()
993 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1004 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1007 __domain_flush_pages(domain, address, size, 0); in domain_flush_pages()
1010 /* Flush the whole IO/TLB for a given protection domain */
1011 static void domain_flush_tlb(struct protection_domain *domain) in domain_flush_tlb() argument
1013 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); in domain_flush_tlb()
1016 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1017 static void domain_flush_tlb_pde(struct protection_domain *domain) in domain_flush_tlb_pde() argument
1019 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in domain_flush_tlb_pde()
1022 static void domain_flush_complete(struct protection_domain *domain) in domain_flush_complete() argument
1027 if (!domain->dev_iommu[i]) in domain_flush_complete()
1031 * Devices of this domain are behind this IOMMU in domain_flush_complete()
1040 * This function flushes the DTEs for all devices in the domain
1042 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1046 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
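
The cluster above (__domain_flush_pages() through domain_flush_devices()) shows the two-step flush pattern: queue an invalidation on every IOMMU that actually has devices of this domain behind it, then invalidate the device IOTLB of ATS-enabled devices. Below is a minimal user-space sketch of that pattern; the structs and names are simplified stand-ins for the kernel's protection_domain and iommu_dev_data, not the real API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_IOMMUS 4

    struct dev_data {
        int devid;
        bool ats_enabled;
        struct dev_data *next;
    };

    struct protection_domain {
        int id;
        int dev_iommu[MAX_IOMMUS];          /* device count per IOMMU */
        struct dev_data *dev_list;
    };

    static void flush_pages_sketch(struct protection_domain *d,
                                   unsigned long address, size_t size)
    {
        /* One INVALIDATE_IOMMU_PAGES command per IOMMU with devices here. */
        for (int i = 0; i < MAX_IOMMUS; i++) {
            if (!d->dev_iommu[i])
                continue;
            printf("IOMMU %d: invalidate domain %d, iova 0x%lx, size %zu\n",
                   i, d->id, address, size);
        }

        /* ATS-enabled devices cache translations too (device IOTLB). */
        for (struct dev_data *dd = d->dev_list; dd; dd = dd->next) {
            if (dd->ats_enabled)
                printf("device 0x%x: invalidate IOTLB\n", dd->devid);
        }
    }
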
1062 static bool increase_address_space(struct protection_domain *domain, in increase_address_space() argument
1067 if (domain->mode == PAGE_MODE_6_LEVEL) in increase_address_space()
1075 *pte = PM_LEVEL_PDE(domain->mode, in increase_address_space()
1076 virt_to_phys(domain->pt_root)); in increase_address_space()
1077 domain->pt_root = pte; in increase_address_space()
1078 domain->mode += 1; in increase_address_space()
1079 domain->updated = true; in increase_address_space()
1084 static u64 *alloc_pte(struct protection_domain *domain, in alloc_pte() argument
1095 while (address > PM_LEVEL_SIZE(domain->mode)) in alloc_pte()
1096 increase_address_space(domain, gfp); in alloc_pte()
1098 level = domain->mode - 1; in alloc_pte()
1099 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in alloc_pte()
1132 static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) in fetch_pte() argument
1137 if (address > PM_LEVEL_SIZE(domain->mode)) in fetch_pte()
1140 level = domain->mode - 1; in fetch_pte()
1141 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in fetch_pte()
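
increase_address_space(), alloc_pte() and fetch_pte() above all revolve around the same index arithmetic: 4K pages and 512 entries per table, so each level consumes 9 address bits, and the tree grows by one level whenever an address does not fit the current mode. A self-contained sketch of that arithmetic; PT_SHIFT/PT_INDEX/PT_MAX_ADDR are illustrative macros modeled on the layout, not the kernel's PM_LEVEL_* definitions.

    #include <stdio.h>

    #define PT_SHIFT(level)       (12 + 9 * (level))    /* 4K pages, 512 entries */
    #define PT_INDEX(level, addr) (((addr) >> PT_SHIFT(level)) & 0x1ffULL)
    #define PT_MAX_ADDR(mode)     ((1ULL << PT_SHIFT(mode)) - 1)

    int main(void)
    {
        unsigned long long iova = 0x8123456789000ULL;   /* too big for 3 levels */
        int mode = 3;                                   /* like PAGE_MODE_3_LEVEL */

        /* increase_address_space(): add levels until the address fits (max 6). */
        while (mode < 6 && iova > PT_MAX_ADDR(mode)) {
            mode++;
            printf("grew page table to mode %d\n", mode);
        }

        /* alloc_pte()/fetch_pte(): walk from the top level down to level 0. */
        for (int level = mode - 1; level >= 0; level--)
            printf("level %d: index %llu\n", level, PT_INDEX(level, iova));

        return 0;
    }
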
1289 * dma_ops domain.
1299 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, in dma_ops_unity_map()
1319 * the default DMA domain of that IOMMU if necessary.
1370 * called with domain->lock held
1395 * aperture in case of dma_ops domain allocation or address allocation
1428 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, in alloc_new_range()
1477 u64 *pte = fetch_pte(&dma_dom->domain, i); in alloc_new_range()
1484 update_domain(&dma_dom->domain); in alloc_new_range()
1489 update_domain(&dma_dom->domain); in alloc_new_range()
1577 * called with domain->lock held
1604 * The next functions belong to the domain allocation. A domain is
1605 * allocated for every IOMMU as the default domain. If device isolation
1606 is enabled, every device gets its own domain. The most important thing
1613 * This function adds a protection domain to the global protection domain list
1615 static void add_domain_to_list(struct protection_domain *domain) in add_domain_to_list() argument
1620 list_add(&domain->list, &amd_iommu_pd_list); in add_domain_to_list()
1625 * This function removes a protection domain from the global
1626 * protection domain list
1628 static void del_domain_from_list(struct protection_domain *domain) in del_domain_from_list() argument
1633 list_del(&domain->list); in del_domain_from_list()
1664 static void free_pagetable(struct protection_domain *domain) in free_pagetable() argument
1669 p1 = domain->pt_root; in free_pagetable()
1691 domain->pt_root = NULL; in free_pagetable()
1724 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1726 if (domain->glx == 2) in free_gcr3_table()
1727 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1728 else if (domain->glx == 1) in free_gcr3_table()
1729 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1730 else if (domain->glx != 0) in free_gcr3_table()
1733 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
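
free_gcr3_table() picks a teardown routine based on domain->glx (the number of GCR3 table levels) and frees the root table last. Below is a user-space model of that shape, using malloc/free-style tables instead of the kernel page allocator; all names are hypothetical.

    #include <stdlib.h>

    #define ENTRIES 512                     /* one 4K table of 8-byte entries */

    /* Free the leaf tables referenced by a level-1 table. */
    static void free_level1(void **tbl)
    {
        for (int i = 0; i < ENTRIES; i++)
            free(tbl[i]);
    }

    /* Free the level-1 tables (and their leaves) below a level-2 table. */
    static void free_level2(void **tbl)
    {
        for (int i = 0; i < ENTRIES; i++) {
            if (!tbl[i])
                continue;
            free_level1((void **)tbl[i]);
            free(tbl[i]);
        }
    }

    static void free_gcr3_table_sketch(void **gcr3_tbl, int glx)
    {
        if (glx == 2)
            free_level2(gcr3_tbl);
        else if (glx == 1)
            free_level1(gcr3_tbl);
        /* glx == 0: nothing hangs below the root */

        free(gcr3_tbl);                     /* root table is freed last, as above */
    }
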
1737 * Free a domain, only used if something went wrong in the
1747 del_domain_from_list(&dom->domain); in dma_ops_domain_free()
1749 free_pagetable(&dom->domain); in dma_ops_domain_free()
1762 * Allocates a new protection domain usable for the dma_ops functions.
1774 spin_lock_init(&dma_dom->domain.lock); in dma_ops_domain_alloc()
1776 dma_dom->domain.id = domain_id_alloc(); in dma_ops_domain_alloc()
1777 if (dma_dom->domain.id == 0) in dma_ops_domain_alloc()
1779 INIT_LIST_HEAD(&dma_dom->domain.dev_list); in dma_ops_domain_alloc()
1780 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; in dma_ops_domain_alloc()
1781 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
1782 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
1783 dma_dom->domain.priv = dma_dom; in dma_ops_domain_alloc()
1784 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
1790 add_domain_to_list(&dma_dom->domain); in dma_ops_domain_alloc()
1812 * little helper function to check whether a given protection domain is a
1813 * dma_ops domain
1815 static bool dma_ops_domain(struct protection_domain *domain) in dma_ops_domain() argument
1817 return domain->flags & PD_DMA_OPS_MASK; in dma_ops_domain()
1820 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) in set_dte_entry() argument
1825 if (domain->mode != PAGE_MODE_NONE) in set_dte_entry()
1826 pte_root = virt_to_phys(domain->pt_root); in set_dte_entry()
1828 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1837 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1838 u64 gcr3 = __pa(domain->gcr3_tbl); in set_dte_entry()
1839 u64 glx = domain->glx; in set_dte_entry()
1864 flags |= domain->id; in set_dte_entry()
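
set_dte_entry() packs the page-table root, the paging mode and the valid/translation bits (plus GCR3 information for IOMMUv2 domains) into one device-table entry. A sketch of that packing with illustrative bit positions only; the real DTE layout is defined by the AMD IOMMU specification.

    #include <stdint.h>
    #include <stdio.h>

    #define DTE_VALID       (1ULL << 0)
    #define DTE_TRANSLATE   (1ULL << 1)
    #define DTE_MODE_SHIFT  9
    #define DTE_MODE_MASK   0x7ULL

    static uint64_t make_dte(uint64_t pt_root_phys, unsigned int mode)
    {
        uint64_t dte = 0;

        if (mode)                            /* PAGE_MODE_NONE keeps no root */
            dte |= pt_root_phys & ~0xfffULL; /* root pointer is page aligned */

        dte |= ((uint64_t)mode & DTE_MODE_MASK) << DTE_MODE_SHIFT;
        dte |= DTE_VALID | DTE_TRANSLATE;    /* entry valid, translation on */

        return dte;
    }

    int main(void)
    {
        printf("dte = 0x%016llx\n",
               (unsigned long long)make_dte(0x12345000ULL, 3));
        return 0;
    }
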
1880 struct protection_domain *domain) in do_attach() argument
1889 dev_data->domain = domain; in do_attach()
1890 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1891 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
1894 domain->dev_iommu[iommu->index] += 1; in do_attach()
1895 domain->dev_cnt += 1; in do_attach()
1908 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
1909 dev_data->domain->dev_cnt -= 1; in do_detach()
1912 dev_data->domain = NULL; in do_detach()
1921 * If a device is not yet associated with a domain, this function does
1925 struct protection_domain *domain) in __attach_device() argument
1929 /* lock domain */ in __attach_device()
1930 spin_lock(&domain->lock); in __attach_device()
1937 if (alias_data->domain != NULL && in __attach_device()
1938 alias_data->domain != domain) in __attach_device()
1941 if (dev_data->domain != NULL && in __attach_device()
1942 dev_data->domain != domain) in __attach_device()
1946 if (alias_data->domain == NULL) in __attach_device()
1947 do_attach(alias_data, domain); in __attach_device()
1952 if (dev_data->domain == NULL) in __attach_device()
1953 do_attach(dev_data, domain); in __attach_device()
1962 spin_unlock(&domain->lock); in __attach_device()
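
__attach_device() takes the domain lock and refuses the attach if either the device or its PCI alias is already bound to a different domain; the alias is attached first, then the device itself. A compact model of those checks with hypothetical types and the locking omitted:

    #include <stdbool.h>
    #include <stddef.h>

    struct domain;                          /* opaque in this sketch */

    struct dev_data {
        struct domain *domain;              /* current binding, NULL if none */
        struct dev_data *alias;             /* PCI alias, NULL if none */
    };

    static bool can_attach(struct dev_data *dd, struct domain *dom)
    {
        if (dd->alias && dd->alias->domain && dd->alias->domain != dom)
            return false;                   /* alias bound to another domain */
        if (dd->domain && dd->domain != dom)
            return false;                   /* device bound to another domain */
        return true;
    }

    static bool attach_sketch(struct dev_data *dd, struct domain *dom)
    {
        if (!can_attach(dd, dom))
            return false;                   /* __attach_device() would fail here */

        /* Attach the alias first, then the device itself, as above. */
        if (dd->alias && !dd->alias->domain)
            dd->alias->domain = dom;
        if (!dd->domain)
            dd->domain = dom;

        return true;
    }
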
2055 * If a device is not yet associated with a domain, this function does
2059 struct protection_domain *domain) in attach_device() argument
2068 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2085 ret = __attach_device(dev_data, domain); in attach_device()
2093 domain_flush_tlb_pde(domain); in attach_device()
2099 * Removes a device from a protection domain (unlocked)
2103 struct protection_domain *domain; in __detach_device() local
2106 BUG_ON(!dev_data->domain); in __detach_device()
2108 domain = dev_data->domain; in __detach_device()
2110 spin_lock_irqsave(&domain->lock, flags); in __detach_device()
2122 spin_unlock_irqrestore(&domain->lock, flags); in __detach_device()
2126 * passthrough domain if it is detached from any other domain. in __detach_device()
2130 (dev_data->domain == NULL && domain != pt_domain)) in __detach_device()
2135 * Removes a device from a protection domain (with devtable_lock held)
2139 struct protection_domain *domain; in detach_device() local
2144 domain = dev_data->domain; in detach_device()
2151 if (domain->flags & PD_IOMMUV2_MASK) in detach_device()
2160 * Find out the protection domain structure for a given PCI device. This
2171 if (dev_data->domain) in domain_for_device()
2172 return dev_data->domain; in domain_for_device()
2178 if (alias_data->domain != NULL) { in domain_for_device()
2179 __attach_device(dev_data, alias_data->domain); in domain_for_device()
2180 dom = alias_data->domain; in domain_for_device()
2192 struct protection_domain *domain; in device_change_notifier() local
2209 domain = domain_for_device(dev); in device_change_notifier()
2211 if (!domain) in device_change_notifier()
2221 domain = domain_for_device(dev); in device_change_notifier()
2223 /* allocate a protection domain if a device is added */ in device_change_notifier()
2268 * finds the corresponding IOMMU, the protection domain and the
2270 * If the device is not yet associated with a domain this is also done
2275 struct protection_domain *domain; in get_domain() local
2282 domain = domain_for_device(dev); in get_domain()
2283 if (domain != NULL && !dma_ops_domain(domain)) in get_domain()
2286 if (domain != NULL) in get_domain()
2287 return domain; in get_domain()
2293 attach_device(dev, &dma_dom->domain); in get_domain()
2294 DUMP_printk("Using protection domain %d for device %s\n", in get_domain()
2295 dma_dom->domain.id, dev_name(dev)); in get_domain()
2297 return &dma_dom->domain; in get_domain()
2300 static void update_device_table(struct protection_domain *domain) in update_device_table() argument
2304 list_for_each_entry(dev_data, &domain->dev_list, list) in update_device_table()
2305 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
2308 static void update_domain(struct protection_domain *domain) in update_domain() argument
2310 if (!domain->updated) in update_domain()
2313 update_device_table(domain); in update_domain()
2315 domain_flush_devices(domain); in update_domain()
2316 domain_flush_tlb_pde(domain); in update_domain()
2318 domain->updated = false; in update_domain()
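
update_domain() is the flush side of the "updated" flag set by increase_address_space() and amd_iommu_domain_direct_map(): once the page-table layout has changed, every DTE is rewritten and both the cached device-table entries and the IO/TLB (including page-directory entries) are invalidated. Sketched below with hypothetical helper names standing in for the kernel functions.

    struct sketch_domain {
        int updated;                        /* set by page-table changes */
    };

    static void rewrite_device_table(struct sketch_domain *d) { (void)d; }
    static void flush_device_dtes(struct sketch_domain *d)    { (void)d; }
    static void flush_tlb_with_pde(struct sketch_domain *d)   { (void)d; }

    static void update_domain_sketch(struct sketch_domain *d)
    {
        if (!d->updated)
            return;                         /* nothing changed, nothing to do */

        rewrite_device_table(d);            /* new mode/root into every DTE */
        flush_device_dtes(d);               /* drop cached DTEs in the IOMMUs */
        flush_tlb_with_pde(d);              /* drop translations incl. PDEs */

        d->updated = 0;
    }
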
2336 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, in dma_ops_get_pte()
2342 update_domain(&dom->domain); in dma_ops_get_pte()
2349 * the given address in the DMA address space for the domain.
2413 * Must be called with the domain lock held.
2475 domain_flush_tlb(&dma_dom->domain); in __map_single()
2478 domain_flush_pages(&dma_dom->domain, address, size); in __map_single()
2497 * the domain lock held too
2527 domain_flush_pages(&dma_dom->domain, flush_addr, size); in __unmap_single()
2541 struct protection_domain *domain; in map_page() local
2548 domain = get_domain(dev); in map_page()
2549 if (PTR_ERR(domain) == -EINVAL) in map_page()
2551 else if (IS_ERR(domain)) in map_page()
2556 spin_lock_irqsave(&domain->lock, flags); in map_page()
2558 addr = __map_single(dev, domain->priv, paddr, size, dir, false, in map_page()
2563 domain_flush_complete(domain); in map_page()
2566 spin_unlock_irqrestore(&domain->lock, flags); in map_page()
2578 struct protection_domain *domain; in unmap_page() local
2582 domain = get_domain(dev); in unmap_page()
2583 if (IS_ERR(domain)) in unmap_page()
2586 spin_lock_irqsave(&domain->lock, flags); in unmap_page()
2588 __unmap_single(domain->priv, dma_addr, size, dir); in unmap_page()
2590 domain_flush_complete(domain); in unmap_page()
2592 spin_unlock_irqrestore(&domain->lock, flags); in unmap_page()
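
map_page() and unmap_page() are the dma_map_ops callbacks that back the generic DMA API on AMD IOMMU systems. For context, a hedged driver-side example of the generic API these callbacks serve; this is standard dma_map_single()/dma_unmap_single() usage, not code from this file.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int send_buffer(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
            return -ENOMEM;

        /* ... program "dma" into the hardware and wait for completion ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
    }
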
2622 struct protection_domain *domain; in map_sg() local
2631 domain = get_domain(dev); in map_sg()
2632 if (PTR_ERR(domain) == -EINVAL) in map_sg()
2634 else if (IS_ERR(domain)) in map_sg()
2639 spin_lock_irqsave(&domain->lock, flags); in map_sg()
2644 s->dma_address = __map_single(dev, domain->priv, in map_sg()
2655 domain_flush_complete(domain); in map_sg()
2658 spin_unlock_irqrestore(&domain->lock, flags); in map_sg()
2664 __unmap_single(domain->priv, s->dma_address, in map_sg()
2683 struct protection_domain *domain; in unmap_sg() local
2689 domain = get_domain(dev); in unmap_sg()
2690 if (IS_ERR(domain)) in unmap_sg()
2693 spin_lock_irqsave(&domain->lock, flags); in unmap_sg()
2696 __unmap_single(domain->priv, s->dma_address, in unmap_sg()
2701 domain_flush_complete(domain); in unmap_sg()
2703 spin_unlock_irqrestore(&domain->lock, flags); in unmap_sg()
2714 struct protection_domain *domain; in alloc_coherent() local
2720 domain = get_domain(dev); in alloc_coherent()
2721 if (PTR_ERR(domain) == -EINVAL) { in alloc_coherent()
2725 } else if (IS_ERR(domain)) in alloc_coherent()
2741 spin_lock_irqsave(&domain->lock, flags); in alloc_coherent()
2743 *dma_addr = __map_single(dev, domain->priv, paddr, in alloc_coherent()
2747 spin_unlock_irqrestore(&domain->lock, flags); in alloc_coherent()
2751 domain_flush_complete(domain); in alloc_coherent()
2753 spin_unlock_irqrestore(&domain->lock, flags); in alloc_coherent()
2771 struct protection_domain *domain; in free_coherent() local
2775 domain = get_domain(dev); in free_coherent()
2776 if (IS_ERR(domain)) in free_coherent()
2779 spin_lock_irqsave(&domain->lock, flags); in free_coherent()
2781 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); in free_coherent()
2783 domain_flush_complete(domain); in free_coherent()
2785 spin_unlock_irqrestore(&domain->lock, flags); in free_coherent()
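
alloc_coherent() and free_coherent() likewise serve the coherent half of the DMA API. A hedged driver-side counterpart using dma_alloc_coherent()/dma_free_coherent(); again, this is generic API usage, not code from this driver.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static void *ring;
    static dma_addr_t ring_dma;

    static int setup_ring(struct device *dev, size_t size)
    {
        /* alloc_coherent() above backs this call on AMD IOMMU systems */
        ring = dma_alloc_coherent(dev, size, &ring_dma, GFP_KERNEL);
        if (!ring)
            return -ENOMEM;
        return 0;
    }

    static void teardown_ring(struct device *dev, size_t size)
    {
        dma_free_coherent(dev, size, ring, ring_dma);
    }
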
2822 /* Make sure passthrough domain is allocated */ in prealloc_protection_domains()
2826 pr_info("AMD-Vi: Using passthrough domain for device %s\n", in prealloc_protection_domains()
2830 /* Is there already any domain for it? */ in prealloc_protection_domains()
2842 attach_device(&dev->dev, &dma_dom->domain); in prealloc_protection_domains()
2899 * first allocate a default protection domain for every IOMMU we in amd_iommu_init_dma_ops()
2901 * protection domain will be assigned to the default one. in amd_iommu_init_dma_ops()
2907 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; in amd_iommu_init_dma_ops()
2947 * like protection domain handling and assignment of devices to domains
2952 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
2959 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { in cleanup_domain()
2967 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
2969 if (!domain) in protection_domain_free()
2972 del_domain_from_list(domain); in protection_domain_free()
2974 if (domain->id) in protection_domain_free()
2975 domain_id_free(domain->id); in protection_domain_free()
2977 kfree(domain); in protection_domain_free()
2982 struct protection_domain *domain; in protection_domain_alloc() local
2984 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
2985 if (!domain) in protection_domain_alloc()
2988 spin_lock_init(&domain->lock); in protection_domain_alloc()
2989 mutex_init(&domain->api_lock); in protection_domain_alloc()
2990 domain->id = domain_id_alloc(); in protection_domain_alloc()
2991 if (!domain->id) in protection_domain_alloc()
2993 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_alloc()
2995 add_domain_to_list(domain); in protection_domain_alloc()
2997 return domain; in protection_domain_alloc()
3000 kfree(domain); in protection_domain_alloc()
3010 /* allocate passthrough domain */ in alloc_passthrough_domain()
3021 struct protection_domain *domain; in amd_iommu_domain_init() local
3023 domain = protection_domain_alloc(); in amd_iommu_domain_init()
3024 if (!domain) in amd_iommu_domain_init()
3027 domain->mode = PAGE_MODE_3_LEVEL; in amd_iommu_domain_init()
3028 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); in amd_iommu_domain_init()
3029 if (!domain->pt_root) in amd_iommu_domain_init()
3032 domain->iommu_domain = dom; in amd_iommu_domain_init()
3034 dom->priv = domain; in amd_iommu_domain_init()
3039 protection_domain_free(domain); in amd_iommu_domain_init()
3046 struct protection_domain *domain = dom->priv; in amd_iommu_domain_destroy() local
3048 if (!domain) in amd_iommu_domain_destroy()
3051 if (domain->dev_cnt > 0) in amd_iommu_domain_destroy()
3052 cleanup_domain(domain); in amd_iommu_domain_destroy()
3054 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_destroy()
3056 if (domain->mode != PAGE_MODE_NONE) in amd_iommu_domain_destroy()
3057 free_pagetable(domain); in amd_iommu_domain_destroy()
3059 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_destroy()
3060 free_gcr3_table(domain); in amd_iommu_domain_destroy()
3062 protection_domain_free(domain); in amd_iommu_domain_destroy()
3079 if (dev_data->domain != NULL) in amd_iommu_detach_device()
3092 struct protection_domain *domain = dom->priv; in amd_iommu_attach_device() local
3106 if (dev_data->domain) in amd_iommu_attach_device()
3109 ret = attach_device(dev, domain); in amd_iommu_attach_device()
3119 struct protection_domain *domain = dom->priv; in amd_iommu_map() local
3123 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_map()
3131 mutex_lock(&domain->api_lock); in amd_iommu_map()
3132 ret = iommu_map_page(domain, iova, paddr, prot, page_size); in amd_iommu_map()
3133 mutex_unlock(&domain->api_lock); in amd_iommu_map()
3141 struct protection_domain *domain = dom->priv; in amd_iommu_unmap() local
3144 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_unmap()
3147 mutex_lock(&domain->api_lock); in amd_iommu_unmap()
3148 unmap_size = iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
3149 mutex_unlock(&domain->api_lock); in amd_iommu_unmap()
3151 domain_flush_tlb_pde(domain); in amd_iommu_unmap()
3159 struct protection_domain *domain = dom->priv; in amd_iommu_iova_to_phys() local
3164 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_iova_to_phys()
3167 pte = fetch_pte(domain, iova); in amd_iommu_iova_to_phys()
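
amd_iommu_iova_to_phys() uses fetch_pte() to find the leaf PTE and then combines the page frame from the PTE with the page-offset bits of the IOVA. A tiny sketch of that composition for a 4K leaf; the real function also handles larger page sizes, and the flag bits shown are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pte  = 0x00000000abcde007ULL;  /* frame 0xabcde000 plus flag bits */
        uint64_t iova = 0x00000000100123a8ULL;

        uint64_t paddr = (pte & ~0xfffULL) | (iova & 0xfffULL);

        printf("iova 0x%llx -> phys 0x%llx\n",
               (unsigned long long)iova, (unsigned long long)paddr);
        return 0;
    }
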
3183 static int amd_iommu_domain_has_cap(struct iommu_domain *domain, in amd_iommu_domain_has_cap() argument
3287 struct protection_domain *domain = dom->priv; in amd_iommu_domain_direct_map() local
3290 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
3293 domain->mode = PAGE_MODE_NONE; in amd_iommu_domain_direct_map()
3294 domain->updated = true; in amd_iommu_domain_direct_map()
3297 update_domain(domain); in amd_iommu_domain_direct_map()
3300 free_pagetable(domain); in amd_iommu_domain_direct_map()
3302 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
3308 struct protection_domain *domain = dom->priv; in amd_iommu_domain_enable_v2() local
3322 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
3326 * domain support IOMMUv2. Just force that the domain has no in amd_iommu_domain_enable_v2()
3330 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
3334 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
3335 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
3338 domain->glx = levels; in amd_iommu_domain_enable_v2()
3339 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
3340 domain->updated = true; in amd_iommu_domain_enable_v2()
3342 update_domain(domain); in amd_iommu_domain_enable_v2()
3347 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
3353 static int __flush_pasid(struct protection_domain *domain, int pasid, in __flush_pasid() argument
3360 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
3363 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
3370 if (domain->dev_iommu[i] == 0) in __flush_pasid()
3379 domain_flush_complete(domain); in __flush_pasid()
3382 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3400 domain_flush_complete(domain); in __flush_pasid()
3409 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, in __amd_iommu_flush_page() argument
3414 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
3420 struct protection_domain *domain = dom->priv; in amd_iommu_flush_page() local
3424 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
3425 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
3426 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
3432 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) in __amd_iommu_flush_tlb() argument
3436 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
3442 struct protection_domain *domain = dom->priv; in amd_iommu_flush_tlb() local
3446 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
3447 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
3448 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
3486 static int __set_gcr3(struct protection_domain *domain, int pasid, in __set_gcr3() argument
3491 if (domain->mode != PAGE_MODE_NONE) in __set_gcr3()
3494 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3500 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
3503 static int __clear_gcr3(struct protection_domain *domain, int pasid) in __clear_gcr3() argument
3507 if (domain->mode != PAGE_MODE_NONE) in __clear_gcr3()
3510 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3516 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
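
__set_gcr3() and __clear_gcr3() locate the per-PASID entry in the GCR3 table, store or clear the guest CR3 with a valid bit, and then flush the TLB for that PASID. Sketched below with an illustrative valid-bit layout; the real entry encoding is simplified here.

    #include <stdint.h>

    #define GCR3_VALID (1ULL << 0)

    /* Store a guest CR3 for one PASID; a TLB flush for that PASID follows. */
    static void set_gcr3_sketch(uint64_t *entry, uint64_t cr3)
    {
        *entry = (cr3 & ~0xfffULL) | GCR3_VALID;
    }

    /* Drop the binding again; the same per-PASID flush follows. */
    static void clear_gcr3_sketch(uint64_t *entry)
    {
        *entry = 0;
    }
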
3522 struct protection_domain *domain = dom->priv; in amd_iommu_domain_set_gcr3() local
3526 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3527 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
3528 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3536 struct protection_domain *domain = dom->priv; in amd_iommu_domain_clear_gcr3() local
3540 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3541 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
3542 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3569 struct protection_domain *domain; in amd_iommu_get_v2_domain() local
3571 domain = get_domain(&pdev->dev); in amd_iommu_get_v2_domain()
3572 if (IS_ERR(domain)) in amd_iommu_get_v2_domain()
3576 if (!(domain->flags & PD_IOMMUV2_MASK)) in amd_iommu_get_v2_domain()
3579 return domain->iommu_domain; in amd_iommu_get_v2_domain()