Lines Matching +full:dte +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
16 #include <linux/pci-ats.h>
21 #include <linux/dma-map-ops.h>
22 #include <linux/dma-direct.h>
23 #include <linux/dma-iommu.h>
24 #include <linux/iommu-helper.h>
26 #include <linux/amd-iommu.h>
47 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
83 * Domain for untranslated devices - only allocated
89 int amd_iommu_max_glx_val = -1;
125 return -ENODEV; in get_acpihid_device_id()
128 if (acpi_dev_hid_uid_match(adev, p->hid, in get_acpihid_device_id()
129 p->uid[0] ? p->uid : NULL)) { in get_acpihid_device_id()
132 return p->devid; in get_acpihid_device_id()
135 return -EINVAL; in get_acpihid_device_id()
158 u64 pt_root = atomic64_read(&domain->pt_root); in amd_iommu_domain_get_pgtable()
160 pgtable->root = (u64 *)(pt_root & PAGE_MASK); in amd_iommu_domain_get_pgtable()
161 pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */ in amd_iommu_domain_get_pgtable()
166 atomic64_set(&domain->pt_root, root); in amd_iommu_domain_set_pt_root()
175 u64 *root, int mode) in amd_iommu_domain_set_pgtable() argument
179 /* lowest 3 bits encode pgtable mode */ in amd_iommu_domain_set_pgtable()
180 pt_root = mode & 7; in amd_iommu_domain_set_pgtable()
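
Editor's note: lines 158-180 above pack the page-table root pointer and the 3-bit paging mode into one 64-bit value (domain->pt_root) so both can be read atomically. A minimal standalone sketch of that encoding; the names here are editorial, only the "low 3 bits = mode, rest = page-aligned root" layout comes from the code above:

	#include <stdint.h>

	#define PT_MODE_MASK 0x7ULL	/* lowest 3 bits encode pgtable mode */

	static inline uint64_t pt_root_pack(uint64_t root, unsigned int mode)
	{
		/* root is page aligned, so its low bits are free */
		return root | (mode & PT_MODE_MASK);
	}

	static inline void pt_root_unpack(uint64_t pt_root,
					  uint64_t *root, unsigned int *mode)
	{
		*mode = pt_root & PT_MODE_MASK;
		*root = pt_root & ~PT_MODE_MASK;  /* the driver uses PAGE_MASK */
	}
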
194 spin_lock_init(&dev_data->lock); in alloc_dev_data()
195 dev_data->devid = devid; in alloc_dev_data()
196 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
198 llist_add(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
212 if (dev_data->devid == devid) in search_dev_data()
265 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) in setup_aliases()
286 dev_data->defer_attach = true; in find_dev_data()
305 if ((devid == p->devid) && p->group) in acpihid_device_group()
306 entry->group = p->group; in acpihid_device_group()
309 if (!entry->group) in acpihid_device_group()
310 entry->group = generic_device_group(dev); in acpihid_device_group()
312 iommu_group_ref_get(entry->group); in acpihid_device_group()
314 return entry->group; in acpihid_device_group()
341 dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_pri_erratum()
343 return dev_data->errata & (1 << erratum) ? true : false; in pdev_pri_erratum()
385 return -ENOMEM; in iommu_init_device()
387 dev_data->pdev = setup_aliases(dev); in iommu_init_device()
390 * By default we use passthrough mode for IOMMUv2 capable device. in iommu_init_device()
393 * it'll be forced to go into translation mode. in iommu_init_device()
399 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
400 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
430 if (dev_data->domain) in amd_iommu_uninit_device()
437 * device is re-plugged - not doing so would introduce a ton of races. in amd_iommu_uninit_device()
452 pte_mask = ~((cnt << 3) - 1); in first_pte_l7()
475 pr_err("DTE[%d]: %016llx\n", i, in dump_dte_entry()
485 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]); in dump_command()
503 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
505 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
534 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
536 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
558 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
560 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
575 struct device *dev = iommu->iommu.dev; in iommu_print_event()
666 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
667 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
670 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
674 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
699 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
702 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
703 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
710 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
723 /* Avoid memcpy function-call overhead */ in iommu_poll_ppr_log()
733 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ppr_log()
735 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
740 /* Refresh ring-buffer information */ in iommu_poll_ppr_log()
741 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
742 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
761 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
764 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
765 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
771 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
774 /* Avoid memcpy function-call overhead */ in iommu_poll_ga_log()
777 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ga_log()
779 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
807 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
823 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
828 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
849 * When re-enabling interrupt (by writing 1 in amd_iommu_int_thread()
858 * again and re-clear the bits in amd_iommu_int_thread()
860 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
880 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
886 pr_alert("Completion-Wait loop timed out\n"); in wait_on_sem()
887 return -EIO; in wait_on_sem()
900 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
901 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
905 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
908 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
915 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
918 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; in build_completion_wait()
919 cmd->data[1] = upper_32_bits(paddr); in build_completion_wait()
920 cmd->data[2] = data; in build_completion_wait()
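
Editor's note: build_completion_wait() above creates a COMPLETION_WAIT command that makes the IOMMU write the 64-bit "data" value to the cmd_sem buffer once all earlier commands have finished; wait_on_sem() (line 880 above) spins until that value shows up. A hedged sketch of how iommu_completion_wait() further down ties the two together (error handling trimmed):

	raw_spin_lock_irqsave(&iommu->lock, flags);
	data = ++iommu->cmd_sem_val;		/* unique value per wait */
	build_completion_wait(&cmd, iommu, data);
	__iommu_queue_command_sync(iommu, &cmd, false);
	wait_on_sem(iommu, data);		/* poll *iommu->cmd_sem */
	raw_spin_unlock_irqrestore(&iommu->lock, flags);
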
927 cmd->data[0] = devid; in build_inv_dte()
952 cmd->data[1] |= domid; in build_inv_iommu_pages()
953 cmd->data[2] = lower_32_bits(address); in build_inv_iommu_pages()
954 cmd->data[3] = upper_32_bits(address); in build_inv_iommu_pages()
956 if (s) /* size bit - we flush more than one 4kb page */ in build_inv_iommu_pages()
957 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iommu_pages()
958 if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ in build_inv_iommu_pages()
959 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; in build_inv_iommu_pages()
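
Editor's note: the size (S) bit widens the invalidation from a single 4 KiB page to a range, and the PDE bit additionally drops cached page-directory entries. A hedged sketch of the usual full-domain flush path (domain_flush_tlb_pde() elsewhere in this file), which ends up setting both bits:

	static void domain_flush_tlb_pde(struct protection_domain *domain)
	{
		/* address 0 with an "everything" size makes
		 * build_inv_iommu_pages() switch to the all-pages
		 * address and set the S bit; pde=1 sets the PDE bit */
		__domain_flush_pages(domain, 0,
				     CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
	}
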
983 cmd->data[0] = devid; in build_inv_iotlb_pages()
984 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pages()
985 cmd->data[1] = devid; in build_inv_iotlb_pages()
986 cmd->data[2] = lower_32_bits(address); in build_inv_iotlb_pages()
987 cmd->data[3] = upper_32_bits(address); in build_inv_iotlb_pages()
990 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iotlb_pages()
1000 cmd->data[0] = pasid; in build_inv_iommu_pasid()
1001 cmd->data[1] = domid; in build_inv_iommu_pasid()
1002 cmd->data[2] = lower_32_bits(address); in build_inv_iommu_pasid()
1003 cmd->data[3] = upper_32_bits(address); in build_inv_iommu_pasid()
1004 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; in build_inv_iommu_pasid()
1005 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iommu_pasid()
1007 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iommu_pasid()
1018 cmd->data[0] = devid; in build_inv_iotlb_pasid()
1019 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; in build_inv_iotlb_pasid()
1020 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pasid()
1021 cmd->data[1] = devid; in build_inv_iotlb_pasid()
1022 cmd->data[1] |= (pasid & 0xff) << 16; in build_inv_iotlb_pasid()
1023 cmd->data[2] = lower_32_bits(address); in build_inv_iotlb_pasid()
1024 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iotlb_pasid()
1025 cmd->data[3] = upper_32_bits(address); in build_inv_iotlb_pasid()
1027 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iotlb_pasid()
1036 cmd->data[0] = devid; in build_complete_ppr()
1038 cmd->data[1] = pasid; in build_complete_ppr()
1039 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; in build_complete_ppr()
1041 cmd->data[3] = tag & 0x1ff; in build_complete_ppr()
1042 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT; in build_complete_ppr()
1056 cmd->data[0] = devid; in build_inv_irt()
1071 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1073 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1080 return -EIO; in __iommu_queue_command_sync()
1087 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1096 iommu->need_sync = sync; in __iommu_queue_command_sync()
1108 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1110 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1131 if (!iommu->need_sync) in iommu_completion_wait()
1134 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1136 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1146 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
1240 * Command send function for flushing on-device TLB
1249 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1250 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1252 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1273 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1275 if (dev_data->pdev) in device_flush_dte()
1276 ret = pci_for_each_dma_alias(dev_data->pdev, in device_flush_dte()
1279 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1283 alias = amd_iommu_alias_table[dev_data->devid]; in device_flush_dte()
1284 if (alias != dev_data->devid) { in device_flush_dte()
1290 if (dev_data->ats.enabled) in device_flush_dte()
1308 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1311 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1321 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1323 if (!dev_data->ats.enabled) in __domain_flush_pages()
1338 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1349 if (domain && !domain->dev_iommu[i]) in domain_flush_complete()
1367 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1370 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1382 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1397 freelist = freelist->freelist; in free_page_list()
1406 p->freelist = freelist; in free_pt_page()
 1443 static struct page *free_sub_pt(unsigned long root, int mode, in free_sub_pt() argument
 1446 switch (mode) { in free_sub_pt()
1480 if (pgtable->mode == PAGE_MODE_NONE) in free_pagetable()
1483 BUG_ON(pgtable->mode < PAGE_MODE_NONE || in free_pagetable()
1484 pgtable->mode > PAGE_MODE_6_LEVEL); in free_pagetable()
1486 root = (unsigned long)pgtable->root; in free_pagetable()
1487 freelist = free_sub_pt(root, pgtable->mode, freelist); in free_pagetable()
1506 spin_lock_irqsave(&domain->lock, flags); in increase_address_space()
1510 if (address <= PM_LEVEL_SIZE(pgtable.mode)) in increase_address_space()
1514 if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL)) in increase_address_space()
1521 *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root)); in increase_address_space()
1524 pgtable.mode += 1; in increase_address_space()
1532 amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode); in increase_address_space()
1537 spin_unlock_irqrestore(&domain->lock, flags); in increase_address_space()
1557 while (address > PM_LEVEL_SIZE(pgtable.mode)) { in alloc_pte()
1560 * page-table. in alloc_pte()
1570 level = pgtable.mode - 1; in alloc_pte()
1626 level -= 1; in alloc_pte()
1655 if (address > PM_LEVEL_SIZE(pgtable.mode)) in fetch_pte()
1658 level = pgtable.mode - 1; in fetch_pte()
1677 level -= 1; in fetch_pte()
1698 int mode; in free_clear_pte() local
1701 pr_warn("AMD-Vi: IOMMU pte changed since we read it\n"); in free_clear_pte()
1709 mode = IOMMU_PTE_MODE(pteval); in free_clear_pte()
1711 return free_sub_pt(pt, mode, freelist); in free_clear_pte()
1736 ret = -EINVAL; in iommu_map_page()
1743 ret = -ENOMEM; in iommu_map_page()
1773 spin_lock_irqsave(&dom->lock, flags); in iommu_map_page()
1775 * Flush domain TLB(s) and wait for completion. Any Device-Table in iommu_map_page()
1781 spin_unlock_irqrestore(&dom->lock, flags); in iommu_map_page()
1814 bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; in iommu_unmap_page()
1889 if (domain->glx == 2) in free_gcr3_table()
1890 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1891 else if (domain->glx == 1) in free_gcr3_table()
1892 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1894 BUG_ON(domain->glx != 0); in free_gcr3_table()
1896 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1907 if (pgtable->mode != PAGE_MODE_NONE) in set_dte_entry()
1908 pte_root = iommu_virt_to_phys(pgtable->root); in set_dte_entry()
1910 pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1926 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1927 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1928 u64 glx = domain->glx; in set_dte_entry()
1941 /* Encode GCR3 table into DTE */ in set_dte_entry()
1953 flags |= domain->id; in set_dte_entry()
1961 * the previous kernel--if so, it needs to flush the translation cache in set_dte_entry()
1987 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1988 ats = dev_data->ats.enabled; in do_attach()
1991 dev_data->domain = domain; in do_attach()
1992 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1995 domain->dev_iommu[iommu->index] += 1; in do_attach()
1996 domain->dev_cnt += 1; in do_attach()
2000 set_dte_entry(dev_data->devid, domain, &pgtable, in do_attach()
2001 ats, dev_data->iommu_v2); in do_attach()
2002 clone_aliases(dev_data->pdev); in do_attach()
2009 struct protection_domain *domain = dev_data->domain; in do_detach()
2012 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2015 dev_data->domain = NULL; in do_detach()
2016 list_del(&dev_data->list); in do_detach()
2017 clear_dte_entry(dev_data->devid); in do_detach()
2018 clone_aliases(dev_data->pdev); in do_detach()
2020 /* Flush the DTE entry */ in do_detach()
2029 /* decrease reference counters - needs to happen after the flushes */ in do_detach()
2030 domain->dev_iommu[iommu->index] -= 1; in do_detach()
2031 domain->dev_cnt -= 1; in do_detach()
2041 /* FIXME: Change generic reset-function to do the same */
2049 return -EINVAL; in pri_reset_while_enabled()
2069 /* Only allow access to user-accessible pages */ in pdev_iommuv2_enable()
2115 spin_lock_irqsave(&domain->lock, flags); in attach_device()
2119 spin_lock(&dev_data->lock); in attach_device()
2121 ret = -EBUSY; in attach_device()
2122 if (dev_data->domain != NULL) in attach_device()
2129 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2132 ret = -EINVAL; in attach_device()
2133 if (def_domain->type != IOMMU_DOMAIN_IDENTITY) in attach_device()
2136 if (dev_data->iommu_v2) { in attach_device()
2140 dev_data->ats.enabled = true; in attach_device()
2141 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2142 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
2146 dev_data->ats.enabled = true; in attach_device()
2147 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2156 * We might boot into a crash-kernel here. The crashed kernel in attach_device()
2165 spin_unlock(&dev_data->lock); in attach_device()
2167 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
2182 domain = dev_data->domain; in detach_device()
2184 spin_lock_irqsave(&domain->lock, flags); in detach_device()
2186 spin_lock(&dev_data->lock); in detach_device()
2194 if (WARN_ON(!dev_data->domain)) in detach_device()
2202 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2204 else if (dev_data->ats.enabled) in detach_device()
2207 dev_data->ats.enabled = false; in detach_device()
2210 spin_unlock(&dev_data->lock); in detach_device()
2212 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
2222 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
2231 return &iommu->iommu; in amd_iommu_probe_device()
2235 if (ret != -ENOTSUPP) in amd_iommu_probe_device()
2236 dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); in amd_iommu_probe_device()
2241 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
2253 /* Domains are initialized for this device - have a look what we ended up with */ in amd_iommu_probe_finalize()
2255 if (domain->type == IOMMU_DOMAIN_DMA) in amd_iommu_probe_finalize()
2284 switch (domain->type) { in amd_iommu_domain_get_attr()
2286 return -ENODEV; in amd_iommu_domain_get_attr()
2293 return -ENODEV; in amd_iommu_domain_get_attr()
2297 return -EINVAL; in amd_iommu_domain_get_attr()
2312 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2313 set_dte_entry(dev_data->devid, domain, pgtable, in update_device_table()
2314 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
2315 clone_aliases(dev_data->pdev); in update_device_table()
2390 spin_lock_irqsave(&domain->lock, flags); in cleanup_domain()
2392 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
2393 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
2395 BUG_ON(!entry->domain); in cleanup_domain()
2399 spin_unlock_irqrestore(&domain->lock, flags); in cleanup_domain()
2409 if (domain->id) in protection_domain_free()
2410 domain_id_free(domain->id); in protection_domain_free()
2419 static int protection_domain_init(struct protection_domain *domain, int mode) in protection_domain_init() argument
2423 BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL); in protection_domain_init()
2425 spin_lock_init(&domain->lock); in protection_domain_init()
2426 domain->id = domain_id_alloc(); in protection_domain_init()
2427 if (!domain->id) in protection_domain_init()
2428 return -ENOMEM; in protection_domain_init()
2429 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init()
2431 if (mode != PAGE_MODE_NONE) { in protection_domain_init()
2434 return -ENOMEM; in protection_domain_init()
2437 amd_iommu_domain_set_pgtable(domain, pt_root, mode); in protection_domain_init()
2442 static struct protection_domain *protection_domain_alloc(int mode) in protection_domain_alloc() argument
2450 if (protection_domain_init(domain, mode)) in protection_domain_alloc()
2464 int mode = DEFAULT_PGTABLE_LEVEL; in amd_iommu_domain_alloc() local
2467 mode = PAGE_MODE_NONE; in amd_iommu_domain_alloc()
2469 domain = protection_domain_alloc(mode); in amd_iommu_domain_alloc()
2473 domain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
2474 domain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
2475 domain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
2478 iommu_get_dma_cookie(&domain->domain) == -ENOMEM) in amd_iommu_domain_alloc()
2481 return &domain->domain; in amd_iommu_domain_alloc()
2495 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
2498 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
2503 if (dom->type == IOMMU_DOMAIN_DMA) in amd_iommu_domain_free()
2504 iommu_put_dma_cookie(&domain->domain); in amd_iommu_domain_free()
2506 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
2526 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2535 (dom->type == IOMMU_DOMAIN_UNMANAGED)) in amd_iommu_detach_device()
2536 dev_data->use_vapic = 0; in amd_iommu_detach_device()
2551 return -EINVAL; in amd_iommu_attach_device()
2554 dev_data->defer_attach = false; in amd_iommu_attach_device()
2556 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2558 return -EINVAL; in amd_iommu_attach_device()
2560 if (dev_data->domain) in amd_iommu_attach_device()
2567 if (dom->type == IOMMU_DOMAIN_UNMANAGED) in amd_iommu_attach_device()
2568 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2570 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2589 if (pgtable.mode == PAGE_MODE_NONE) in amd_iommu_map()
2590 return -EINVAL; in amd_iommu_map()
2612 if (pgtable.mode == PAGE_MODE_NONE) in amd_iommu_unmap()
2627 if (pgtable.mode == PAGE_MODE_NONE) in amd_iommu_iova_to_phys()
2635 offset_mask = pte_pgsize - 1; in amd_iommu_iova_to_phys()
2672 if (devid < entry->devid_start || devid > entry->devid_end) in amd_iommu_get_resv_regions()
2676 length = entry->address_end - entry->address_start; in amd_iommu_get_resv_regions()
2677 if (entry->prot & IOMMU_PROT_IR) in amd_iommu_get_resv_regions()
2679 if (entry->prot & IOMMU_PROT_IW) in amd_iommu_get_resv_regions()
2681 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) in amd_iommu_get_resv_regions()
2685 region = iommu_alloc_resv_region(entry->address_start, in amd_iommu_get_resv_regions()
2688 dev_err(dev, "Out of memory allocating dm-regions\n"); in amd_iommu_get_resv_regions()
2691 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2695 MSI_RANGE_END - MSI_RANGE_START + 1, in amd_iommu_get_resv_regions()
2699 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2702 HT_RANGE_END - HT_RANGE_START + 1, in amd_iommu_get_resv_regions()
2706 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2714 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2723 spin_lock_irqsave(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2726 spin_unlock_irqrestore(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2746 * encryption bit in their DMA-mask and require remapping. in amd_iommu_def_domain_type()
2748 if (!mem_encrypt_active() && dev_data->iommu_v2) in amd_iommu_def_domain_type()
2780 * mode
2782 * In passthrough mode the IOMMU is initialized and enabled but not used for
2783 * DMA-API translation.
2806 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
2811 /* Remove page-table from domain */ in amd_iommu_domain_direct_map()
2817 /* Page-table is not visible to IOMMU anymore, so free it */ in amd_iommu_domain_direct_map()
2820 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
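
Editor's note: amd_iommu_domain_direct_map() above removes the v1 host page table (the domain then runs in PAGE_MODE_NONE) so the domain can be switched over to IOMMUv2/PASID operation. A hedged sketch of the expected caller sequence, roughly as the amd_iommu_v2 (PASID) driver uses it (error handling trimmed):

	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);

	amd_iommu_domain_direct_map(domain);		  /* drop v1 page table */
	ret = amd_iommu_domain_enable_v2(domain, pasids); /* allocate GCR3 table */
	if (!ret)
		ret = iommu_attach_group(domain, iommu_group_get(&pdev->dev));
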
2831 return -EINVAL; in amd_iommu_domain_enable_v2()
2834 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9) in amd_iommu_domain_enable_v2()
2838 return -EINVAL; in amd_iommu_domain_enable_v2()
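
Editor's note: each GCR3 table level resolves 9 PASID bits (512 entries), so the loop above counts how many levels beyond the first are needed for the requested number of PASIDs, and the check rejects values above amd_iommu_max_glx_val. A worked example (editorial, not from the file):

	/*
	 * pasids = 65536 (16-bit PASIDs):
	 *   (65536 - 1) & ~0x1ff = 0xfe00 != 0  -> levels = 1, pasids >>= 9
	 *   (  128 - 1) & ~0x1ff = 0            -> loop ends
	 *
	 * levels = 1, i.e. a two-level GCR3 table; the value is stored in
	 * domain->glx (line 2856 below) and later encoded into the DTE.
	 */
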
2840 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2845 * devices attached when it is switched into IOMMUv2 mode. in amd_iommu_domain_enable_v2()
2847 ret = -EBUSY; in amd_iommu_domain_enable_v2()
2848 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
2851 ret = -ENOMEM; in amd_iommu_domain_enable_v2()
2852 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
2853 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
2856 domain->glx = levels; in amd_iommu_domain_enable_v2()
2857 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
2864 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2877 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
2878 return -EINVAL; in __flush_pasid()
2880 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
2887 if (domain->dev_iommu[i] == 0) in __flush_pasid()
2899 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2904 There might be non-IOMMUv2 capable devices in an IOMMUv2 in __flush_pasid()
2907 if (!dev_data->ats.enabled) in __flush_pasid()
2910 qdep = dev_data->ats.qdep; in __flush_pasid()
2911 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2913 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
2944 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
2946 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
2964 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
2966 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
2998 level -= 1; in __get_gcr3_pte()
3011 if (pgtable.mode != PAGE_MODE_NONE) in __set_gcr3()
3012 return -EINVAL; in __set_gcr3()
3014 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3016 return -ENOMEM; in __set_gcr3()
3029 if (pgtable.mode != PAGE_MODE_NONE) in __clear_gcr3()
3030 return -EINVAL; in __clear_gcr3()
3032 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3048 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3050 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3062 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3064 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3077 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
3078 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3080 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3081 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
3091 struct device *dev = &pdev->dev; in amd_iommu_get_v2_domain()
3097 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_get_v2_domain()
3098 pdomain = dev_data->domain; in amd_iommu_get_v2_domain()
3101 if (pdomain == NULL && dev_data->defer_attach) { in amd_iommu_get_v2_domain()
3102 dev_data->defer_attach = false; in amd_iommu_get_v2_domain()
3110 if (io_domain->type != IOMMU_DOMAIN_DMA) in amd_iommu_get_v2_domain()
3114 if (!(pdomain->flags & PD_IOMMUV2_MASK)) in amd_iommu_get_v2_domain()
3117 return &pdomain->domain; in amd_iommu_get_v2_domain()
3128 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_enable_device_erratum()
3129 dev_data->errata |= (1 << erratum); in amd_iommu_enable_device_erratum()
3140 return -EINVAL; in amd_iommu_device_info()
3143 return -EINVAL; in amd_iommu_device_info()
3148 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; in amd_iommu_device_info()
3152 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP; in amd_iommu_device_info()
3161 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP; in amd_iommu_device_info()
3162 info->max_pasids = min(pci_max_pasids(pdev), max_pasids); in amd_iommu_device_info()
3166 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP; in amd_iommu_device_info()
3168 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP; in amd_iommu_device_info()
3188 u64 dte; in set_dte_irq_entry() local
3190 dte = amd_iommu_dev_table[devid].data[2]; in set_dte_irq_entry()
3191 dte &= ~DTE_IRQ_PHYS_ADDR_MASK; in set_dte_irq_entry()
3192 dte |= iommu_virt_to_phys(table->table); in set_dte_irq_entry()
3193 dte |= DTE_IRQ_REMAP_INTCTL; in set_dte_irq_entry()
3194 dte |= DTE_IRQ_TABLE_LEN; in set_dte_irq_entry()
3195 dte |= DTE_IRQ_REMAP_ENABLE; in set_dte_irq_entry()
3197 amd_iommu_dev_table[devid].data[2] = dte; in set_dte_irq_entry()
3223 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL); in __alloc_irq_table()
3224 if (!table->table) { in __alloc_irq_table()
3228 raw_spin_lock_init(&table->lock); in __alloc_irq_table()
3231 memset(table->table, 0, in __alloc_irq_table()
3234 memset(table->table, 0, in __alloc_irq_table()
3322 kmem_cache_free(amd_iommu_irq_cache, new_table->table); in alloc_irq_table()
3337 return -ENODEV; in alloc_irq_index()
3341 return -ENODEV; in alloc_irq_index()
3346 raw_spin_lock_irqsave(&table->lock, flags); in alloc_irq_index()
3349 for (index = ALIGN(table->min_index, alignment), c = 0; in alloc_irq_index()
3351 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3360 for (; c != 0; --c) in alloc_irq_index()
3361 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3363 index -= count - 1; in alloc_irq_index()
3370 index = -ENOSPC; in alloc_irq_index()
3373 raw_spin_unlock_irqrestore(&table->lock, flags); in alloc_irq_index()
3389 return -EINVAL; in modify_irte_ga()
3393 return -ENOMEM; in modify_irte_ga()
3395 raw_spin_lock_irqsave(&table->lock, flags); in modify_irte_ga()
3397 entry = (struct irte_ga *)table->table; in modify_irte_ga()
3400 ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, in modify_irte_ga()
3401 entry->lo.val, entry->hi.val, in modify_irte_ga()
3402 irte->lo.val, irte->hi.val); in modify_irte_ga()
3404 * We use cmpxchg16 to atomically update the 128-bit IRTE, in modify_irte_ga()
3412 data->ref = entry; in modify_irte_ga()
3414 raw_spin_unlock_irqrestore(&table->lock, flags); in modify_irte_ga()
3430 return -EINVAL; in modify_irte()
3434 return -ENOMEM; in modify_irte()
3436 raw_spin_lock_irqsave(&table->lock, flags); in modify_irte()
3437 table->table[index] = irte->val; in modify_irte()
3438 raw_spin_unlock_irqrestore(&table->lock, flags); in modify_irte()
3460 raw_spin_lock_irqsave(&table->lock, flags); in free_irte()
3461 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3462 raw_spin_unlock_irqrestore(&table->lock, flags); in free_irte()
3474 irte->val = 0; in irte_prepare()
3475 irte->fields.vector = vector; in irte_prepare()
3476 irte->fields.int_type = delivery_mode; in irte_prepare()
3477 irte->fields.destination = dest_apicid; in irte_prepare()
3478 irte->fields.dm = dest_mode; in irte_prepare()
3479 irte->fields.valid = 1; in irte_prepare()
3488 irte->lo.val = 0; in irte_ga_prepare()
3489 irte->hi.val = 0; in irte_ga_prepare()
3490 irte->lo.fields_remap.int_type = delivery_mode; in irte_ga_prepare()
3491 irte->lo.fields_remap.dm = dest_mode; in irte_ga_prepare()
3492 irte->hi.fields.vector = vector; in irte_ga_prepare()
3493 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); in irte_ga_prepare()
3494 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); in irte_ga_prepare()
3495 irte->lo.fields_remap.valid = 1; in irte_ga_prepare()
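
Editor's note: in the 128-bit guest-virtual-APIC IRTE format the destination APIC ID is split between the low and high halves of the entry, which is what the two macros above express. A reference sketch of what they are assumed to do (see amd_iommu_types.h for the authoritative definitions):

	/* APICID_TO_IRTE_DEST_LO(x): low 24 bits of the APIC ID */
	/* APICID_TO_IRTE_DEST_HI(x): bits 31:24 of the APIC ID  */

	/* e.g. x2APIC ID 0x12345678:
	 *   lo.fields_remap.destination = 0x345678
	 *   hi.fields.destination       = 0x12
	 */
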
3502 irte->fields.valid = 1; in irte_activate()
3510 irte->lo.fields_remap.valid = 1; in irte_ga_activate()
3518 irte->fields.valid = 0; in irte_deactivate()
3526 irte->lo.fields_remap.valid = 0; in irte_ga_deactivate()
3535 irte->fields.vector = vector; in irte_set_affinity()
3536 irte->fields.destination = dest_apicid; in irte_set_affinity()
3545 if (!irte->lo.fields_remap.guest_mode) { in irte_ga_set_affinity()
3546 irte->hi.fields.vector = vector; in irte_ga_set_affinity()
3547 irte->lo.fields_remap.destination = in irte_ga_set_affinity()
3549 irte->hi.fields.destination = in irte_ga_set_affinity()
3558 table->table[index] = IRTE_ALLOCATED; in irte_set_allocated()
3563 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_set_allocated()
3566 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3567 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3568 irte->hi.fields.vector = 0xff; in irte_ga_set_allocated()
3573 union irte *ptr = (union irte *)table->table; in irte_is_allocated()
3576 return irte->val != 0; in irte_is_allocated()
3581 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_is_allocated()
3584 return irte->hi.fields.vector != 0; in irte_ga_is_allocated()
3589 table->table[index] = 0; in irte_clear_allocated()
3594 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_clear_allocated()
3597 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3598 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3603 switch (info->type) { in get_devid()
3606 return get_ioapic_devid(info->devid); in get_devid()
3609 return get_hpet_devid(info->devid); in get_devid()
3612 return get_device_id(msi_desc_to_dev(info->desc)); in get_devid()
3615 return -1; in get_devid()
3627 switch (info->type) { in get_irq_domain_for_devid()
3630 return iommu->ir_domain; in get_irq_domain_for_devid()
3664 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_prepare_irte()
3665 struct msi_msg *msg = &data->msi_entry; in irq_remapping_prepare_irte()
3672 data->irq_2_irte.devid = devid; in irq_remapping_prepare_irte()
3673 data->irq_2_irte.index = index + sub_handle; in irq_remapping_prepare_irte()
3674 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode, in irq_remapping_prepare_irte()
3675 apic->irq_dest_mode, irq_cfg->vector, in irq_remapping_prepare_irte()
3676 irq_cfg->dest_apicid, devid); in irq_remapping_prepare_irte()
3678 switch (info->type) { in irq_remapping_prepare_irte()
3681 entry = info->ioapic.entry; in irq_remapping_prepare_irte()
3682 info->ioapic.entry = NULL; in irq_remapping_prepare_irte()
3684 entry->vector = index; in irq_remapping_prepare_irte()
3685 entry->mask = 0; in irq_remapping_prepare_irte()
3686 entry->trigger = info->ioapic.trigger; in irq_remapping_prepare_irte()
3687 entry->polarity = info->ioapic.polarity; in irq_remapping_prepare_irte()
3689 if (info->ioapic.trigger) in irq_remapping_prepare_irte()
3690 entry->mask = 1; in irq_remapping_prepare_irte()
3696 msg->address_hi = MSI_ADDR_BASE_HI; in irq_remapping_prepare_irte()
3697 msg->address_lo = MSI_ADDR_BASE_LO; in irq_remapping_prepare_irte()
3698 msg->data = irte_info->index; in irq_remapping_prepare_irte()
3738 return -EINVAL; in irq_remapping_alloc()
3739 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI && in irq_remapping_alloc()
3740 info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX) in irq_remapping_alloc()
3741 return -EINVAL; in irq_remapping_alloc()
3747 if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) in irq_remapping_alloc()
3748 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; in irq_remapping_alloc()
3752 return -EINVAL; in irq_remapping_alloc()
3758 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { in irq_remapping_alloc()
3764 if (!table->min_index) { in irq_remapping_alloc()
3769 table->min_index = 32; in irq_remapping_alloc()
3772 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3774 WARN_ON(table->min_index != 32); in irq_remapping_alloc()
3775 index = info->ioapic.pin; in irq_remapping_alloc()
3777 index = -ENOMEM; in irq_remapping_alloc()
3779 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || in irq_remapping_alloc()
3780 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { in irq_remapping_alloc()
3781 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); in irq_remapping_alloc()
3784 msi_desc_to_pci_dev(info->desc)); in irq_remapping_alloc()
3799 ret = -EINVAL; in irq_remapping_alloc()
3803 ret = -ENOMEM; in irq_remapping_alloc()
3809 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); in irq_remapping_alloc()
3811 data->entry = kzalloc(sizeof(struct irte_ga), in irq_remapping_alloc()
3813 if (!data->entry) { in irq_remapping_alloc()
3818 irq_data->hwirq = (devid << 16) + i; in irq_remapping_alloc()
3819 irq_data->chip_data = data; in irq_remapping_alloc()
3820 irq_data->chip = &amd_ir_chip; in irq_remapping_alloc()
3828 for (i--; i >= 0; i--) { in irq_remapping_alloc()
3831 kfree(irq_data->chip_data); in irq_remapping_alloc()
3850 if (irq_data && irq_data->chip_data) { in irq_remapping_free()
3851 data = irq_data->chip_data; in irq_remapping_free()
3852 irte_info = &data->irq_2_irte; in irq_remapping_free()
3853 free_irte(irte_info->devid, irte_info->index); in irq_remapping_free()
3854 kfree(data->entry); in irq_remapping_free()
3869 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_activate()
3870 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_activate()
3871 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_activate()
3877 iommu->irte_ops->activate(data->entry, irte_info->devid, in irq_remapping_activate()
3878 irte_info->index); in irq_remapping_activate()
3886 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_deactivate()
3887 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_deactivate()
3888 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_deactivate()
3891 iommu->irte_ops->deactivate(data->entry, irte_info->devid, in irq_remapping_deactivate()
3892 irte_info->index); in irq_remapping_deactivate()
3905 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_activate_guest_mode()
3909 !entry || entry->lo.fields_vapic.guest_mode) in amd_iommu_activate_guest_mode()
3912 valid = entry->lo.fields_vapic.valid; in amd_iommu_activate_guest_mode()
3914 entry->lo.val = 0; in amd_iommu_activate_guest_mode()
3915 entry->hi.val = 0; in amd_iommu_activate_guest_mode()
3917 entry->lo.fields_vapic.valid = valid; in amd_iommu_activate_guest_mode()
3918 entry->lo.fields_vapic.guest_mode = 1; in amd_iommu_activate_guest_mode()
3919 entry->lo.fields_vapic.ga_log_intr = 1; in amd_iommu_activate_guest_mode()
3920 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; in amd_iommu_activate_guest_mode()
3921 entry->hi.fields.vector = ir_data->ga_vector; in amd_iommu_activate_guest_mode()
3922 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; in amd_iommu_activate_guest_mode()
3924 return modify_irte_ga(ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3925 ir_data->irq_2_irte.index, entry, ir_data); in amd_iommu_activate_guest_mode()
3932 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_deactivate_guest_mode()
3933 struct irq_cfg *cfg = ir_data->cfg; in amd_iommu_deactivate_guest_mode()
3937 !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_deactivate_guest_mode()
3940 valid = entry->lo.fields_remap.valid; in amd_iommu_deactivate_guest_mode()
3942 entry->lo.val = 0; in amd_iommu_deactivate_guest_mode()
3943 entry->hi.val = 0; in amd_iommu_deactivate_guest_mode()
3945 entry->lo.fields_remap.valid = valid; in amd_iommu_deactivate_guest_mode()
3946 entry->lo.fields_remap.dm = apic->irq_dest_mode; in amd_iommu_deactivate_guest_mode()
3947 entry->lo.fields_remap.int_type = apic->irq_delivery_mode; in amd_iommu_deactivate_guest_mode()
3948 entry->hi.fields.vector = cfg->vector; in amd_iommu_deactivate_guest_mode()
3949 entry->lo.fields_remap.destination = in amd_iommu_deactivate_guest_mode()
3950 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3951 entry->hi.fields.destination = in amd_iommu_deactivate_guest_mode()
3952 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3954 return modify_irte_ga(ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3955 ir_data->irq_2_irte.index, entry, ir_data); in amd_iommu_deactivate_guest_mode()
3964 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; in amd_ir_set_vcpu_affinity()
3965 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_vcpu_affinity()
3966 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_vcpu_affinity()
3967 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); in amd_ir_set_vcpu_affinity()
3970 * This device has never been set up for guest mode. in amd_ir_set_vcpu_affinity()
3973 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
3976 ir_data->cfg = irqd_cfg(data); in amd_ir_set_vcpu_affinity()
3977 pi_data->ir_data = ir_data; in amd_ir_set_vcpu_affinity()
3980 * SVM tries to set up for VAPIC mode, but we are in in amd_ir_set_vcpu_affinity()
3981 * legacy mode. So, we force legacy mode instead. in amd_ir_set_vcpu_affinity()
3986 pi_data->is_guest_mode = false; in amd_ir_set_vcpu_affinity()
3989 iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_vcpu_affinity()
3991 return -EINVAL; in amd_ir_set_vcpu_affinity()
3993 pi_data->prev_ga_tag = ir_data->cached_ga_tag; in amd_ir_set_vcpu_affinity()
3994 if (pi_data->is_guest_mode) { in amd_ir_set_vcpu_affinity()
3995 ir_data->ga_root_ptr = (pi_data->base >> 12); in amd_ir_set_vcpu_affinity()
3996 ir_data->ga_vector = vcpu_pi_info->vector; in amd_ir_set_vcpu_affinity()
3997 ir_data->ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
4000 ir_data->cached_ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
4009 ir_data->cached_ga_tag = 0; in amd_ir_set_vcpu_affinity()
4026 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, in amd_ir_update_irte()
4027 irte_info->index, cfg->vector, in amd_ir_update_irte()
4028 cfg->dest_apicid); in amd_ir_update_irte()
4034 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_affinity()
4035 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_affinity()
4037 struct irq_data *parent = data->parent_data; in amd_ir_set_affinity()
4038 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_affinity()
4042 return -ENODEV; in amd_ir_set_affinity()
4044 ret = parent->chip->irq_set_affinity(parent, mask, force); in amd_ir_set_affinity()
4061 struct amd_ir_data *ir_data = irq_data->chip_data; in ir_compose_msi_msg()
4063 *msg = ir_data->msi_entry; in ir_compose_msi_msg()
4067 .name = "AMD-IR",
4078 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
4080 return -ENOMEM; in amd_iommu_create_irq_domain()
4081 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
4082 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
4084 return -ENOMEM; in amd_iommu_create_irq_domain()
4087 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
4088 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
4089 "AMD-IR-MSI", in amd_iommu_create_irq_domain()
4090 iommu->index); in amd_iommu_create_irq_domain()
4100 int devid = ir_data->irq_2_irte.devid; in amd_iommu_update_ga()
4101 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_update_ga()
4102 struct irte_ga *ref = (struct irte_ga *) ir_data->ref; in amd_iommu_update_ga()
4105 !ref || !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_update_ga()
4110 return -ENODEV; in amd_iommu_update_ga()
4114 return -ENODEV; in amd_iommu_update_ga()
4116 raw_spin_lock_irqsave(&table->lock, flags); in amd_iommu_update_ga()
4118 if (ref->lo.fields_vapic.guest_mode) { in amd_iommu_update_ga()
4120 ref->lo.fields_vapic.destination = in amd_iommu_update_ga()
4122 ref->hi.fields.destination = in amd_iommu_update_ga()
4125 ref->lo.fields_vapic.is_run = is_run; in amd_iommu_update_ga()
4129 raw_spin_unlock_irqrestore(&table->lock, flags); in amd_iommu_update_ga()