Lines Matching +full:0 +full:xfee00000

41 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
43 #define IOAPIC_RANGE_START (0xfee00000)
44 #define IOAPIC_RANGE_END (0xfeefffff)
45 #define IOVA_START_ADDR (0x1000)
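These constants carve the x86 interrupt-delivery MMIO window (0xfee00000-0xfeefffff, the range this search keys on) out of the usable IOVA space; the driver reports it to the IOMMU core as an MSI reserved region (see the iommu_alloc_resv_region() call near line 4418 below). A minimal sketch of that registration, assuming it runs inside a get_resv_regions callback; the function name is illustrative:

	/* Illustrative sketch: keep the IOAPIC/MSI window out of the IOVA allocator. */
	static void example_report_msi_window(struct list_head *head)
	{
		struct iommu_resv_region *reg;

		reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
					      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
					      0, IOMMU_RESV_MSI, GFP_KERNEL);
		if (reg)
			list_add_tail(&reg->list, head);
	}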
70 static int force_on = 0;
83 return 0; in root_entry_lctp()
95 return 0; in root_entry_uctp()
147 int intel_iommu_enabled = 0;
187 dmar_disabled = 0; in intel_iommu_setup()
194 dmar_map_gfx = 0; in intel_iommu_setup()
204 intel_iommu_superpage = 0; in intel_iommu_setup()
210 intel_iommu_sm = 0; in intel_iommu_setup()
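Taken together, these assignments implement the intel_iommu= boot options: "on" clears dmar_disabled, "igfx_off" clears dmar_map_gfx, "sp_off" clears intel_iommu_superpage, and "sm_off" clears intel_iommu_sm. For example, enabling the IOMMU while exempting integrated graphics from DMA remapping:

	intel_iommu=on,igfx_off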
232 page = alloc_pages_node(node, gfp | __GFP_ZERO, 0); in alloc_pgtable_page()
264 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
284 for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { in __iommu_calculate_agaw()
351 int mask = 0x3; in domain_update_iommu_superpage()
354 return 0; in domain_update_iommu_superpage()
362 mask = 0x1; in domain_update_iommu_superpage()
402 unsigned long bitmap = 0; in domain_super_pgsize_bitmap()
461 if (devfn >= 0x80) { in iommu_context_addr()
462 devfn -= 0x80; in iommu_context_addr()
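The 0x80 split exists because, in the extended root-table layout, each root entry covers one bus in two halves: the low 64 bits point to a context table for functions 0x00-0x7f and the high 64 bits to one for 0x80-0xff (the hi/lo pair dumped near line 709 below). A hedged sketch of the selection, with an illustrative helper name:

	/* Illustrative: pick the half of an extended root entry covering devfn. */
	static inline u64 example_root_half(struct root_entry *re, u8 devfn)
	{
		return (devfn & 0x80) ? re->hi : re->lo;
	}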
524 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); in quirk_ioat_snb_local_iommu()
530 vtbar &= 0xffff0000; in quirk_ioat_snb_local_iommu()
532 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
534 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { in quirk_ioat_snb_local_iommu()
566 u16 segment = 0; in device_lookup_iommu()
645 for (i = 0; i < ROOT_ENTRY_NR; i++) { in free_context_table()
646 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
653 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
677 pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); in pgtable_walk()
695 u8 devfn = source_id & 0xff; in dmar_fault_dump_ptes()
699 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
709 pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n", in dmar_fault_dump_ptes()
712 pr_info("root entry: 0x%016llx", rt_entry->lo); in dmar_fault_dump_ptes()
715 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
721 pr_info("context entry: hi 0x%016llx, low 0x%016llx\n", in dmar_fault_dump_ptes()
743 pr_info("pasid dir entry: 0x%016llx\n", pde->val); in dmar_fault_dump_ptes()
753 for (i = 0; i < ARRAY_SIZE(pte->val); i++) in dmar_fault_dump_ptes()
754 pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); in dmar_fault_dump_ptes()
760 level = agaw_to_level((pte->val[0] >> 2) & 0x7); in dmar_fault_dump_ptes()
761 pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
806 if (cmpxchg64(&pte->val, 0ULL, pteval)) in pfn_to_dma_pte()
941 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
944 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
1032 domain->pgd, 0, start_pfn, last_pfn, freelist); in domain_unmap()
1035 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
1057 return 0; in iommu_alloc_root_entry()
1088 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1090 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1091 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1117 u64 val = 0; in __iommu_flush_context()
1132 pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n", in __iommu_flush_context()
1153 u64 val = 0, val_iva = 0; in __iommu_flush_iotlb()
1170 pr_warn("%s: Unexpected iotlb invalidation type 0x%llx\n", in __iommu_flush_iotlb()
1191 if (DMA_TLB_IAIG(val) == 0) in __iommu_flush_iotlb()
1247 * IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
1251 #define BUGGY_QAT_DEVID_MASK 0x4940
1257 if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK) in dev_needs_extra_dtlb_flush()
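Masking with 0xfffc works because 0x4940 through 0x4943 differ only in their two low bits, so all four buggy QAT device IDs collapse onto BUGGY_QAT_DEVID_MASK while 0x4944 and above fall through. A worked check (helper name is illustrative):

	/* 0x4940..0x4943 & 0xfffc == 0x4940; 0x4944 & 0xfffc == 0x4944. */
	static bool example_is_buggy_qat(struct pci_dev *pdev)
	{
		return (pdev->device & 0xfffc) == BUGGY_QAT_DEVID_MASK;
	}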
1298 info->ats_enabled = 0; in iommu_disable_pci_caps()
1304 info->pasid_enabled = 0; in iommu_disable_pci_caps()
1402 iommu->flush.flush_iotlb(iommu, did, 0, 0, in __iommu_flush_iotlb_psi()
1447 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1485 __iommu_flush_dev_iotlb(device_info, 0, in parent_domain_flush()
1503 domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1505 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1509 iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH); in intel_flush_iotlb_all()
1513 parent_domain_flush(dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1588 * with domain-id 0, hence we need to pre-allocate it. We also in iommu_init_domains()
1589 * use domain-id 0 as a marker for non-allocated domain-id, so in iommu_init_domains()
1592 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1595 * VT-d spec rev3.0 (section 6.2.3.1) requires that each pasid in iommu_init_domains()
1604 return 0; in iommu_init_domains()
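Because domain-id 0 both tags invalid translations under caching mode and serves as the "non-allocated" marker, the allocator must never hand it out; the set_bit(0, ...) above guarantees that. A minimal sketch of the resulting allocation pattern, with illustrative names:

	/* Illustrative: allocate a domain id from the bitmap; id 0 stays reserved. */
	static int example_alloc_domain_id(unsigned long *ids, unsigned int ndoms)
	{
		int id = find_first_zero_bit(ids, ndoms);

		if (id >= ndoms)
			return -ENOSPC;
		set_bit(id, ids);
		return id;	/* never 0: bit 0 was pre-set */
	}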
1701 return 0; in domain_attach_iommu()
1724 return 0; in domain_attach_iommu()
1740 if (--info->refcnt == 0) { in domain_detach_iommu()
1755 if (r == 0) in guestwidth_to_adjustwidth()
1769 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); in domain_exit()
1791 return 0; in context_get_sm_pds()
1820 ret = 0; in domain_context_mapping_one()
1841 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
1917 * domain #0, which we have to flush: in domain_context_mapping_one()
1920 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
1924 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
1929 ret = 0; in domain_context_mapping_one()
1950 alias & 0xff); in domain_context_mapping_cb()
2036 0, 0); in switch_to_super_page()
2039 lvl_pages, 0); in switch_to_super_page()
2055 unsigned int largepage_lvl = 0; in __domain_mapping()
2056 unsigned long lvl_pages = 0; in __domain_mapping()
2063 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) in __domain_mapping()
2083 while (nr_pages > 0) { in __domain_mapping()
2116 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); in __domain_mapping()
2119 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", in __domain_mapping()
2154 return 0; in __domain_mapping()
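The cmpxchg64_local() near line 2116 installs a PTE only if the slot is still clear, so concurrent mappers serialize without a lock; a non-zero return means another path won the race and the pr_crit() above fires. A compact sketch of the pattern under that assumption:

	/* Publish pteval only if the PTE is still empty; old != 0 means we raced. */
	u64 old = cmpxchg64_local(&pte->val, 0ULL, pteval);
	if (old)
		pr_crit("PTE already set (to %llx not %llx)\n", old, pteval);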
2167 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2192 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2196 0, in domain_context_clear_one()
2197 0, in domain_context_clear_one()
2200 __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); in domain_context_clear_one()
2210 int flags = 0; in domain_setup_first_level()
2277 return 0; in si_domain_init()
2314 return 0; in si_domain_init()
2362 return 0; in dmar_domain_attach_device()
2403 * - 0: both identity and dynamic domains work for this device
2417 return 0; in device_def_domain_type()
2460 int tbl_idx, pos = 0, idx, devfn, ret = 0, did; in copy_context_table()
2469 for (devfn = 0; devfn < 256; devfn++) { in copy_context_table()
2473 if (idx == 0) { in copy_context_table()
2485 ret = 0; in copy_context_table()
2486 if (devfn < 0x80) in copy_context_table()
2492 if (ext && devfn == 0) { in copy_context_table()
2494 devfn = 0x7f; in copy_context_table()
2511 ret = 0; in copy_context_table()
2521 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2581 for (bus = 0; bus < 256; bus++) { in copy_translation_tables()
2594 for (bus = 0; bus < 256; bus++) { in copy_translation_tables()
2616 ret = 0; in copy_translation_tables()
2701 hw_pass_through = 0; in init_dmars()
2716 dmar_map_gfx = 0; in init_dmars()
2766 return 0; in init_dmars()
2849 return 0; in init_iommu_hw()
2858 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2860 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2889 return 0; in iommu_suspend()
2945 return 0; in rmrr_sanity_check()
2982 return 0; in dmar_parse_one_rmrr()
3001 if (memcmp(atsr, tmp, atsr->header.length) == 0) in dmar_find_atsr()
3014 return 0; in dmar_parse_one_atsr()
3019 return 0; in dmar_parse_one_atsr()
3032 atsru->include_all = atsr->flags & 0x1; in dmar_parse_one_atsr()
3045 return 0; in dmar_parse_one_atsr()
3067 return 0; in dmar_release_one_atsr()
3080 return 0; in dmar_check_one_atsr()
3088 return 0; in dmar_check_one_atsr()
3103 if (memcmp(satc, tmp, satc->header.length) == 0) in dmar_find_satc()
3116 return 0; in dmar_parse_one_satc()
3121 return 0; in dmar_parse_one_satc()
3129 satcu->atc_required = satc->flags & 0x1; in dmar_parse_one_satc()
3139 return 0; in dmar_parse_one_satc()
3158 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3171 if (ret == 0) in intel_iommu_add()
3184 return 0; in intel_iommu_add()
3205 return 0; in intel_iommu_add()
3216 int ret = 0; in dmar_iommu_hotplug()
3220 return 0; in dmar_iommu_hotplug()
3311 return 0; in dmar_ats_supported()
3330 ret = 0; in dmar_ats_supported()
3348 return 0; in dmar_iommu_notify_scope_dev()
3358 if (ret < 0) in dmar_iommu_notify_scope_dev()
3376 if (ret > 0) in dmar_iommu_notify_scope_dev()
3378 else if (ret < 0) in dmar_iommu_notify_scope_dev()
3393 if (ret > 0) in dmar_iommu_notify_scope_dev()
3395 else if (ret < 0) in dmar_iommu_notify_scope_dev()
3404 return 0; in dmar_iommu_notify_scope_dev()
3438 list_empty(&freelist), 0); in intel_iommu_memory_notifier()
3450 .priority = 0
3577 return 0; in platform_optin_force_iommu()
3589 dmar_disabled = 0; in platform_optin_force_iommu()
3590 no_iommu = 0; in platform_optin_force_iommu()
3601 int i, ret = 0; in probe_acpi_namespace_devices()
3627 return 0; in probe_acpi_namespace_devices()
3633 return 0; in tboot_force_iommu()
3638 dmar_disabled = 0; in tboot_force_iommu()
3639 no_iommu = 0; in tboot_force_iommu()
3664 if (dmar_dev_scope_init() < 0) { in intel_iommu_init()
3771 return 0; in intel_iommu_init()
3783 domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_clear_one_cb()
3784 return 0; in domain_context_clear_one_cb()
3867 domain->iommu_superpage = 0; in md_domain_init()
3868 domain->max_addr = 0; in md_domain_init()
3875 return 0; in md_domain_init()
3882 return 0; in blocking_domain_attach_dev()
3912 domain->geometry.aperture_start = 0; in intel_iommu_domain_alloc()
4030 return 0; in prepare_domain_attach_device()
4055 int prot = 0; in intel_iommu_map()
4113 int level = 0; in intel_iommu_unmap()
4119 return 0; in intel_iommu_unmap()
4170 list_empty(&gather->freelist), 0); in intel_iommu_tlb_sync()
4183 int level = 0; in intel_iommu_iova_to_phys()
4184 u64 phys = 0; in intel_iommu_iova_to_phys()
4312 * treated as reserved, which should be set to 0. in intel_iommu_probe_device()
4322 if (features >= 0) in intel_iommu_probe_device()
4362 iommu_setup_dma_ops(dev, 0, U64_MAX); in intel_iommu_probe_finalize()
4407 reg = iommu_alloc_resv_region(0, 1UL << 24, prot, in intel_iommu_get_resv_regions()
4418 0, IOMMU_RESV_MSI, GFP_KERNEL); in intel_iommu_get_resv_regions()
4457 return 0; in intel_iommu_enable_sva()
4463 return 0; in intel_iommu_enable_sva()
4504 return 0; in intel_iommu_enable_iopf()
4531 info->pri_enabled = 0; in intel_iommu_disable_iopf()
4541 return 0; in intel_iommu_disable_iopf()
4567 return 0; in intel_iommu_dev_disable_feat()
4609 return 0; in intel_iommu_iotlb_sync_map()
4621 domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0); in intel_iommu_remove_dev_pasid()
4706 return 0; in intel_iommu_set_dev_pasid()
4739 int ret = 0; in device_set_dirty_tracking()
4767 return 0; in parent_domain_set_dirty_tracking()
4804 return 0; in intel_iommu_set_dirty_tracking()
4833 int lvl = 0; in intel_iommu_read_and_clear_dirty()
4848 return 0; in intel_iommu_read_and_clear_dirty()
4896 dmar_map_gfx = 0; in quirk_iommu_igfx()
4900 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
4901 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
4902 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
4903 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
4904 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
4905 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
4906 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
4909 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
4910 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
4911 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
4912 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
4913 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
4914 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
4915 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
4916 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
4917 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
4918 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
4919 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
4920 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
4921 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
4922 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
4923 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
4924 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
4925 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
4926 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
4927 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
4928 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
4929 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
4930 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
4931 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
4932 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
4947 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4948 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4949 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4950 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4951 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4952 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4953 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4955 #define GGC 0x52
4956 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4957 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4958 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4959 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4960 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4961 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4962 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4963 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
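GGC is the graphics control register at config offset 0x52; bits 11:8 encode how much memory the BIOS set aside for the GTT, and the *_VT encodings mark an allocation usable under VT-d. A sketch of the decode that the Calpella quirk below relies on (function name is illustrative):

	static void example_check_vt_gtt(struct pci_dev *dev)
	{
		unsigned short ggc;

		if (pci_read_config_word(dev, GGC, &ggc))
			return;

		/* No VT-enabled GTT allocation: translating the GPU is unsafe. */
		if (!(ggc & GGC_MEMORY_VT_ENABLED))
			pci_info(dev, "BIOS left no VT-enabled GTT allocation\n");
	}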
4977 dmar_map_gfx = 0; in quirk_calpella_no_shadow_gtt()
4984 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4985 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4986 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4987 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4996 ver = (dev->device >> 8) & 0xff; in quirk_igfx_skip_te_disable()
4997 if (ver != 0x45 && ver != 0x46 && ver != 0x4c && in quirk_igfx_skip_te_disable()
4998 ver != 0x4e && ver != 0x8a && ver != 0x98 && in quirk_igfx_skip_te_disable()
4999 ver != 0x9a && ver != 0xa7 && ver != 0x7d) in quirk_igfx_skip_te_disable()
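The quirk keys off the high byte of the PCI device ID as a coarse display-generation check; a worked example with a hypothetical ID:

	/* Hypothetical device 0x9a49: (0x9a49 >> 8) & 0xff == 0x9a -> listed above. */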
5023 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); in check_tylersburg_isoch()
5037 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); in check_tylersburg_isoch()
5046 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { in check_tylersburg_isoch()
5058 vtisochctrl &= 0x1c; in check_tylersburg_isoch()
5061 if (vtisochctrl == 0x10) in check_tylersburg_isoch()
5124 #define ecmd_get_status_code(res) (((res) & 0xff) >> 1)
5134 * - 0: Command successful without any error;
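ecmd_get_status_code() extracts bits 7:1 of the ECMD response (bit 0 carries the in-progress flag), so a raw value of 0 decodes to status 0, i.e. success. A worked example with a hypothetical response value:

	u64 res = 0x4;				/* hypothetical ECRSP value */
	int status = ecmd_get_status_code(res);	/* (0x4 & 0xff) >> 1 == 2 */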