Lines matching 0xfee00000 (query tokenized as +full:0 +full:xfee00000)

40 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
42 #define IOAPIC_RANGE_START (0xfee00000)
43 #define IOAPIC_RANGE_END (0xfeefffff)
44 #define IOVA_START_ADDR (0x1000)
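
The 0xfee00000-0xfeefffff window is the x86 interrupt/MSI address range, which must never be handed out as IOVA space. A minimal sketch of publishing it as a software-reserved MSI region, assuming the generic iommu_alloc_resv_region() helper (the driver does this in intel_iommu_get_resv_regions(), quoted further down); the function name here is illustrative:

#include <linux/iommu.h>

/* Sketch: report the IOAPIC/MSI window as a reserved region so the
 * IOVA allocator never maps it; mirrors the shape of
 * intel_iommu_get_resv_regions() below. */
static void example_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *reg;

	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI, GFP_KERNEL);
	if (reg)
		list_add_tail(&reg->list, head);
}
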
64 static int force_on = 0;
77 return 0; in root_entry_lctp()
89 return 0; in root_entry_uctp()
106 return 0; in device_rid_cmp_key()
156 return 0; in device_rbtree_insert()
207 int intel_iommu_enabled = 0;
246 dmar_disabled = 0; in intel_iommu_setup()
263 intel_iommu_superpage = 0; in intel_iommu_setup()
269 intel_iommu_sm = 0; in intel_iommu_setup()
302 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
322 for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { in __iommu_calculate_agaw()
357 unsigned long bitmap = 0; in domain_super_pgsize_bitmap()
387 if (devfn >= 0x80) { in iommu_context_addr()
388 devfn -= 0x80; in iommu_context_addr()
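
Context for the devfn rebase above: each root entry carries two context-table pointers, lower (root_entry_lctp()) and upper (root_entry_uctp()), each covering 128 device functions. A hypothetical helper capturing that split:

#include <linux/types.h>

/* Hypothetical helper: devfn 0x00-0x7f indexes the lower context
 * table, devfn 0x80-0xff the upper one after subtracting the
 * 0x80 base, as done in iommu_context_addr() above. */
static inline bool devfn_in_upper_ctx_table(u8 devfn, u8 *index)
{
	*index = devfn & 0x7f;	/* offset within one 128-entry table */
	return devfn >= 0x80;
}
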
450 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); in quirk_ioat_snb_local_iommu()
456 vtbar &= 0xffff0000; in quirk_ioat_snb_local_iommu()
458 /* we know that this IOMMU should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
460 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { in quirk_ioat_snb_local_iommu()
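
The quirk's locality test, condensed: the VT-d base address lives in the upper 16 bits of config dword 0xb0 on device 00.0 of the IOAT's bus, and the matching DRHD register window is expected exactly 0xa000 above it. A sketch (function name illustrative):

#include <linux/pci.h>

/* Condensed form of the check in quirk_ioat_snb_local_iommu():
 * read the VT-d BAR, mask it to 64K alignment, and require the
 * DRHD register base at vtbar + 0xa000. */
static bool example_drhd_is_local(struct pci_dev *pdev, u64 drhd_reg_base)
{
	u32 vtbar;

	if (pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar))
		return false;
	vtbar &= 0xffff0000;	/* BAR is 64K-aligned; low bits are other fields */
	return drhd_reg_base - vtbar == 0xa000;
}
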
492 u16 segment = 0; in device_lookup_iommu()
571 for (i = 0; i < ROOT_ENTRY_NR; i++) { in free_context_table()
572 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
579 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
599 pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); in pgtable_walk()
622 u8 devfn = source_id & 0xff; in dmar_fault_dump_ptes()
626 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
636 pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n", in dmar_fault_dump_ptes()
639 pr_info("root entry: 0x%016llx", rt_entry->lo); in dmar_fault_dump_ptes()
642 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
648 pr_info("context entry: hi 0x%016llx, low 0x%016llx\n", in dmar_fault_dump_ptes()
676 pr_info("pasid dir entry: 0x%016llx\n", pde->val); in dmar_fault_dump_ptes()
686 for (i = 0; i < ARRAY_SIZE(pte->val); i++) in dmar_fault_dump_ptes()
687 pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); in dmar_fault_dump_ptes()
698 level = agaw_to_level((pte->val[0] >> 2) & 0x7); in dmar_fault_dump_ptes()
699 pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
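
The shift-and-mask above pulls the address-width (AW) field out of bits 4:2 of the PASID-table entry's first qword; agaw_to_level() then maps that to a paging level. A worked decode, assuming agaw_to_level() keeps its usual (agaw + 2) mapping:

#include <linux/types.h>

/* Worked decode of the line above, assuming agaw_to_level() is the
 * usual (agaw + 2) mapping: AW 1 -> 3-level, AW 2 -> 4-level (48-bit),
 * AW 3 -> 5-level (57-bit). */
static inline int example_pasid_entry_level(u64 pasid_entry_lo)
{
	int aw = (pasid_entry_lo >> 2) & 0x7;	/* bits 4:2: address width */

	return aw + 2;
}
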
745 tmp = 0ULL; in pfn_to_dma_pte()
881 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
884 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
972 domain->pgd, 0, start_pfn, last_pfn, freelist); in domain_unmap()
975 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
997 return 0; in iommu_alloc_root_entry()
1028 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1030 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1031 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1057 u64 val = 0; in __iommu_flush_context()
1072 pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n", in __iommu_flush_context()
1092 u64 val = 0, val_iva = 0; in __iommu_flush_iotlb()
1109 pr_warn("%s: Unexpected iotlb invalidation type 0x%llx\n", in __iommu_flush_iotlb()
1130 if (DMA_TLB_IAIG(val) == 0) in __iommu_flush_iotlb()
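
For context on the check above: DMA_TLB_IAIG() extracts the "actual invalidation granularity" field that hardware writes back after a flush, and 0 is not a valid granularity, hence the error path. A hypothetical decode, with encodings per the VT-d IOTLB register description:

#include <linux/types.h>

/* Hypothetical decode of the write-back granularity checked above. */
static const char *example_iaig_name(u64 iaig)
{
	switch (iaig) {
	case 1: return "global";
	case 2: return "domain-selective";
	case 3: return "domain-page-selective";
	default: return "invalidation not performed";	/* error path above */
	}
}
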
1160 * IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
1164 #define BUGGY_QAT_DEVID_MASK 0x4940
1170 if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK) in dev_needs_extra_dtlb_flush()
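
Clearing bits 1:0 with 0xfffc folds the four IDs 0x4940-0x4943 onto 0x4940, so a single compare covers the whole buggy range. A worked check:

#include <linux/types.h>

/* Worked example: 0x4940 & 0xfffc == 0x4941 & 0xfffc == 0x4942 & 0xfffc
 * == 0x4943 & 0xfffc == 0x4940, while 0x4944 & 0xfffc == 0x4944, so
 * exactly the four QAT device IDs match BUGGY_QAT_DEVID_MASK (0x4940). */
static inline bool example_is_buggy_qat(u16 device)
{
	return (device & 0xfffc) == BUGGY_QAT_DEVID_MASK;
}
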
1197 info->ats_enabled = 0; in iommu_disable_pci_ats()
1228 info->pri_enabled = 0; in iommu_disable_pci_pri()
1308 * with domain-id 0, hence we need to pre-allocate it. We also in iommu_init_domains()
1309 * use domain-id 0 as a marker for non-allocated domain-id, so in iommu_init_domains()
1312 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1315 * VT-d spec rev 3.0 (section 6.2.3.1) requires that each pasid in iommu_init_domains()
1324 return 0; in iommu_init_domains()
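
Pre-setting bit 0 means any bitmap search for a free domain-id starts at 1, letting 0 double as the "unallocated" marker. A hypothetical allocator illustrating the convention (the driver's real allocation path differs):

#include <linux/bitops.h>
#include <linux/errno.h>

/* Hypothetical allocator: with bit 0 reserved at init time, the id
 * returned here is always >= 1, so domain-id 0 can safely mean
 * "no domain assigned". */
static int example_alloc_domain_id(unsigned long *ids, unsigned int ndoms)
{
	unsigned int id = find_first_zero_bit(ids, ndoms);

	if (id >= ndoms)
		return -ENOSPC;
	set_bit(id, ids);
	return id;
}
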
1387 return 0; in domain_attach_iommu()
1399 return 0; in domain_attach_iommu()
1421 return 0; in domain_attach_iommu()
1440 if (--info->refcnt == 0) { in domain_detach_iommu()
1454 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); in domain_exit()
1493 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in copied_context_tear_down()
1504 * domain #0, which we have to flush:
1510 iommu->flush.flush_context(iommu, 0, in context_present_cache_flush()
1514 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in context_present_cache_flush()
1541 ret = 0; in domain_context_mapping_one()
1562 ret = 0; in domain_context_mapping_one()
1578 PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapping_cb()
1599 return 0; in domain_context_mapping()
1651 end_pfn << VTD_PAGE_SHIFT, 0); in switch_to_super_page()
1667 unsigned int largepage_lvl = 0; in __domain_mapping()
1668 unsigned long lvl_pages = 0; in __domain_mapping()
1675 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) in __domain_mapping()
1695 while (nr_pages > 0) { in __domain_mapping()
1728 tmp = 0ULL; in __domain_mapping()
1731 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", in __domain_mapping()
1766 return 0; in __domain_mapping()
1776 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
1832 int level, flags = 0; in domain_setup_first_level()
1867 return 0; in dmar_domain_attach_device()
1885 return 0; in dmar_domain_attach_device()
1940 return 0; in device_def_domain_type()
1983 int tbl_idx, pos = 0, idx, devfn, ret = 0, did; in copy_context_table()
1992 for (devfn = 0; devfn < 256; devfn++) { in copy_context_table()
1996 if (idx == 0) { in copy_context_table()
2008 ret = 0; in copy_context_table()
2009 if (devfn < 0x80) in copy_context_table()
2015 if (ext && devfn == 0) { in copy_context_table()
2017 devfn = 0x7f; in copy_context_table()
2034 ret = 0; in copy_context_table()
2044 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2104 for (bus = 0; bus < 256; bus++) { in copy_translation_tables()
2117 for (bus = 0; bus < 256; bus++) { in copy_translation_tables()
2139 ret = 0; in copy_translation_tables()
2271 return 0; in init_dmars()
2350 return 0; in init_iommu_hw()
2359 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2361 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2390 return 0; in iommu_suspend()
2446 return 0; in rmrr_sanity_check()
2483 return 0; in dmar_parse_one_rmrr()
2502 if (memcmp(atsr, tmp, atsr->header.length) == 0) in dmar_find_atsr()
2515 return 0; in dmar_parse_one_atsr()
2520 return 0; in dmar_parse_one_atsr()
2533 atsru->include_all = atsr->flags & 0x1; in dmar_parse_one_atsr()
2546 return 0; in dmar_parse_one_atsr()
2568 return 0; in dmar_release_one_atsr()
2581 return 0; in dmar_check_one_atsr()
2589 return 0; in dmar_check_one_atsr()
2604 if (memcmp(satc, tmp, satc->header.length) == 0) in dmar_find_satc()
2617 return 0; in dmar_parse_one_satc()
2622 return 0; in dmar_parse_one_satc()
2630 satcu->atc_required = satc->flags & 0x1; in dmar_parse_one_satc()
2640 return 0; in dmar_parse_one_satc()
2655 if (ret == 0) in intel_iommu_add()
2668 return 0; in intel_iommu_add()
2688 return 0; in intel_iommu_add()
2699 int ret = 0; in dmar_iommu_hotplug()
2703 return 0; in dmar_iommu_hotplug()
2794 return 0; in dmar_ats_supported()
2813 ret = 0; in dmar_ats_supported()
2831 return 0; in dmar_iommu_notify_scope_dev()
2841 if (ret < 0) in dmar_iommu_notify_scope_dev()
2859 if (ret > 0) in dmar_iommu_notify_scope_dev()
2861 else if (ret < 0) in dmar_iommu_notify_scope_dev()
2876 if (ret > 0) in dmar_iommu_notify_scope_dev()
2878 else if (ret < 0) in dmar_iommu_notify_scope_dev()
2887 return 0; in dmar_iommu_notify_scope_dev()
3017 return 0; in platform_optin_force_iommu()
3029 dmar_disabled = 0; in platform_optin_force_iommu()
3030 no_iommu = 0; in platform_optin_force_iommu()
3041 int i, ret = 0; in probe_acpi_namespace_devices()
3069 return 0; in probe_acpi_namespace_devices()
3075 return 0; in tboot_force_iommu()
3080 dmar_disabled = 0; in tboot_force_iommu()
3081 no_iommu = 0; in tboot_force_iommu()
3106 if (dmar_dev_scope_init() < 0) { in intel_iommu_init()
3215 return 0; in intel_iommu_init()
3227 domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_clear_one_cb()
3228 return 0; in domain_context_clear_one_cb()
3286 return 0; in blocking_domain_attach_dev()
3304 return 0; in iommu_superpage_capability()
3356 domain->domain.geometry.aperture_start = 0; in paging_domain_alloc()
3481 return 0; in paging_domain_compatible()
3504 int prot = 0; in intel_iommu_map()
3562 int level = 0; in intel_iommu_unmap()
3568 return 0; in intel_iommu_unmap()
3615 int level = 0; in intel_iommu_iova_to_phys()
3616 u64 phys = 0; in intel_iommu_iova_to_phys()
3744 * treated as reserved, which should be set to 0. in intel_iommu_probe_device()
3754 if (features >= 0) in intel_iommu_probe_device()
3829 info->pasid_enabled = 0; in intel_iommu_release_device()
3888 reg = iommu_alloc_resv_region(0, 1UL << 24, prot, in intel_iommu_get_resv_regions()
3899 0, IOMMU_RESV_MSI, GFP_KERNEL); in intel_iommu_get_resv_regions()
3923 return 0; in intel_iommu_enable_iopf()
3932 return 0; in intel_iommu_enable_iopf()
3957 return 0; in intel_iommu_dev_enable_feat()
3970 return 0; in intel_iommu_dev_disable_feat()
3973 return 0; in intel_iommu_dev_disable_feat()
4009 return 0; in intel_iommu_iotlb_sync_map()
4056 return 0; in blocking_domain_set_dev_pasid()
4139 return 0; in intel_iommu_set_dev_pasid()
4171 int ret = 0; in device_set_dirty_tracking()
4199 return 0; in parent_domain_set_dirty_tracking()
4236 return 0; in intel_iommu_set_dirty_tracking()
4265 int lvl = 0; in intel_iommu_read_and_clear_dirty()
4280 return 0; in intel_iommu_read_and_clear_dirty()
4303 return 0; in context_setup_pass_through()
4323 return 0; in context_setup_pass_through()
4330 return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff); in context_setup_pass_through_cb()
4353 return 0; in identity_domain_attach_dev()
4379 return 0; in identity_domain_set_dev_pasid()
4434 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
4435 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
4436 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
4437 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
4438 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
4439 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
4440 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
4443 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
4446 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
4447 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
4448 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
4449 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
4450 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
4451 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
4452 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
4453 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
4454 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
4455 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
4456 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
4457 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
4458 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
4459 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
4460 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
4461 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
4462 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
4463 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
4464 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
4465 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
4466 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
4467 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
4468 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
4469 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
4484 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4485 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4486 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4487 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4488 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4489 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4490 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4492 #define GGC 0x52
4493 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4494 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4495 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4496 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4497 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4498 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4499 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4500 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
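
GGC is a 16-bit config register at offset 0x52 on these IGD chipsets; bits 11:8 encode how much stolen memory the BIOS set aside for the GTT under VT-d. A sketch of testing it, modeled on quirk_calpella_no_shadow_gtt() (using the standard PCI config accessor):

#include <linux/pci.h>

/* Sketch, modeled on quirk_calpella_no_shadow_gtt(): if the BIOS
 * allocated no VT-d shadow GTT space, identity-mapping the IGD is
 * the only safe option. Error handling is simplified. */
static void example_check_shadow_gtt(struct pci_dev *dev)
{
	u16 ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;
	if (!(ggc & GGC_MEMORY_VT_ENABLED))
		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
}
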
4521 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4522 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4523 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4532 ver = (dev->device >> 8) & 0xff; in quirk_igfx_skip_te_disable()
4533 if (ver != 0x45 && ver != 0x46 && ver != 0x4c && in quirk_igfx_skip_te_disable()
4534 ver != 0x4e && ver != 0x8a && ver != 0x98 && in quirk_igfx_skip_te_disable()
4535 ver != 0x9a && ver != 0xa7 && ver != 0x7d) in quirk_igfx_skip_te_disable()
4559 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); in check_tylersburg_isoch()
4573 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); in check_tylersburg_isoch()
4582 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { in check_tylersburg_isoch()
4594 vtisochctrl &= 0x1c; in check_tylersburg_isoch()
4597 if (vtisochctrl == 0x10) in check_tylersburg_isoch()
4660 #define ecmd_get_status_code(res) (((res) & 0xff) >> 1)
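
In the enhanced-command response register, bit 0 is the in-progress (IP) flag and bits 7:1 carry the status code, so the macro masks the low byte and shifts the flag away. Worked values, with the bit layout per the VT-d enhanced-command interface:

#include <linux/types.h>

/* Worked decode: ecmd_get_status_code(0x00) == 0 (success) and
 * ecmd_get_status_code(0x02) == 1 (status code 1); callers test the
 * IP bit (bit 0) separately before trusting the status field. */
static inline int example_ecmd_status(u64 res)
{
	return (res & 0xff) >> 1;	/* same as ecmd_get_status_code() */
}
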
4670 * - 0: Command successful without any error;