Lines matching the search query "+full:protection +full:-domain" in the Exynos SysMMU IOMMU driver
1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/dma-mapping.h>
38 #define SECT_MASK (~(SECT_SIZE - 1))
39 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
40 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
55 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
62 static short PG_ENT_SHIFT = -1;
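PG_ENT_SHIFT is left at -1 until the hardware version has been probed, presumably because later SysMMU generations store physical addresses right-shifted so that addresses wider than 32 bits still fit into a 32-bit page-table entry. A minimal sketch of that convention (the helper name is hypothetical; the driver's own conversion macros are not part of this listing):

        /* a PTE holds paddr >> PG_ENT_SHIFT; shifting back recovers the
         * physical address (illustrative helper, not from the driver) */
        static inline phys_addr_t ent_to_phys(const sysmmu_pte_t *ent)
        {
                return (phys_addr_t)*ent << PG_ENT_SHIFT;
        }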
98 #define section_offs(iova) (iova & (SECT_SIZE - 1))
100 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
102 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
114 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
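A worked decomposition of a single IOVA through the macros above, assuming SECT_SIZE, LPAGE_SIZE and SPAGE_SIZE are 1 MiB, 64 KiB and 4 KiB, NUM_LV2ENTRIES is 256, and lv1ent_offset() (used further down) is iova >> 20; none of those definitions appear in this listing:

        /* iova = 0x12345678                                               */
        /* lv1ent_offset(iova) = iova >> 20          = 0x123  (Lv1 index)  */
        /* lv2ent_offset(iova) = (iova >> 12) & 0xff = 0x45   (Lv2 index)  */
        /* section_offs(iova)  = iova & 0xfffff      = 0x45678             */
        /* lpage_offs(iova)    = iova & 0xffff       = 0x5678              */
        /* spage_offs(iova)    = iova & 0xfff        = 0x678               */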
152 #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
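A worked decode of the raw version field, assuming the companion MAKE_MMU_VER()/MMU_MAJ_VER()/MMU_MIN_VER() macros used further down place the major number above a 7-bit minor number (their definitions are not shown here):

        /* REG_MMU_VERSION reads back 0x50000000:                        */
        /* MMU_RAW_VER(0x50000000) = (0x50000000 >> 21) & 0x7ff = 0x280  */
        /* major = 0x280 >> 7  = 5                                       */
        /* minor = 0x280 & 0x7f = 0      -> reported as version 5.0      */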
156 /* v1.x - v3.x registers */
203 { REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
204 { REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
206 { REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
207 { REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
208 { REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
209 { REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
216 "MULTI-HIT",
217 "ACCESS PROTECTION",
218 "SECURITY PROTECTION"
224 "ACCESS PROTECTION",
229 * This structure is attached to dev->iommu->priv of the master device
236 struct iommu_domain *domain; /* domain this device is attached to */ member
243 * been attached to this domain and page tables of IO address space defined by
244 * it. It is usually referenced by 'domain' pointer.
252 struct iommu_domain domain; /* generic domain data structure */ member
295 struct exynos_iommu_domain *domain; /* domain we belong to */ member
296 struct list_head domain_node; /* node for domain clients list */
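A rough sketch of how the structures above relate: the per-master owner structure keeps its SysMMU instances on a 'controllers' list, while an exynos_iommu_domain keeps every SysMMU currently attached to it on its 'clients' list, linked through domain_node. The snippet below is illustrative, not taken from the driver; the field names come from the fragments above, and the sysmmu_drvdata type name is assumed:

        /* walk the SysMMU instances attached to an exynos_iommu_domain 'domain' */
        struct sysmmu_drvdata *data;

        list_for_each_entry(data, &domain->clients, domain_node)
                dev_dbg(data->sysmmu, "serving master %s\n",
                        dev_name(data->master));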
308 #define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
317 return -ENXIO; in exynos_sysmmu_v1_get_fault_info()
320 fault->addr = readl(data->sfrbase + finfo->addr_reg); in exynos_sysmmu_v1_get_fault_info()
321 fault->name = finfo->name; in exynos_sysmmu_v1_get_fault_info()
322 fault->type = finfo->type; in exynos_sysmmu_v1_get_fault_info()
334 fault->type = IOMMU_FAULT_READ; in exynos_sysmmu_v5_get_fault_info()
337 fault->type = IOMMU_FAULT_WRITE; in exynos_sysmmu_v5_get_fault_info()
339 itype -= 16; in exynos_sysmmu_v5_get_fault_info()
341 return -ENXIO; in exynos_sysmmu_v5_get_fault_info()
344 fault->name = sysmmu_v5_fault_names[itype]; in exynos_sysmmu_v5_get_fault_info()
345 fault->addr = readl(data->sfrbase + addr_reg); in exynos_sysmmu_v5_get_fault_info()
356 fault->addr = readl(SYSMMU_REG(data, fault_va)); in exynos_sysmmu_v7_get_fault_info()
357 fault->name = sysmmu_v7_fault_names[itype % 4]; in exynos_sysmmu_v7_get_fault_info()
358 fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; in exynos_sysmmu_v7_get_fault_info()
388 /* SysMMU v7: non-VM capable register layout */
422 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
427 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in sysmmu_unblock()
434 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in sysmmu_block()
435 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) in sysmmu_block()
436 --i; in sysmmu_block()
438 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { in sysmmu_block()
456 if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) { in __sysmmu_tlb_invalidate_entry()
464 writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, in __sysmmu_tlb_invalidate_entry()
474 if (MMU_MAJ_VER(data->version) < 5) in __sysmmu_set_ptbase()
485 BUG_ON(clk_prepare_enable(data->clk_master)); in __sysmmu_enable_clocks()
486 BUG_ON(clk_prepare_enable(data->clk)); in __sysmmu_enable_clocks()
487 BUG_ON(clk_prepare_enable(data->pclk)); in __sysmmu_enable_clocks()
488 BUG_ON(clk_prepare_enable(data->aclk)); in __sysmmu_enable_clocks()
493 clk_disable_unprepare(data->aclk); in __sysmmu_disable_clocks()
494 clk_disable_unprepare(data->pclk); in __sysmmu_disable_clocks()
495 clk_disable_unprepare(data->clk); in __sysmmu_disable_clocks()
496 clk_disable_unprepare(data->clk_master); in __sysmmu_disable_clocks()
501 u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0); in __sysmmu_has_capa1()
508 u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1); in __sysmmu_get_vcr()
510 data->has_vcr = capa1 & CAPA1_VCR_ENABLED; in __sysmmu_get_vcr()
519 ver = readl(data->sfrbase + REG_MMU_VERSION); in __sysmmu_get_version()
523 data->version = MAKE_MMU_VER(1, 0); in __sysmmu_get_version()
525 data->version = MMU_RAW_VER(ver); in __sysmmu_get_version()
527 dev_dbg(data->sysmmu, "hardware version: %d.%d\n", in __sysmmu_get_version()
528 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); in __sysmmu_get_version()
530 if (MMU_MAJ_VER(data->version) < 5) { in __sysmmu_get_version()
531 data->variant = &sysmmu_v1_variant; in __sysmmu_get_version()
532 } else if (MMU_MAJ_VER(data->version) < 7) { in __sysmmu_get_version()
533 data->variant = &sysmmu_v5_variant; in __sysmmu_get_version()
537 if (data->has_vcr) in __sysmmu_get_version()
538 data->variant = &sysmmu_v7_vm_variant; in __sysmmu_get_version()
540 data->variant = &sysmmu_v7_variant; in __sysmmu_get_version()
551 dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n", in show_fault_information()
552 dev_name(data->master), in show_fault_information()
553 fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE", in show_fault_information()
554 fault->name, fault->addr); in show_fault_information()
555 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); in show_fault_information()
556 ent = section_entry(phys_to_virt(data->pgtable), fault->addr); in show_fault_information()
557 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); in show_fault_information()
559 ent = page_entry(ent, fault->addr); in show_fault_information()
560 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); in show_fault_information()
569 int ret = -ENOSYS; in exynos_sysmmu_irq()
571 WARN_ON(!data->active); in exynos_sysmmu_irq()
573 spin_lock(&data->lock); in exynos_sysmmu_irq()
574 clk_enable(data->clk_master); in exynos_sysmmu_irq()
577 ret = data->variant->get_fault_info(data, itype, &fault); in exynos_sysmmu_irq()
579 dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype); in exynos_sysmmu_irq()
584 if (data->domain) { in exynos_sysmmu_irq()
585 ret = report_iommu_fault(&data->domain->domain, data->master, in exynos_sysmmu_irq()
596 clk_disable(data->clk_master); in exynos_sysmmu_irq()
597 spin_unlock(&data->lock); in exynos_sysmmu_irq()
606 clk_enable(data->clk_master); in __sysmmu_disable()
608 spin_lock_irqsave(&data->lock, flags); in __sysmmu_disable()
609 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_disable()
610 writel(0, data->sfrbase + REG_MMU_CFG); in __sysmmu_disable()
611 data->active = false; in __sysmmu_disable()
612 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_disable()
621 if (data->version <= MAKE_MMU_VER(3, 1)) in __sysmmu_init_config()
623 else if (data->version <= MAKE_MMU_VER(3, 2)) in __sysmmu_init_config()
628 cfg |= CFG_EAP; /* enable access protection bits check */ in __sysmmu_init_config()
630 writel(cfg, data->sfrbase + REG_MMU_CFG); in __sysmmu_init_config()
637 if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr) in __sysmmu_enable_vid()
640 ctrl = readl(data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
642 writel(ctrl, data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
651 spin_lock_irqsave(&data->lock, flags); in __sysmmu_enable()
652 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
654 __sysmmu_set_ptbase(data, data->pgtable); in __sysmmu_enable()
656 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
657 data->active = true; in __sysmmu_enable()
658 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_enable()
666 clk_disable(data->clk_master); in __sysmmu_enable()
674 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
675 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { in sysmmu_tlb_invalidate_flpdcache()
676 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
678 if (data->version >= MAKE_MMU_VER(5, 0)) in sysmmu_tlb_invalidate_flpdcache()
684 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
686 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
694 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
695 if (data->active) { in sysmmu_tlb_invalidate_entry()
698 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_entry()
705 * because it is set-associative TLB in sysmmu_tlb_invalidate_entry()
706 * with 8-way and 64 sets. in sysmmu_tlb_invalidate_entry()
710 if (MMU_MAJ_VER(data->version) == 2) in sysmmu_tlb_invalidate_entry()
717 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_entry()
719 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
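A worked reading of the set-associative TLB comment above: one invalidate-by-address covers one 4 KiB small page of IOVA, so a 64 KiB large page needs 16 of them and a 1 MiB section would need 256, but with only 64 sets it is sufficient to cap the count at 64 (one per set). The line below matches that arithmetic but is a sketch; the driver's actual computation is not part of this listing, and 'size' stands for the invalidated length in bytes:

        /* one invalidation per 4 KiB page, capped at the TLB's 64 sets */
        unsigned int num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);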
727 struct device *dev = &pdev->dev; in exynos_sysmmu_probe()
733 return -ENOMEM; in exynos_sysmmu_probe()
736 data->sfrbase = devm_ioremap_resource(dev, res); in exynos_sysmmu_probe()
737 if (IS_ERR(data->sfrbase)) in exynos_sysmmu_probe()
738 return PTR_ERR(data->sfrbase); in exynos_sysmmu_probe()
751 data->clk = devm_clk_get_optional(dev, "sysmmu"); in exynos_sysmmu_probe()
752 if (IS_ERR(data->clk)) in exynos_sysmmu_probe()
753 return PTR_ERR(data->clk); in exynos_sysmmu_probe()
755 data->aclk = devm_clk_get_optional(dev, "aclk"); in exynos_sysmmu_probe()
756 if (IS_ERR(data->aclk)) in exynos_sysmmu_probe()
757 return PTR_ERR(data->aclk); in exynos_sysmmu_probe()
759 data->pclk = devm_clk_get_optional(dev, "pclk"); in exynos_sysmmu_probe()
760 if (IS_ERR(data->pclk)) in exynos_sysmmu_probe()
761 return PTR_ERR(data->pclk); in exynos_sysmmu_probe()
763 if (!data->clk && (!data->aclk || !data->pclk)) { in exynos_sysmmu_probe()
765 return -ENOSYS; in exynos_sysmmu_probe()
768 data->clk_master = devm_clk_get_optional(dev, "master"); in exynos_sysmmu_probe()
769 if (IS_ERR(data->clk_master)) in exynos_sysmmu_probe()
770 return PTR_ERR(data->clk_master); in exynos_sysmmu_probe()
772 data->sysmmu = dev; in exynos_sysmmu_probe()
773 spin_lock_init(&data->lock); in exynos_sysmmu_probe()
777 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, in exynos_sysmmu_probe()
778 dev_name(data->sysmmu)); in exynos_sysmmu_probe()
785 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_probe()
796 if (MMU_MAJ_VER(data->version) >= 5) { in exynos_sysmmu_probe()
809 dma_dev = &pdev->dev; in exynos_sysmmu_probe()
813 ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); in exynos_sysmmu_probe()
820 iommu_device_sysfs_remove(&data->iommu); in exynos_sysmmu_probe()
827 struct device *master = data->master; in exynos_sysmmu_suspend()
832 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_suspend()
833 if (&data->domain->domain != &exynos_identity_domain) { in exynos_sysmmu_suspend()
834 dev_dbg(data->sysmmu, "saving state\n"); in exynos_sysmmu_suspend()
837 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_suspend()
845 struct device *master = data->master; in exynos_sysmmu_resume()
850 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_resume()
851 if (&data->domain->domain != &exynos_identity_domain) { in exynos_sysmmu_resume()
852 dev_dbg(data->sysmmu, "restoring state\n"); in exynos_sysmmu_resume()
855 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_resume()
867 { .compatible = "samsung,exynos-sysmmu", },
874 .name = "exynos-sysmmu",
892 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc_paging() local
899 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc_paging()
900 if (!domain) in exynos_iommu_domain_alloc_paging()
903 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); in exynos_iommu_domain_alloc_paging()
904 if (!domain->pgtable) in exynos_iommu_domain_alloc_paging()
907 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); in exynos_iommu_domain_alloc_paging()
908 if (!domain->lv2entcnt) in exynos_iommu_domain_alloc_paging()
913 domain->pgtable[i] = ZERO_LV2LINK; in exynos_iommu_domain_alloc_paging()
915 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, in exynos_iommu_domain_alloc_paging()
918 BUG_ON(handle != virt_to_phys(domain->pgtable)); in exynos_iommu_domain_alloc_paging()
922 spin_lock_init(&domain->lock); in exynos_iommu_domain_alloc_paging()
923 spin_lock_init(&domain->pgtablelock); in exynos_iommu_domain_alloc_paging()
924 INIT_LIST_HEAD(&domain->clients); in exynos_iommu_domain_alloc_paging()
926 domain->domain.geometry.aperture_start = 0; in exynos_iommu_domain_alloc_paging()
927 domain->domain.geometry.aperture_end = ~0UL; in exynos_iommu_domain_alloc_paging()
928 domain->domain.geometry.force_aperture = true; in exynos_iommu_domain_alloc_paging()
930 return &domain->domain; in exynos_iommu_domain_alloc_paging()
933 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_alloc_paging()
935 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_alloc_paging()
937 kfree(domain); in exynos_iommu_domain_alloc_paging()
943 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_domain_free() local
948 WARN_ON(!list_empty(&domain->clients)); in exynos_iommu_domain_free()
950 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_domain_free()
952 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_domain_free()
953 spin_lock(&data->lock); in exynos_iommu_domain_free()
955 data->pgtable = 0; in exynos_iommu_domain_free()
956 data->domain = NULL; in exynos_iommu_domain_free()
957 list_del_init(&data->domain_node); in exynos_iommu_domain_free()
958 spin_unlock(&data->lock); in exynos_iommu_domain_free()
961 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_domain_free()
963 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, in exynos_iommu_domain_free()
967 if (lv1ent_page(domain->pgtable + i)) { in exynos_iommu_domain_free()
968 phys_addr_t base = lv2table_base(domain->pgtable + i); in exynos_iommu_domain_free()
976 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_free()
977 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_free()
978 kfree(domain); in exynos_iommu_domain_free()
985 struct exynos_iommu_domain *domain; in exynos_iommu_identity_attach() local
990 if (owner->domain == identity_domain) in exynos_iommu_identity_attach()
993 domain = to_exynos_domain(owner->domain); in exynos_iommu_identity_attach()
994 pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_identity_attach()
996 mutex_lock(&owner->rpm_lock); in exynos_iommu_identity_attach()
998 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_identity_attach()
999 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_identity_attach()
1000 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_identity_attach()
1002 pm_runtime_put(data->sysmmu); in exynos_iommu_identity_attach()
1005 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_identity_attach()
1006 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_identity_attach()
1007 spin_lock(&data->lock); in exynos_iommu_identity_attach()
1008 data->pgtable = 0; in exynos_iommu_identity_attach()
1009 data->domain = NULL; in exynos_iommu_identity_attach()
1010 list_del_init(&data->domain_node); in exynos_iommu_identity_attach()
1011 spin_unlock(&data->lock); in exynos_iommu_identity_attach()
1013 owner->domain = identity_domain; in exynos_iommu_identity_attach()
1014 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_identity_attach()
1016 mutex_unlock(&owner->rpm_lock); in exynos_iommu_identity_attach()
1035 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_attach_device() local
1038 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_attach_device()
1046 mutex_lock(&owner->rpm_lock); in exynos_iommu_attach_device()
1048 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_attach_device()
1049 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
1050 spin_lock(&data->lock); in exynos_iommu_attach_device()
1051 data->pgtable = pagetable; in exynos_iommu_attach_device()
1052 data->domain = domain; in exynos_iommu_attach_device()
1053 list_add_tail(&data->domain_node, &domain->clients); in exynos_iommu_attach_device()
1054 spin_unlock(&data->lock); in exynos_iommu_attach_device()
1056 owner->domain = iommu_domain; in exynos_iommu_attach_device()
1057 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_attach_device()
1059 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
1060 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_attach_device()
1061 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_attach_device()
1063 pm_runtime_put(data->sysmmu); in exynos_iommu_attach_device()
1066 mutex_unlock(&owner->rpm_lock); in exynos_iommu_attach_device()
1074 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, in alloc_lv2entry() argument
1079 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1088 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); in alloc_lv2entry()
1090 return ERR_PTR(-ENOMEM); in alloc_lv2entry()
1099 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1103 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, in alloc_lv2entry()
1122 spin_lock(&domain->lock); in alloc_lv2entry()
1123 list_for_each_entry(data, &domain->clients, domain_node) in alloc_lv2entry()
1125 spin_unlock(&domain->lock); in alloc_lv2entry()
1132 static int lv1set_section(struct exynos_iommu_domain *domain, in lv1set_section() argument
1139 return -EADDRINUSE; in lv1set_section()
1146 return -EADDRINUSE; in lv1set_section()
1155 spin_lock(&domain->lock); in lv1set_section()
1162 list_for_each_entry(data, &domain->clients, domain_node) in lv1set_section()
1165 spin_unlock(&domain->lock); in lv1set_section()
1175 return -EADDRINUSE; in lv2set_page()
1178 *pgcnt -= 1; in lv2set_page()
1189 memset(pent - i, 0, sizeof(*pent) * i); in lv2set_page()
1190 return -EADDRINUSE; in lv2set_page()
1198 *pgcnt -= SPAGES_PER_LPAGE; in lv2set_page()
1205 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1226 * - Any two consecutive I/O virtual regions must have a hole of size larger
1228 * - Start address of an I/O virtual region must be aligned by 128KiB.
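A small helper illustrating the two constraints above for IOVA allocators built on top of this driver. It is a sketch: the helper itself and the assumption that the required hole is the same 128 KiB figure as the alignment rule (the hole line is truncated in this listing) are not from the driver:

        #include <linux/sizes.h>

        /* hypothetical check: may the region [start, start + size) be used,
         * given that the next allocated region begins at 'next'? */
        static bool exynos_iova_region_ok(dma_addr_t start, size_t size,
                                          dma_addr_t next)
        {
                if (!IS_ALIGNED(start, SZ_128K))        /* start alignment rule    */
                        return false;
                if (next - (start + size) < SZ_128K)    /* hole before next region */
                        return false;
                return true;
        }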
1234 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_map() local
1238 int ret = -ENOMEM; in exynos_iommu_map()
1240 BUG_ON(domain->pgtable == NULL); in exynos_iommu_map()
1243 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_map()
1245 entry = section_entry(domain->pgtable, iova); in exynos_iommu_map()
1248 ret = lv1set_section(domain, entry, iova, paddr, prot, in exynos_iommu_map()
1249 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1253 pent = alloc_lv2entry(domain, entry, iova, in exynos_iommu_map()
1254 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1260 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1269 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_map()
1274 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, in exynos_iommu_tlb_invalidate_entry() argument
1280 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1282 list_for_each_entry(data, &domain->clients, domain_node) in exynos_iommu_tlb_invalidate_entry()
1285 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1292 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_unmap() local
1298 BUG_ON(domain->pgtable == NULL); in exynos_iommu_unmap()
1300 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1302 ent = section_entry(domain->pgtable, iova); in exynos_iommu_unmap()
1334 domain->lv2entcnt[lv1ent_offset(iova)] += 1; in exynos_iommu_unmap()
1352 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; in exynos_iommu_unmap()
1354 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1356 exynos_iommu_tlb_invalidate_entry(domain, iova, size); in exynos_iommu_unmap()
1360 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1371 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_iova_to_phys() local
1376 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1378 entry = section_entry(domain->pgtable, iova); in exynos_iommu_iova_to_phys()
1391 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1402 return ERR_PTR(-ENODEV); in exynos_iommu_probe_device()
1404 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_probe_device()
1410 data->link = device_link_add(dev, data->sysmmu, in exynos_iommu_probe_device()
1416 data = list_first_entry(&owner->controllers, in exynos_iommu_probe_device()
1419 return &data->iommu; in exynos_iommu_probe_device()
1429 list_for_each_entry(data, &owner->controllers, owner_node) in exynos_iommu_release_device()
1430 device_link_del(data->link); in exynos_iommu_release_device()
1436 struct platform_device *sysmmu = of_find_device_by_node(spec->np); in exynos_iommu_of_xlate()
1441 return -ENODEV; in exynos_iommu_of_xlate()
1445 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1446 return -ENODEV; in exynos_iommu_of_xlate()
1452 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1453 return -ENOMEM; in exynos_iommu_of_xlate()
1456 INIT_LIST_HEAD(&owner->controllers); in exynos_iommu_of_xlate()
1457 mutex_init(&owner->rpm_lock); in exynos_iommu_of_xlate()
1458 owner->domain = &exynos_identity_domain; in exynos_iommu_of_xlate()
1462 list_for_each_entry(entry, &owner->controllers, owner_node) in exynos_iommu_of_xlate()
1466 list_add_tail(&data->owner_node, &owner->controllers); in exynos_iommu_of_xlate()
1467 data->master = dev; in exynos_iommu_of_xlate()
1500 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", in exynos_iommu_init()
1504 return -ENOMEM; in exynos_iommu_init()
1511 ret = -ENOMEM; in exynos_iommu_init()