Lines Matching full:as (Tegra SMMU driver, drivers/iommu/tegra-smmu.c)

277 struct tegra_smmu_as *as; in tegra_smmu_domain_alloc_paging() local
279 as = kzalloc(sizeof(*as), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
280 if (!as) in tegra_smmu_domain_alloc_paging()
283 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; in tegra_smmu_domain_alloc_paging()
285 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO); in tegra_smmu_domain_alloc_paging()
286 if (!as->pd) { in tegra_smmu_domain_alloc_paging()
287 kfree(as); in tegra_smmu_domain_alloc_paging()
291 as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
292 if (!as->count) { in tegra_smmu_domain_alloc_paging()
293 __free_page(as->pd); in tegra_smmu_domain_alloc_paging()
294 kfree(as); in tegra_smmu_domain_alloc_paging()
298 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
299 if (!as->pts) { in tegra_smmu_domain_alloc_paging()
300 kfree(as->count); in tegra_smmu_domain_alloc_paging()
301 __free_page(as->pd); in tegra_smmu_domain_alloc_paging()
302 kfree(as); in tegra_smmu_domain_alloc_paging()
306 spin_lock_init(&as->lock); in tegra_smmu_domain_alloc_paging()
309 as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc_paging()
310 as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc_paging()
311 as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc_paging()
313 return &as->domain; in tegra_smmu_domain_alloc_paging()
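
The hits above cover tegra_smmu_domain_alloc_paging(), which builds an address space in stages: the as struct itself, a DMA-able page-directory page, a per-PDE use-count array, and a PTE page-pointer array, undoing every earlier allocation by hand when a later stage fails (the tegra_smmu_domain_free() hits just below release the same objects). A minimal user-space sketch of that inline-unwind pattern; my_as, my_as_alloc() and the 4096-byte calloc() standing in for alloc_page() are illustrative, not driver code:

#include <stdlib.h>

#define NUM_PDE 1024            /* matches SMMU_NUM_PDE in the listing */

struct my_as {
    void *pd;                   /* page directory (alloc_page() in the driver) */
    unsigned *count;            /* per-PDE use counts */
    void **pts;                 /* per-PDE page-table pointers */
};

static struct my_as *my_as_alloc(void)
{
    struct my_as *as = calloc(1, sizeof(*as));

    if (!as)
        return NULL;

    as->pd = calloc(1, 4096);
    if (!as->pd) {
        free(as);
        return NULL;
    }

    as->count = calloc(NUM_PDE, sizeof(*as->count));
    if (!as->count) {
        free(as->pd);
        free(as);
        return NULL;
    }

    as->pts = calloc(NUM_PDE, sizeof(*as->pts));
    if (!as->pts) {
        free(as->count);
        free(as->pd);
        free(as);
        return NULL;
    }

    return as;
}

int main(void)
{
    struct my_as *as = my_as_alloc();
    return as ? 0 : 1;
}

A goto-based unwind would scale better as stages grow, but with three stages the driver's inline style stays readable.
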
318 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_domain_free() local
322 WARN_ON_ONCE(as->use_count); in tegra_smmu_domain_free()
323 kfree(as->count); in tegra_smmu_domain_free()
324 kfree(as->pts); in tegra_smmu_domain_free()
325 kfree(as); in tegra_smmu_domain_free()
406 struct tegra_smmu_as *as) in tegra_smmu_as_prepare() argument
413 if (as->use_count > 0) { in tegra_smmu_as_prepare()
414 as->use_count++; in tegra_smmu_as_prepare()
418 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, in tegra_smmu_as_prepare()
420 if (dma_mapping_error(smmu->dev, as->pd_dma)) { in tegra_smmu_as_prepare()
426 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { in tegra_smmu_as_prepare()
431 err = tegra_smmu_alloc_asid(smmu, &as->id); in tegra_smmu_as_prepare()
435 smmu_flush_ptc(smmu, as->pd_dma, 0); in tegra_smmu_as_prepare()
436 smmu_flush_tlb_asid(smmu, as->id); in tegra_smmu_as_prepare()
438 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); in tegra_smmu_as_prepare()
439 value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); in tegra_smmu_as_prepare()
443 as->smmu = smmu; in tegra_smmu_as_prepare()
444 as->use_count++; in tegra_smmu_as_prepare()
451 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_prepare()
459 struct tegra_smmu_as *as) in tegra_smmu_as_unprepare() argument
463 if (--as->use_count > 0) { in tegra_smmu_as_unprepare()
468 tegra_smmu_free_asid(smmu, as->id); in tegra_smmu_as_unprepare()
470 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_unprepare()
472 as->smmu = NULL; in tegra_smmu_as_unprepare()
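
tegra_smmu_as_prepare() and tegra_smmu_as_unprepare() bracket the hardware setup with a use count: only the first user maps the page directory for DMA, allocates an ASID and programs SMMU_PTB_ASID/SMMU_PTB_DATA; later users just bump as->use_count, and the teardown in unprepare runs only when the count drops back to zero. A hedged sketch of that first-user/last-user pairing; ctx, hw_setup() and hw_teardown() are placeholders for the driver's real work:

struct ctx {
    int use_count;
    int id;
};

static int hw_setup(struct ctx *c)     { c->id = 1; return 0; } /* placeholder */
static void hw_teardown(struct ctx *c) { c->id = 0; }           /* placeholder */

static int ctx_prepare(struct ctx *c)
{
    int err;

    /* Fast path: already prepared, just take another reference. */
    if (c->use_count > 0) {
        c->use_count++;
        return 0;
    }

    err = hw_setup(c);
    if (err)
        return err;

    c->use_count++;
    return 0;
}

static void ctx_unprepare(struct ctx *c)
{
    /* Only the last user tears the hardware state down. */
    if (--c->use_count > 0)
        return;

    hw_teardown(c);
}

int main(void)
{
    struct ctx c = { 0, 0 };

    if (ctx_prepare(&c) || ctx_prepare(&c))
        return 1;
    ctx_unprepare(&c);  /* count 2 -> 1, hardware stays up */
    ctx_unprepare(&c);  /* count 1 -> 0, hw_teardown() runs */
    return c.use_count;
}
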
482 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_attach_dev() local
490 err = tegra_smmu_as_prepare(smmu, as); in tegra_smmu_attach_dev()
494 tegra_smmu_enable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
504 tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
505 tegra_smmu_as_unprepare(smmu, as); in tegra_smmu_attach_dev()
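
tegra_smmu_attach_dev() enables each ID in the device's fwspec and, if a step fails partway, walks the already-enabled IDs backwards to disable them again before unpreparing the address space. A small sketch of that rollback-on-partial-failure loop; enable_id()/disable_id() stand in for tegra_smmu_enable()/tegra_smmu_disable():

static int enable_id(unsigned id)   { (void)id; return 0; } /* placeholder */
static void disable_id(unsigned id) { (void)id; }           /* placeholder */

static int attach_all(const unsigned *ids, unsigned count)
{
    unsigned index;
    int err;

    for (index = 0; index < count; index++) {
        err = enable_id(ids[index]);
        if (err)
            goto disable;
    }
    return 0;

disable:
    while (index--)             /* undo only what succeeded */
        disable_id(ids[index]);
    return err;
}

int main(void)
{
    unsigned ids[] = { 1, 2, 3 };
    return attach_all(ids, 3);
}
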
516 struct tegra_smmu_as *as; in tegra_smmu_identity_attach() local
526 as = to_smmu_as(domain); in tegra_smmu_identity_attach()
527 smmu = as->smmu; in tegra_smmu_identity_attach()
529 tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_identity_attach()
530 tegra_smmu_as_unprepare(smmu, as); in tegra_smmu_identity_attach()
544 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pde() argument
548 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pde()
549 u32 *pd = page_address(as->pd); in tegra_smmu_set_pde()
556 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, in tegra_smmu_set_pde()
560 smmu_flush_ptc(smmu, as->pd_dma, offset); in tegra_smmu_set_pde()
561 smmu_flush_tlb_section(smmu, as->id, iova); in tegra_smmu_set_pde()
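
tegra_smmu_set_pde() shows the driver's update-then-flush discipline: write the directory entry through the CPU mapping, sync that range to the device with dma_sync_single_range_for_device(), then invalidate the page-table cache (PTC) and the TLB section entry that may still hold the old value. A sketch of the ordering only; the three flush helpers below are empty placeholders for the driver's:

#include <stdint.h>

static void sync_to_device(void)    {} /* dma_sync_single_range_for_device() */
static void flush_ptc(void)         {} /* smmu_flush_ptc()                   */
static void flush_tlb_section(void) {} /* smmu_flush_tlb_section()           */

static void set_pde(uint32_t *pd, unsigned index, uint32_t value)
{
    pd[index] = value;   /* 1. CPU writes the directory entry        */
    sync_to_device();    /* 2. make the write visible to the device  */
    flush_ptc();         /* 3. drop the stale page-table cache line  */
    flush_tlb_section(); /* 4. drop the stale TLB entry for the iova */
}

int main(void)
{
    uint32_t pd[1024] = { 0 };

    set_pde(pd, 5, 0x80000001u);
    return pd[5] == 0x80000001u ? 0 : 1;
}
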
572 static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_pte_lookup() argument
576 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_lookup()
580 pt_page = as->pts[pd_index]; in tegra_smmu_pte_lookup()
584 pd = page_address(as->pd); in tegra_smmu_pte_lookup()
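
tegra_smmu_pte_lookup() resolves an IOVA in two steps: a directory index selects one of the PDEs, and a table index selects a PTE within the page that PDE points to. Assuming the 4 KiB pages and 1024-entry tables that SMMU_NUM_PDE and the 0xffffffff aperture above suggest, a 32-bit IOVA splits as bits 31:22 (PDE), 21:12 (PTE), 11:0 (page offset); the helper names here are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Assumed split: 1024 PDEs x 1024 PTEs x 4 KiB pages = 4 GiB aperture. */
static unsigned pd_index(uint32_t iova) { return iova >> 22; }
static unsigned pt_index(uint32_t iova) { return (iova >> 12) & 0x3ff; }

int main(void)
{
    uint32_t iova = 0x12345678;

    printf("pd=%u pt=%u offset=0x%x\n",
           pd_index(iova), pt_index(iova), iova & 0xfff);
    return 0;
}
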
590 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, in as_get_pte() argument
594 struct tegra_smmu *smmu = as->smmu; in as_get_pte()
596 if (!as->pts[pde]) { in as_get_pte()
613 as->pts[pde] = page; in as_get_pte()
615 tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR | in as_get_pte()
620 u32 *pd = page_address(as->pd); in as_get_pte()
625 return tegra_smmu_pte_offset(as->pts[pde], iova); in as_get_pte()
628 static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova) in tegra_smmu_pte_get_use() argument
632 as->count[pd_index]++; in tegra_smmu_pte_get_use()
635 static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) in tegra_smmu_pte_put_use() argument
638 struct page *page = as->pts[pde]; in tegra_smmu_pte_put_use()
644 if (--as->count[pde] == 0) { in tegra_smmu_pte_put_use()
645 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_put_use()
646 u32 *pd = page_address(as->pd); in tegra_smmu_pte_put_use()
649 tegra_smmu_set_pde(as, iova, 0); in tegra_smmu_pte_put_use()
653 as->pts[pde] = NULL; in tegra_smmu_pte_put_use()
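
tegra_smmu_pte_get_use()/tegra_smmu_pte_put_use() keep a per-PDE count of live mappings; when the count for a section drops to zero, put_use clears the directory entry (the tegra_smmu_set_pde(as, iova, 0) hit above) and frees the PTE page. A user-space sketch of the counting; struct tbl and the calloc()/free() pair stand in for the driver's page handling:

#include <stdint.h>
#include <stdlib.h>

#define NUM_PDE 1024

struct tbl {
    unsigned count[NUM_PDE];
    uint32_t *pts[NUM_PDE];
};

static void pte_get_use(struct tbl *t, unsigned pde)
{
    t->count[pde]++;
}

static void pte_put_use(struct tbl *t, unsigned pde)
{
    /* Last mapping in this section gone: release the second-level
     * table and clear the slot so lookups see it as absent. */
    if (--t->count[pde] == 0) {
        free(t->pts[pde]);
        t->pts[pde] = NULL;
    }
}

int main(void)
{
    static struct tbl t;

    t.pts[3] = calloc(1024, sizeof(uint32_t));
    pte_get_use(&t, 3);
    pte_put_use(&t, 3);
    return t.pts[3] != NULL;    /* expect 0: table was freed */
}
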
657 static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pte() argument
660 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pte()
668 smmu_flush_tlb_group(smmu, as->id, iova); in tegra_smmu_set_pte()
672 static struct page *as_get_pde_page(struct tegra_smmu_as *as, in as_get_pde_page() argument
677 struct page *page = as->pts[pde]; in as_get_pde_page()
689 spin_unlock_irqrestore(&as->lock, *flags); in as_get_pde_page()
694 spin_lock_irqsave(&as->lock, *flags); in as_get_pde_page()
701 if (as->pts[pde]) { in as_get_pde_page()
705 page = as->pts[pde]; in as_get_pde_page()
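
as_get_pde_page() handles allocating a PTE page while the per-AS spinlock is held: it drops the lock (the spin_unlock_irqrestore hit above), allocates, retakes the lock, and then rechecks as->pts[pde] in case another thread installed a page in the window. A pthread sketch of that drop-allocate-recheck pattern; slot and get_slot() are illustrative:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                  /* stands in for as->pts[pde] */

/* Caller holds `lock` on entry, mirroring as_get_pde_page() being
 * entered with the AS spinlock held. */
static void *get_slot(void)
{
    void *page;

    if (slot)                       /* fast path: already populated */
        return slot;

    pthread_mutex_unlock(&lock);    /* can't sleep in the allocator
                                       while holding the lock */
    page = calloc(1, 4096);
    pthread_mutex_lock(&lock);

    if (slot) {                     /* raced: someone else installed
                                       a page while we slept */
        free(page);
        return slot;
    }

    slot = page;
    return slot;
}

int main(void)
{
    void *p;

    pthread_mutex_lock(&lock);
    p = get_slot();
    pthread_mutex_unlock(&lock);
    return p ? 0 : 1;
}
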
716 struct tegra_smmu_as *as = to_smmu_as(domain); in __tegra_smmu_map() local
722 page = as_get_pde_page(as, iova, gfp, flags); in __tegra_smmu_map()
726 pte = as_get_pte(as, iova, &pte_dma, page); in __tegra_smmu_map()
732 tegra_smmu_pte_get_use(as, iova); in __tegra_smmu_map()
742 tegra_smmu_set_pte(as, iova, pte, pte_dma, in __tegra_smmu_map()
752 struct tegra_smmu_as *as = to_smmu_as(domain); in __tegra_smmu_unmap() local
756 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); in __tegra_smmu_unmap()
760 tegra_smmu_set_pte(as, iova, pte, pte_dma, 0); in __tegra_smmu_unmap()
761 tegra_smmu_pte_put_use(as, iova); in __tegra_smmu_unmap()
770 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_map() local
774 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_map()
776 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_map()
787 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_unmap() local
790 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_unmap()
792 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_unmap()
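
tegra_smmu_map()/tegra_smmu_unmap() are thin locking wrappers: take as->lock with spin_lock_irqsave(), call the __-prefixed helper that assumes the lock is held, and release it. A minimal sketch of the convention, with a pthread mutex in place of the irqsave spinlock:

#include <pthread.h>

static pthread_mutex_t as_lock = PTHREAD_MUTEX_INITIALIZER;

/* __do_map() assumes as_lock is held, like __tegra_smmu_map(). */
static int __do_map(int x) { return x; }

static int do_map(int x)
{
    int ret;

    pthread_mutex_lock(&as_lock);
    ret = __do_map(x);
    pthread_mutex_unlock(&as_lock);
    return ret;
}

int main(void) { return do_map(0); }
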
800 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_iova_to_phys() local
805 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); in tegra_smmu_iova_to_phys()
809 pfn = *pte & as->smmu->pfn_mask; in tegra_smmu_iova_to_phys()
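
tegra_smmu_iova_to_phys() recovers the physical address by masking the PFN out of the PTE with as->smmu->pfn_mask, shifting it back to a byte address, and adding the IOVA's page offset. A sketch of that arithmetic; PFN_MASK here is illustrative, since the real mask comes from per-SoC data:

#include <stdint.h>

#define PAGE_SHIFT 12
#define PFN_MASK   0x000fffffu      /* illustrative stand-in for pfn_mask */

static uint64_t pte_to_phys(uint32_t pte, uint32_t iova)
{
    uint64_t pfn = pte & PFN_MASK;

    return (pfn << PAGE_SHIFT) | (iova & ((1u << PAGE_SHIFT) - 1));
}

int main(void)
{
    /* A PTE holding PFN 0x1234 maps IOVA offset 0x678 to 0x1234678. */
    return pte_to_phys(0x1234, 0x678) == 0x1234678 ? 0 : 1;
}
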
972 * the SMMU parent device is the same as the MC, so the reference count in tegra_smmu_of_xlate()