Lines matching "tegra30-mc" in drivers/iommu/tegra-smmu.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
16 #include <linux/dma-mapping.h>
19 #include <soc/tegra/mc.h>
33 struct tegra_mc *mc; member
72 writel(value, smmu->regs + offset); in smmu_writel()
77 return readl(smmu->regs + offset); in smmu_readl()
87 ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
119 /* per-SWGROUP SMMU_*_ASID register */
134 #define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
157 return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1); in iova_pd_index()
162 return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); in iova_pt_index()
168 return (addr & smmu->pfn_mask) == addr; in smmu_dma_addr_valid()
173 return (dma_addr_t)(pde & smmu->pfn_mask) << 12; in smmu_pde_to_dma()
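The helpers above implement the address arithmetic for the two-level page table: a 32-bit IOVA splits into a PDE index and a PTE index, and a PDE stores a page-frame number that smmu_pde_to_dma() shifts back into a DMA address. A standalone sketch of that split, assuming the usual Tegra SMMU geometry (4 KiB pages, 1024 entries per level, shifts of 22 and 12):

    /* Standalone demo of the IOVA split used above; the shift/count
     * values mirror the usual Tegra SMMU geometry and are restated
     * here as assumptions, not quoted from the file. */
    #include <stdint.h>
    #include <stdio.h>

    #define SMMU_PDE_SHIFT 22
    #define SMMU_PTE_SHIFT 12
    #define SMMU_NUM_PDE   1024
    #define SMMU_NUM_PTE   1024

    static unsigned int iova_pd_index(uint32_t iova)
    {
            return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
    }

    static unsigned int iova_pt_index(uint32_t iova)
    {
            return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
    }

    int main(void)
    {
            uint32_t iova = 0x80402000; /* arbitrary example address */

            /* 0x80402000 >> 22 = 513; (0x80402000 >> 12) & 1023 = 2 */
            printf("pd index %u, pt index %u\n",
                   iova_pd_index(iova), iova_pt_index(iova));
            return 0;
    }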
186 offset &= ~(smmu->mc->soc->atom_size - 1); in smmu_flush_ptc()
188 if (smmu->mc->soc->num_address_bits > 32) { in smmu_flush_ptc()
211 if (smmu->soc->num_asids == 4) in smmu_flush_tlb_asid()
226 if (smmu->soc->num_asids == 4) in smmu_flush_tlb_section()
241 if (smmu->soc->num_asids == 4) in smmu_flush_tlb_group()
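All three TLB flush helpers special-case num_asids == 4: on those SoCs the hardware ASID field in the flush register is only two bits wide and sits at a different shift than the seven-bit field used elsewhere. Each helper builds its flush value roughly like this (shift and mask values quoted from memory, illustrative rather than authoritative):

    /* Illustrative sketch of the num_asids branch shared by the
     * smmu_flush_tlb_*() helpers; the exact shifts are assumptions. */
    u32 value;

    if (smmu->soc->num_asids == 4)
            value = (asid & 0x3) << 29;     /* 2-bit ASID field */
    else
            value = (asid & 0x7f) << 24;    /* 7-bit ASID field */

    value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
    smmu_writel(smmu, value, SMMU_TLB_FLUSH);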
259 mutex_lock(&smmu->lock); in tegra_smmu_alloc_asid()
261 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); in tegra_smmu_alloc_asid()
262 if (id >= smmu->soc->num_asids) { in tegra_smmu_alloc_asid()
263 mutex_unlock(&smmu->lock); in tegra_smmu_alloc_asid()
264 return -ENOSPC; in tegra_smmu_alloc_asid()
267 set_bit(id, smmu->asids); in tegra_smmu_alloc_asid()
270 mutex_unlock(&smmu->lock); in tegra_smmu_alloc_asid()
276 mutex_lock(&smmu->lock); in tegra_smmu_free_asid()
277 clear_bit(id, smmu->asids); in tegra_smmu_free_asid()
278 mutex_unlock(&smmu->lock); in tegra_smmu_free_asid()
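tegra_smmu_alloc_asid() and tegra_smmu_free_asid() are a plain bitmap ID allocator serialized by smmu->lock. Pulled out of its surroundings, the pattern looks like this (the struct and function names are hypothetical; the calls are standard kernel API):

    /* Bitmap ID allocator pattern; my_ctx/my_alloc_id/my_free_id are
     * hypothetical names used only for this sketch. */
    struct my_ctx {
            struct mutex lock;
            unsigned long *ids;     /* one bit per allocatable ID */
            unsigned int num_ids;
    };

    static int my_alloc_id(struct my_ctx *ctx, unsigned int *idp)
    {
            unsigned int id;

            mutex_lock(&ctx->lock);

            id = find_first_zero_bit(ctx->ids, ctx->num_ids);
            if (id >= ctx->num_ids) {
                    mutex_unlock(&ctx->lock);
                    return -ENOSPC; /* every ID is in use */
            }

            set_bit(id, ctx->ids);
            *idp = id;

            mutex_unlock(&ctx->lock);
            return 0;
    }

    static void my_free_id(struct my_ctx *ctx, unsigned int id)
    {
            mutex_lock(&ctx->lock);
            clear_bit(id, ctx->ids);
            mutex_unlock(&ctx->lock);
    }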
297 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; in tegra_smmu_domain_alloc()
299 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO); in tegra_smmu_domain_alloc()
300 if (!as->pd) { in tegra_smmu_domain_alloc()
305 as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL); in tegra_smmu_domain_alloc()
306 if (!as->count) { in tegra_smmu_domain_alloc()
307 __free_page(as->pd); in tegra_smmu_domain_alloc()
312 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL); in tegra_smmu_domain_alloc()
313 if (!as->pts) { in tegra_smmu_domain_alloc()
314 kfree(as->count); in tegra_smmu_domain_alloc()
315 __free_page(as->pd); in tegra_smmu_domain_alloc()
320 spin_lock_init(&as->lock); in tegra_smmu_domain_alloc()
323 as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc()
324 as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc()
325 as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc()
327 return &as->domain; in tegra_smmu_domain_alloc()
336 WARN_ON_ONCE(as->use_count); in tegra_smmu_domain_free()
337 kfree(as->count); in tegra_smmu_domain_free()
338 kfree(as->pts); in tegra_smmu_domain_free()
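tegra_smmu_domain_alloc() allocates three things in order: the page directory page (with __GFP_DMA so the PD stays within the SMMU's addressable range), the per-PDE use counts, and the PT pointer array; each failure path frees exactly what was already allocated. The same unwind written with goto labels, as a restructuring sketch of the excerpt above:

    /* Restructuring sketch only; remaining setup (attr, geometry)
     * from the original function is omitted. */
    static struct iommu_domain *sketch_domain_alloc(void)
    {
            struct tegra_smmu_as *as = kzalloc(sizeof(*as), GFP_KERNEL);

            if (!as)
                    return NULL;

            /* __GFP_DMA keeps the PD in 32-bit addressable memory */
            as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
            if (!as->pd)
                    goto free_as;

            as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
            if (!as->count)
                    goto free_pd;

            as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
            if (!as->pts)
                    goto free_count;

            spin_lock_init(&as->lock);
            return &as->domain;

    free_count:
            kfree(as->count);
    free_pd:
            __free_page(as->pd);
    free_as:
            kfree(as);
            return NULL;
    }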
348 for (i = 0; i < smmu->soc->num_swgroups; i++) { in tegra_smmu_find_swgroup()
349 if (smmu->soc->swgroups[i].swgroup == swgroup) { in tegra_smmu_find_swgroup()
350 group = &smmu->soc->swgroups[i]; in tegra_smmu_find_swgroup()
367 value = smmu_readl(smmu, group->reg); in tegra_smmu_enable()
371 smmu_writel(smmu, value, group->reg); in tegra_smmu_enable()
379 for (i = 0; i < smmu->soc->num_clients; i++) { in tegra_smmu_enable()
380 const struct tegra_mc_client *client = &smmu->soc->clients[i]; in tegra_smmu_enable()
382 if (client->swgroup != swgroup) in tegra_smmu_enable()
385 value = smmu_readl(smmu, client->smmu.reg); in tegra_smmu_enable()
386 value |= BIT(client->smmu.bit); in tegra_smmu_enable()
387 smmu_writel(smmu, value, client->smmu.reg); in tegra_smmu_enable()
400 value = smmu_readl(smmu, group->reg); in tegra_smmu_disable()
404 smmu_writel(smmu, value, group->reg); in tegra_smmu_disable()
407 for (i = 0; i < smmu->soc->num_clients; i++) { in tegra_smmu_disable()
408 const struct tegra_mc_client *client = &smmu->soc->clients[i]; in tegra_smmu_disable()
410 if (client->swgroup != swgroup) in tegra_smmu_disable()
413 value = smmu_readl(smmu, client->smmu.reg); in tegra_smmu_disable()
414 value &= ~BIT(client->smmu.bit); in tegra_smmu_disable()
415 smmu_writel(smmu, value, client->smmu.reg); in tegra_smmu_disable()
425 if (as->use_count > 0) { in tegra_smmu_as_prepare()
426 as->use_count++; in tegra_smmu_as_prepare()
430 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, in tegra_smmu_as_prepare()
432 if (dma_mapping_error(smmu->dev, as->pd_dma)) in tegra_smmu_as_prepare()
433 return -ENOMEM; in tegra_smmu_as_prepare()
435 /* We can't handle 64-bit DMA addresses */ in tegra_smmu_as_prepare()
436 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { in tegra_smmu_as_prepare()
437 err = -ENOMEM; in tegra_smmu_as_prepare()
441 err = tegra_smmu_alloc_asid(smmu, &as->id); in tegra_smmu_as_prepare()
445 smmu_flush_ptc(smmu, as->pd_dma, 0); in tegra_smmu_as_prepare()
446 smmu_flush_tlb_asid(smmu, as->id); in tegra_smmu_as_prepare()
448 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); in tegra_smmu_as_prepare()
449 value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); in tegra_smmu_as_prepare()
453 as->smmu = smmu; in tegra_smmu_as_prepare()
454 as->use_count++; in tegra_smmu_as_prepare()
459 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_prepare()
466 if (--as->use_count > 0) in tegra_smmu_as_unprepare()
469 tegra_smmu_free_asid(smmu, as->id); in tegra_smmu_as_unprepare()
471 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_unprepare()
473 as->smmu = NULL; in tegra_smmu_as_unprepare()
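tegra_smmu_as_prepare() maps the page directory for device access exactly once per address space and counts attachments, so tegra_smmu_as_unprepare() only tears down on the last detach. The mapping step follows the standard map, check, validate, unmap-on-error sequence, condensed here as a sketch:

    /* Sketch of the PD mapping step: map the page, bail on a mapping
     * error, and reject DMA addresses wider than the SMMU can store. */
    dma_addr_t pd_dma;

    pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                          DMA_TO_DEVICE);
    if (dma_mapping_error(smmu->dev, pd_dma))
            return -ENOMEM;

    if (!smmu_dma_addr_valid(smmu, pd_dma)) {
            /* address exceeds pfn_mask; a PD entry cannot hold it */
            dma_unmap_page(smmu->dev, pd_dma, SMMU_SIZE_PD,
                           DMA_TO_DEVICE);
            return -ENOMEM;
    }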
481 struct device_node *np = dev->of_node; in tegra_smmu_attach_dev()
486 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, in tegra_smmu_attach_dev()
490 if (args.np != smmu->dev->of_node) { in tegra_smmu_attach_dev()
501 tegra_smmu_enable(smmu, swgroup, as->id); in tegra_smmu_attach_dev()
506 return -ENODEV; in tegra_smmu_attach_dev()
514 struct device_node *np = dev->of_node; in tegra_smmu_detach_dev()
515 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_detach_dev()
519 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, in tegra_smmu_detach_dev()
523 if (args.np != smmu->dev->of_node) { in tegra_smmu_detach_dev()
530 tegra_smmu_disable(smmu, swgroup, as->id); in tegra_smmu_detach_dev()
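Attach and detach both walk the device's "iommus" property; each specifier pairs a phandle to an IOMMU node with one cell (per #iommu-cells = <1>) carrying the swgroup ID, and entries that point at some other IOMMU are skipped. The shape of that loop:

    /* Sketch of the "iommus" walk shared by attach_dev/detach_dev;
     * args.args[0] is the swgroup ID from the one-cell specifier. */
    struct of_phandle_args args;
    unsigned int index = 0;

    while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                       index, &args)) {
            unsigned int swgroup = args.args[0];

            if (args.np != smmu->dev->of_node) {
                    of_node_put(args.np);   /* entry for another IOMMU */
                    index++;
                    continue;
            }

            of_node_put(args.np);
            tegra_smmu_enable(smmu, swgroup, as->id);
            index++;
    }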
540 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pde()
541 u32 *pd = page_address(as->pd); in tegra_smmu_set_pde()
548 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, in tegra_smmu_set_pde()
552 smmu_flush_ptc(smmu, as->pd_dma, offset); in tegra_smmu_set_pde()
553 smmu_flush_tlb_section(smmu, as->id, iova); in tegra_smmu_set_pde()
568 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_lookup()
572 pt_page = as->pts[pd_index]; in tegra_smmu_pte_lookup()
576 pd = page_address(as->pd); in tegra_smmu_pte_lookup()
586 struct tegra_smmu *smmu = as->smmu; in as_get_pte()
588 if (!as->pts[pde]) { in as_get_pte()
591 dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT, in as_get_pte()
593 if (dma_mapping_error(smmu->dev, dma)) { in as_get_pte()
599 dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT, in as_get_pte()
605 as->pts[pde] = page; in as_get_pte()
612 u32 *pd = page_address(as->pd); in as_get_pte()
617 return tegra_smmu_pte_offset(as->pts[pde], iova); in as_get_pte()
624 as->count[pd_index]++; in tegra_smmu_pte_get_use()
630 struct page *page = as->pts[pde]; in tegra_smmu_pte_put_use()
636 if (--as->count[pde] == 0) { in tegra_smmu_pte_put_use()
637 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_put_use()
638 u32 *pd = page_address(as->pd); in tegra_smmu_pte_put_use()
643 dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE); in tegra_smmu_pte_put_use()
645 as->pts[pde] = NULL; in tegra_smmu_pte_put_use()
652 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pte()
657 dma_sync_single_range_for_device(smmu->dev, pte_dma, offset, in tegra_smmu_set_pte()
660 smmu_flush_tlb_group(smmu, as->id, iova); in tegra_smmu_set_pte()
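tegra_smmu_set_pte() is careful about ordering: the CPU store to the PTE must reach memory before the SMMU's page-table cache (PTC) line and the TLB entry covering that IOVA are invalidated, or the hardware could refetch the stale entry. The sequence, condensed as a sketch:

    /* Ordering sketch for a PTE update; PTEs are 32-bit, hence the
     * 4-byte sync. offset_in_page() locates the PTE within its page. */
    unsigned long offset = offset_in_page(pte);

    *pte = val;                                      /* 1. CPU write   */
    dma_sync_single_range_for_device(smmu->dev, pte_dma, offset, 4,
                                     DMA_TO_DEVICE); /* 2. publish     */
    smmu_flush_ptc(smmu, pte_dma, offset);           /* 3. evict PTC   */
    smmu_flush_tlb_group(smmu, as->id, iova);        /* 4. evict TLB   */
    smmu_flush(smmu);                                /* 5. post writes */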
669 struct page *page = as->pts[pde]; in as_get_pde_page()
678 * spinlock needs to be unlocked and re-locked after allocation. in as_get_pde_page()
681 spin_unlock_irqrestore(&as->lock, *flags); in as_get_pde_page()
686 spin_lock_irqsave(&as->lock, *flags); in as_get_pde_page()
693 if (as->pts[pde]) { in as_get_pde_page()
697 page = as->pts[pde]; in as_get_pde_page()
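as_get_pde_page() may need to allocate a page-table page while the AS spinlock is held; when the GFP flags allow sleeping it drops the lock around the allocation, then rechecks whether another CPU installed a PT for the same PDE in the meantime. The drop-allocate-recheck pattern:

    /* Lock-drop-and-recheck pattern from as_get_pde_page();
     * gfpflags_allow_blocking() decides whether the spinlock must be
     * released around alloc_page(). */
    if (gfpflags_allow_blocking(gfp))
            spin_unlock_irqrestore(&as->lock, *flags);

    page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

    if (gfpflags_allow_blocking(gfp))
            spin_lock_irqsave(&as->lock, *flags);

    /* someone else may have installed a PT while the lock was dropped */
    if (as->pts[pde]) {
            if (page)
                    __free_page(page);
            page = as->pts[pde];
    }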
716 return -ENOMEM; in __tegra_smmu_map()
720 return -ENOMEM; in __tegra_smmu_map()
722 /* If we aren't overwriting a pre-existing entry, increment use */ in __tegra_smmu_map()
765 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_map()
767 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_map()
778 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_unmap()
780 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_unmap()
797 pfn = *pte & as->smmu->pfn_mask; in tegra_smmu_iova_to_phys()
805 struct tegra_mc *mc; in tegra_smmu_find() local
811 mc = platform_get_drvdata(pdev); in tegra_smmu_find()
812 if (!mc) in tegra_smmu_find()
815 return mc->smmu; in tegra_smmu_find()
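tegra_smmu_find() turns the IOMMU phandle from "iommus" back into a driver instance: resolve the node to its platform device, fetch the memory controller from drvdata, and return the SMMU embedded in it. The shape, as a sketch:

    /* Shape of tegra_smmu_find(); returns NULL until the MC driver
     * has probed and set its drvdata. */
    static struct tegra_smmu *sketch_smmu_find(struct device_node *np)
    {
            struct platform_device *pdev;
            struct tegra_mc *mc;

            pdev = of_find_device_by_node(np);
            if (!pdev)
                    return NULL;

            mc = platform_get_drvdata(pdev);
            if (!mc)
                    return NULL;

            return mc->smmu;
    }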
821 const struct iommu_ops *ops = smmu->iommu.ops; in tegra_smmu_configure()
824 err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops); in tegra_smmu_configure()
830 err = ops->of_xlate(dev, args); in tegra_smmu_configure()
842 struct device_node *np = dev->of_node; in tegra_smmu_probe_device()
848 while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, in tegra_smmu_probe_device()
873 return ERR_PTR(-ENODEV); in tegra_smmu_probe_device()
875 return &smmu->iommu; in tegra_smmu_probe_device()
888 for (i = 0; i < smmu->soc->num_groups; i++) in tegra_smmu_find_group()
889 for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++) in tegra_smmu_find_group()
890 if (smmu->soc->groups[i].swgroups[j] == swgroup) in tegra_smmu_find_group()
891 return &smmu->soc->groups[i]; in tegra_smmu_find_group()
899 struct tegra_smmu *smmu = group->smmu; in tegra_smmu_group_release()
901 mutex_lock(&smmu->lock); in tegra_smmu_group_release()
902 list_del(&group->list); in tegra_smmu_group_release()
903 mutex_unlock(&smmu->lock); in tegra_smmu_group_release()
916 mutex_lock(&smmu->lock); in tegra_smmu_group_get()
919 list_for_each_entry(group, &smmu->groups, list) in tegra_smmu_group_get()
920 if ((group->swgroup == swgroup) || (soc && group->soc == soc)) { in tegra_smmu_group_get()
921 grp = iommu_group_ref_get(group->group); in tegra_smmu_group_get()
922 mutex_unlock(&smmu->lock); in tegra_smmu_group_get()
926 group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL); in tegra_smmu_group_get()
928 mutex_unlock(&smmu->lock); in tegra_smmu_group_get()
932 INIT_LIST_HEAD(&group->list); in tegra_smmu_group_get()
933 group->swgroup = swgroup; in tegra_smmu_group_get()
934 group->smmu = smmu; in tegra_smmu_group_get()
935 group->soc = soc; in tegra_smmu_group_get()
937 group->group = iommu_group_alloc(); in tegra_smmu_group_get()
938 if (IS_ERR(group->group)) { in tegra_smmu_group_get()
939 devm_kfree(smmu->dev, group); in tegra_smmu_group_get()
940 mutex_unlock(&smmu->lock); in tegra_smmu_group_get()
944 iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release); in tegra_smmu_group_get()
946 iommu_group_set_name(group->group, soc->name); in tegra_smmu_group_get()
947 list_add_tail(&group->list, &smmu->groups); in tegra_smmu_group_get()
948 mutex_unlock(&smmu->lock); in tegra_smmu_group_get()
950 return group->group; in tegra_smmu_group_get()
959 group = tegra_smmu_group_get(smmu, fwspec->ids[0]); in tegra_smmu_device_group()
969 u32 id = args->args[0]; in tegra_smmu_of_xlate()
993 { .compatible = "nvidia,tegra30-ahb", }, in tegra_smmu_ahb_enable()
1007 struct tegra_smmu *smmu = s->private; in tegra_smmu_swgroups_show()
1012 seq_printf(s, "------------------------\n"); in tegra_smmu_swgroups_show()
1014 for (i = 0; i < smmu->soc->num_swgroups; i++) { in tegra_smmu_swgroups_show()
1015 const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i]; in tegra_smmu_swgroups_show()
1019 value = smmu_readl(smmu, group->reg); in tegra_smmu_swgroups_show()
1028 seq_printf(s, "%-9s %-7s %#04x\n", group->name, status, in tegra_smmu_swgroups_show()
1039 struct tegra_smmu *smmu = s->private; in tegra_smmu_clients_show()
1044 seq_printf(s, "--------------------\n"); in tegra_smmu_clients_show()
1046 for (i = 0; i < smmu->soc->num_clients; i++) { in tegra_smmu_clients_show()
1047 const struct tegra_mc_client *client = &smmu->soc->clients[i]; in tegra_smmu_clients_show()
1050 value = smmu_readl(smmu, client->smmu.reg); in tegra_smmu_clients_show()
1052 if (value & BIT(client->smmu.bit)) in tegra_smmu_clients_show()
1057 seq_printf(s, "%-12s %s\n", client->name, status); in tegra_smmu_clients_show()
1067 smmu->debugfs = debugfs_create_dir("smmu", NULL); in tegra_smmu_debugfs_init()
1068 if (!smmu->debugfs) in tegra_smmu_debugfs_init()
1071 debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu, in tegra_smmu_debugfs_init()
1073 debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu, in tegra_smmu_debugfs_init()
1079 debugfs_remove_recursive(smmu->debugfs); in tegra_smmu_debugfs_exit()
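Both debugfs files are plain seq_file single-shot shows; in the kernel they are typically wired up with DEFINE_SHOW_ATTRIBUTE(), which generates the matching _fops from each _show function. A sketch of that wiring:

    /* DEFINE_SHOW_ATTRIBUTE(name) expands to name##_fops around a
     * single_open() of name##_show; mode 0444 matches S_IRUGO. */
    DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);
    DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

    static void sketch_debugfs_init(struct tegra_smmu *smmu)
    {
            smmu->debugfs = debugfs_create_dir("smmu", NULL);

            debugfs_create_file("swgroups", 0444, smmu->debugfs, smmu,
                                &tegra_smmu_swgroups_fops);
            debugfs_create_file("clients", 0444, smmu->debugfs, smmu,
                                &tegra_smmu_clients_fops);
    }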
1084 struct tegra_mc *mc) in tegra_smmu_probe() argument
1093 return ERR_PTR(-ENOMEM); in tegra_smmu_probe()
1103 mc->smmu = smmu; in tegra_smmu_probe()
1105 size = BITS_TO_LONGS(soc->num_asids) * sizeof(long); in tegra_smmu_probe()
1107 smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL); in tegra_smmu_probe()
1108 if (!smmu->asids) in tegra_smmu_probe()
1109 return ERR_PTR(-ENOMEM); in tegra_smmu_probe()
1111 INIT_LIST_HEAD(&smmu->groups); in tegra_smmu_probe()
1112 mutex_init(&smmu->lock); in tegra_smmu_probe()
1114 smmu->regs = mc->regs; in tegra_smmu_probe()
1115 smmu->soc = soc; in tegra_smmu_probe()
1116 smmu->dev = dev; in tegra_smmu_probe()
1117 smmu->mc = mc; in tegra_smmu_probe()
1119 smmu->pfn_mask = in tegra_smmu_probe()
1120 BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1; in tegra_smmu_probe()
1122 mc->soc->num_address_bits, smmu->pfn_mask); in tegra_smmu_probe()
1123 smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1; in tegra_smmu_probe()
1124 dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines, in tegra_smmu_probe()
1125 smmu->tlb_mask); in tegra_smmu_probe()
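The probe path derives two masks from SoC data: pfn_mask bounds the page-frame numbers a PTE can hold (address bits minus the 12-bit page offset), and tlb_mask rounds the TLB line count up to a power-of-two mask for the TLB configuration. A runnable userspace demo of the arithmetic, with example inputs (34 address bits, 32 TLB lines; assumed values, not tied to a specific SoC):

    /* Standalone demo of the two mask computations above; fls32() is
     * a userspace stand-in for the kernel's fls(). */
    #include <stdio.h>

    static int fls32(unsigned int x)
    {
            int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    int main(void)
    {
            unsigned int num_address_bits = 34, num_tlb_lines = 32;
            unsigned long pfn_mask, tlb_mask;

            /* PTEs hold page-frame numbers: drop the 12 offset bits */
            pfn_mask = (1UL << (num_address_bits - 12)) - 1;
            /* round the TLB line count up to a power-of-two mask */
            tlb_mask = (1UL << fls32(num_tlb_lines)) - 1;

            /* prints: pfn_mask 0x3fffff tlb_mask 0x3f */
            printf("pfn_mask %#lx tlb_mask %#lx\n", pfn_mask, tlb_mask);
            return 0;
    }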
1129 if (soc->supports_request_limit) in tegra_smmu_probe()
1137 if (soc->supports_round_robin_arbitration) in tegra_smmu_probe()
1149 err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev)); in tegra_smmu_probe()
1153 iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops); in tegra_smmu_probe()
1154 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); in tegra_smmu_probe()
1156 err = iommu_device_register(&smmu->iommu); in tegra_smmu_probe()
1158 iommu_device_sysfs_remove(&smmu->iommu); in tegra_smmu_probe()
1164 iommu_device_unregister(&smmu->iommu); in tegra_smmu_probe()
1165 iommu_device_sysfs_remove(&smmu->iommu); in tegra_smmu_probe()
1177 iommu_device_unregister(&smmu->iommu); in tegra_smmu_remove()
1178 iommu_device_sysfs_remove(&smmu->iommu); in tegra_smmu_remove()