Lines matching "smmu" in arm-smmu-v3.c

30 #include "arm-smmu-v3.h"
37 …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
89 static void parse_driver_options(struct arm_smmu_device *smmu) in parse_driver_options() argument
94 if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
96 smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
97 dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
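
The parse_driver_options() fragments above show a table-driven option parser: each entry pairs a DT property with a feature flag. A minimal reconstruction of the surrounding loop, assuming the usual zero-terminated { flag, property } table (table contents are not shown in the matches):

    /* Assumed table shape: { u32 opt; const char *prop; } pairs, zero-terminated. */
    static void parse_driver_options(struct arm_smmu_device *smmu)
    {
            int i = 0;

            do {
                    if (of_property_read_bool(smmu->dev->of_node,
                                              arm_smmu_options[i].prop)) {
                            smmu->options |= arm_smmu_options[i].opt;
                            dev_notice(smmu->dev, "option %s\n",
                                       arm_smmu_options[i].prop);
                    }
            } while (arm_smmu_options[++i].opt);
    }
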
192 static void queue_poll_init(struct arm_smmu_device *smmu, in queue_poll_init() argument
197 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); in queue_poll_init()
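
queue_poll_init() seeds a poll state whose wfe flag depends on ARM_SMMU_FEAT_SEV, i.e. whether the SMMU can issue wake-up events. A sketch of the companion poll step this implies, assuming delay/timeout fields on the poll state (field names are assumptions):

    static int queue_poll(struct arm_smmu_queue_poll *qp)
    {
            /* Fail once the deadline recorded at init time has passed. */
            if (ktime_compare(ktime_get(), qp->timeout) > 0)
                    return -ETIMEDOUT;

            if (qp->wfe) {
                    wfe();                  /* sleep until the SMMU signals an event */
            } else {
                    cpu_relax();
                    udelay(qp->delay);      /* exponential back-off busy-wait */
                    qp->delay *= 2;
            }

            return 0;
    }
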
348 static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu) in arm_smmu_get_cmdq() argument
350 return &smmu->cmdq; in arm_smmu_get_cmdq()
353 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, in arm_smmu_cmdq_build_sync_cmd() argument
364 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) { in arm_smmu_cmdq_build_sync_cmd()
372 static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu, in __arm_smmu_cmdq_skip_err() argument
390 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, in __arm_smmu_cmdq_skip_err()
395 dev_err(smmu->dev, "retrying command fetch\n"); in __arm_smmu_cmdq_skip_err()
417 dev_err(smmu->dev, "skipping command in error state:\n"); in __arm_smmu_cmdq_skip_err()
419 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); in __arm_smmu_cmdq_skip_err()
427 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) in arm_smmu_cmdq_skip_err() argument
429 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q); in arm_smmu_cmdq_skip_err()
523 * a. If we have MSIs, the SMMU can write back into the CMD_SYNC
593 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, in arm_smmu_cmdq_poll_until_not_full() argument
598 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in arm_smmu_cmdq_poll_until_not_full()
612 queue_poll_init(smmu, &qp); in arm_smmu_cmdq_poll_until_not_full()
625 * Wait until the SMMU signals a CMD_SYNC completion MSI.
628 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, in __arm_smmu_cmdq_poll_until_msi() argument
633 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in __arm_smmu_cmdq_poll_until_msi()
636 queue_poll_init(smmu, &qp); in __arm_smmu_cmdq_poll_until_msi()
649 * Wait until the SMMU cons index passes llq->prod.
652 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, in __arm_smmu_cmdq_poll_until_consumed() argument
656 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in __arm_smmu_cmdq_poll_until_consumed()
660 queue_poll_init(smmu, &qp); in __arm_smmu_cmdq_poll_until_consumed()
702 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, in arm_smmu_cmdq_poll_until_sync() argument
705 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) in arm_smmu_cmdq_poll_until_sync()
706 return __arm_smmu_cmdq_poll_until_msi(smmu, llq); in arm_smmu_cmdq_poll_until_sync()
708 return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); in arm_smmu_cmdq_poll_until_sync()
744 static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, in arm_smmu_cmdq_issue_cmdlist() argument
751 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in arm_smmu_cmdq_issue_cmdlist()
765 if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) in arm_smmu_cmdq_issue_cmdlist()
766 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); in arm_smmu_cmdq_issue_cmdlist()
791 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod); in arm_smmu_cmdq_issue_cmdlist()
807 /* 4. If we are the owner, take control of the SMMU hardware */ in arm_smmu_cmdq_issue_cmdlist()
841 ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); in arm_smmu_cmdq_issue_cmdlist()
843 dev_err_ratelimited(smmu->dev, in arm_smmu_cmdq_issue_cmdlist()
864 static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, in __arm_smmu_cmdq_issue_cmd() argument
871 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in __arm_smmu_cmdq_issue_cmd()
876 return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync); in __arm_smmu_cmdq_issue_cmd()
879 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, in arm_smmu_cmdq_issue_cmd() argument
882 return __arm_smmu_cmdq_issue_cmd(smmu, ent, false); in arm_smmu_cmdq_issue_cmd()
885 static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu, in arm_smmu_cmdq_issue_cmd_with_sync() argument
888 return __arm_smmu_cmdq_issue_cmd(smmu, ent, true); in arm_smmu_cmdq_issue_cmd_with_sync()
891 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, in arm_smmu_cmdq_batch_add() argument
898 (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) { in arm_smmu_cmdq_batch_add()
899 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); in arm_smmu_cmdq_batch_add()
904 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); in arm_smmu_cmdq_batch_add()
910 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in arm_smmu_cmdq_batch_add()
918 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu, in arm_smmu_cmdq_batch_submit() argument
921 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); in arm_smmu_cmdq_batch_submit()
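
Batching amortizes the cost of CMD_SYNC: arm_smmu_cmdq_batch_add() queues commands and flushes without a sync when the batch fills (or on every command under ARM_SMMU_OPT_CMDQ_FORCE_SYNC), while arm_smmu_cmdq_batch_submit() issues the final flush with a sync. Illustrative usage, mirroring the arm_smmu_sync_cd() fragments below (command fields are placeholders):

    struct arm_smmu_cmdq_batch cmds = {};
    struct arm_smmu_cmdq_ent cmd = {
            .opcode = CMDQ_OP_CFGI_CD,      /* placeholder opcode */
            .cfgi   = {
                    .ssid   = ssid,
                    .leaf   = true,
            },
    };

    arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);     /* queues, may flush when full */
    arm_smmu_cmdq_batch_submit(smmu, &cmds);        /* final flush + CMD_SYNC */
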
951 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); in arm_smmu_page_response()
963 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid) in arm_smmu_tlb_inv_asid() argument
966 .opcode = smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_asid()
971 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); in arm_smmu_tlb_inv_asid()
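
arm_smmu_tlb_inv_asid() selects the EL2 variant of the by-ASID invalidation when the SMMU runs in E2H mode. A sketch of the full command it issues, consistent with the matched fragments:

    struct arm_smmu_cmdq_ent cmd = {
            .opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
                            CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
            .tlbi.asid = asid,
    };

    arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
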
979 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_sync_cd() local
991 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); in arm_smmu_sync_cd()
994 arm_smmu_cmdq_batch_submit(smmu, &cmds); in arm_smmu_sync_cd()
997 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu, in arm_smmu_alloc_cd_leaf_table() argument
1002 l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, in arm_smmu_alloc_cd_leaf_table()
1005 dev_warn(smmu->dev, in arm_smmu_alloc_cd_leaf_table()
1027 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_get_cd_ptr() local
1036 if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) in arm_smmu_get_cd_ptr()
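
The L2 context-descriptor tables come from managed coherent DMA memory, so they are released automatically on driver detach. A minimal sketch of arm_smmu_alloc_cd_leaf_table(), assuming the usual size computation from the driver's CTXDESC constants:

    static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
                                            struct arm_smmu_l1_ctx_desc *l1_desc)
    {
            size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);

            l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
                                                 &l1_desc->l2ptr_dma, GFP_KERNEL);
            if (!l1_desc->l2ptr) {
                    dev_warn(smmu->dev,
                             "failed to allocate context descriptor table\n");
                    return -ENOMEM;
            }
            return 0;
    }
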
1066 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_write_ctx_desc() local
1081 if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) in arm_smmu_write_ctx_desc()
1097 * STE may be live, and the SMMU might read dwords of this CD in any in arm_smmu_write_ctx_desc()
1118 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3 in arm_smmu_write_ctx_desc()
1121 * The size of single-copy atomic reads made by the SMMU is in arm_smmu_write_ctx_desc()
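
The comments above spell out the ordering contract: the STE may be live and the SMMU reads CD words with 64-bit single-copy atomicity, so every dword except the one carrying the valid bit is written and synced first, and the valid word is then published with a single atomic store. A condensed sketch of that discipline (field values and the sync helper's exact signature are assumptions):

    /* 1. Fill in every dword except the one holding the valid bit. */
    cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
    cdptr[2] = 0;
    cdptr[3] = cpu_to_le64(cd->mair);

    /* 2. Make the partial entry visible to the SMMU before going live. */
    arm_smmu_sync_cd(master, ssid, true);

    /* 3. One 64-bit store publishes the valid bit atomically. */
    WRITE_ONCE(cdptr[0], cpu_to_le64(val));
    arm_smmu_sync_cd(master, ssid, true);
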
1136 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_alloc_cd_tables() local
1143 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || in arm_smmu_alloc_cd_tables()
1154 cd_table->l1_desc = devm_kcalloc(smmu->dev, cd_table->num_l1_ents, in arm_smmu_alloc_cd_tables()
1163 cd_table->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma, in arm_smmu_alloc_cd_tables()
1166 dev_warn(smmu->dev, "failed to allocate context descriptor\n"); in arm_smmu_alloc_cd_tables()
1175 devm_kfree(smmu->dev, cd_table->l1_desc); in arm_smmu_alloc_cd_tables()
1185 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_free_cd_tables() local
1195 dmam_free_coherent(smmu->dev, size, in arm_smmu_free_cd_tables()
1199 devm_kfree(smmu->dev, cd_table->l1_desc); in arm_smmu_free_cd_tables()
1207 dmam_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma); in arm_smmu_free_cd_tables()
1241 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_sync_ste_for_sid() argument
1251 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); in arm_smmu_sync_ste_for_sid()
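
arm_smmu_sync_ste_for_sid() invalidates any cached copy of a single STE and waits for completion. A sketch of the command it builds:

    struct arm_smmu_cmdq_ent cmd = {
            .opcode = CMDQ_OP_CFGI_STE,
            .cfgi   = {
                    .sid    = sid,
                    .leaf   = true,
            },
    };

    arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
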
1265 * Given that we can't update the STE atomically and the SMMU in arm_smmu_write_strtab_ent()
1275 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_write_strtab_ent() local
1330 * The SMMU can perform negative caching, so we must sync in arm_smmu_write_strtab_ent()
1333 if (smmu) in arm_smmu_write_strtab_ent()
1334 arm_smmu_sync_ste_for_sid(smmu, sid); in arm_smmu_write_strtab_ent()
1339 u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_write_strtab_ent()
1350 if (smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_write_strtab_ent()
1380 arm_smmu_sync_ste_for_sid(smmu, sid); in arm_smmu_write_strtab_ent()
1383 arm_smmu_sync_ste_for_sid(smmu, sid); in arm_smmu_write_strtab_ent()
1386 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) in arm_smmu_write_strtab_ent()
1387 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); in arm_smmu_write_strtab_ent()
1410 static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_init_l2_strtab() argument
1414 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l2_strtab()
1424 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, in arm_smmu_init_l2_strtab()
1427 dev_err(smmu->dev, in arm_smmu_init_l2_strtab()
1439 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_find_master() argument
1444 lockdep_assert_held(&smmu->streams_mutex); in arm_smmu_find_master()
1446 node = smmu->streams.rb_node; in arm_smmu_find_master()
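
Masters are found by stream ID in the smmu->streams rb-tree, which must be traversed under streams_mutex (hence the lockdep assertion). A sketch of the walk, assuming each node is an arm_smmu_stream keyed by id:

    while (node) {
            struct arm_smmu_stream *stream =
                    rb_entry(node, struct arm_smmu_stream, node);

            if (stream->id < sid)
                    node = node->rb_right;
            else if (stream->id > sid)
                    node = node->rb_left;
            else
                    return stream->master;
    }
    return NULL;
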
1461 static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) in arm_smmu_handle_evt() argument
1532 mutex_lock(&smmu->streams_mutex); in arm_smmu_handle_evt()
1533 master = arm_smmu_find_master(smmu, sid); in arm_smmu_handle_evt()
1551 mutex_unlock(&smmu->streams_mutex); in arm_smmu_handle_evt()
1558 struct arm_smmu_device *smmu = dev; in arm_smmu_evtq_thread() local
1559 struct arm_smmu_queue *q = &smmu->evtq.q; in arm_smmu_evtq_thread()
1569 ret = arm_smmu_handle_evt(smmu, evt); in arm_smmu_evtq_thread()
1573 dev_info(smmu->dev, "event 0x%02x received:\n", id); in arm_smmu_evtq_thread()
1575 dev_info(smmu->dev, "\t0x%016llx\n", in arm_smmu_evtq_thread()
1586 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); in arm_smmu_evtq_thread()
1594 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) in arm_smmu_handle_ppr() argument
1606 dev_info(smmu->dev, "unexpected PRI request received:\n"); in arm_smmu_handle_ppr()
1607 dev_info(smmu->dev, in arm_smmu_handle_ppr()
1628 arm_smmu_cmdq_issue_cmd(smmu, &cmd); in arm_smmu_handle_ppr()
1634 struct arm_smmu_device *smmu = dev; in arm_smmu_priq_thread() local
1635 struct arm_smmu_queue *q = &smmu->priq.q; in arm_smmu_priq_thread()
1641 arm_smmu_handle_ppr(smmu, evt); in arm_smmu_priq_thread()
1644 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); in arm_smmu_priq_thread()
1652 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1657 struct arm_smmu_device *smmu = dev; in arm_smmu_gerror_handler() local
1659 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); in arm_smmu_gerror_handler()
1660 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
1666 dev_warn(smmu->dev, in arm_smmu_gerror_handler()
1671 dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); in arm_smmu_gerror_handler()
1672 arm_smmu_device_disable(smmu); in arm_smmu_gerror_handler()
1676 dev_warn(smmu->dev, "GERROR MSI write aborted\n"); in arm_smmu_gerror_handler()
1679 dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1682 dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1685 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1688 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
1691 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
1694 arm_smmu_cmdq_skip_err(smmu); in arm_smmu_gerror_handler()
1696 writel(gerror, smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
1702 struct arm_smmu_device *smmu = dev; in arm_smmu_combined_irq_thread() local
1705 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_combined_irq_thread()
1795 arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd); in arm_smmu_atc_inv_master()
1798 return arm_smmu_cmdq_batch_submit(master->smmu, &cmds); in arm_smmu_atc_inv_master()
1810 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_atc_inv_domain()
1841 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
1846 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); in arm_smmu_atc_inv_domain()
1853 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context() local
1859 * to the SMMU. We are relying on the dma_wmb() implicit during cmd in arm_smmu_tlb_inv_context()
1864 arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid); in arm_smmu_tlb_inv_context()
1868 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); in arm_smmu_tlb_inv_context()
1878 struct arm_smmu_device *smmu = smmu_domain->smmu; in __arm_smmu_tlb_inv_range() local
1886 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in __arm_smmu_tlb_inv_range()
1912 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in __arm_smmu_tlb_inv_range()
1938 arm_smmu_cmdq_batch_add(smmu, &cmds, cmd); in __arm_smmu_tlb_inv_range()
1941 arm_smmu_cmdq_batch_submit(smmu, &cmds); in __arm_smmu_tlb_inv_range()
1955 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_domain()
1976 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_asid()
2017 return master->smmu->features & ARM_SMMU_FEAT_COHERENCY; in arm_smmu_capable()
2058 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_free() local
2071 ida_free(&smmu->vmid_map, cfg->vmid); in arm_smmu_domain_free()
2082 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s1() local
2091 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); in arm_smmu_domain_finalise_s1()
2118 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s2() local
2123 vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, in arm_smmu_domain_finalise_s2()
2151 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise() local
2159 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_domain_finalise()
2161 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_domain_finalise()
2166 ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; in arm_smmu_domain_finalise()
2168 oas = smmu->ias; in arm_smmu_domain_finalise()
2173 ias = smmu->ias; in arm_smmu_domain_finalise()
2174 oas = smmu->oas; in arm_smmu_domain_finalise()
2183 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_domain_finalise()
2186 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, in arm_smmu_domain_finalise()
2188 .iommu_dev = smmu->dev, in arm_smmu_domain_finalise()
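
Domain finalisation hands these limits straight to io-pgtable. A sketch of the config-and-allocate step around the fragments above, assuming fmt was picked from the translation stage (ARM_64_LPAE_S1 or ARM_64_LPAE_S2):

    struct io_pgtable_cfg pgtbl_cfg = {
            .pgsize_bitmap  = smmu->pgsize_bitmap,
            .ias            = ias,
            .oas            = oas,
            .coherent_walk  = smmu->features & ARM_SMMU_FEAT_COHERENCY,
            .tlb            = &arm_smmu_flush_ops,
            .iommu_dev      = smmu->dev,
    };
    struct io_pgtable_ops *pgtbl_ops;

    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
    if (!pgtbl_ops)
            return -ENOMEM;
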
2210 arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_get_step_for_sid() argument
2212 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_get_step_for_sid()
2214 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { in arm_smmu_get_step_for_sid()
2231 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_install_ste_for_dev() local
2236 arm_smmu_get_step_for_sid(smmu, sid); in arm_smmu_install_ste_for_dev()
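
With a two-level stream table, the upper SID bits select an L1 descriptor and the low STRTAB_SPLIT bits index an STE within its L2 span; a linear table indexes STEs directly. A sketch of the indexing inside arm_smmu_get_step_for_sid():

    if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
            /* Two-level walk: upper bits pick the L1 descriptor... */
            struct arm_smmu_strtab_l1_desc *l1_desc =
                    &cfg->l1_desc[sid >> STRTAB_SPLIT];
            /* ...low bits index STEs inside its L2 table. */
            unsigned int idx = (sid & ((1 << STRTAB_SPLIT) - 1)) *
                               STRTAB_STE_DWORDS;

            return &l1_desc->l2ptr[idx];
    }

    /* Linear table: the SID indexes STEs directly. */
    return cfg->strtab + (sid * STRTAB_STE_DWORDS);
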
2252 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_ats_supported() local
2255 if (!(smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_ats_supported()
2268 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_enable_ats() local
2276 stu = __ffs(smmu->pgsize_bitmap); in arm_smmu_enable_ats()
2295 * ATC invalidation via the SMMU. in arm_smmu_disable_ats()
2329 master->smmu->ssid_bits); in arm_smmu_enable_pasid()
2381 struct arm_smmu_device *smmu; in arm_smmu_attach_dev() local
2389 smmu = master->smmu; in arm_smmu_attach_dev()
2405 if (!smmu_domain->smmu) { in arm_smmu_attach_dev()
2406 smmu_domain->smmu = smmu; in arm_smmu_attach_dev()
2409 smmu_domain->smmu = NULL; in arm_smmu_attach_dev()
2410 } else if (smmu_domain->smmu != smmu) in arm_smmu_attach_dev()
2420 * The SMMU does not support enabling ATS with bypass. When the STE is in arm_smmu_attach_dev()
2497 if (smmu_domain->smmu) in arm_smmu_flush_iotlb_all()
2536 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_sid_in_range() argument
2538 unsigned long limit = smmu->strtab_cfg.num_l1_ents; in arm_smmu_sid_in_range()
2540 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_sid_in_range()
2546 static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_init_sid_strtab() argument
2548 /* Check the SIDs are in range of the SMMU and our stream table */ in arm_smmu_init_sid_strtab()
2549 if (!arm_smmu_sid_in_range(smmu, sid)) in arm_smmu_init_sid_strtab()
2553 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_sid_strtab()
2554 return arm_smmu_init_l2_strtab(smmu, sid); in arm_smmu_init_sid_strtab()
2559 static int arm_smmu_insert_master(struct arm_smmu_device *smmu, in arm_smmu_insert_master() argument
2574 mutex_lock(&smmu->streams_mutex); in arm_smmu_insert_master()
2582 ret = arm_smmu_init_sid_strtab(smmu, sid); in arm_smmu_insert_master()
2587 new_node = &(smmu->streams.rb_node); in arm_smmu_insert_master()
2608 rb_insert_color(&new_stream->node, &smmu->streams); in arm_smmu_insert_master()
2613 rb_erase(&master->streams[i].node, &smmu->streams); in arm_smmu_insert_master()
2616 mutex_unlock(&smmu->streams_mutex); in arm_smmu_insert_master()
2624 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_remove_master() local
2627 if (!smmu || !master->streams) in arm_smmu_remove_master()
2630 mutex_lock(&smmu->streams_mutex); in arm_smmu_remove_master()
2632 rb_erase(&master->streams[i].node, &smmu->streams); in arm_smmu_remove_master()
2633 mutex_unlock(&smmu->streams_mutex); in arm_smmu_remove_master()
2643 struct arm_smmu_device *smmu; in arm_smmu_probe_device() local
2650 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
2651 if (!smmu) in arm_smmu_probe_device()
2659 master->smmu = smmu; in arm_smmu_probe_device()
2663 ret = arm_smmu_insert_master(smmu, master); in arm_smmu_probe_device()
2668 master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits); in arm_smmu_probe_device()
2680 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) in arm_smmu_probe_device()
2684 if ((smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_probe_device()
2686 smmu->features & ARM_SMMU_FEAT_STALL_FORCE) in arm_smmu_probe_device()
2689 return &smmu->iommu; in arm_smmu_probe_device()
2701 iopf_queue_remove_device(master->smmu->evtq.iopf, dev); in arm_smmu_release_device()
2733 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
2874 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, in arm_smmu_init_one_queue() argument
2885 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, in arm_smmu_init_one_queue()
2894 dev_err(smmu->dev, in arm_smmu_init_one_queue()
2901 dev_info(smmu->dev, "allocated %u entries for %s\n", in arm_smmu_init_one_queue()
2917 static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) in arm_smmu_cmdq_init() argument
2919 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_init()
2925 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents, in arm_smmu_cmdq_init()
2933 static int arm_smmu_init_queues(struct arm_smmu_device *smmu) in arm_smmu_init_queues() argument
2938 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base, in arm_smmu_init_queues()
2944 ret = arm_smmu_cmdq_init(smmu); in arm_smmu_init_queues()
2949 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1, in arm_smmu_init_queues()
2955 if ((smmu->features & ARM_SMMU_FEAT_SVA) && in arm_smmu_init_queues()
2956 (smmu->features & ARM_SMMU_FEAT_STALLS)) { in arm_smmu_init_queues()
2957 smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev)); in arm_smmu_init_queues()
2958 if (!smmu->evtq.iopf) in arm_smmu_init_queues()
2963 if (!(smmu->features & ARM_SMMU_FEAT_PRI)) in arm_smmu_init_queues()
2966 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1, in arm_smmu_init_queues()
2971 static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) in arm_smmu_init_l1_strtab() argument
2974 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l1_strtab()
2975 void *strtab = smmu->strtab_cfg.strtab; in arm_smmu_init_l1_strtab()
2977 cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents, in arm_smmu_init_l1_strtab()
2990 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) in arm_smmu_init_strtab_2lvl() argument
2995 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_2lvl()
2999 size = min(size, smmu->sid_bits - STRTAB_SPLIT); in arm_smmu_init_strtab_2lvl()
3003 if (size < smmu->sid_bits) in arm_smmu_init_strtab_2lvl()
3004 dev_warn(smmu->dev, in arm_smmu_init_strtab_2lvl()
3006 size, smmu->sid_bits); in arm_smmu_init_strtab_2lvl()
3009 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, in arm_smmu_init_strtab_2lvl()
3012 dev_err(smmu->dev, in arm_smmu_init_strtab_2lvl()
3025 return arm_smmu_init_l1_strtab(smmu); in arm_smmu_init_strtab_2lvl()
3028 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) in arm_smmu_init_strtab_linear() argument
3033 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_linear()
3035 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); in arm_smmu_init_strtab_linear()
3036 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, in arm_smmu_init_strtab_linear()
3039 dev_err(smmu->dev, in arm_smmu_init_strtab_linear()
3045 cfg->num_l1_ents = 1 << smmu->sid_bits; in arm_smmu_init_strtab_linear()
3049 reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); in arm_smmu_init_strtab_linear()
3056 static int arm_smmu_init_strtab(struct arm_smmu_device *smmu) in arm_smmu_init_strtab() argument
3061 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_strtab()
3062 ret = arm_smmu_init_strtab_2lvl(smmu); in arm_smmu_init_strtab()
3064 ret = arm_smmu_init_strtab_linear(smmu); in arm_smmu_init_strtab()
3070 reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK; in arm_smmu_init_strtab()
3072 smmu->strtab_cfg.strtab_base = reg; in arm_smmu_init_strtab()
3074 ida_init(&smmu->vmid_map); in arm_smmu_init_strtab()
3079 static int arm_smmu_init_structures(struct arm_smmu_device *smmu) in arm_smmu_init_structures() argument
3083 mutex_init(&smmu->streams_mutex); in arm_smmu_init_structures()
3084 smmu->streams = RB_ROOT; in arm_smmu_init_structures()
3086 ret = arm_smmu_init_queues(smmu); in arm_smmu_init_structures()
3090 return arm_smmu_init_strtab(smmu); in arm_smmu_init_structures()
3093 static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, in arm_smmu_write_reg_sync() argument
3098 writel_relaxed(val, smmu->base + reg_off); in arm_smmu_write_reg_sync()
3099 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val, in arm_smmu_write_reg_sync()
3104 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) in arm_smmu_update_gbpa() argument
3107 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; in arm_smmu_update_gbpa()
3121 dev_err(smmu->dev, "GBPA not responding to update\n"); in arm_smmu_update_gbpa()
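
GBPA carries a self-clearing UPDATE bit: software waits for it to clear, rewrites the register with UPDATE set, then polls until the hardware acknowledges. A sketch of the body of arm_smmu_update_gbpa(), assuming the driver's standard poll timeout:

    ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
                                     1, ARM_SMMU_POLL_TIMEOUT_US);
    if (ret)
            return ret;

    reg &= ~clr;
    reg |= set;
    writel_relaxed(reg | GBPA_UPDATE, gbpa);
    ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
                                     1, ARM_SMMU_POLL_TIMEOUT_US);
    if (ret)
            dev_err(smmu->dev, "GBPA not responding to update\n");
    return ret;
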
3135 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_write_msi_msg() local
3141 writeq_relaxed(doorbell, smmu->base + cfg[0]); in arm_smmu_write_msi_msg()
3142 writel_relaxed(msg->data, smmu->base + cfg[1]); in arm_smmu_write_msi_msg()
3143 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); in arm_smmu_write_msi_msg()
3146 static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) in arm_smmu_setup_msis() argument
3149 struct device *dev = smmu->dev; in arm_smmu_setup_msis()
3152 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); in arm_smmu_setup_msis()
3153 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); in arm_smmu_setup_msis()
3155 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_msis()
3156 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); in arm_smmu_setup_msis()
3160 if (!(smmu->features & ARM_SMMU_FEAT_MSI)) in arm_smmu_setup_msis()
3164 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n"); in arm_smmu_setup_msis()
3175 smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX); in arm_smmu_setup_msis()
3176 smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX); in arm_smmu_setup_msis()
3177 smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX); in arm_smmu_setup_msis()
3183 static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) in arm_smmu_setup_unique_irqs() argument
3187 arm_smmu_setup_msis(smmu); in arm_smmu_setup_unique_irqs()
3190 irq = smmu->evtq.q.irq; in arm_smmu_setup_unique_irqs()
3192 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
3195 "arm-smmu-v3-evtq", smmu); in arm_smmu_setup_unique_irqs()
3197 dev_warn(smmu->dev, "failed to enable evtq irq\n"); in arm_smmu_setup_unique_irqs()
3199 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n"); in arm_smmu_setup_unique_irqs()
3202 irq = smmu->gerr_irq; in arm_smmu_setup_unique_irqs()
3204 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, in arm_smmu_setup_unique_irqs()
3205 0, "arm-smmu-v3-gerror", smmu); in arm_smmu_setup_unique_irqs()
3207 dev_warn(smmu->dev, "failed to enable gerror irq\n"); in arm_smmu_setup_unique_irqs()
3209 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n"); in arm_smmu_setup_unique_irqs()
3212 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_setup_unique_irqs()
3213 irq = smmu->priq.q.irq; in arm_smmu_setup_unique_irqs()
3215 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
3218 "arm-smmu-v3-priq", in arm_smmu_setup_unique_irqs()
3219 smmu); in arm_smmu_setup_unique_irqs()
3221 dev_warn(smmu->dev, in arm_smmu_setup_unique_irqs()
3224 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); in arm_smmu_setup_unique_irqs()
3229 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) in arm_smmu_setup_irqs() argument
3235 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL, in arm_smmu_setup_irqs()
3238 dev_err(smmu->dev, "failed to disable irqs\n"); in arm_smmu_setup_irqs()
3242 irq = smmu->combined_irq; in arm_smmu_setup_irqs()
3248 ret = devm_request_threaded_irq(smmu->dev, irq, in arm_smmu_setup_irqs()
3252 "arm-smmu-v3-combined-irq", smmu); in arm_smmu_setup_irqs()
3254 dev_warn(smmu->dev, "failed to enable combined irq\n"); in arm_smmu_setup_irqs()
3256 arm_smmu_setup_unique_irqs(smmu); in arm_smmu_setup_irqs()
3258 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_irqs()
3261 /* Enable interrupt generation on the SMMU */ in arm_smmu_setup_irqs()
3262 ret = arm_smmu_write_reg_sync(smmu, irqen_flags, in arm_smmu_setup_irqs()
3265 dev_warn(smmu->dev, "failed to enable irqs\n"); in arm_smmu_setup_irqs()
3270 static int arm_smmu_device_disable(struct arm_smmu_device *smmu) in arm_smmu_device_disable() argument
3274 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK); in arm_smmu_device_disable()
3276 dev_err(smmu->dev, "failed to clear cr0\n"); in arm_smmu_device_disable()
3281 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) in arm_smmu_device_reset() argument
3287 /* Clear CR0 and sync (disables SMMU and queue processing) */ in arm_smmu_device_reset()
3288 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); in arm_smmu_device_reset()
3290 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n"); in arm_smmu_device_reset()
3292 arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); in arm_smmu_device_reset()
3295 ret = arm_smmu_device_disable(smmu); in arm_smmu_device_reset()
3306 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1); in arm_smmu_device_reset()
3311 if (smmu->features & ARM_SMMU_FEAT_E2H) in arm_smmu_device_reset()
3314 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2); in arm_smmu_device_reset()
3317 writeq_relaxed(smmu->strtab_cfg.strtab_base, in arm_smmu_device_reset()
3318 smmu->base + ARM_SMMU_STRTAB_BASE); in arm_smmu_device_reset()
3319 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg, in arm_smmu_device_reset()
3320 smmu->base + ARM_SMMU_STRTAB_BASE_CFG); in arm_smmu_device_reset()
3323 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); in arm_smmu_device_reset()
3324 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
3325 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
3328 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, in arm_smmu_device_reset()
3331 dev_err(smmu->dev, "failed to enable command queue\n"); in arm_smmu_device_reset()
3337 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); in arm_smmu_device_reset()
3340 if (smmu->features & ARM_SMMU_FEAT_HYP) { in arm_smmu_device_reset()
3342 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); in arm_smmu_device_reset()
3346 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); in arm_smmu_device_reset()
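
Once the command queue is up, reset flushes stale state: configuration caches first, then TLBs, with the EL2 flavour added only when the hypervisor feature is present. The fragments above show only the issue calls; a sketch of the opcodes behind them:

    struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_ALL };

    /* Invalidate any cached configuration */
    arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);

    /* Invalidate any stale TLB entries */
    if (smmu->features & ARM_SMMU_FEAT_HYP) {
            cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
            arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
    }

    cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
    arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
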
3349 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); in arm_smmu_device_reset()
3350 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD); in arm_smmu_device_reset()
3351 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS); in arm_smmu_device_reset()
3354 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, in arm_smmu_device_reset()
3357 dev_err(smmu->dev, "failed to enable event queue\n"); in arm_smmu_device_reset()
3362 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_device_reset()
3363 writeq_relaxed(smmu->priq.q.q_base, in arm_smmu_device_reset()
3364 smmu->base + ARM_SMMU_PRIQ_BASE); in arm_smmu_device_reset()
3365 writel_relaxed(smmu->priq.q.llq.prod, in arm_smmu_device_reset()
3366 smmu->page1 + ARM_SMMU_PRIQ_PROD); in arm_smmu_device_reset()
3367 writel_relaxed(smmu->priq.q.llq.cons, in arm_smmu_device_reset()
3368 smmu->page1 + ARM_SMMU_PRIQ_CONS); in arm_smmu_device_reset()
3371 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, in arm_smmu_device_reset()
3374 dev_err(smmu->dev, "failed to enable PRI queue\n"); in arm_smmu_device_reset()
3379 if (smmu->features & ARM_SMMU_FEAT_ATS) { in arm_smmu_device_reset()
3381 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, in arm_smmu_device_reset()
3384 dev_err(smmu->dev, "failed to enable ATS check\n"); in arm_smmu_device_reset()
3389 ret = arm_smmu_setup_irqs(smmu); in arm_smmu_device_reset()
3391 dev_err(smmu->dev, "failed to setup irqs\n"); in arm_smmu_device_reset()
3398 /* Enable the SMMU interface, or ensure bypass */ in arm_smmu_device_reset()
3402 ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); in arm_smmu_device_reset()
3406 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, in arm_smmu_device_reset()
3409 dev_err(smmu->dev, "failed to enable SMMU interface\n"); in arm_smmu_device_reset()
3420 static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) in arm_smmu_device_iidr_probe() argument
3425 reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR); in arm_smmu_device_iidr_probe()
3437 smmu->features &= ~ARM_SMMU_FEAT_SEV; in arm_smmu_device_iidr_probe()
3440 smmu->features &= ~ARM_SMMU_FEAT_NESTING; in arm_smmu_device_iidr_probe()
3444 smmu->features &= ~ARM_SMMU_FEAT_BTM; in arm_smmu_device_iidr_probe()
3445 smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC; in arm_smmu_device_iidr_probe()
3447 smmu->features &= ~ARM_SMMU_FEAT_NESTING; in arm_smmu_device_iidr_probe()
3454 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) in arm_smmu_device_hw_probe() argument
3457 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_hw_probe()
3460 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0); in arm_smmu_device_hw_probe()
3464 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB; in arm_smmu_device_hw_probe()
3467 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB; in arm_smmu_device_hw_probe()
3476 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE; in arm_smmu_device_hw_probe()
3480 smmu->features |= ARM_SMMU_FEAT_TT_BE; in arm_smmu_device_hw_probe()
3484 smmu->features |= ARM_SMMU_FEAT_TT_LE; in arm_smmu_device_hw_probe()
3488 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n"); in arm_smmu_device_hw_probe()
3494 smmu->features |= ARM_SMMU_FEAT_PRI; in arm_smmu_device_hw_probe()
3497 smmu->features |= ARM_SMMU_FEAT_ATS; in arm_smmu_device_hw_probe()
3500 smmu->features |= ARM_SMMU_FEAT_SEV; in arm_smmu_device_hw_probe()
3503 smmu->features |= ARM_SMMU_FEAT_MSI; in arm_smmu_device_hw_probe()
3505 smmu->options |= ARM_SMMU_OPT_MSIPOLL; in arm_smmu_device_hw_probe()
3509 smmu->features |= ARM_SMMU_FEAT_HYP; in arm_smmu_device_hw_probe()
3511 smmu->features |= ARM_SMMU_FEAT_E2H; in arm_smmu_device_hw_probe()
3519 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n", in arm_smmu_device_hw_probe()
3524 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE; in arm_smmu_device_hw_probe()
3527 smmu->features |= ARM_SMMU_FEAT_STALLS; in arm_smmu_device_hw_probe()
3531 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_hw_probe()
3534 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_hw_probe()
3537 dev_err(smmu->dev, "no translation support!\n"); in arm_smmu_device_hw_probe()
3544 smmu->ias = 40; in arm_smmu_device_hw_probe()
3549 dev_err(smmu->dev, "AArch64 table format not supported!\n"); in arm_smmu_device_hw_probe()
3554 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8; in arm_smmu_device_hw_probe()
3555 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8; in arm_smmu_device_hw_probe()
3558 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1); in arm_smmu_device_hw_probe()
3560 dev_err(smmu->dev, "embedded implementation not supported\n"); in arm_smmu_device_hw_probe()
3565 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3567 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
3574 dev_err(smmu->dev, "command queue size <= %d entries not supported\n", in arm_smmu_device_hw_probe()
3579 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3581 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3585 smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); in arm_smmu_device_hw_probe()
3586 smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg); in arm_smmu_device_hw_probe()
3587 smmu->iommu.max_pasids = 1UL << smmu->ssid_bits; in arm_smmu_device_hw_probe()
3590 * If the SMMU supports fewer bits than would fill a single L2 stream in arm_smmu_device_hw_probe()
3593 if (smmu->sid_bits <= STRTAB_SPLIT) in arm_smmu_device_hw_probe()
3594 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; in arm_smmu_device_hw_probe()
3597 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); in arm_smmu_device_hw_probe()
3599 smmu->features |= ARM_SMMU_FEAT_RANGE_INV; in arm_smmu_device_hw_probe()
3602 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); in arm_smmu_device_hw_probe()
3605 smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg); in arm_smmu_device_hw_probe()
3609 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_hw_probe()
3611 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_hw_probe()
3613 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_hw_probe()
3617 smmu->features |= ARM_SMMU_FEAT_VAX; in arm_smmu_device_hw_probe()
3622 smmu->oas = 32; in arm_smmu_device_hw_probe()
3625 smmu->oas = 36; in arm_smmu_device_hw_probe()
3628 smmu->oas = 40; in arm_smmu_device_hw_probe()
3631 smmu->oas = 42; in arm_smmu_device_hw_probe()
3634 smmu->oas = 44; in arm_smmu_device_hw_probe()
3637 smmu->oas = 52; in arm_smmu_device_hw_probe()
3638 smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */ in arm_smmu_device_hw_probe()
3641 dev_info(smmu->dev, in arm_smmu_device_hw_probe()
3645 smmu->oas = 48; in arm_smmu_device_hw_probe()
3649 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_hw_probe()
3651 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_hw_probe()
3654 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas))) in arm_smmu_device_hw_probe()
3655 dev_warn(smmu->dev, in arm_smmu_device_hw_probe()
3658 smmu->ias = max(smmu->ias, smmu->oas); in arm_smmu_device_hw_probe()
3660 if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) && in arm_smmu_device_hw_probe()
3661 (smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_device_hw_probe()
3662 smmu->features |= ARM_SMMU_FEAT_NESTING; in arm_smmu_device_hw_probe()
3664 arm_smmu_device_iidr_probe(smmu); in arm_smmu_device_hw_probe()
3666 if (arm_smmu_sva_supported(smmu)) in arm_smmu_device_hw_probe()
3667 smmu->features |= ARM_SMMU_FEAT_SVA; in arm_smmu_device_hw_probe()
3669 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", in arm_smmu_device_hw_probe()
3670 smmu->ias, smmu->oas, smmu->features); in arm_smmu_device_hw_probe()
3675 static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu) in acpi_smmu_get_options() argument
3679 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; in acpi_smmu_get_options()
3682 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH; in acpi_smmu_get_options()
3686 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); in acpi_smmu_get_options()
3690 struct arm_smmu_device *smmu) in arm_smmu_device_acpi_probe() argument
3693 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
3701 acpi_smmu_get_options(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
3704 smmu->features |= ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_acpi_probe()
3710 struct arm_smmu_device *smmu) in arm_smmu_device_acpi_probe() argument
3717 struct arm_smmu_device *smmu) in arm_smmu_device_dt_probe() argument
3730 parse_driver_options(smmu); in arm_smmu_device_dt_probe()
3733 smmu->features |= ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_dt_probe()
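
The DT probe validates #iommu-cells, applies the option quirks parsed earlier, and inherits coherency from the dma-coherent property. A sketch of the surrounding function (error handling condensed):

    static int arm_smmu_device_dt_probe(struct platform_device *pdev,
                                        struct arm_smmu_device *smmu)
    {
            struct device *dev = &pdev->dev;
            u32 cells;
            int ret = -EINVAL;

            if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
                    dev_err(dev, "missing #iommu-cells property\n");
            else if (cells != 1)
                    dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
            else
                    ret = 0;

            parse_driver_options(smmu);

            if (of_dma_is_coherent(dev->of_node))
                    smmu->features |= ARM_SMMU_FEAT_COHERENCY;

            return ret;
    }
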
3738 static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) in arm_smmu_resource_size() argument
3740 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY) in arm_smmu_resource_size()
3754 static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu) in arm_smmu_rmr_install_bypass_ste() argument
3760 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_ste()
3769 ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
3771 dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n", in arm_smmu_rmr_install_bypass_ste()
3776 step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
3781 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_ste()
3789 struct arm_smmu_device *smmu; in arm_smmu_device_probe() local
3793 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); in arm_smmu_device_probe()
3794 if (!smmu) in arm_smmu_device_probe()
3796 smmu->dev = dev; in arm_smmu_device_probe()
3799 ret = arm_smmu_device_dt_probe(pdev, smmu); in arm_smmu_device_probe()
3801 ret = arm_smmu_device_acpi_probe(pdev, smmu); in arm_smmu_device_probe()
3813 if (resource_size(res) < arm_smmu_resource_size(smmu)) { in arm_smmu_device_probe()
3823 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ); in arm_smmu_device_probe()
3824 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
3825 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
3827 if (arm_smmu_resource_size(smmu) > SZ_64K) { in arm_smmu_device_probe()
3828 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K, in arm_smmu_device_probe()
3830 if (IS_ERR(smmu->page1)) in arm_smmu_device_probe()
3831 return PTR_ERR(smmu->page1); in arm_smmu_device_probe()
3833 smmu->page1 = smmu->base; in arm_smmu_device_probe()
3840 smmu->combined_irq = irq; in arm_smmu_device_probe()
3844 smmu->evtq.q.irq = irq; in arm_smmu_device_probe()
3848 smmu->priq.q.irq = irq; in arm_smmu_device_probe()
3852 smmu->gerr_irq = irq; in arm_smmu_device_probe()
3855 ret = arm_smmu_device_hw_probe(smmu); in arm_smmu_device_probe()
3860 ret = arm_smmu_init_structures(smmu); in arm_smmu_device_probe()
3865 platform_set_drvdata(pdev, smmu); in arm_smmu_device_probe()
3868 arm_smmu_rmr_install_bypass_ste(smmu); in arm_smmu_device_probe()
3871 ret = arm_smmu_device_reset(smmu, bypass); in arm_smmu_device_probe()
3876 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, in arm_smmu_device_probe()
3881 ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); in arm_smmu_device_probe()
3884 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_probe()
3893 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); in arm_smmu_device_remove() local
3895 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
3896 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
3897 arm_smmu_device_disable(smmu); in arm_smmu_device_remove()
3898 iopf_queue_free(smmu->evtq.iopf); in arm_smmu_device_remove()
3899 ida_destroy(&smmu->vmid_map); in arm_smmu_device_remove()
3904 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); in arm_smmu_device_shutdown() local
3906 arm_smmu_device_disable(smmu); in arm_smmu_device_shutdown()
3910 { .compatible = "arm,smmu-v3", },
3923 .name = "arm-smmu-v3",
3936 MODULE_ALIAS("platform:arm-smmu-v3");