arm-smmu.c: lines matching +full:iommu +full:-secure-id
1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for ARM architected SMMU implementations.
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
40 #include "arm-smmu.h"
41 #include "../../dma-iommu.h"
50 #define QCOM_DUMMY_VAL -1
63 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
73 if (pm_runtime_enabled(smmu->dev))
74 return pm_runtime_resume_and_get(smmu->dev);
81 if (pm_runtime_enabled(smmu->dev))
82 pm_runtime_put_autosuspend(smmu->dev);
95 * to 5-10sec worth of reprogramming the context bank, while
98 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
99 pm_runtime_use_autosuspend(smmu->dev);
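	/*
	 * A minimal usage sketch (illustration, not verbatim from this file):
	 * callers bracket register access with the helpers above, e.g.
	 *
	 *	if (arm_smmu_rpm_get(smmu) < 0)
	 *		return;
	 *	... program the context bank ...
	 *	arm_smmu_rpm_put(smmu);
	 *
	 * so the 20ms autosuspend delay coalesces bursts of map/unmap
	 * traffic into a single resume/suspend cycle.
	 */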
114 struct pci_bus *bus = to_pci_dev(dev)->bus;
117 bus = bus->parent;
118 return of_node_get(bus->bridge->parent->of_node);
121 return of_node_get(dev->of_node);
133 struct device_node *np = it->node;
136 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
137 "#stream-id-cells", -1)
138 if (it->node == np) {
142 it->node = np;
143 return err == -ENOENT ? 0 : err;
158 if (!np || !of_property_present(np, "#stream-id-cells")) {
160 return -ENODEV;
169 return -ENODEV;
174 /* "mmu-masters" assumes Stream ID == Requester ID */
181 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
188 return -ENOMEM;
200 return -ENODEV;
216 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
217 return smmu->impl->tlb_sync(smmu, page, sync, status);
221 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
229 dev_err_ratelimited(smmu->dev,
230 "TLB sync timed out -- SMMU may be deadlocked\n");
237 spin_lock_irqsave(&smmu->global_sync_lock, flags);
240 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
245 struct arm_smmu_device *smmu = smmu_domain->smmu;
248 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
249 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
251 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
262 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
263 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
270 struct arm_smmu_device *smmu = smmu_domain->smmu;
274 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
282 struct arm_smmu_device *smmu = smmu_domain->smmu;
283 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
284 int idx = cfg->cbndx;
286 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
289 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
291 iova |= cfg->asid;
295 } while (size -= granule);
298 iova |= (u64)cfg->asid << 48;
302 } while (size -= granule);
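	/*
	 * Worked example (sketch): invalidating a 2MiB range at a 4KiB
	 * granule issues 512 invalidate-by-VA writes; in the AArch64 format
	 * each value carries the page number (iova >> 12) in the low bits
	 * and the ASID in bits [63:48], as set up above.
	 */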
310 struct arm_smmu_device *smmu = smmu_domain->smmu;
311 int idx = smmu_domain->cfg.cbndx;
313 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
318 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
323 } while (size -= granule);
330 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
332 if (cfg->flush_walk_prefer_tlbiasid) {
371 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
374 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
382 struct arm_smmu_device *smmu = smmu_domain->smmu;
384 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
387 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
413 struct arm_smmu_device *smmu = smmu_domain->smmu;
414 int idx = smmu_domain->cfg.cbndx;
425 ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
428 if (ret == -ENOSYS)
429 dev_err_ratelimited(smmu->dev,
455 dev_err(smmu->dev,
456 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
459 dev_err(smmu->dev,
461 dev_err(smmu->dev,
473 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
474 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
475 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
477 cb->cfg = cfg;
481 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
482 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
484 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
485 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
486 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
487 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
489 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
492 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
497 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
498 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
499 cb->ttbr[1] = 0;
501 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
502 cfg->asid);
503 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
504 cfg->asid);
506 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
507 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
509 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
512 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
515 /* MAIRs (stage-1 only) */
517 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
518 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
519 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
521 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
522 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
531 struct arm_smmu_cb *cb = &smmu->cbs[idx];
532 struct arm_smmu_cfg *cfg = cb->cfg;
540 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
543 if (smmu->version > ARM_SMMU_V1) {
544 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
548 /* 16-bit VMIDs live in CBA2R */
549 if (smmu->features & ARM_SMMU_FEAT_VMID16)
550 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
556 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
557 if (smmu->version < ARM_SMMU_V2)
558 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
569 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
570 /* 8-bit VMIDs live in CBAR */
571 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
580 if (stage1 && smmu->version > ARM_SMMU_V1)
581 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
582 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
585 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
586 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
587 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
588 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
590 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
593 cb->ttbr[1]);
596 /* MAIRs (stage-1 only) */
598 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
599 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
610 if (smmu->impl && smmu->impl->write_sctlr)
611 smmu->impl->write_sctlr(smmu, idx, reg);
620 if (smmu->impl && smmu->impl->alloc_context_bank)
621 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
623 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
635 struct iommu_domain *domain = &smmu_domain->domain;
636 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
639 mutex_lock(&smmu_domain->init_mutex);
640 if (smmu_domain->smmu)
659 * Note that you can't actually request stage-2 mappings.
661 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
662 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
663 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
664 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
669 * the decision into the io-pgtable code where it arguably belongs,
674 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
675 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
678 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
679 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
680 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
681 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
682 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
685 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
687 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
688 ret = -EINVAL;
692 switch (smmu_domain->stage) {
694 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
695 start = smmu->num_s2_context_banks;
696 ias = smmu->va_size;
697 oas = smmu->ipa_size;
698 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
700 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
709 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
717 cfg->cbar = CBAR_TYPE_S2_TRANS;
719 ias = smmu->ipa_size;
720 oas = smmu->pa_size;
721 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
728 if (smmu->version == ARM_SMMU_V2)
729 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
731 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
734 ret = -EINVAL;
743 smmu_domain->smmu = smmu;
745 cfg->cbndx = ret;
746 if (smmu->version < ARM_SMMU_V2) {
747 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
748 cfg->irptndx %= smmu->num_context_irqs;
750 cfg->irptndx = cfg->cbndx;
753 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
754 cfg->vmid = cfg->cbndx + 1;
756 cfg->asid = cfg->cbndx;
759 .pgsize_bitmap = smmu->pgsize_bitmap,
762 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
763 .tlb = smmu_domain->flush_ops,
764 .iommu_dev = smmu->dev,
767 if (smmu->impl && smmu->impl->init_context) {
768 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
773 if (smmu_domain->pgtbl_quirks)
774 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
778 ret = -ENOMEM;
783 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
786 domain->geometry.aperture_start = ~0UL << ias;
787 domain->geometry.aperture_end = ~0UL;
789 domain->geometry.aperture_end = (1UL << ias) - 1;
792 domain->geometry.force_aperture = true;
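	/*
	 * Example (sketch): with ias == 48 and the TTBR1 quirk set, the
	 * aperture above becomes [0xffff000000000000, ~0UL]; without the
	 * quirk it is the bottom-half range [0, (1UL << 48) - 1].
	 */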
796 arm_smmu_write_context_bank(smmu, cfg->cbndx);
800 * handler seeing a half-initialised domain state.
802 irq = smmu->irqs[cfg->irptndx];
804 if (smmu->impl && smmu->impl->context_fault)
805 context_fault = smmu->impl->context_fault;
809 ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
810 "arm-smmu-context-fault", smmu_domain);
812 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
813 cfg->irptndx, irq);
814 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
817 mutex_unlock(&smmu_domain->init_mutex);
820 smmu_domain->pgtbl_ops = pgtbl_ops;
824 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
825 smmu_domain->smmu = NULL;
827 mutex_unlock(&smmu_domain->init_mutex);
833 struct arm_smmu_device *smmu = smmu_domain->smmu;
834 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
848 smmu->cbs[cfg->cbndx].cfg = NULL;
849 arm_smmu_write_context_bank(smmu, cfg->cbndx);
851 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
852 irq = smmu->irqs[cfg->irptndx];
853 devm_free_irq(smmu->dev, irq, smmu_domain);
856 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
857 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
879 mutex_init(&smmu_domain->init_mutex);
880 spin_lock_init(&smmu_domain->cb_lock);
882 return &smmu_domain->domain;
899 struct arm_smmu_smr *smr = smmu->smrs + idx;
900 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
901 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
903 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
910 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
913 if (smmu->impl && smmu->impl->write_s2cr) {
914 smmu->impl->write_s2cr(smmu, idx);
918 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
919 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
920 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
922 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
923 smmu->smrs[idx].valid)
931 if (smmu->smrs)
944 if (!smmu->smrs)
952 * these SMRs for the ID/mask values we're already trusting to be OK.
954 for (i = 0; i < smmu->num_mapping_groups; i++)
955 if (!smmu->smrs[i].valid)
960 * SMR.ID bits may not be preserved if the corresponding MASK
964 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
967 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
969 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
972 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
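	/*
	 * Example (sketch): if the all-ones write to SMR.ID reads back as
	 * 0x7fff, the implementation provides 15 stream ID bits, so
	 * streamid_mask ends up as 0x7fff and larger IDs are rejected later
	 * when a master's fwspec IDs are validated at probe time.
	 */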
975 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
977 struct arm_smmu_smr *smrs = smmu->smrs;
978 int i, free_idx = -ENOSPC;
982 return id;
985 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1003 !((id ^ smrs[i].id) & ~smrs[i].mask))
1007 * though, then there always exists at least one stream ID
1010 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1011 return -EINVAL;
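	/*
	 * Illustrative example (sketch): an existing SMR of id=0x10,
	 * mask=0x0f entirely matches a new id=0x13 with mask 0, since
	 * (0x13 ^ 0x10) & ~0x0f == 0 and the new mask is a subset of the
	 * old one, so that entry is reused; any partial overlap instead
	 * hits the -EINVAL path above.
	 */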
1019 if (--smmu->s2crs[idx].count)
1022 smmu->s2crs[idx] = s2cr_init_val;
1023 if (smmu->smrs)
1024 smmu->smrs[idx].valid = false;
1033 struct arm_smmu_device *smmu = cfg->smmu;
1034 struct arm_smmu_smr *smrs = smmu->smrs;
1037 mutex_lock(&smmu->stream_map_mutex);
1040 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1041 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1044 ret = -EEXIST;
1053 if (smrs && smmu->s2crs[idx].count == 0) {
1054 smrs[idx].id = sid;
1058 smmu->s2crs[idx].count++;
1059 cfg->smendx[i] = (s16)idx;
1066 mutex_unlock(&smmu->stream_map_mutex);
1070 while (i--) {
1071 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1072 cfg->smendx[i] = INVALID_SMENDX;
1074 mutex_unlock(&smmu->stream_map_mutex);
1081 struct arm_smmu_device *smmu = cfg->smmu;
1084 mutex_lock(&smmu->stream_map_mutex);
1088 cfg->smendx[i] = INVALID_SMENDX;
1090 mutex_unlock(&smmu->stream_map_mutex);
1097 struct arm_smmu_device *smmu = cfg->smmu;
1098 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1122 * domains between of_xlate() and probe_device() - we have no way to cope
1129 return -ENODEV;
1131 smmu = cfg->smmu;
1146 if (smmu_domain->smmu != smmu) {
1147 ret = -EINVAL;
1153 smmu_domain->cfg.cbndx, fwspec);
1169 return -ENODEV;
1170 smmu = cfg->smmu;
1216 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1217 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1221 return -ENODEV;
1224 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1234 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1235 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1242 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1251 struct arm_smmu_device *smmu = smmu_domain->smmu;
1253 if (smmu_domain->flush_ops) {
1255 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1264 struct arm_smmu_device *smmu = smmu_domain->smmu;
1270 if (smmu->version == ARM_SMMU_V2 ||
1271 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1282 struct arm_smmu_device *smmu = smmu_domain->smmu;
1283 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1284 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1285 struct device *dev = smmu->dev;
1290 int ret, idx = cfg->cbndx;
1297 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1299 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1307 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1312 return ops->iova_to_phys(ops, iova);
1316 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
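/*
 * Sketch of the hard translation path above (register accesses elided by
 * this listing): the IOVA is written to the context bank's ATS1PR register
 * and ATSR is polled until its ACTIVE bit clears; CB_PAR then holds either
 * the physical address or a fault flag. On a poll timeout the driver falls
 * back to the software walk via ops->iova_to_phys(), as seen a few lines up.
 */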
1334 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1339 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1340 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1343 return ops->iova_to_phys(ops, iova);
1358 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1388 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1396 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1399 ret = -EINVAL;
1400 for (i = 0; i < fwspec->num_ids; i++) {
1401 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1402 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1404 if (sid & ~smmu->streamid_mask) {
1405 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1406 sid, smmu->streamid_mask);
1409 if (mask & ~smmu->smr_mask_mask) {
1411 mask, smmu->smr_mask_mask);
1416 ret = -ENOMEM;
1422 cfg->smmu = smmu;
1424 while (i--)
1425 cfg->smendx[i] = INVALID_SMENDX;
1437 device_link_add(dev, smmu->dev,
1440 return &smmu->iommu;
1455 ret = arm_smmu_rpm_get(cfg->smmu);
1461 arm_smmu_rpm_put(cfg->smmu);
1472 smmu = cfg->smmu;
1474 if (smmu->impl && smmu->impl->probe_finalize)
1475 smmu->impl->probe_finalize(smmu, dev);
1482 struct arm_smmu_device *smmu = cfg->smmu;
1486 mutex_lock(&smmu->stream_map_mutex);
1488 if (group && smmu->s2crs[idx].group &&
1489 group != smmu->s2crs[idx].group) {
1490 mutex_unlock(&smmu->stream_map_mutex);
1491 return ERR_PTR(-EINVAL);
1494 group = smmu->s2crs[idx].group;
1498 mutex_unlock(&smmu->stream_map_mutex);
1512 smmu->s2crs[idx].group = group;
1514 mutex_unlock(&smmu->stream_map_mutex);
1523 mutex_lock(&smmu_domain->init_mutex);
1524 if (smmu_domain->smmu)
1525 ret = -EPERM;
1527 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1528 mutex_unlock(&smmu_domain->init_mutex);
1539 mutex_lock(&smmu_domain->init_mutex);
1540 if (smmu_domain->smmu)
1541 ret = -EPERM;
1543 smmu_domain->pgtbl_quirks = quirks;
1544 mutex_unlock(&smmu_domain->init_mutex);
1553 if (args->args_count > 0)
1554 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1556 if (args->args_count > 1)
1557 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1558 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
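	/*
	 * Example (sketch): a DT consumer with iommus = <&smmu 0x840 0x7>
	 * yields an fwid with SMR_ID = 0x840 and SMR_MASK = 0x7, i.e. the
	 * SMMU will match stream IDs 0x840-0x847 for that master.
	 */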
1575 list_add_tail(&region->list, head);
1583 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1588 if (impl && impl->def_domain_type)
1589 return impl->def_domain_type(dev);
1606 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1634 for (i = 0; i < smmu->num_mapping_groups; ++i)
1638 for (i = 0; i < smmu->num_context_banks; ++i) {
1669 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1672 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1675 if (smmu->impl && smmu->impl->reset)
1676 smmu->impl->reset(smmu);
1705 u32 id;
1706 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1709 dev_notice(smmu->dev, "probing hardware configuration...\n");
1710 dev_notice(smmu->dev, "SMMUv%d with:\n",
1711 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1714 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1718 id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
1720 id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
1722 if (id & ARM_SMMU_ID0_S1TS) {
1723 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1724 dev_notice(smmu->dev, "\tstage 1 translation\n");
1727 if (id & ARM_SMMU_ID0_S2TS) {
1728 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1729 dev_notice(smmu->dev, "\tstage 2 translation\n");
1732 if (id & ARM_SMMU_ID0_NTS) {
1733 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1734 dev_notice(smmu->dev, "\tnested translation\n");
1737 if (!(smmu->features &
1739 dev_err(smmu->dev, "\tno translation support!\n");
1740 return -ENODEV;
1743 if ((id & ARM_SMMU_ID0_S1TS) &&
1744 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1745 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1746 dev_notice(smmu->dev, "\taddress translation ops\n");
1753 * ID register value has ended up configured incorrectly.
1755 cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
1757 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1758 cttw_fw ? "" : "non-");
1760 dev_notice(smmu->dev,
1764 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1765 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1768 size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
1770 smmu->streamid_mask = size - 1;
1771 if (id & ARM_SMMU_ID0_SMS) {
1772 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1773 size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
1775 dev_err(smmu->dev,
1776 "stream-matching supported, but no SMRs present!\n");
1777 return -ENODEV;
1780 /* Zero-initialised to mark as invalid */
1781 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1783 if (!smmu->smrs)
1784 return -ENOMEM;
1786 dev_notice(smmu->dev,
1789 /* s2cr->type == 0 means translation, so initialise explicitly */
1790 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1792 if (!smmu->s2crs)
1793 return -ENOMEM;
1795 smmu->s2crs[i] = s2cr_init_val;
1797 smmu->num_mapping_groups = size;
1798 mutex_init(&smmu->stream_map_mutex);
1799 spin_lock_init(&smmu->global_sync_lock);
1801 if (smmu->version < ARM_SMMU_V2 ||
1802 !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
1803 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1804 if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
1805 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1809 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
1810 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1813 size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
1814 if (smmu->numpage != 2 * size << smmu->pgshift)
1815 dev_warn(smmu->dev,
1817 2 * size << smmu->pgshift, smmu->numpage);
1819 smmu->numpage = size;
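	/*
	 * Worked example (sketch): with pgshift == 12 and NUMPAGENDXB == 3,
	 * size is 16 pages, so the register resource is expected to span
	 * 2 * 16 * 4KiB = 128KiB (global space plus context banks).
	 */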
1821 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1822 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1823 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1824 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1825 return -ENODEV;
1827 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1828 smmu->num_context_banks, smmu->num_s2_context_banks);
1829 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1830 sizeof(*smmu->cbs), GFP_KERNEL);
1831 if (!smmu->cbs)
1832 return -ENOMEM;
1835 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1836 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
1837 smmu->ipa_size = size;
1840 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
1841 smmu->pa_size = size;
1843 if (id & ARM_SMMU_ID2_VMID16)
1844 smmu->features |= ARM_SMMU_FEAT_VMID16;
1851 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1852 dev_warn(smmu->dev,
1855 if (smmu->version < ARM_SMMU_V2) {
1856 smmu->va_size = smmu->ipa_size;
1857 if (smmu->version == ARM_SMMU_V1_64K)
1858 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1860 size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
1861 smmu->va_size = arm_smmu_id_size_to_bits(size);
1862 if (id & ARM_SMMU_ID2_PTFS_4K)
1863 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1864 if (id & ARM_SMMU_ID2_PTFS_16K)
1865 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1866 if (id & ARM_SMMU_ID2_PTFS_64K)
1867 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1870 if (smmu->impl && smmu->impl->cfg_probe) {
1871 ret = smmu->impl->cfg_probe(smmu);
1877 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1878 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1879 if (smmu->features &
1881 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1882 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1883 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1884 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1885 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1887 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1888 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1890 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1891 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1892 smmu->pgsize_bitmap);
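	/*
	 * Example (sketch): an SMMUv2 exposing only the AArch64 4K and 64K
	 * granules ends up with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G |
	 * SZ_64K | SZ_512M == 0x60211000.
	 */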
1895 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1896 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1897 smmu->va_size, smmu->ipa_size);
1899 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1900 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1901 smmu->ipa_size, smmu->pa_size);
1922 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1923 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1924 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1925 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1926 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1927 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1928 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1929 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1942 smmu->version = ARM_SMMU_V1;
1943 smmu->model = GENERIC_SMMU;
1946 smmu->version = ARM_SMMU_V1_64K;
1947 smmu->model = GENERIC_SMMU;
1950 smmu->version = ARM_SMMU_V2;
1951 smmu->model = GENERIC_SMMU;
1954 smmu->version = ARM_SMMU_V2;
1955 smmu->model = ARM_MMU500;
1958 smmu->version = ARM_SMMU_V2;
1959 smmu->model = CAVIUM_SMMUV2;
1962 ret = -ENODEV;
1971 struct device *dev = smmu->dev;
1978 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1980 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1988 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1989 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1997 return -ENODEV;
2005 struct device *dev = smmu->dev;
2008 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
2009 return dev_err_probe(dev, -ENODEV,
2010 "missing #global-interrupts property\n");
2014 smmu->version = data->version;
2015 smmu->model = data->model;
2017 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2020 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2028 return -ENODEV;
2031 if (of_dma_is_coherent(dev->of_node))
2032 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2045 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2062 for (i = 0; i < rmr->num_sids; i++) {
2063 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2067 if (smmu->s2crs[idx].count == 0) {
2068 smmu->smrs[idx].id = rmr->sids[i];
2069 smmu->smrs[idx].mask = 0;
2070 smmu->smrs[idx].valid = true;
2072 smmu->s2crs[idx].count++;
2073 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2074 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2080 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2082 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2089 struct device *dev = &pdev->dev;
2097 return -ENOMEM;
2099 smmu->dev = dev;
2101 if (dev->of_node)
2108 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2109 if (IS_ERR(smmu->base))
2110 return PTR_ERR(smmu->base);
2111 smmu->ioaddr = res->start;
2117 smmu->numpage = resource_size(res);
2125 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2126 if (smmu->num_context_irqs <= 0)
2127 return dev_err_probe(dev, -ENODEV,
2131 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2132 sizeof(*smmu->irqs), GFP_KERNEL);
2133 if (!smmu->irqs)
2134 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
2135 smmu->num_context_irqs);
2137 for (i = 0; i < smmu->num_context_irqs; i++) {
2142 smmu->irqs[i] = irq;
2145 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2150 smmu->num_clks = err;
2152 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2160 if (smmu->version == ARM_SMMU_V2) {
2161 if (smmu->num_context_banks > smmu->num_context_irqs) {
2164 smmu->num_context_irqs, smmu->num_context_banks);
2165 return -ENODEV;
2169 smmu->num_context_irqs = smmu->num_context_banks;
2172 if (smmu->impl && smmu->impl->global_fault)
2173 global_fault = smmu->impl->global_fault;
2184 "arm-smmu global fault", smmu);
2191 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2192 "smmu.%pa", &smmu->ioaddr);
2194 dev_err(dev, "Failed to register iommu in sysfs\n");
2198 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
2201 dev_err(dev, "Failed to register iommu\n");
2202 iommu_device_sysfs_remove(&smmu->iommu);
2215 * We want to avoid touching dev->power.lock in fastpaths unless
2216 * it's really going to do something useful - pm_runtime_enabled()
2220 if (dev->pm_domain) {
2232 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2233 dev_notice(&pdev->dev, "disabling translation\n");
2240 if (pm_runtime_enabled(smmu->dev))
2241 pm_runtime_force_suspend(smmu->dev);
2243 clk_bulk_disable(smmu->num_clks, smmu->clks);
2245 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2252 iommu_device_unregister(&smmu->iommu);
2253 iommu_device_sysfs_remove(&smmu->iommu);
2263 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2276 clk_bulk_disable(smmu->num_clks, smmu->clks);
2286 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2295 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2313 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2325 .name = "arm-smmu",
2336 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2338 MODULE_ALIAS("platform:arm-smmu");