Lines matching +full:mc +full:-sid in drivers/iommu/arm/arm-smmu/arm-smmu.c (Arm SMMUv1/v2 driver)

1 // SPDX-License-Identifier: GPL-2.0-only
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
39 #include <linux/fsl/mc.h>
41 #include "arm-smmu.h"
42 #include "../../dma-iommu.h"
51 #define QCOM_DUMMY_VAL -1
74 if (pm_runtime_enabled(smmu->dev))
75 return pm_runtime_resume_and_get(smmu->dev);
82 if (pm_runtime_enabled(smmu->dev)) {
83 pm_runtime_mark_last_busy(smmu->dev);
84 __pm_runtime_put_autosuspend(smmu->dev);
99 * to 5-10sec worth of reprogramming the context bank, while
102 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
103 pm_runtime_use_autosuspend(smmu->dev);
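Any path that touches SMMU registers is expected to bracket the access with the runtime-PM helpers above. A minimal sketch of a hypothetical caller (the function name and body are assumptions for illustration, not from this file):

	/* Illustrative only: resume before MMIO, then allow autosuspend again. */
	static int example_program_smmu(struct arm_smmu_device *smmu)
	{
		int ret;

		ret = arm_smmu_rpm_get(smmu);	/* no-op unless runtime PM is enabled */
		if (ret < 0)
			return ret;

		/* ... program context banks, SMRs, S2CRs here ... */

		arm_smmu_rpm_put(smmu);		/* mark busy; suspend deferred by the 20 ms delay */
		return 0;
	}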
118 struct pci_bus *bus = to_pci_dev(dev)->bus;
121 bus = bus->parent;
122 return of_node_get(bus->bridge->parent->of_node);
125 return of_node_get(dev->of_node);
137 struct device_node *np = it->node;
140 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
141 "#stream-id-cells", -1)
142 if (it->node == np) {
146 it->node = np;
147 return err == -ENOENT ? 0 : err;
162 if (!np || !of_property_present(np, "#stream-id-cells")) {
164 return -ENODEV;
173 return -ENODEV;
178 /* "mmu-masters" assumes Stream ID == Requester ID */
191 return -ENOMEM;
203 return -ENODEV;
219 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
220 return smmu->impl->tlb_sync(smmu, page, sync, status);
224 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
232 dev_err_ratelimited(smmu->dev,
233 "TLB sync timed out -- SMMU may be deadlocked\n");
240 spin_lock_irqsave(&smmu->global_sync_lock, flags);
243 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
248 struct arm_smmu_device *smmu = smmu_domain->smmu;
251 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
252 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
254 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
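The sync helpers above share a write-then-poll pattern: kick the sync register, then spin on the status register with exponential back-off before reporting a timeout. A simplified sketch of that pattern (reg_write()/reg_read() and the register/bit names are placeholders, not the driver's accessors):

	static int sync_and_wait(void)
	{
		unsigned int delay, spin;

		reg_write(TLBSYNC, 0);				/* start the sync */
		for (delay = 1; delay < 1000000; delay *= 2) {	/* back off: 1us, 2us, 4us, ... */
			for (spin = 0; spin < TLB_SPIN_COUNT; spin++) {
				if (!(reg_read(TLBSTATUS) & GSACTIVE))
					return 0;		/* sync has completed */
				cpu_relax();
			}
			udelay(delay);
		}
		return -ETIMEDOUT;				/* SMMU may be deadlocked */
	}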
265 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
266 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
273 struct arm_smmu_device *smmu = smmu_domain->smmu;
277 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
285 struct arm_smmu_device *smmu = smmu_domain->smmu;
286 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
287 int idx = cfg->cbndx;
289 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
292 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
294 iova |= cfg->asid;
298 } while (size -= granule);
301 iova |= (u64)cfg->asid << 48;
305 } while (size -= granule);
313 struct arm_smmu_device *smmu = smmu_domain->smmu;
314 int idx = smmu_domain->cfg.cbndx;
316 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
321 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
326 } while (size -= granule);
333 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
335 if (cfg->flush_walk_prefer_tlbiasid) {
374 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
377 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
385 struct arm_smmu_device *smmu = smmu_domain->smmu;
387 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
390 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
415 cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
416 cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
417 cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
418 cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
424 dev_err(smmu->dev,
426 cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
428 dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
429 cfi->fsr,
430 (cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "",
431 (cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "",
432 (u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
433 (cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "",
434 (cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "",
435 (cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
436 (cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
437 (cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "",
438 (cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "",
439 (cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "",
440 (cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "",
441 cfi->cbfrsynra);
443 dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
444 cfi->fsynr,
445 (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
446 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
447 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
448 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
449 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
450 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
451 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
452 (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
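The FSR/FSYNR decode above relies on FIELD_GET() from <linux/bitfield.h>, which extracts a register field through its mask. A standalone illustration with a made-up field (the mask below is not the real FSR layout):

	#include <linux/bits.h>
	#include <linux/bitfield.h>

	#define EXAMPLE_FIELD	GENMASK(10, 9)		/* hypothetical two-bit field */

	static inline u32 example_decode(u32 fsr)
	{
		/* e.g. fsr = 0x600: bits 9 and 10 are set, so the field value is 3 */
		return FIELD_GET(EXAMPLE_FIELD, fsr);
	}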
459 struct arm_smmu_device *smmu = smmu_domain->smmu;
462 int idx = smmu_domain->cfg.cbndx;
470 ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
473 if (ret == -ENOSYS && __ratelimit(&rs))
498 dev_err(smmu->dev,
499 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
502 dev_err(smmu->dev,
504 dev_err(smmu->dev,
516 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
517 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
518 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
520 cb->cfg = cfg;
524 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
525 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
527 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
528 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
529 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
530 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
532 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
535 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
540 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
541 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
542 cb->ttbr[1] = 0;
544 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
545 cfg->asid);
546 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
547 cfg->asid);
549 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
550 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
552 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
555 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
558 /* MAIRs (stage-1 only) */
560 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
561 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
562 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
564 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
565 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
574 struct arm_smmu_cb *cb = &smmu->cbs[idx];
575 struct arm_smmu_cfg *cfg = cb->cfg;
583 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
586 if (smmu->version > ARM_SMMU_V1) {
587 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
591 /* 16-bit VMIDs live in CBA2R */
592 if (smmu->features & ARM_SMMU_FEAT_VMID16)
593 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
599 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
600 if (smmu->version < ARM_SMMU_V2)
601 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
612 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
613 /* 8-bit VMIDs live in CBAR */
614 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
623 if (stage1 && smmu->version > ARM_SMMU_V1)
624 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
625 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
628 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
629 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
630 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
631 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
633 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
636 cb->ttbr[1]);
639 /* MAIRs (stage-1 only) */
641 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
642 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
653 if (smmu->impl && smmu->impl->write_sctlr)
654 smmu->impl->write_sctlr(smmu, idx, reg);
663 if (smmu->impl && smmu->impl->alloc_context_bank)
664 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
666 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
678 struct iommu_domain *domain = &smmu_domain->domain;
679 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
682 mutex_lock(&smmu_domain->init_mutex);
683 if (smmu_domain->smmu)
702 * Note that you can't actually request stage-2 mappings.
704 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
705 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
706 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
707 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
712 * the decision into the io-pgtable code where it arguably belongs,
717 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
718 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
721 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
722 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
723 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
724 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
725 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
728 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
730 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
731 ret = -EINVAL;
735 switch (smmu_domain->stage) {
737 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
738 start = smmu->num_s2_context_banks;
739 ias = smmu->va_size;
740 oas = smmu->ipa_size;
741 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
743 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
752 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
760 cfg->cbar = CBAR_TYPE_S2_TRANS;
762 ias = smmu->ipa_size;
763 oas = smmu->pa_size;
764 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
771 if (smmu->version == ARM_SMMU_V2)
772 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
774 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
777 ret = -EINVAL;
786 smmu_domain->smmu = smmu;
788 cfg->cbndx = ret;
789 if (smmu->version < ARM_SMMU_V2) {
790 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
791 cfg->irptndx %= smmu->num_context_irqs;
793 cfg->irptndx = cfg->cbndx;
796 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
797 cfg->vmid = cfg->cbndx + 1;
799 cfg->asid = cfg->cbndx;
802 .pgsize_bitmap = smmu->pgsize_bitmap,
805 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
806 .tlb = smmu_domain->flush_ops,
807 .iommu_dev = smmu->dev,
810 if (smmu->impl && smmu->impl->init_context) {
811 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
816 if (smmu_domain->pgtbl_quirks)
817 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
821 ret = -ENOMEM;
826 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
829 domain->geometry.aperture_start = ~0UL << ias;
830 domain->geometry.aperture_end = ~0UL;
832 domain->geometry.aperture_end = (1UL << ias) - 1;
835 domain->geometry.force_aperture = true;
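A worked example of the aperture arithmetic above, assuming ias = 48: a TTBR1 (upper-half) domain gets aperture_start = ~0UL << 48 = 0xffff000000000000 and aperture_end = ~0UL, while the default TTBR0 case keeps aperture_start at 0 and gets aperture_end = (1UL << 48) - 1 = 0x0000ffffffffffff.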
839 arm_smmu_write_context_bank(smmu, cfg->cbndx);
843 * handler seeing a half-initialised domain state.
845 irq = smmu->irqs[cfg->irptndx];
847 if (smmu->impl && smmu->impl->context_fault)
848 context_fault = smmu->impl->context_fault;
852 if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
853 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
856 "arm-smmu-context-fault",
859 ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
860 "arm-smmu-context-fault", smmu_domain);
863 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
864 cfg->irptndx, irq);
865 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
868 mutex_unlock(&smmu_domain->init_mutex);
871 smmu_domain->pgtbl_ops = pgtbl_ops;
875 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
876 smmu_domain->smmu = NULL;
878 mutex_unlock(&smmu_domain->init_mutex);
884 struct arm_smmu_device *smmu = smmu_domain->smmu;
885 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
899 smmu->cbs[cfg->cbndx].cfg = NULL;
900 arm_smmu_write_context_bank(smmu, cfg->cbndx);
902 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
903 irq = smmu->irqs[cfg->irptndx];
904 devm_free_irq(smmu->dev, irq, smmu_domain);
907 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
908 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
926 mutex_init(&smmu_domain->init_mutex);
927 spin_lock_init(&smmu_domain->cb_lock);
929 return &smmu_domain->domain;
946 struct arm_smmu_smr *smr = smmu->smrs + idx;
947 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
948 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
950 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
957 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
960 if (smmu->impl && smmu->impl->write_s2cr) {
961 smmu->impl->write_s2cr(smmu, idx);
965 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
966 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
967 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
969 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
970 smmu->smrs[idx].valid)
978 if (smmu->smrs)
991 if (!smmu->smrs)
1001 for (i = 0; i < smmu->num_mapping_groups; i++)
1002 if (!smmu->smrs[i].valid)
1011 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
1014 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
1016 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
1019 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
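The probe above discovers how many SMR bits the hardware actually implements by writing an all-ones pattern and reading it back; if the ID field reads back as 0x7fff, for instance, only 15 stream-ID bits exist. A reduced sketch, with smr_write()/smr_read() standing in for the GR0 SMR accessors and 0xffff as an assumed starting value:

	u32 smr, id_bits, mask_bits;

	smr = FIELD_PREP(ARM_SMMU_SMR_ID, 0xffff);		/* request every ID bit */
	smr_write(smr);
	id_bits = FIELD_GET(ARM_SMMU_SMR_ID, smr_read());	/* e.g. 0x7fff */

	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, id_bits);		/* repeat for the mask field */
	smr_write(smr);
	mask_bits = FIELD_GET(ARM_SMMU_SMR_MASK, smr_read());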
1024 struct arm_smmu_smr *smrs = smmu->smrs;
1025 int i, free_idx = -ENOSPC;
1032 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1058 return -EINVAL;
1066 if (--smmu->s2crs[idx].count)
1069 smmu->s2crs[idx] = s2cr_init_val;
1070 if (smmu->smrs)
1071 smmu->smrs[idx].valid = false;
1080 struct arm_smmu_device *smmu = cfg->smmu;
1081 struct arm_smmu_smr *smrs = smmu->smrs;
1084 mutex_lock(&smmu->stream_map_mutex);
1087 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1088 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1091 ret = -EEXIST;
1095 ret = arm_smmu_find_sme(smmu, sid, mask);
1100 if (smrs && smmu->s2crs[idx].count == 0) {
1101 smrs[idx].id = sid;
1105 smmu->s2crs[idx].count++;
1106 cfg->smendx[i] = (s16)idx;
1113 mutex_unlock(&smmu->stream_map_mutex);
1117 while (i--) {
1118 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1119 cfg->smendx[i] = INVALID_SMENDX;
1121 mutex_unlock(&smmu->stream_map_mutex);
1128 struct arm_smmu_device *smmu = cfg->smmu;
1131 mutex_lock(&smmu->stream_map_mutex);
1135 cfg->smendx[i] = INVALID_SMENDX;
1137 mutex_unlock(&smmu->stream_map_mutex);
1144 struct arm_smmu_device *smmu = cfg->smmu;
1145 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1169 * domains between of_xlate() and probe_device() - we have no way to cope
1176 return -ENODEV;
1178 smmu = cfg->smmu;
1193 if (smmu_domain->smmu != smmu) {
1194 ret = -EINVAL;
1200 smmu_domain->cfg.cbndx, fwspec);
1215 return -ENODEV;
1216 smmu = cfg->smmu;
1261 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1262 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1266 return -ENODEV;
1269 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1279 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1280 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1287 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1296 struct arm_smmu_device *smmu = smmu_domain->smmu;
1298 if (smmu_domain->flush_ops) {
1300 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1309 struct arm_smmu_device *smmu = smmu_domain->smmu;
1315 if (smmu->version == ARM_SMMU_V2 ||
1316 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1327 struct arm_smmu_device *smmu = smmu_domain->smmu;
1328 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1329 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1330 struct device *dev = smmu->dev;
1335 int ret, idx = cfg->cbndx;
1342 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1344 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1352 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1357 return ops->iova_to_phys(ops, iova);
1361 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1379 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1384 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1385 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1388 return ops->iova_to_phys(ops, iova);
1403 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1433 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1441 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1444 ret = -EINVAL;
1445 for (i = 0; i < fwspec->num_ids; i++) {
1446 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1447 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1449 if (sid & ~smmu->streamid_mask) {
1451 sid, smmu->streamid_mask);
1454 if (mask & ~smmu->smr_mask_mask) {
1456 mask, smmu->smr_mask_mask);
1461 ret = -ENOMEM;
1467 cfg->smmu = smmu;
1469 while (i--)
1470 cfg->smendx[i] = INVALID_SMENDX;
1482 device_link_add(dev, smmu->dev,
1485 return &smmu->iommu;
1499 ret = arm_smmu_rpm_get(cfg->smmu);
1505 arm_smmu_rpm_put(cfg->smmu);
1516 smmu = cfg->smmu;
1518 if (smmu->impl && smmu->impl->probe_finalize)
1519 smmu->impl->probe_finalize(smmu, dev);
1526 struct arm_smmu_device *smmu = cfg->smmu;
1530 mutex_lock(&smmu->stream_map_mutex);
1532 if (group && smmu->s2crs[idx].group &&
1533 group != smmu->s2crs[idx].group) {
1534 mutex_unlock(&smmu->stream_map_mutex);
1535 return ERR_PTR(-EINVAL);
1538 group = smmu->s2crs[idx].group;
1542 mutex_unlock(&smmu->stream_map_mutex);
1556 smmu->s2crs[idx].group = group;
1558 mutex_unlock(&smmu->stream_map_mutex);
1568 mutex_lock(&smmu_domain->init_mutex);
1569 if (smmu_domain->smmu)
1570 ret = -EPERM;
1572 smmu_domain->pgtbl_quirks = quirks;
1573 mutex_unlock(&smmu_domain->init_mutex);
1583 if (args->args_count > 0)
1584 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1586 if (args->args_count > 1)
1587 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1588 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1605 list_add_tail(&region->list, head);
1613 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1618 if (impl && impl->def_domain_type)
1619 return impl->def_domain_type(dev);
1636 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1663 for (i = 0; i < smmu->num_mapping_groups; ++i)
1667 for (i = 0; i < smmu->num_context_banks; ++i) {
1698 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1701 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1704 if (smmu->impl && smmu->impl->reset)
1705 smmu->impl->reset(smmu);
1735 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1738 dev_notice(smmu->dev, "probing hardware configuration...\n");
1739 dev_notice(smmu->dev, "SMMUv%d with:\n",
1740 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1752 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1753 dev_notice(smmu->dev, "\tstage 1 translation\n");
1757 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1758 dev_notice(smmu->dev, "\tstage 2 translation\n");
1762 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1763 dev_notice(smmu->dev, "\tnested translation\n");
1766 if (!(smmu->features &
1768 dev_err(smmu->dev, "\tno translation support!\n");
1769 return -ENODEV;
1773 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1774 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1775 dev_notice(smmu->dev, "\taddress translation ops\n");
1786 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1787 cttw_fw ? "" : "non-");
1789 dev_notice(smmu->dev,
1793 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1794 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1799 smmu->streamid_mask = size - 1;
1801 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1804 dev_err(smmu->dev,
1805 "stream-matching supported, but no SMRs present!\n");
1806 return -ENODEV;
1809 /* Zero-initialised to mark as invalid */
1810 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1812 if (!smmu->smrs)
1813 return -ENOMEM;
1815 dev_notice(smmu->dev,
1818 /* s2cr->type == 0 means translation, so initialise explicitly */
1819 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1821 if (!smmu->s2crs)
1822 return -ENOMEM;
1824 smmu->s2crs[i] = s2cr_init_val;
1826 smmu->num_mapping_groups = size;
1827 mutex_init(&smmu->stream_map_mutex);
1828 spin_lock_init(&smmu->global_sync_lock);
1830 if (smmu->version < ARM_SMMU_V2 ||
1832 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1834 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1839 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1843 if (smmu->numpage != 2 * size << smmu->pgshift)
1844 dev_warn(smmu->dev,
1846 2 * size << smmu->pgshift, smmu->numpage);
1848 smmu->numpage = size;
1850 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1851 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1852 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1853 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1854 return -ENODEV;
1856 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1857 smmu->num_context_banks, smmu->num_s2_context_banks);
1858 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1859 sizeof(*smmu->cbs), GFP_KERNEL);
1860 if (!smmu->cbs)
1861 return -ENOMEM;
1866 smmu->ipa_size = size;
1870 smmu->pa_size = size;
1873 smmu->features |= ARM_SMMU_FEAT_VMID16;
1880 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1881 dev_warn(smmu->dev,
1884 if (smmu->version < ARM_SMMU_V2) {
1885 smmu->va_size = smmu->ipa_size;
1886 if (smmu->version == ARM_SMMU_V1_64K)
1887 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1890 smmu->va_size = arm_smmu_id_size_to_bits(size);
1892 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1894 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1896 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1899 if (smmu->impl && smmu->impl->cfg_probe) {
1900 ret = smmu->impl->cfg_probe(smmu);
1906 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1907 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1908 if (smmu->features &
1910 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1911 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1912 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1913 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1914 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1916 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1917 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1919 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1920 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1921 smmu->pgsize_bitmap);
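As a worked example of the composition above, an SMMU reporting only the AArch64 4K and 64K granules ends up with

	pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G | SZ_64K | SZ_512M
	              = 0x1000 | 0x200000 | 0x40000000 | 0x10000 | 0x20000000
	              = 0x60211000

so the notice above would print "Supported page sizes: 0x60211000".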
1924 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1925 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1926 smmu->va_size, smmu->ipa_size);
1928 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1929 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1930 smmu->ipa_size, smmu->pa_size);
1951 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1952 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1953 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1954 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1955 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1956 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1957 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1958 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1971 smmu->version = ARM_SMMU_V1;
1972 smmu->model = GENERIC_SMMU;
1975 smmu->version = ARM_SMMU_V1_64K;
1976 smmu->model = GENERIC_SMMU;
1979 smmu->version = ARM_SMMU_V2;
1980 smmu->model = GENERIC_SMMU;
1983 smmu->version = ARM_SMMU_V2;
1984 smmu->model = ARM_MMU500;
1987 smmu->version = ARM_SMMU_V2;
1988 smmu->model = CAVIUM_SMMUV2;
1991 ret = -ENODEV;
2000 struct device *dev = smmu->dev;
2007 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2009 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2017 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2018 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2026 return -ENODEV;
2034 struct device *dev = smmu->dev;
2037 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
2038 return dev_err_probe(dev, -ENODEV,
2039 "missing #global-interrupts property\n");
2043 smmu->version = data->version;
2044 smmu->model = data->model;
2046 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2049 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2057 return -ENODEV;
2060 if (of_dma_is_coherent(dev->of_node))
2061 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2074 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2091 for (i = 0; i < rmr->num_sids; i++) {
2092 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2096 if (smmu->s2crs[idx].count == 0) {
2097 smmu->smrs[idx].id = rmr->sids[i];
2098 smmu->smrs[idx].mask = 0;
2099 smmu->smrs[idx].valid = true;
2101 smmu->s2crs[idx].count++;
2102 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2103 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2109 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2111 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2118 struct device *dev = &pdev->dev;
2126 return -ENOMEM;
2128 smmu->dev = dev;
2130 if (dev->of_node)
2137 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2138 if (IS_ERR(smmu->base))
2139 return PTR_ERR(smmu->base);
2140 smmu->ioaddr = res->start;
2146 smmu->numpage = resource_size(res);
2154 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2155 if (smmu->num_context_irqs <= 0)
2156 return dev_err_probe(dev, -ENODEV,
2160 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2161 sizeof(*smmu->irqs), GFP_KERNEL);
2162 if (!smmu->irqs)
2163 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
2164 smmu->num_context_irqs);
2166 for (i = 0; i < smmu->num_context_irqs; i++) {
2171 smmu->irqs[i] = irq;
2174 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2179 smmu->num_clks = err;
2181 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2189 if (smmu->version == ARM_SMMU_V2) {
2190 if (smmu->num_context_banks > smmu->num_context_irqs) {
2193 smmu->num_context_irqs, smmu->num_context_banks);
2194 return -ENODEV;
2198 smmu->num_context_irqs = smmu->num_context_banks;
2201 if (smmu->impl && smmu->impl->global_fault)
2202 global_fault = smmu->impl->global_fault;
2213 "arm-smmu global fault", smmu);
2228 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2229 "smmu.%pa", &smmu->ioaddr);
2233 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
2236 iommu_device_sysfs_remove(&smmu->iommu);
2241 * We want to avoid touching dev->power.lock in fastpaths unless
2242 * it's really going to do something useful - pm_runtime_enabled()
2246 if (dev->pm_domain) {
2259 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2260 dev_notice(&pdev->dev, "disabling translation\n");
2267 if (pm_runtime_enabled(smmu->dev))
2268 pm_runtime_force_suspend(smmu->dev);
2270 clk_bulk_disable(smmu->num_clks, smmu->clks);
2272 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2279 iommu_device_unregister(&smmu->iommu);
2280 iommu_device_sysfs_remove(&smmu->iommu);
2290 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2303 clk_bulk_disable(smmu->num_clks, smmu->clks);
2313 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2322 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2340 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2352 .name = "arm-smmu",
2365 MODULE_ALIAS("platform:arm-smmu");