// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>

#include "arm-smmu.h"

/*
 * Qualcomm wrapper around the generic arm_smmu_device, carrying the state
 * needed to work around firmware that mishandles S2CR BYPASS/FAULT writes
 * (see qcom_smmu_cfg_probe()/qcom_smmu_write_s2cr() below).
 */
struct qcom_smmu {
	struct arm_smmu_device smmu;
	bool bypass_quirk;	/* firmware mangles S2CR BYPASS/FAULT writes */
	u8 bypass_cbndx;	/* context bank reserved to emulate bypass */
};

/* Recover the wrapping qcom_smmu from the embedded arm_smmu_device. */
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

/* Return true if any of @dev's stream IDs is the well-known GPU SID (0). */
static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

/*
 * adreno_smmu_priv callback: hand the GPU driver the live io_pgtable config
 * for the domain identified by @cookie (an arm_smmu_domain), so it can
 * construct compatible per-instance TTBR0 pagetables.
 */
static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active
 *
 * Passing a NULL @pgtbl_cfg disables TTBR0 again (restores the original TCR
 * and ASID-only TTBR0).  Returns -EINVAL if the domain is not using split
 * pagetables or if TTBR0 is already in the requested state.
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	/* Push the updated shadow state out to the hardware context bank */
	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}

/*
 * alloc_context_bank hook: pin the GPU (SID 0) to context bank 0 and steer
 * everything else to banks >= 1.  Note that @start from the caller is
 * deliberately overridden in both branches.
 */
static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
		struct arm_smmu_device *smmu,
		struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

/* TTBR1 (split pagetables) is usable on all targets except msm8996. */
static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
		return false;

	return true;
}

/*
 * init_context hook for the Adreno SMMU: enable the TTBR1 quirk for the GPU
 * domain where supported, and wire up the private cookie/callbacks the GPU
 * driver uses to drive per-instance pagetables.  Non-GPU devices are left
 * untouched.
 */
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 * (priv comes from the GPU platform device's drvdata — see
	 * include/linux/adreno-smmu-priv.h)
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;

	return 0;
}

/* Client devices that should default to an IDENTITY (bypass) domain. */
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};

/*
 * cfg_probe hook: detect the firmware S2CR quirk, reserve a context bank to
 * emulate bypass if needed, and inherit any SMR/S2CR state the bootloader
 * left programmed (e.g. for the display) so it keeps working until the
 * streams are reclaimed.
 */
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 reg;
	u32 smr;
	int i;

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		/* Take the last context bank out of the general allocator */
		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		/* SCTLR = 0: translation disabled, i.e. effectively bypass */
		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	/* Mirror pre-programmed (bootloader) stream mappings into software */
	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

/*
 * write_s2cr hook: translate the requested S2CR programming through the
 * firmware bypass quirk (if active) before writing the register.
 */
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

/*
 * Devices in qcom_smmu_client_of_match default to IDENTITY domains;
 * returning 0 for everything else leaves the core default in place.
 */
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

/*
 * reset hook: standard MMU-500 reset, plus the sdm845-specific
 * wait-for-safe SCM call where applicable.
 */
static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	arm_mmu500_reset(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_sdm845_smmu500_reset(smmu);

	return 0;
}

static const struct arm_smmu_impl qcom_smmu_impl = {
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
};

static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
};

/*
 * Grow the generic arm_smmu_device allocation into a qcom_smmu (in place,
 * via devm_krealloc) and install @impl.  Defers probe until qcom_scm is
 * ready since the reset hook depends on SCM calls.
 */
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct arm_smmu_impl *impl)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;

	return &qsmmu->smmu;
}

/* SoCs whose SMMU instances take the generic qcom_smmu_impl. */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8998-smmu-v2" },
	{ .compatible = "qcom,sc7180-smmu-500" },
	{ .compatible = "qcom,sc7280-smmu-500" },
	{ .compatible = "qcom,sc8180x-smmu-500" },
	{ .compatible = "qcom,sdm630-smmu-v2" },
	{ .compatible = "qcom,sdm845-smmu-500" },
	{ .compatible = "qcom,sm6125-smmu-500" },
	{ .compatible = "qcom,sm8150-smmu-500" },
	{ .compatible = "qcom,sm8250-smmu-500" },
	{ .compatible = "qcom,sm8350-smmu-500" },
	{ }
};

/*
 * ACPI-booted platforms (matched against the IORT table) that use the
 * generic qcom_smmu_impl.
 * NOTE(review): the oem_id/oem_table_id strings were whitespace-mangled in
 * extraction and have been restored to their fixed-width, space-padded ACPI
 * forms — confirm against the upstream tree.
 */
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};

/*
 * Entry point called by the arm-smmu core during probe: pick the Qualcomm
 * implementation (generic or Adreno) based on ACPI/DT matching, or return
 * @smmu unchanged when this is not a recognized Qualcomm instance.
 */
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_impl);
	}

	if (of_match_node(qcom_smmu_impl_of_match, np))
		return qcom_smmu_create(smmu, &qcom_smmu_impl);

	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);

	return smmu;
}