// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "arm-smmu.h"
#include "arm-smmu-qcom.h"

#define QCOM_DUMMY_VAL -1

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
        return container_of(smmu, struct qcom_smmu, smmu);
}

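/*
 * Implementation-specific TLB sync: kick the sync register, then poll the
 * status register in short busy-wait bursts (TLB_SPIN_COUNT reads) with an
 * exponentially growing udelay() between bursts. With the constants from
 * arm-smmu.h this waits on the order of one second in the worst case before
 * handing off to qcom_smmu_tlb_sync_debug(), which dumps implementation
 * defined state when CONFIG_ARM_SMMU_QCOM_DEBUG is enabled and is a no-op
 * otherwise.
 */
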
static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
                               int sync, int status)
{
        unsigned int spin_cnt, delay;
        u32 reg;

        arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        reg = arm_smmu_readl(smmu, page, status);
                        if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
                udelay(delay);
        }

        qcom_smmu_tlb_sync_debug(smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
                                         u32 reg)
{
        struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);

        /*
         * On the GPU device we want to process subsequent transactions after a
         * fault to keep the GPU from hanging
         */
        reg |= ARM_SMMU_SCTLR_HUPCF;

        if (qsmmu->stall_enabled & BIT(idx))
                reg |= ARM_SMMU_SCTLR_CFCFG;

        arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

static void qcom_adreno_smmu_get_fault_info(const void *cookie,
                                            struct adreno_smmu_fault_info *info)
{
        struct arm_smmu_domain *smmu_domain = (void *)cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
        info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
        info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
        info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
        info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
        info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
        info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}

static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
        struct arm_smmu_domain *smmu_domain = (void *)cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);

        if (enabled)
                qsmmu->stall_enabled |= BIT(cfg->cbndx);
        else
                qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
}

static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
{
        struct arm_smmu_domain *smmu_domain = (void *)cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        u32 reg = 0;

        if (terminate)
                reg |= ARM_SMMU_RESUME_TERMINATE;

        arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i;

        /*
         * The GPU will always use SID 0 so that is a handy way to uniquely
         * identify it and configure it for per-instance pagetables
         */
        for (i = 0; i < fwspec->num_ids; i++) {
                u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

                if (sid == QCOM_ADRENO_SMMU_GPU_SID)
                        return true;
        }

        return false;
}

static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
                const void *cookie)
{
        struct arm_smmu_domain *smmu_domain = (void *)cookie;
        struct io_pgtable *pgtable =
                io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
        return &pgtable->cfg;
}

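/*
 * Sketch of the consumer-side sequence for per-instance pagetables, based on
 * the adreno_smmu_priv contract (linux/adreno-smmu-priv.h); the authoritative
 * caller is the msm GPU driver and the local names here are illustrative only:
 *
 *	const struct io_pgtable_cfg *ttbr1_cfg;
 *	struct io_pgtable_cfg ttbr0_cfg;
 *
 *	ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);
 *	ttbr0_cfg = *ttbr1_cfg;		// clone, build a private pagetable
 *	err = priv->set_ttbr0_cfg(priv->cookie, &ttbr0_cfg);	// enable TTBR0
 *	...
 *	priv->set_ttbr0_cfg(priv->cookie, NULL);	// back to the default
 */
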
/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active.
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
                const struct io_pgtable_cfg *pgtbl_cfg)
{
        struct arm_smmu_domain *smmu_domain = (void *)cookie;
        struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

        /* The domain must have split pagetables already enabled */
        if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
                return -EINVAL;

        /* If the pagetable config is NULL, disable TTBR0 */
        if (!pgtbl_cfg) {
                /* Error out if TTBR0 is already disabled */
                if (cb->tcr[0] & ARM_SMMU_TCR_EPD0)
                        return -EINVAL;

                /* Set TCR to the original configuration */
                cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
                cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
        } else {
                u32 tcr = cb->tcr[0];

                /* Don't call this again if TTBR0 is already enabled */
                if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
                        return -EINVAL;

                tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
                tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

                cb->tcr[0] = tcr;
                cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
                cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
        }

        arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

        return 0;
}

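/*
 * The Adreno GPU performs pagetable switches itself by rewriting TTBR0 of
 * its context bank, which presumes the bank sits at a fixed, known index;
 * the allocator below therefore pins the GPU to bank 0 and steers all other
 * masters to banks 1 and up.
 */
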
static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
                                               struct arm_smmu_device *smmu,
                                               struct device *dev, int start)
{
        int count;

        /*
         * Assign context bank 0 to the GPU device so the GPU hardware can
         * switch pagetables
         */
        if (qcom_adreno_smmu_is_gpu_device(dev)) {
                start = 0;
                count = 1;
        } else {
                start = 1;
                count = smmu->num_context_banks;
        }

        return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
{
        const struct device_node *np = smmu->dev->of_node;

        if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
                return false;

        return true;
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
                struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
        struct adreno_smmu_priv *priv;

        smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

        /* Only enable split pagetables for the GPU device (SID 0) */
        if (!qcom_adreno_smmu_is_gpu_device(dev))
                return 0;

        /*
         * All targets that use the qcom,adreno-smmu compatible string *should*
         * be AARCH64 stage 1, but double check because the arm-smmu code
         * assumes that is the case when the TTBR1 quirk is enabled.
         */
        if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
            (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
            (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
                pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

        /* Initialize the private interface with the GPU */
        priv = dev_get_drvdata(dev);
        priv->cookie = smmu_domain;
        priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
        priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
        priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
        priv->set_stall = qcom_adreno_smmu_set_stall;
        priv->resume_translation = qcom_adreno_smmu_resume_translation;

        return 0;
}

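/*
 * Clients matching the table below are given an IOMMU_DOMAIN_IDENTITY
 * default domain by qcom_smmu_def_domain_type(), i.e. a 1:1 mapping.
 * Typically these are devices such as display and modem whose streams were
 * configured by earlier firmware and must keep working untranslated while
 * the kernel takes over.
 */
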
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,adreno-gmu" },
        { .compatible = "qcom,mdp4" },
        { .compatible = "qcom,mdss" },
        { .compatible = "qcom,qcm2290-mdss" },
        { .compatible = "qcom,sc7180-mdss" },
        { .compatible = "qcom,sc7180-mss-pil" },
        { .compatible = "qcom,sc7280-mdss" },
        { .compatible = "qcom,sc7280-mss-pil" },
        { .compatible = "qcom,sc8180x-mdss" },
        { .compatible = "qcom,sc8280xp-mdss" },
        { .compatible = "qcom,sdm670-mdss" },
        { .compatible = "qcom,sdm845-mdss" },
        { .compatible = "qcom,sdm845-mss-pil" },
        { .compatible = "qcom,sm6350-mdss" },
        { .compatible = "qcom,sm6375-mdss" },
        { .compatible = "qcom,sm8150-mdss" },
        { .compatible = "qcom,sm8250-mdss" },
        { .compatible = "qcom,x1e80100-mdss" },
        { }
};

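/*
 * Both init_context hooks in this file set flush_walk_prefer_tlbiasid, which
 * makes the core arm-smmu driver invalidate the whole ASID when flushing a
 * table walk instead of issuing long strings of per-granule invalidations,
 * a cheaper operation on these implementations.
 */
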
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
                struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
        smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

        return 0;
}

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
        struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
        unsigned int last_s2cr;
        u32 reg;
        u32 smr;
        int i;

        /*
         * MSM8998 LPASS SMMU reports 13 context banks, but accessing
         * the last context bank crashes the system.
         */
        if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
            smmu->num_context_banks == 13)
                smmu->num_context_banks = 12;

        /*
         * Some platforms support more than the Arm SMMU architected maximum of
         * 128 stream matching groups. For unknown reasons, the additional
         * groups don't exhibit the same behavior as the architected registers,
         * so limit the groups to 128 until the behavior is fixed for the other
         * groups.
         */
        if (smmu->num_mapping_groups > 128) {
                dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
                smmu->num_mapping_groups = 128;
        }

        last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

        /*
         * With some firmware versions writes to S2CR of type FAULT are
         * ignored, and writing BYPASS will end up written as FAULT in the
         * register. Perform a write to S2CR to detect if this is the case and,
         * if so, reserve a context bank to emulate bypass streams.
         */
        reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
              FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
              FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
        arm_smmu_gr0_write(smmu, last_s2cr, reg);
        reg = arm_smmu_gr0_read(smmu, last_s2cr);
        if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
                qsmmu->bypass_quirk = true;
                qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

                set_bit(qsmmu->bypass_cbndx, smmu->context_map);

                arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

                reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
                arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
        }

        for (i = 0; i < smmu->num_mapping_groups; i++) {
                smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

                if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
                        /* Ignore valid bit for SMR mask extraction. */
                        smr &= ~ARM_SMMU_SMR_VALID;
                        smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
                        smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
                        smmu->smrs[i].valid = true;

                        smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
                        smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
                        smmu->s2crs[i].cbndx = 0xff;
                }
        }

        return 0;
}

static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
{
        /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
        smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;

        return 0;
}

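/*
 * Net effect of the bypass quirk handling below, on firmware that mangles
 * S2CR writes (detected in qcom_smmu_cfg_probe()):
 *
 *	requested	programmed		effective behavior
 *	---------	----------		------------------
 *	BYPASS		TRANS, reserved CB	bypass via a context bank whose
 *						SCTLR is 0
 *	FAULT		BYPASS			firmware rewrites it to FAULT
 *	TRANS		TRANS			unchanged
 */
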
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
        struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
        struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
        u32 cbndx = s2cr->cbndx;
        u32 type = s2cr->type;
        u32 reg;

        if (qsmmu->bypass_quirk) {
                if (type == S2CR_TYPE_BYPASS) {
                        /*
                         * Firmware with quirky S2CR handling will substitute
                         * BYPASS writes with FAULT, so point the stream to the
                         * reserved context bank and ask for translation on the
                         * stream
                         */
                        type = S2CR_TYPE_TRANS;
                        cbndx = qsmmu->bypass_cbndx;
                } else if (type == S2CR_TYPE_FAULT) {
                        /*
                         * Firmware with quirky S2CR handling will ignore FAULT
                         * writes, so trick it into writing FAULT by asking for
                         * BYPASS.
                         */
                        type = S2CR_TYPE_BYPASS;
                        cbndx = 0xff;
                }
        }

        reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
              FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
              FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

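/*
 * Default domain hook: force IOMMU_DOMAIN_IDENTITY for the clients listed in
 * qcom_smmu_client_of_match[]; returning 0 for everything else leaves the
 * choice to the IOMMU core.
 */
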
static int qcom_smmu_def_domain_type(struct device *dev)
{
        const struct of_device_id *match =
                of_match_device(qcom_smmu_client_of_match, dev);

        return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
        int ret;

        arm_mmu500_reset(smmu);

        /*
         * To address performance degradation in non-real time clients,
         * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
         * such as MTP and db845, whose firmware implements secure monitor
         * call handlers to turn on/off the wait-for-safe logic.
         */
        ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
        if (ret)
                dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

        return ret;
}

static const struct arm_smmu_impl qcom_smmu_v2_impl = {
        .init_context = qcom_smmu_init_context,
        .cfg_probe = qcom_smmu_cfg_probe,
        .def_domain_type = qcom_smmu_def_domain_type,
        .write_s2cr = qcom_smmu_write_s2cr,
        .tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_smmu_500_impl = {
        .init_context = qcom_smmu_init_context,
        .cfg_probe = qcom_smmu_cfg_probe,
        .def_domain_type = qcom_smmu_def_domain_type,
        .reset = arm_mmu500_reset,
        .write_s2cr = qcom_smmu_write_s2cr,
        .tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
        .context_fault = qcom_smmu_context_fault,
        .context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl sdm845_smmu_500_impl = {
        .init_context = qcom_smmu_init_context,
        .cfg_probe = qcom_smmu_cfg_probe,
        .def_domain_type = qcom_smmu_def_domain_type,
        .reset = qcom_sdm845_smmu500_reset,
        .write_s2cr = qcom_smmu_write_s2cr,
        .tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
        .context_fault = qcom_smmu_context_fault,
        .context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
        .init_context = qcom_adreno_smmu_init_context,
        .cfg_probe = qcom_adreno_smmuv2_cfg_probe,
        .def_domain_type = qcom_smmu_def_domain_type,
        .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
        .write_sctlr = qcom_adreno_smmu_write_sctlr,
        .tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
        .init_context = qcom_adreno_smmu_init_context,
        .def_domain_type = qcom_smmu_def_domain_type,
        .reset = arm_mmu500_reset,
        .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
        .write_sctlr = qcom_adreno_smmu_write_sctlr,
        .tlb_sync = qcom_smmu_tlb_sync,
};

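/*
 * qcom_smmu_create() below turns the core-allocated arm_smmu_device into the
 * Qualcomm subclass. The layout that makes the devm_krealloc() safe is
 * (abridged from arm-smmu-qcom.h):
 *
 *	struct qcom_smmu {
 *		struct arm_smmu_device smmu;	// must remain the first member
 *		const struct qcom_smmu_config *cfg;
 *		bool bypass_quirk;
 *		u8 bypass_cbndx;
 *		u32 stall_enabled;
 *	};
 *
 * With the embedded smmu at offset 0 the original contents survive the
 * in-place reallocation, and to_qcom_smmu() recovers the wrapper with
 * container_of().
 */
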
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
                const struct qcom_smmu_match_data *data)
{
        const struct device_node *np = smmu->dev->of_node;
        const struct arm_smmu_impl *impl;
        struct qcom_smmu *qsmmu;

        if (!data)
                return ERR_PTR(-EINVAL);

        if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
                impl = data->adreno_impl;
        else
                impl = data->impl;

        if (!impl)
                return smmu;

        /* Check to make sure qcom_scm has finished probing */
        if (!qcom_scm_is_available())
                return ERR_PTR(dev_err_probe(smmu->dev, -EPROBE_DEFER,
                                             "qcom_scm not ready\n"));

        qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
        if (!qsmmu)
                return ERR_PTR(-ENOMEM);

        qsmmu->smmu.impl = impl;
        qsmmu->cfg = data->cfg;

        return &qsmmu->smmu;
}

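/*
 * The implementation-defined offsets below are consumed only by the
 * CONFIG_ARM_SMMU_QCOM_DEBUG code, which reads TBU power status and the
 * sync/invalidate counters from qcom_smmu_tlb_sync_debug().
 */
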
/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
        [QCOM_SMMU_TBU_PWR_STATUS]              = 0x2204,
        [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK]      = 0x25dc,
        [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR]  = 0x2670,
};

static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
        .reg_offset = qcom_smmu_impl0_reg_offset,
};

/*
 * It is not yet possible to use the MDP SMMU with the bypass quirk on
 * msm8996; there are not enough context banks.
 */
static const struct qcom_smmu_match_data msm8996_smmu_data = {
        .impl = NULL,
        .adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
        .impl = &qcom_smmu_v2_impl,
        .adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
        .impl = &sdm845_smmu_500_impl,
        /*
         * No need for an adreno impl here: on sdm845 the Adreno SMMU is
         * handled by the separate sdm845-smmu-v2 device. There is also no
         * debug configuration.
         */
};

static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
        .impl = &qcom_smmu_500_impl,
        .adreno_impl = &qcom_adreno_smmu_500_impl,
        .cfg = &qcom_smmu_impl0_cfg,
};

/*
 * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
 * special handling and cannot be covered by the qcom,smmu-500 entry.
 */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
        { .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
        { .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
        { .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm7150-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
        { }
};

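/*
 * New SoCs are expected to reach the generic "qcom,smmu-500" entry above
 * through a fallback compatible instead of growing the table, e.g. (DT
 * sketch, node address illustrative):
 *
 *	iommu@15000000 {
 *		compatible = "qcom,sm8550-smmu-500", "qcom,smmu-500",
 *			     "arm,mmu-500";
 *		...
 *	};
 */
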
SMMU" }, 578a51627c5SShawn Guo { "QCOM ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" }, 579a51627c5SShawn Guo { } 580a51627c5SShawn Guo }; 58122c2d718SShawn Guo #endif 582a51627c5SShawn Guo 5830b4eeee2SGeorgi Djakov static int qcom_smmu_tbu_probe(struct platform_device *pdev) 5840b4eeee2SGeorgi Djakov { 5850b4eeee2SGeorgi Djakov struct device *dev = &pdev->dev; 5860b4eeee2SGeorgi Djakov int ret; 5870b4eeee2SGeorgi Djakov 5880b4eeee2SGeorgi Djakov if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) { 5890b4eeee2SGeorgi Djakov ret = qcom_tbu_probe(pdev); 5900b4eeee2SGeorgi Djakov if (ret) 5910b4eeee2SGeorgi Djakov return ret; 5920b4eeee2SGeorgi Djakov } 5930b4eeee2SGeorgi Djakov 5940b4eeee2SGeorgi Djakov if (dev->pm_domain) { 5950b4eeee2SGeorgi Djakov pm_runtime_set_active(dev); 5960b4eeee2SGeorgi Djakov pm_runtime_enable(dev); 5970b4eeee2SGeorgi Djakov } 5980b4eeee2SGeorgi Djakov 5990b4eeee2SGeorgi Djakov return 0; 6000b4eeee2SGeorgi Djakov } 6010b4eeee2SGeorgi Djakov 6020b4eeee2SGeorgi Djakov static const struct of_device_id qcom_smmu_tbu_of_match[] = { 6030b4eeee2SGeorgi Djakov { .compatible = "qcom,sc7280-tbu" }, 6040b4eeee2SGeorgi Djakov { .compatible = "qcom,sdm845-tbu" }, 6050b4eeee2SGeorgi Djakov { } 6060b4eeee2SGeorgi Djakov }; 6070b4eeee2SGeorgi Djakov 6080b4eeee2SGeorgi Djakov static struct platform_driver qcom_smmu_tbu_driver = { 6090b4eeee2SGeorgi Djakov .driver = { 6100b4eeee2SGeorgi Djakov .name = "qcom_tbu", 6110b4eeee2SGeorgi Djakov .of_match_table = qcom_smmu_tbu_of_match, 6120b4eeee2SGeorgi Djakov }, 6130b4eeee2SGeorgi Djakov .probe = qcom_smmu_tbu_probe, 6140b4eeee2SGeorgi Djakov }; 6150b4eeee2SGeorgi Djakov 6165c7469c6SJordan Crouse struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) 6175c7469c6SJordan Crouse { 61800597f9fSSai Prakash Ranjan const struct device_node *np = smmu->dev->of_node; 6194c1d0ad1SDmitry Baryshkov const struct of_device_id *match; 6200b4eeee2SGeorgi Djakov static u8 tbu_registered; 6210b4eeee2SGeorgi Djakov 6220b4eeee2SGeorgi Djakov if (!tbu_registered++) 6230b4eeee2SGeorgi Djakov platform_driver_register(&qcom_smmu_tbu_driver); 6245c7469c6SJordan Crouse 62522c2d718SShawn Guo #ifdef CONFIG_ACPI 626a51627c5SShawn Guo if (np == NULL) { 627a51627c5SShawn Guo /* Match platform for ACPI boot */ 628a51627c5SShawn Guo if (acpi_match_platform_list(qcom_acpi_platlist) >= 0) 6294172dda2SDmitry Baryshkov return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data); 630a51627c5SShawn Guo } 63122c2d718SShawn Guo #endif 63200597f9fSSai Prakash Ranjan 6334c1d0ad1SDmitry Baryshkov match = of_match_node(qcom_smmu_impl_of_match, np); 6344c1d0ad1SDmitry Baryshkov if (match) 6354c1d0ad1SDmitry Baryshkov return qcom_smmu_create(smmu, match->data); 636ab9a77a1SSai Prakash Ranjan 637e36ca2faSRob Clark /* 638e36ca2faSRob Clark * If you hit this WARN_ON() you are missing an entry in the 639e36ca2faSRob Clark * qcom_smmu_impl_of_match[] table, and GPU per-process page- 640e36ca2faSRob Clark * tables will be broken. 641e36ca2faSRob Clark */ 642e36ca2faSRob Clark WARN(of_device_is_compatible(np, "qcom,adreno-smmu"), 643e36ca2faSRob Clark "Missing qcom_smmu_impl_of_match entry for: %s", 644e36ca2faSRob Clark dev_name(smmu->dev)); 645e36ca2faSRob Clark 64600597f9fSSai Prakash Ranjan return smmu; 6475c7469c6SJordan Crouse } 648