xref: /linux/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c (revision 4172dda2b30a9a0e628e81d2a3bc9a6ef0936774)
1759aaa10SVivek Gautam // SPDX-License-Identifier: GPL-2.0-only
2759aaa10SVivek Gautam /*
3759aaa10SVivek Gautam  * Copyright (c) 2019, The Linux Foundation. All rights reserved.
4759aaa10SVivek Gautam  */
5759aaa10SVivek Gautam 
6a51627c5SShawn Guo #include <linux/acpi.h>
75c7469c6SJordan Crouse #include <linux/adreno-smmu-priv.h>
8b9b721d1SSai Prakash Ranjan #include <linux/delay.h>
90e764a01SJordan Crouse #include <linux/of_device.h>
10759aaa10SVivek Gautam #include <linux/qcom_scm.h>
11759aaa10SVivek Gautam 
12759aaa10SVivek Gautam #include "arm-smmu.h"
13b9b721d1SSai Prakash Ranjan #include "arm-smmu-qcom.h"
14759aaa10SVivek Gautam 
15b9b721d1SSai Prakash Ranjan #define QCOM_DUMMY_VAL	-1
16759aaa10SVivek Gautam 
/* Resolve the qcom_smmu wrapper embedding the generic SMMU instance. */
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}
21f9081b8fSBjorn Andersson 
/*
 * Issue a TLB sync and wait for it to complete, spinning with an
 * exponentially growing delay between poll bursts.  If the sync never
 * completes within TLB_LOOP_TIMEOUT, hand off to the QCOM debug helper
 * to dump implementation-defined TBU state.
 */
static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	/* Any value written to the sync register triggers the operation */
	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			/* GSACTIVE clear means the global sync has finished */
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}

	/* Timed out: report QCOM-specific diagnostic state */
	qcom_smmu_tlb_sync_debug(smmu);
}
41b9b721d1SSai Prakash Ranjan 
/*
 * Program SCTLR for a context bank, layering the Adreno-specific fault
 * policy on top of the value computed by the core driver.
 */
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);

	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	/*
	 * If the GPU requested stall-on-fault for this context bank (via
	 * qcom_adreno_smmu_set_stall()), make faults stall instead of
	 * terminating.
	 */
	if (qsmmu->stall_enabled & BIT(idx))
		reg |= ARM_SMMU_SCTLR_CFCFG;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
58bffb2eafSRob Clark 
/*
 * Snapshot the fault-related context bank registers into @info so the
 * GPU driver can inspect and report an SMMU fault.
 */
static void qcom_adreno_smmu_get_fault_info(const void *cookie,
		struct adreno_smmu_fault_info *info)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
	info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
	info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
	info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
	info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
	info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}
74ab5df7b9SJordan Crouse 
75ba6014a4SRob Clark static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
76ba6014a4SRob Clark {
77ba6014a4SRob Clark 	struct arm_smmu_domain *smmu_domain = (void *)cookie;
78ba6014a4SRob Clark 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
79ba6014a4SRob Clark 	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
80ba6014a4SRob Clark 
81ba6014a4SRob Clark 	if (enabled)
82ba6014a4SRob Clark 		qsmmu->stall_enabled |= BIT(cfg->cbndx);
83ba6014a4SRob Clark 	else
84ba6014a4SRob Clark 		qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
85ba6014a4SRob Clark }
86ba6014a4SRob Clark 
87ba6014a4SRob Clark static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
88ba6014a4SRob Clark {
89ba6014a4SRob Clark 	struct arm_smmu_domain *smmu_domain = (void *)cookie;
90ba6014a4SRob Clark 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
91ba6014a4SRob Clark 	struct arm_smmu_device *smmu = smmu_domain->smmu;
92ba6014a4SRob Clark 	u32 reg = 0;
93ba6014a4SRob Clark 
94ba6014a4SRob Clark 	if (terminate)
95ba6014a4SRob Clark 		reg |= ARM_SMMU_RESUME_TERMINATE;
96ba6014a4SRob Clark 
97ba6014a4SRob Clark 	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
98ba6014a4SRob Clark }
99ba6014a4SRob Clark 
1005c7469c6SJordan Crouse #define QCOM_ADRENO_SMMU_GPU_SID 0
1015c7469c6SJordan Crouse 
1025c7469c6SJordan Crouse static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
1035c7469c6SJordan Crouse {
1045c7469c6SJordan Crouse 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1055c7469c6SJordan Crouse 	int i;
1065c7469c6SJordan Crouse 
1075c7469c6SJordan Crouse 	/*
1085c7469c6SJordan Crouse 	 * The GPU will always use SID 0 so that is a handy way to uniquely
1095c7469c6SJordan Crouse 	 * identify it and configure it for per-instance pagetables
1105c7469c6SJordan Crouse 	 */
1115c7469c6SJordan Crouse 	for (i = 0; i < fwspec->num_ids; i++) {
1125c7469c6SJordan Crouse 		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1135c7469c6SJordan Crouse 
1145c7469c6SJordan Crouse 		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
1155c7469c6SJordan Crouse 			return true;
1165c7469c6SJordan Crouse 	}
1175c7469c6SJordan Crouse 
1185c7469c6SJordan Crouse 	return false;
1195c7469c6SJordan Crouse }
1205c7469c6SJordan Crouse 
1215c7469c6SJordan Crouse static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
1225c7469c6SJordan Crouse 		const void *cookie)
1235c7469c6SJordan Crouse {
1245c7469c6SJordan Crouse 	struct arm_smmu_domain *smmu_domain = (void *)cookie;
1255c7469c6SJordan Crouse 	struct io_pgtable *pgtable =
1265c7469c6SJordan Crouse 		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
1275c7469c6SJordan Crouse 	return &pgtable->cfg;
1285c7469c6SJordan Crouse }
1295c7469c6SJordan Crouse 
/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active.
 *
 * Passing a NULL @pgtbl_cfg disables TTBR0 again (restoring the original
 * TCR/TTBR derived from the domain's own pagetable).  Returns -EINVAL if
 * the domain is not using split pagetables, or if the requested transition
 * is a no-op (already enabled/disabled).
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Reject the call if TTBR0 walks are already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Merge the new walk attributes, then enable both TTBR walks */
		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	/* Push the updated shadow state out to the hardware context bank */
	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}
1765c7469c6SJordan Crouse 
1775c7469c6SJordan Crouse static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
1785c7469c6SJordan Crouse 					       struct arm_smmu_device *smmu,
1795c7469c6SJordan Crouse 					       struct device *dev, int start)
1805c7469c6SJordan Crouse {
1815c7469c6SJordan Crouse 	int count;
1825c7469c6SJordan Crouse 
1835c7469c6SJordan Crouse 	/*
1845c7469c6SJordan Crouse 	 * Assign context bank 0 to the GPU device so the GPU hardware can
1855c7469c6SJordan Crouse 	 * switch pagetables
1865c7469c6SJordan Crouse 	 */
1875c7469c6SJordan Crouse 	if (qcom_adreno_smmu_is_gpu_device(dev)) {
1885c7469c6SJordan Crouse 		start = 0;
1895c7469c6SJordan Crouse 		count = 1;
1905c7469c6SJordan Crouse 	} else {
1915c7469c6SJordan Crouse 		start = 1;
1925c7469c6SJordan Crouse 		count = smmu->num_context_banks;
1935c7469c6SJordan Crouse 	}
1945c7469c6SJordan Crouse 
1955c7469c6SJordan Crouse 	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
1965c7469c6SJordan Crouse }
1975c7469c6SJordan Crouse 
198a242f429SEric Anholt static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
199a242f429SEric Anholt {
200a242f429SEric Anholt 	const struct device_node *np = smmu->dev->of_node;
201a242f429SEric Anholt 
202a242f429SEric Anholt 	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
203a242f429SEric Anholt 		return false;
204a242f429SEric Anholt 
205a242f429SEric Anholt 	return true;
206a242f429SEric Anholt }
207a242f429SEric Anholt 
/*
 * Context init for domains behind an Adreno SMMU: prefer ASID-based TLB
 * invalidation, request the TTBR1 pagetable quirk where supported, and
 * publish the private interface the GPU driver uses for per-instance
 * pagetables and fault handling.
 */
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	/* Prefer TLBIASID over by-VA invalidation on flush_walk */
	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
	priv->set_stall = qcom_adreno_smmu_set_stall;
	priv->resume_translation = qcom_adreno_smmu_resume_translation;

	return 0;
}
2435c7469c6SJordan Crouse 
/*
 * Client devices matched here are forced into IDENTITY (bypass) domains
 * by qcom_smmu_def_domain_type().
 */
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc7280-mss-pil" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sm8250-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};
2580e764a01SJordan Crouse 
/*
 * Default (non-Adreno) context init: only request ASID-based TLB
 * invalidation on flush_walk.
 */
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	return 0;
}
266ef75702dSSai Prakash Ranjan 
/*
 * Probe-time fixups: detect firmware with quirky S2CR handling (and if
 * found, reserve the last context bank to emulate bypass streams), then
 * mirror any stream mappings the boot firmware left valid into the
 * software SMR/S2CR state as bypass streams so they keep working.
 */
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 reg;
	u32 smr;
	int i;

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		/* Take the reserved bank out of the general allocation pool */
		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		/* Leave the reserved context bank's SCTLR cleared */
		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	/* Inherit valid boot-time stream mappings as bypass streams */
	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}
31607a7f2caSBjorn Andersson 
/*
 * Write an S2CR register, transparently rewriting BYPASS/FAULT types when
 * the firmware quirk detected in qcom_smmu_cfg_probe() is present.
 */
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}
351f9081b8fSBjorn Andersson 
3520e764a01SJordan Crouse static int qcom_smmu_def_domain_type(struct device *dev)
3530e764a01SJordan Crouse {
3540e764a01SJordan Crouse 	const struct of_device_id *match =
3550e764a01SJordan Crouse 		of_match_device(qcom_smmu_client_of_match, dev);
3560e764a01SJordan Crouse 
3570e764a01SJordan Crouse 	return match ? IOMMU_DOMAIN_IDENTITY : 0;
3580e764a01SJordan Crouse }
3590e764a01SJordan Crouse 
/*
 * sdm845-specific reset: perform the standard MMU-500 reset, then ask
 * secure firmware (via SCM) to disable the wait-for-safe logic.
 * Returns the SCM call's status; a failure is reported but leaves the
 * SMMU reset completed.
 */
static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	arm_mmu500_reset(smmu);

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}
378759aaa10SVivek Gautam 
/* Ops for regular (non-Adreno) QCOM SMMU instances */
static const struct arm_smmu_impl qcom_smmu_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};
387417b76adSDmitry Baryshkov 
/* Same as qcom_smmu_impl but with the sdm845 wait-for-safe reset hook */
static const struct arm_smmu_impl sdm845_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_sdm845_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};
396759aaa10SVivek Gautam 
/* Ops for the SMMU instance serving the Adreno GPU (qcom,adreno-smmu) */
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};
4055c7469c6SJordan Crouse 
/*
 * Wrap the generic arm_smmu_device in a qcom_smmu, selecting the Adreno
 * or regular impl ops from @data.  Returns the device unchanged if no
 * impl applies, or an ERR_PTR on failure.
 */
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct qcom_smmu_match_data *data)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct arm_smmu_impl *impl;
	struct qcom_smmu *qsmmu;

	if (!data)
		return ERR_PTR(-EINVAL);

	if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
		impl = data->adreno_impl;
	else
		impl = data->impl;

	/* No QCOM-specific ops for this variant: use the device as-is */
	if (!impl)
		return smmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	/* Grow the existing managed allocation so embedded state is preserved */
	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;
	qsmmu->cfg = data->cfg;

	return &qsmmu->smmu;
}
4375c7469c6SJordan Crouse 
/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
	[QCOM_SMMU_TBU_PWR_STATUS]		= 0x2204,
	[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK]	= 0x25dc,
	[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR]	= 0x2670,
};

/* Debug register layout shared by the "impl0" SMMU-500 integrations */
static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
	.reg_offset = qcom_smmu_impl0_reg_offset,
};
448*4172dda2SDmitry Baryshkov 
/*
 * It is not yet possible to use MDP SMMU with the bypass quirk on the msm8996,
 * there are not enough context banks.
 */
static const struct qcom_smmu_match_data msm8996_smmu_data = {
	.impl = NULL,
	.adreno_impl = &qcom_adreno_smmu_impl,
};

/* Generic match data: QCOM ops for both regular and Adreno instances */
static const struct qcom_smmu_match_data qcom_smmu_data = {
	.impl = &qcom_smmu_impl,
	.adreno_impl = &qcom_adreno_smmu_impl,
};
4624c1d0ad1SDmitry Baryshkov 
static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
	.impl = &sdm845_smmu_500_impl,
	/*
	 * No need for adreno impl here. On sdm845 the Adreno SMMU is handled
	 * by the separate sdm845-smmu-v2 device.
	 */
	/* Also no debug configuration. */
};

/* SMMU-500 integrations exposing the impl0 debug register layout */
static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
	.impl = &qcom_smmu_impl,
	.adreno_impl = &qcom_adreno_smmu_impl,
	.cfg = &qcom_smmu_impl0_cfg,
};
477417b76adSDmitry Baryshkov 
/* SMMU compatibles that require QCOM-specific handling, with their match data */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
	{ .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_data },
	{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_data },
	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_data },
	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ }
};
50000597f9fSSai Prakash Ranjan 
#ifdef CONFIG_ACPI
/* ACPI (IORT) platforms that need the QCOM SMMU implementation */
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};
#endif
508a51627c5SShawn Guo 
/*
 * Entry point from the core arm-smmu driver: wrap the device in a
 * qcom_smmu when the platform (DT compatible, or the ACPI platform list
 * for non-DT boot) requires QCOM-specific behaviour; otherwise return
 * the device unchanged.
 */
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct of_device_id *match;

#ifdef CONFIG_ACPI
	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
	}
#endif

	match = of_match_node(qcom_smmu_impl_of_match, np);
	if (match)
		return qcom_smmu_create(smmu, match->data);

	return smmu;
}
528