// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "arm-smmu.h"
#include "arm-smmu-qcom.h"

#define QCOM_DUMMY_VAL	-1

/*
 * SMMU-500 TRM defines BIT(0) as CMTLB (Enable context caching in the
 * macro TLB) and BIT(1) as CPRE (Enable context caching in the prefetch
 * buffer). The remaining bits are implementation defined and vary across
 * SoCs.
 */

#define CPRE			(1 << 1)
#define CMTLB			(1 << 0)
#define PREFETCH_SHIFT		8
#define PREFETCH_DEFAULT	0
#define PREFETCH_SHALLOW	(1 << PREFETCH_SHIFT)
#define PREFETCH_MODERATE	(2 << PREFETCH_SHIFT)
#define PREFETCH_DEEP		(3 << PREFETCH_SHIFT)
#define GFX_ACTLR_PRR		(1 << 5)

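/*
 * Worked example for the table below, from the defines above:
 * PREFETCH_DEEP | CPRE | CMTLB == (3 << 8) | (1 << 1) | (1 << 0) == 0x303,
 * i.e. deep prefetch with both context caches enabled.
 */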
static const struct of_device_id qcom_smmu_actlr_client_of_match[] = {
	{ .compatible = "qcom,adreno",
			.data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
	{ .compatible = "qcom,adreno-gmu",
			.data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
	{ .compatible = "qcom,adreno-smmu",
			.data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
	{ .compatible = "qcom,fastrpc",
			.data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
	{ .compatible = "qcom,sc7280-mdss",
			.data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
	{ .compatible = "qcom,sc7280-venus",
			.data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
	{ .compatible = "qcom,sm8550-mdss",
			.data = (const void *) (PREFETCH_DEFAULT | CMTLB) },
	{ }
};

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

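/*
 * Issue a TLB sync and poll the status register until GSACTIVE clears:
 * spin TLB_SPIN_COUNT times per iteration, then back off with
 * exponentially growing udelay()s up to TLB_LOOP_TIMEOUT. If the sync
 * never completes, call qcom_smmu_tlb_sync_debug() for diagnostics.
 */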
static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}

	qcom_smmu_tlb_sync_debug(smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);

	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	if (qsmmu->stall_enabled & BIT(idx))
		reg |= ARM_SMMU_SCTLR_CFCFG;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

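/*
 * Snapshot the context bank fault record (FSR, FSYNR0/1, FAR, CBFRSYNRA,
 * TTBR0, CONTEXTIDR) for the GPU driver, so it can log and handle
 * pagefaults without poking at SMMU registers itself.
 */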
static void qcom_adreno_smmu_get_fault_info(const void *cookie,
		struct adreno_smmu_fault_info *info)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
	info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
	info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
	info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
	info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
	info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}

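/*
 * Cache the per-context-bank stall-on-fault preference; it is applied to
 * SCTLR.CFCFG by qcom_adreno_smmu_write_sctlr(). If the SMMU is powered
 * and the setting actually changed, also patch the live SCTLR under
 * cb_lock so the change takes effect immediately.
 */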
static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 mask = BIT(cfg->cbndx);
	bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled;
	unsigned long flags;

	if (enabled)
		qsmmu->stall_enabled |= mask;
	else
		qsmmu->stall_enabled &= ~mask;

	/*
	 * If the device is on and we changed the setting, update the register.
	 * The spec pseudocode says that CFCFG is resampled after a fault, and
	 * we believe that no implementations cache it in the TLB, so it should
	 * be safe to change it without a TLB invalidation.
	 */
	if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) {
		u32 reg;

		spin_lock_irqsave(&smmu_domain->cb_lock, flags);
		reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR);

		if (enabled)
			reg |= ARM_SMMU_SCTLR_CFCFG;
		else
			reg &= ~ARM_SMMU_SCTLR_CFCFG;

		arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg);
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

		pm_runtime_put_autosuspend(smmu->dev);
	}
}

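/*
 * PRR ("Partially Resident Region") support for the GPU. A sketch of the
 * contract, as far as this driver is concerned: the GPU driver programs a
 * single physical page via set_prr_addr() and toggles the
 * implementation-defined ACTLR PRR bit via set_prr_bit(); how the GPU
 * uses the PRR page (e.g. for sparse/partially-resident mappings) is up
 * to the GPU driver.
 */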
static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	u32 reg = 0;
	int ret;

	ret = pm_runtime_resume_and_get(smmu->dev);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
		return;
	}

	reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
	reg &= ~GFX_ACTLR_PRR;
	if (set)
		reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
	pm_runtime_put_autosuspend(smmu->dev);
}

static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int ret;

	ret = pm_runtime_resume_and_get(smmu->dev);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
		return;
	}

	writel_relaxed(lower_32_bits(page_addr),
				smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
	writel_relaxed(upper_32_bits(page_addr),
				smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
	pm_runtime_put_autosuspend(smmu->dev);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}
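
/*
 * Hypothetical caller sketch (names illustrative, not actual drm/msm
 * code), showing how the GPU driver is expected to drive the two hooks
 * above through adreno_smmu_priv:
 *
 *	const struct io_pgtable_cfg *ttbr1 = priv->get_ttbr1_cfg(priv->cookie);
 *	// ...allocate a per-instance TTBR0 pagetable based on *ttbr1...
 *	priv->set_ttbr0_cfg(priv->cookie, &my_ttbr0_cfg);	// enable TTBR0
 *	priv->set_ttbr0_cfg(priv->cookie, NULL);		// back to default
 */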

static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
		return false;

	return true;
}

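/*
 * Look the client device up in the per-SoC ACTLR table and, on a match,
 * program the context bank ACTLR with the prefetch/context-caching bits
 * carried in the entry's .data.
 */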
static void qcom_smmu_set_actlr_dev(struct device *dev, struct arm_smmu_device *smmu, int cbndx,
		const struct of_device_id *client_match)
{
	const struct of_device_id *match =
			of_match_device(client_match, dev);

	if (!match) {
		dev_dbg(dev, "no ACTLR settings present\n");
		return;
	}

	arm_smmu_cb_write(smmu, cbndx, ARM_SMMU_CB_ACTLR, (unsigned long)match->data);
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	const struct device_node *np = smmu_domain->smmu->dev->of_node;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	const struct of_device_id *client_match;
	int cbndx = smmu_domain->cfg.cbndx;
	struct adreno_smmu_priv *priv;

	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	client_match = qsmmu->data->client_match;

	if (client_match)
		qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
	priv->set_stall = qcom_adreno_smmu_set_stall;
	priv->set_prr_bit = NULL;
	priv->set_prr_addr = NULL;

	if (of_device_is_compatible(np, "qcom,smmu-500") &&
	    !of_device_is_compatible(np, "qcom,sm8250-smmu-500") &&
	    of_device_is_compatible(np, "qcom,adreno-smmu")) {
		priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
		priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
	}

	return 0;
}

static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-gmu" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,qcm2290-mdss" },
	{ .compatible = "qcom,sar2130p-mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc7280-mss-pil" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sc8280xp-mdss" },
	{ .compatible = "qcom,sdm670-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ .compatible = "qcom,sm6115-mdss" },
	{ .compatible = "qcom,sm6350-mdss" },
	{ .compatible = "qcom,sm6375-mdss" },
	{ .compatible = "qcom,sm8150-mdss" },
	{ .compatible = "qcom,sm8250-mdss" },
	{ .compatible = "qcom,x1e80100-mdss" },
	{ }
};

static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	const struct of_device_id *client_match;
	int cbndx = smmu_domain->cfg.cbndx;

	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	client_match = qsmmu->data->client_match;

	if (client_match)
		qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);

	return 0;
}

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	unsigned int last_s2cr;
	u32 reg;
	u32 smr;
	int i;

	/*
	 * MSM8998 LPASS SMMU reports 13 context banks, but accessing
	 * the last context bank crashes the system.
	 */
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
	    smmu->num_context_banks == 13) {
		smmu->num_context_banks = 12;
	} else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) {
		if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */
			smmu->num_context_banks = 7;
		else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */
			smmu->num_context_banks = 13;
	}

	/*
	 * Some platforms support more than the Arm SMMU architected maximum of
	 * 128 stream matching groups. For unknown reasons, the additional
	 * groups don't exhibit the same behavior as the architected registers,
	 * so limit the groups to 128 until the behavior is fixed for the other
	 * groups.
	 */
	if (smmu->num_mapping_groups > 128) {
		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
		smmu->num_mapping_groups = 128;
	}

	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
{
	/* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
	smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;

	/* TZ protects several last context banks, hide them from Linux */
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") &&
	    smmu->num_context_banks == 5)
		smmu->num_context_banks = 2;

	return 0;
}

static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

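/*
 * Client devices on the qcom_smmu_client_of_match list get an IDENTITY
 * default domain, typically because they are already live with
 * firmware-created mappings (e.g. a display scanning out a splash
 * screen, or modem subsystems) when Linux takes over the SMMU.
 */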
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	arm_mmu500_reset(smmu);

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

static const struct arm_smmu_impl qcom_smmu_v2_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
	.context_fault = qcom_smmu_context_fault,
	.context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl sdm845_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_sdm845_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
	.context_fault = qcom_smmu_context_fault,
	.context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.cfg_probe = qcom_adreno_smmuv2_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
	.context_fault_needs_threaded_irq = true,
};

static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
	.context_fault_needs_threaded_irq = true,
};

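/*
 * "Subclass" the generic arm_smmu_device: devm_krealloc() grows the
 * allocation into a struct qcom_smmu that embeds it, so the core keeps
 * using the same device pointer while to_qcom_smmu() can container_of()
 * back to the Qualcomm-specific state.
 */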
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct qcom_smmu_match_data *data)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct arm_smmu_impl *impl;
	struct qcom_smmu *qsmmu;

	if (!data)
		return ERR_PTR(-EINVAL);

	if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
		impl = data->adreno_impl;
	else
		impl = data->impl;

	if (!impl)
		return smmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(dev_err_probe(smmu->dev, -EPROBE_DEFER,
			"qcom_scm not ready\n"));

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;
	qsmmu->data = data;

	return &qsmmu->smmu;
}

/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
	[QCOM_SMMU_TBU_PWR_STATUS]		= 0x2204,
	[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK]	= 0x25dc,
	[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR]	= 0x2670,
};

static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
	.reg_offset = qcom_smmu_impl0_reg_offset,
};

/*
 * It is not yet possible to use MDP SMMU with the bypass quirk on the msm8996,
 * there are not enough context banks.
 */
static const struct qcom_smmu_match_data msm8996_smmu_data = {
	.impl = NULL,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
	.impl = &qcom_smmu_v2_impl,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
	.impl = &sdm845_smmu_500_impl,
	/*
	 * No need for adreno impl here. On sdm845 the Adreno SMMU is handled
	 * by the separate sdm845-smmu-v2 device.
	 */
	/* Also no debug configuration. */
};

static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
	.impl = &qcom_smmu_500_impl,
	.adreno_impl = &qcom_adreno_smmu_500_impl,
	.cfg = &qcom_smmu_impl0_cfg,
	.client_match = qcom_smmu_actlr_client_of_match,
};

/*
 * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
 * special handling and can not be covered by the qcom,smmu-500 entry.
 */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
	{ .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm670-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm7150-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ }
};

#ifdef CONFIG_ACPI
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};
#endif

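/*
 * TBUs (translation buffer units) are modelled as child platform devices
 * of the SMMU. The debug code (CONFIG_ARM_SMMU_QCOM_DEBUG) attaches to
 * them for fault diagnostics; beyond that, probing only enables runtime
 * PM for TBUs that sit in a power domain.
 */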
static int qcom_smmu_tbu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) {
		ret = qcom_tbu_probe(pdev);
		if (ret)
			return ret;
	}

	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return 0;
}

static const struct of_device_id qcom_smmu_tbu_of_match[] = {
	{ .compatible = "qcom,sc7280-tbu" },
	{ .compatible = "qcom,sdm845-tbu" },
	{ }
};

static struct platform_driver qcom_smmu_tbu_driver = {
	.driver = {
		.name           = "qcom_tbu",
		.of_match_table = qcom_smmu_tbu_of_match,
	},
	.probe = qcom_smmu_tbu_probe,
};

struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct of_device_id *match;
	static u8 tbu_registered;

	if (!tbu_registered++)
		platform_driver_register(&qcom_smmu_tbu_driver);

#ifdef CONFIG_ACPI
	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
	}
#endif

	match = of_match_node(qcom_smmu_impl_of_match, np);
	if (match)
		return qcom_smmu_create(smmu, match->data);

	/*
	 * If you hit this WARN() you are missing an entry in the
	 * qcom_smmu_impl_of_match[] table, and GPU per-process page-
	 * tables will be broken.
	 */
	WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
	     "Missing qcom_smmu_impl_of_match entry for: %s",
	     dev_name(smmu->dev));

	return smmu;
}
797