// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/string_choices.h>

#include <linux/fsl/mc.h>

#include "arm-smmu.h"
#include "../../dma-iommu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

static bool using_legacy_binding, using_generic_binding;

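/*
 * Runtime PM helpers: when the SMMU sits behind runtime-PM-managed power
 * domains or clocks, resume it around register accesses and drop the
 * reference (via autosuspend) afterwards; otherwise these are no-ops.
 */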
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_resume_and_get(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev)) {
		pm_runtime_mark_last_busy(smmu->dev);
		__pm_runtime_put_autosuspend(smmu->dev);
	}
}

static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
{
	/*
	 * Setup an autosuspend delay to avoid bouncing runpm state.
	 * Otherwise, if a driver for a suspended consumer device
	 * unmaps buffers, it will runpm resume/suspend for each one.
	 *
	 * For example, when used by a GPU device, when an application
	 * or game exits, it can trigger unmapping 100s or 1000s of
	 * buffers. With a runpm cycle for each buffer, that adds up
	 * to 5-10sec worth of reprogramming the context bank, while
	 * the system appears to be locked up to the user.
	 */
	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
	pm_runtime_use_autosuspend(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct platform_driver arm_smmu_driver;
static const struct iommu_ops arm_smmu_ops;

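/*
 * Support for the legacy "mmu-masters" devicetree binding, where Stream IDs
 * are described from the SMMU's point of view rather than the master's.
 * Only compiled in when CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is selected.
 */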
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_property_present(np, "#stream-id-cells")) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, NULL);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}
#else
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	return -ENODEV;
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

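/*
 * Global syncs complete invalidations issued through the global (GR0)
 * address space, serialised by global_sync_lock; context syncs operate on a
 * single context bank under the domain's cb_lock.
 */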
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

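/*
 * Range invalidations are issued one granule at a time. For the AArch32
 * context formats the ASID sits in the low bits of the 32-bit TLBI payload
 * alongside VA[31:12]; the AArch64 format takes a 64-bit payload with the
 * ASID in bits [63:48].
 */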
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

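/*
 * Some implementations prefer a full ASID invalidation over a burst of
 * per-page ops when tearing down table walks, as indicated by the
 * flush_walk_prefer_tlbiasid quirk below.
 */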
static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	if (cfg->flush_walk_prefer_tlbiasid) {
		arm_smmu_tlb_inv_context_s1(cookie);
	} else {
		arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
					  ARM_SMMU_CB_S1_TLBIVA);
		arm_smmu_tlb_sync_context(cookie);
	}
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
					size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
				      struct arm_smmu_context_fault_info *cfi)
{
	cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
}

void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
				       const struct arm_smmu_context_fault_info *cfi)
{
	dev_err(smmu->dev,
		"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
		cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);

	dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
		cfi->fsr,
		(cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "",
		(u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
		(cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "",
		(cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "",
		cfi->cbfrsynra);

	dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
		cfi->fsynr,
		(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
		(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
}

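/*
 * Context fault handler: snapshot the fault record, give the upper layer a
 * chance to handle it via report_iommu_fault(), log unhandled faults
 * (ratelimited), then clear the FSR and resume or terminate any stalled
 * transaction.
 */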
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	struct arm_smmu_context_fault_info cfi;
	struct arm_smmu_domain *smmu_domain = dev;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	int idx = smmu_domain->cfg.cbndx;
	int ret;

	arm_smmu_read_context_fault_info(smmu, idx, &cfi);

	if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
		return IRQ_NONE;

	ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
		cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);

	if (ret == -ENOSYS && __ratelimit(&rs))
		arm_smmu_print_context_fault_info(smmu, idx, &cfi);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);

	if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
				  ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
	}

	return IRQ_HANDLED;
}

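/*
 * Global faults typically indicate an Unidentified Stream Fault (USF) from a
 * Stream ID with no matching entry; report them (ratelimited) and ack GFSR.
 */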
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	if (__ratelimit(&rs)) {
		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
		    (gfsr & ARM_SMMU_sGFSR_USF))
			dev_err(smmu->dev,
				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
				(u16)gfsynr1);
		else
			dev_err(smmu->dev,
				"Unexpected global fault, this could be serious\n");
		dev_err(smmu->dev,
			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
	}

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

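/*
 * Stage the register values for a context bank into the shadow state in
 * smmu->cbs[]; the hardware is only touched by arm_smmu_write_context_bank().
 */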
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
			else
				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
		}
	} else {
		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
			cb->ttbr[1] = 0;
		} else {
			cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);
			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);

			if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
				cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
			else
				cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
		}
	}
}

void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = ARM_SMMU_CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
	if (stage1)
		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= ARM_SMMU_SCTLR_E;

	if (smmu->impl && smmu->impl->write_sctlr)
		smmu->impl->write_sctlr(smmu, idx, reg);
	else
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

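/*
 * Pick a context bank index, deferring to the implementation's own
 * allocator when one is provided.
 */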
static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{
	if (smmu->impl && smmu->impl->alloc_context_bank)
		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}

static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
					struct arm_smmu_device *smmu,
					struct device *dev)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct iommu_domain *domain = &smmu_domain->domain;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	irqreturn_t (*context_fault)(int irq, void *dev);

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
	if (ret < 0) {
		goto out_unlock;
	}

	smmu_domain->smmu = smmu;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
		if (ret)
			goto out_clear_smmu;
	}

	if (smmu_domain->pgtbl_quirks)
		pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
		domain->geometry.aperture_start = ~0UL << ias;
		domain->geometry.aperture_end = ~0UL;
	} else {
		domain->geometry.aperture_end = (1UL << ias) - 1;
	}

	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[cfg->irptndx];

	if (smmu->impl && smmu->impl->context_fault)
		context_fault = smmu->impl->context_fault;
	else
		context_fault = arm_smmu_context_fault;

	if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						context_fault,
						IRQF_ONESHOT | IRQF_SHARED,
						"arm-smmu-context-fault",
						smmu_domain);
	else
		ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
				       "arm-smmu-context-fault", smmu_domain);

	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

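/*
 * Tear down a domain's context bank: disable it in hardware, free the page
 * tables, release the context IRQ and return the bank to the allocator. The
 * SMMU must be runtime-resumed around the register writes.
 */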
static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
		irq = smmu->irqs[cfg->irptndx];
		devm_free_irq(smmu->dev, irq, smmu_domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
	struct arm_smmu_domain *smmu_domain;
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);
	smmu_domain->domain.pgsize_bitmap = smmu->pgsize_bitmap;

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(smmu_domain);
	kfree(smmu_domain);
}

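/*
 * Stream mapping programming. With the EXIDS feature the valid bit lives in
 * S2CR.EXIDVALID rather than SMR.VALID, so arm_smmu_write_sme() writes the
 * S2CR before making the corresponding SMR live.
 */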
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= ARM_SMMU_SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg;

	if (smmu->impl && smmu->impl->write_s2cr) {
		smmu->impl->write_s2cr(smmu, idx);
		return;
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= ARM_SMMU_S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;
	int i;

	if (!smmu->smrs)
		return;
	/*
	 * If we've had to accommodate firmware memory regions, we may
	 * have live SMRs by now; tread carefully...
	 *
	 * Somewhat perversely, not having a free SMR for this test implies we
	 * can get away without it anyway, as we'll only be able to 'allocate'
	 * these SMRs for the ID/mask values we're already trusting to be OK.
	 */
	for (i = 0; i < smmu->num_mapping_groups; i++)
		if (!smmu->smrs[i].valid)
			goto smr_ok;
	return;
smr_ok:
	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);

	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
	smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
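		/*
		 * Illustrative values: an existing entry id=0x400/mask=0x00f
		 * covers stream IDs 0x400-0x40f, so a new request of
		 * id=0x404/mask=0x003 (0x404-0x407) lies entirely within it:
		 * (0x003 & 0x00f) == 0x003 and ((0x404 ^ 0x400) & ~0x00f) == 0,
		 * hence that index is simply reused.
		 */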
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

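/*
 * Claim a stream map entry for every stream ID in the device's fwspec,
 * reusing any existing entry that already covers the ID/mask pair, and
 * only poke the hardware once the entire set has been allocated.
 */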
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(cfg, fwspec, i, idx)
		arm_smmu_write_sme(smmu, idx);

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

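/*
 * Point every stream map entry of the master at the given S2CR type and
 * context bank, skipping entries which already match to avoid redundant
 * register writes.
 */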
static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
					  enum arm_smmu_s2cr_type type,
					  u8 cbndx, struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	int i, idx;

	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;
	int ret;

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and probe_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	cfg = dev_iommu_priv_get(dev);
	if (!cfg)
		return -ENODEV;

	smmu = cfg->smmu;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
				      smmu_domain->cfg.cbndx, fwspec);
rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}

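/*
 * Common helper for the identity and blocked domains: neither uses a
 * context bank, so index 0 is passed purely as a don't-care value.
 */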
static int arm_smmu_attach_dev_type(struct device *dev,
				    enum arm_smmu_s2cr_type type)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	int ret;

	if (!cfg)
		return -ENODEV;
	smmu = cfg->smmu;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
	arm_smmu_rpm_put(smmu);
	return 0;
}

static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
					struct device *dev)
{
	return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
}

static const struct iommu_domain_ops arm_smmu_identity_ops = {
	.attach_dev = arm_smmu_attach_dev_identity,
};

static struct iommu_domain arm_smmu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &arm_smmu_identity_ops,
};

static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
				       struct device *dev)
{
	return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
}

static const struct iommu_domain_ops arm_smmu_blocked_ops = {
	.attach_dev = arm_smmu_attach_dev_blocked,
};

static struct iommu_domain arm_smmu_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &arm_smmu_blocked_ops,
};

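/*
 * Mapping and unmapping are delegated to the io-pgtable code; the driver
 * itself only has to keep the SMMU powered up around the pagetable update.
 */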
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *iotlb_gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	arm_smmu_rpm_get(smmu);
	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu)
		return;

	arm_smmu_rpm_get(smmu);
	if (smmu->version == ARM_SMMU_V2 ||
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		arm_smmu_tlb_sync_context(smmu_domain);
	else
		arm_smmu_tlb_sync_global(smmu);
	arm_smmu_rpm_put(smmu);
}

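/*
 * Resolve an IOVA via the hardware stage 1 address translation (ATS1PR)
 * register, polling ATSR until the operation completes and falling back
 * to a software pagetable walk if the hardware times out.
 */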
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *reg;
	u32 tmp;
	u64 phys;
	unsigned long va, flags;
	int ret, idx = cfg->cbndx;
	phys_addr_t addr = 0;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	va = iova & ~0xfffUL;
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
	else
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);

	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_CB_ATSR_ACTIVE),
				      5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		arm_smmu_rpm_put(smmu);
		return ops->iova_to_phys(ops, iova);
	}

	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
	if (phys & ARM_SMMU_CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		goto out;
	}

	addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
out:
	arm_smmu_rpm_put(smmu);

	return addr;
}

138345ae7cffSWill Deacon
arm_smmu_iova_to_phys(struct iommu_domain * domain,dma_addr_t iova)13841d672638SJoerg Roedel static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1385518f7136SWill Deacon dma_addr_t iova)
138645ae7cffSWill Deacon {
1387518f7136SWill Deacon struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1388a44a9791SWill Deacon struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
138945ae7cffSWill Deacon
139083a60ed8SBaptiste Reynal if (!ops)
1391523d7423SRobin Murphy return 0;
1392523d7423SRobin Murphy
139383a60ed8SBaptiste Reynal if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1394523d7423SRobin Murphy smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
139545ae7cffSWill Deacon return arm_smmu_iova_to_phys_hard(domain, iova);
139645ae7cffSWill Deacon
1397359ad157SRobin Murphy return ops->iova_to_phys(ops, iova);
139845ae7cffSWill Deacon }
1399df198b37SRobin Murphy
arm_smmu_capable(struct device * dev,enum iommu_cap cap)1400df198b37SRobin Murphy static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
1401d0948945SWill Deacon {
1402d0948945SWill Deacon struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1403ac9c5e92SRobin Murphy
1404ac9c5e92SRobin Murphy switch (cap) {
1405ac9c5e92SRobin Murphy case IOMMU_CAP_CACHE_COHERENCY:
1406ac9c5e92SRobin Murphy /*
1407ac9c5e92SRobin Murphy * It's overwhelmingly the case in practice that when the pagetable
1408ac9c5e92SRobin Murphy * walk interface is connected to a coherent interconnect, all the
1409ac9c5e92SRobin Murphy * translation interfaces are too. Furthermore if the device is
1410ac9c5e92SRobin Murphy * natively coherent, then its translation interface must also be.
14110029a8ddSAntonios Motakis */
14124a20ce0fSRobin Murphy return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
14130029a8ddSAntonios Motakis device_get_dma_attr(dev) == DEV_DMA_COHERENT;
1414d0948945SWill Deacon case IOMMU_CAP_NOEXEC:
14151fd0c775SJoerg Roedel case IOMMU_CAP_DEFERRED_FLUSH:
1416d0948945SWill Deacon return true;
141745ae7cffSWill Deacon default:
141845ae7cffSWill Deacon return false;
1419ce9babe5SLorenzo Pieralisi }
1420ce9babe5SLorenzo Pieralisi }
1421021bb842SRobin Murphy
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);

	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
	struct arm_smmu_device *smmu = NULL;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);

		/*
		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
		 * will allocate/initialise a new one. Thus we need to update fwspec for
		 * later use.
		 */
		fwspec = dev_iommu_fwspec_get(dev);
		if (ret)
			goto out_free;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	dev_iommu_priv_set(dev, cfg);
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		goto out_cfg_free;

	ret = arm_smmu_master_alloc_smes(dev);
	arm_smmu_rpm_put(smmu);

	if (ret)
		goto out_cfg_free;

	device_link_add(dev, smmu->dev,
			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);

	return &smmu->iommu;

out_cfg_free:
	kfree(cfg);
out_free:
	return ERR_PTR(ret);
}

static void arm_smmu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	int ret;

	ret = arm_smmu_rpm_get(cfg->smmu);
	if (ret < 0)
		return;

	arm_smmu_master_free_smes(cfg, fwspec);

	arm_smmu_rpm_put(cfg->smmu);

	kfree(cfg);
}

static void arm_smmu_probe_finalize(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;

	cfg = dev_iommu_priv_get(dev);
	smmu = cfg->smmu;

	if (smmu->impl && smmu->impl->probe_finalize)
		smmu->impl->probe_finalize(smmu, dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;
	struct iommu_group *group = NULL;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group) {
			mutex_unlock(&smmu->stream_map_mutex);
			return ERR_PTR(-EINVAL);
		}

		group = smmu->s2crs[idx].group;
	}

	if (group) {
		mutex_unlock(&smmu->stream_map_mutex);
		return iommu_group_ref_get(group);
	}

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else if (dev_is_fsl_mc(dev))
		group = fsl_mc_device_group(dev);
	else
		group = generic_device_group(dev);

	/* Remember group for faster lookups */
	if (!IS_ERR(group))
		for_each_cfg_sme(cfg, fwspec, i, idx)
			smmu->s2crs[idx].group = group;

	mutex_unlock(&smmu->stream_map_mutex);
	return group;
}

static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
				       unsigned long quirks)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		ret = -EPERM;
	else
		smmu_domain->pgtbl_quirks = quirks;
	mutex_unlock(&smmu_domain->init_mutex);

	return ret;
}

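/*
 * Illustrative consumer bindings handled here (values are made up):
 *
 *     iommus = <&smmu 0x400>;       // one cell: stream ID only
 *     iommus = <&smmu 0x400 0x3f>;  // two cells: stream ID plus SMR mask
 */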
static int arm_smmu_of_xlate(struct device *dev,
			     const struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);

	if (args->args_count > 1)
		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

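/*
 * Reserve the software MSI doorbell window for the device, plus whatever
 * regions the firmware describes via iommu_dma_get_resv_regions().
 */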
static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static int arm_smmu_def_domain_type(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	const struct arm_smmu_impl *impl = cfg->smmu->impl;

	if (using_legacy_binding)
		return IOMMU_DOMAIN_IDENTITY;

	if (impl && impl->def_domain_type)
		return impl->def_domain_type(dev);

	return 0;
}

static const struct iommu_ops arm_smmu_ops = {
	.identity_domain	= &arm_smmu_identity_domain,
	.blocked_domain		= &arm_smmu_blocked_domain,
	.capable		= arm_smmu_capable,
	.domain_alloc_paging	= arm_smmu_domain_alloc_paging,
	.probe_device		= arm_smmu_probe_device,
	.release_device		= arm_smmu_release_device,
	.probe_finalize		= arm_smmu_probe_finalize,
	.device_group		= arm_smmu_device_group,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.def_domain_type	= arm_smmu_def_domain_type,
	.owner			= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= arm_smmu_attach_dev,
		.map_pages		= arm_smmu_map_pages,
		.unmap_pages		= arm_smmu_unmap_pages,
		.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
		.iotlb_sync		= arm_smmu_iotlb_sync,
		.iova_to_phys		= arm_smmu_iova_to_phys,
		.set_pgtable_quirks	= arm_smmu_set_pgtable_quirks,
		.free			= arm_smmu_domain_free,
	}
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg;

	/* clear global FSR */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		arm_smmu_write_context_bank(smmu, i);
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
	}

	/* Invalidate the TLB, just in case */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);

	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= ARM_SMMU_sCR0_USFCFG;
	else
		reg &= ~ARM_SMMU_sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~ARM_SMMU_sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(ARM_SMMU_sCR0_BSU);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= ARM_SMMU_sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= ARM_SMMU_sCR0_EXIDENABLE;

	if (smmu->impl && smmu->impl->reset)
		smmu->impl->reset(smmu);

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
}

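/* Decode a 3-bit ID register address size field (IAS/OAS/UBS) into bits */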
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

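/*
 * Interrogate the ID0-ID2 registers to populate feature flags, stream
 * mapping tables, context bank state and the supported address sizes.
 */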
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int size;
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i, ret;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);

	if (id & ARM_SMMU_ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ARM_SMMU_ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ARM_SMMU_ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ARM_SMMU_ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
	}
	smmu->streamid_mask = size - 1;
	if (id & ARM_SMMU_ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 ||
	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
	if (smmu->numpage != 2 * size << smmu->pgshift)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
			 2 * size << smmu->pgshift, smmu->numpage);
	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
	smmu->numpage = size;

	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
	smmu->pa_size = size;

	if (id & ARM_SMMU_ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ARM_SMMU_ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ARM_SMMU_ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ARM_SMMU_ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	if (smmu->impl && smmu->impl->cfg_probe) {
		ret = smmu->impl->cfg_probe(smmu);
		if (ret)
			return ret;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
				      u32 *global_irqs, u32 *pmu_irqs)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	*global_irqs = 1;
	*pmu_irqs = 0;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
					     u32 *global_irqs, u32 *pmu_irqs)
{
	return -ENODEV;
}
#endif

arm_smmu_device_dt_probe(struct arm_smmu_device * smmu,u32 * global_irqs,u32 * pmu_irqs)2034d6fcd3b1SLorenzo Pieralisi static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
2035d6fcd3b1SLorenzo Pieralisi u32 *global_irqs, u32 *pmu_irqs)
203697dfad19SRobin Murphy {
203797dfad19SRobin Murphy const struct arm_smmu_match_data *data;
203845ae7cffSWill Deacon struct device *dev = smmu->dev;
203967b65a3fSRobin Murphy bool legacy_binding;
204097dfad19SRobin Murphy
2041021bb842SRobin Murphy if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
2042021bb842SRobin Murphy return dev_err_probe(dev, -ENODEV,
204397dfad19SRobin Murphy "missing #global-interrupts property\n");
204497dfad19SRobin Murphy *pmu_irqs = 0;
204597dfad19SRobin Murphy
204697dfad19SRobin Murphy data = of_device_get_match_data(dev);
2047bbb8a184SLorenzo Pieralisi smmu->version = data->version;
2048bbb8a184SLorenzo Pieralisi smmu->model = data->model;
2049bbb8a184SLorenzo Pieralisi
	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding) {
			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
		}
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

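/*
 * IORT Reserved Memory Regions (RMRs) describe streams that may already have
 * DMA in flight at boot, set up by firmware (e.g. for a display controller).
 * Each such stream ID is given a bypass SME here so that traffic keeps
 * flowing until a real domain is attached.
 */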
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{
	struct list_head rmr_list;
	struct iommu_resv_region *e;
	int idx, cnt = 0;
	u32 reg;

	INIT_LIST_HEAD(&rmr_list);
	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);

	/*
	 * Rather than trying to look at existing mappings that
	 * are setup by the firmware and then invalidate the ones
	 * that do not have matching RMR entries, just disable the
	 * SMMU until it gets enabled again in the reset routine.
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
	reg |= ARM_SMMU_sCR0_CLIENTPD;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
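
	/*
	 * Reuse an existing matching SME where one exists, otherwise claim a
	 * free one; the refcount keeps the entry pinned so later attach and
	 * detach cycles cannot release it.
	 */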
	list_for_each_entry(e, &rmr_list, list) {
		struct iommu_iort_rmr_data *rmr;
		int i;

		rmr = container_of(e, struct iommu_iort_rmr_data, rr);
		for (i = 0; i < rmr->num_sids; i++) {
			idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
			if (idx < 0)
				continue;

			if (smmu->s2crs[idx].count == 0) {
				smmu->smrs[idx].id = rmr->sids[i];
				smmu->smrs[idx].mask = 0;
				smmu->smrs[idx].valid = true;
			}
			smmu->s2crs[idx].count++;
			smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;

			cnt++;
		}
	}

	dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
		   str_plural(cnt));
	iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}

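/*
 * Probe flow: parse the firmware description (DT or IORT), map the register
 * space, discover hardware features, wire up IRQs, preserve any firmware
 * boot mappings, then reset the hardware and register with the IOMMU core.
 */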
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;
	u32 global_irqs, pmu_irqs;
	irqreturn_t (*global_fault)(int irq, void *dev);

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
	else
		err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
	if (err)
		return err;

	smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->ioaddr = res->start;

	/*
	 * The resource size should effectively match the value of SMMU_TOP;
	 * stash that temporarily until we know PAGESIZE to validate it with.
	 */
	smmu->numpage = resource_size(res);

	smmu = arm_smmu_impl_init(smmu);
	if (IS_ERR(smmu))
		return PTR_ERR(smmu);

	num_irqs = platform_irq_count(pdev);

	smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
	if (smmu->num_context_irqs <= 0)
		return dev_err_probe(dev, -ENODEV,
				     "found %d interrupts but expected at least %d\n",
				     num_irqs, global_irqs + pmu_irqs + 1);

	smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
				  sizeof(*smmu->irqs), GFP_KERNEL);
	if (!smmu->irqs)
		return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
				     smmu->num_context_irqs);

	for (i = 0; i < smmu->num_context_irqs; i++) {
		int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);

		if (irq < 0)
			return irq;
		smmu->irqs[i] = irq;
	}

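	/*
	 * Clocks must be up before arm_smmu_device_cfg_probe() reads the
	 * global ID registers below.
	 */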
	err = devm_clk_bulk_get_all(dev, &smmu->clks);
	if (err < 0) {
		dev_err(dev, "failed to get clocks %d\n", err);
		return err;
	}
	smmu->num_clks = err;

	err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2) {
		if (smmu->num_context_banks > smmu->num_context_irqs) {
			dev_err(dev,
				"found only %d context irq(s) but %d required\n",
				smmu->num_context_irqs, smmu->num_context_banks);
			return -ENODEV;
		}

		/* Ignore superfluous interrupts */
		smmu->num_context_irqs = smmu->num_context_banks;
	}

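	/* Let an implementation-specific handler override the default */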
	if (smmu->impl && smmu->impl->global_fault)
		global_fault = smmu->impl->global_fault;
	else
		global_fault = arm_smmu_global_fault;

	for (i = 0; i < global_irqs; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;

		err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
				       "arm-smmu global fault", smmu);
		if (err)
			return dev_err_probe(dev, err,
					     "failed to request global IRQ %d (%u)\n",
					     i, irq);
	}

	platform_set_drvdata(pdev, smmu);

	/* Check for RMRs and install bypass SMRs if any */
	arm_smmu_rmr_install_bypass_smr(smmu);

	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

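	/*
	 * Registering with the IOMMU core may start probing client devices
	 * straight away, so the hardware must already be reset and the boot
	 * mappings in place by this point.
	 */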
	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &smmu->ioaddr);
	if (err)
		return dev_err_probe(dev, err, "Failed to register iommu in sysfs\n");

	err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
				    using_legacy_binding ? NULL : dev);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return dev_err_probe(dev, err, "Failed to register iommu\n");
	}

	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		arm_smmu_rpm_use_autosuspend(smmu);
	}

	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_notice(&pdev->dev, "disabling translation\n");

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
	arm_smmu_rpm_put(smmu);

	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		clk_bulk_disable(smmu->num_clks, smmu->clks);

	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
}

static void arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	arm_smmu_device_shutdown(pdev);
}

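/*
 * Runtime PM only gates the clocks; since the SMMU may sit in a power domain
 * that loses register state while suspended, resume performs a full reset to
 * reprogram the hardware from the software shadow state.
 */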
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}

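/*
 * System sleep wraps the runtime callbacks with the clk prepare/unprepare
 * phase: clocks stay prepared across runtime suspend, presumably so runtime
 * resume avoids the potentially slow prepare step, while system suspend
 * unprepares them fully.
 */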
static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	int ret;
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	if (pm_runtime_suspended(dev))
		return 0;

	ret = arm_smmu_runtime_resume(dev);
	if (ret)
		clk_bulk_unprepare(smmu->num_clks, smmu->clks);

	return ret;
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	if (pm_runtime_suspended(dev))
		goto clk_unprepare;

	ret = arm_smmu_runtime_suspend(dev);
	if (ret)
		return ret;

clk_unprepare:
	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
	return ret;
}

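/*
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() compile away when
 * CONFIG_PM_SLEEP/CONFIG_PM are disabled, which is why the callbacks above
 * are marked __maybe_unused.
 */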
static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu",
		.of_match_table		= arm_smmu_of_match,
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu");
MODULE_LICENSE("GPL v2");