// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */

#include <uapi/linux/iommufd.h>

#include "arm-smmu-v3.h"

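/*
 * Report SMMUv3 hardware information to userspace through iommufd. For the
 * default and ARM_SMMUV3 report types this is a raw snapshot of the IDR0-5,
 * IIDR and AIDR registers; any other requested type is forwarded to the
 * implementation-specific hw_info op, when one is provided.
 */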
void *arm_smmu_hw_info(struct device *dev, u32 *length,
		       enum iommu_hw_info_type *type)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	const struct arm_smmu_impl_ops *impl_ops = master->smmu->impl_ops;
	struct iommu_hw_info_arm_smmuv3 *info;
	u32 __iomem *base_idr;
	unsigned int i;

	if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
	    *type != IOMMU_HW_INFO_TYPE_ARM_SMMUV3) {
		if (!impl_ops || !impl_ops->hw_info)
			return ERR_PTR(-EOPNOTSUPP);
		return impl_ops->hw_info(master->smmu, length, type);
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	base_idr = master->smmu->base + ARM_SMMU_IDR0;
	for (i = 0; i <= 5; i++)
		info->idr[i] = readl_relaxed(base_idr + i);
	info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
	info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);

	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3;

	return info;
}

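/*
 * Build an STE that layers the guest-owned CD table (from the vSTE) on top of
 * the kernel-owned S2 translation. The S2 fields come from
 * arm_smmu_make_s2_domain_ste(); Config is forced to NESTED and the remaining
 * vSTE fields are merged in on top.
 */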
static void arm_smmu_make_nested_cd_table_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	arm_smmu_make_s2_domain_ste(
		target, master, nested_domain->vsmmu->s2_parent, ats_enabled);

	target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
				      FIELD_PREP(STRTAB_STE_0_CFG,
						 STRTAB_STE_0_CFG_NESTED));
	target->data[0] |= nested_domain->ste[0] &
			   ~cpu_to_le64(STRTAB_STE_0_CFG);
	target->data[1] |= nested_domain->ste[1];
	/* Merge events for DoS mitigations on eventq */
	target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}

/*
 * Create a physical STE from the virtual STE that userspace provided when it
 * created the nested domain. Using the vSTE userspace can request:
 * - Non-valid STE
 * - Abort STE
 * - Bypass STE (install the S2, no CD table)
 * - CD table STE (install the S2 and the userspace CD table)
 */
static void arm_smmu_make_nested_domain_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	unsigned int cfg =
		FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));

	/*
	 * Userspace can request a non-valid STE through the nesting interface.
	 * We relay that into an abort physical STE with the intention that
	 * C_BAD_STE for this SID can be generated to userspace.
	 */
	if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
		cfg = STRTAB_STE_0_CFG_ABORT;

	switch (cfg) {
	case STRTAB_STE_0_CFG_S1_TRANS:
		arm_smmu_make_nested_cd_table_ste(target, master, nested_domain,
						  ats_enabled);
		break;
	case STRTAB_STE_0_CFG_BYPASS:
		arm_smmu_make_s2_domain_ste(target, master,
					    nested_domain->vsmmu->s2_parent,
					    ats_enabled);
		break;
	case STRTAB_STE_0_CFG_ABORT:
	default:
		arm_smmu_make_abort_ste(target);
		break;
	}
}

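/*
 * A vmaster records the virtual Stream ID a master has on its vSMMU so that
 * events can later be forwarded to userspace. It is allocated and looked up
 * during attach_prepare and only swapped into master->vmaster under
 * streams_mutex by arm_smmu_attach_commit_vmaster().
 */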
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
				    struct arm_smmu_nested_domain *nested_domain)
{
	struct arm_smmu_vmaster *vmaster;
	unsigned long vsid;
	int ret;

	iommu_group_mutex_assert(state->master->dev);

	ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
					 state->master->dev, &vsid);
	if (ret)
		return ret;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (!vmaster)
		return -ENOMEM;
	vmaster->vsmmu = nested_domain->vsmmu;
	vmaster->vsid = vsid;
	state->vmaster = vmaster;

	return 0;
}

void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
{
	struct arm_smmu_master *master = state->master;

	mutex_lock(&master->smmu->streams_mutex);
	kfree(master->vmaster);
	master->vmaster = state->vmaster;
	mutex_unlock(&master->smmu->streams_mutex);
}

void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
{
	struct arm_smmu_attach_state state = { .master = master };

	arm_smmu_attach_commit_vmaster(&state);
}

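/*
 * Attach a device to a nested domain: build the physical STE from the guest's
 * vSTE on top of the vSMMU's S2 parent and install it. The attach is refused
 * if the device sits behind a different SMMU than the vSMMU or if SSIDs are
 * already in use through the kernel-owned CD table.
 */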
static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
				      struct device *dev)
{
	struct arm_smmu_nested_domain *nested_domain =
		to_smmu_nested_domain(domain);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_attach_state state = {
		.master = master,
		.old_domain = iommu_get_domain_for_dev(dev),
		.ssid = IOMMU_NO_PASID,
	};
	struct arm_smmu_ste ste;
	int ret;

	if (nested_domain->vsmmu->smmu != master->smmu)
		return -EINVAL;
	if (arm_smmu_ssids_in_use(&master->cd_table))
		return -EBUSY;

	mutex_lock(&arm_smmu_asid_lock);
	/*
	 * The VM has to control the actual ATS state at the PCI device because
	 * we forward the invalidations directly from the VM. If the VM doesn't
	 * think ATS is on it will not generate ATC flushes and the ATC will
	 * become incoherent. Since we can't access the actual virtual PCI ATS
	 * config bit here base this off the EATS value in the STE. If the EATS
	 * is set then the VM must generate ATC flushes.
	 */
	state.disable_ats = !nested_domain->enable_ats;
	ret = arm_smmu_attach_prepare(&state, domain);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		return ret;
	}

	arm_smmu_make_nested_domain_ste(&ste, master, nested_domain,
					state.ats_enabled);
	arm_smmu_install_ste_for_dev(master, &ste);
	arm_smmu_attach_commit(&state);
	mutex_unlock(&arm_smmu_asid_lock);
	return 0;
}

static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
{
	kfree(to_smmu_nested_domain(domain));
}

static const struct iommu_domain_ops arm_smmu_nested_ops = {
	.attach_dev = arm_smmu_attach_dev_nested,
	.free = arm_smmu_domain_nested_free,
};

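/*
 * Sanity check a vSTE before accepting it from userspace. Only the field bits
 * in STRTAB_STE_*_NESTING_ALLOWED are permitted, the Config must be one of
 * ABORT/BYPASS/S1, and EATS may only be EATS_ABT or EATS_TRANS; anything else
 * is rejected with -EIO.
 */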
static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg,
				  bool *enable_ats)
{
	unsigned int eats;
	unsigned int cfg;

	if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) {
		memset(arg->ste, 0, sizeof(arg->ste));
		return 0;
	}

	/* EIO is reserved for invalid STE data. */
	if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) ||
	    (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED))
		return -EIO;

	cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0]));
	if (cfg != STRTAB_STE_0_CFG_ABORT && cfg != STRTAB_STE_0_CFG_BYPASS &&
	    cfg != STRTAB_STE_0_CFG_S1_TRANS)
		return -EIO;

	/*
	 * Only Full ATS or ATS UR is supported.
	 * The EATS field will be set by arm_smmu_make_nested_domain_ste().
	 */
	eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
	arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS);
	if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS)
		return -EIO;

	if (cfg == STRTAB_STE_0_CFG_S1_TRANS)
		*enable_ats = (eats == STRTAB_STE_1_EATS_TRANS);
	return 0;
}

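/*
 * Allocate an IOMMU_DOMAIN_NESTED from the vSTE that userspace supplied via
 * IOMMU_HWPT_DATA_ARM_SMMUV3. The vSTE is validated and stored in the nested
 * domain with the EATS field cleared; the physical EATS value is chosen when
 * the STE is built at attach time.
 */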
struct iommu_domain *
arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
			      const struct iommu_user_data *user_data)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	struct arm_smmu_nested_domain *nested_domain;
	struct iommu_hwpt_arm_smmuv3 arg;
	bool enable_ats = false;
	int ret;

	if (flags)
		return ERR_PTR(-EOPNOTSUPP);

	ret = iommu_copy_struct_from_user(&arg, user_data,
					  IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
	if (ret)
		return ERR_PTR(ret);

	ret = arm_smmu_validate_vste(&arg, &enable_ats);
	if (ret)
		return ERR_PTR(ret);

	nested_domain = kzalloc(sizeof(*nested_domain), GFP_KERNEL_ACCOUNT);
	if (!nested_domain)
		return ERR_PTR(-ENOMEM);

	nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
	nested_domain->domain.ops = &arm_smmu_nested_ops;
	nested_domain->enable_ats = enable_ats;
	nested_domain->vsmmu = vsmmu;
	nested_domain->ste[0] = arg.ste[0];
	nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);

	return &nested_domain->domain;
}

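/*
 * Translate a virtual Stream ID into the physical Stream ID of the device
 * that was attached to the vSMMU with that vSID. Returns -EIO if no such
 * device exists.
 */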
static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)
{
	struct arm_smmu_master *master;
	struct device *dev;
	int ret = 0;

	xa_lock(&vsmmu->core.vdevs);
	dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
	if (!dev) {
		ret = -EIO;
		goto unlock;
	}
	master = dev_iommu_priv_get(dev);

	/* At this moment, iommufd only supports PCI devices that have one SID */
	if (sid)
		*sid = master->streams[0].id;
unlock:
	xa_unlock(&vsmmu->core.vdevs);
	return ret;
}

/* This is basically iommu_viommu_arm_smmuv3_invalidate in u64 for conversion */
struct arm_vsmmu_invalidation_cmd {
	union {
		u64 cmd[2];
		struct iommu_viommu_arm_smmuv3_invalidate ucmd;
	};
};

/*
 * Convert, in place, the raw invalidation command into an internal format that
 * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
 * stored in CPU endian.
 *
 * Enforce the VMID or SID on the command.
 */
static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu,
				      struct arm_vsmmu_invalidation_cmd *cmd)
{
	/* Commands are le64 stored in u64 */
	cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
	cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);

	switch (cmd->cmd[0] & CMDQ_0_OP) {
	case CMDQ_OP_TLBI_NSNH_ALL:
		/* Convert to NH_ALL */
		cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
			      FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
		cmd->cmd[1] = 0;
		break;
	case CMDQ_OP_TLBI_NH_VA:
	case CMDQ_OP_TLBI_NH_VAA:
	case CMDQ_OP_TLBI_NH_ALL:
	case CMDQ_OP_TLBI_NH_ASID:
		cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
		cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
		break;
	case CMDQ_OP_ATC_INV:
	case CMDQ_OP_CFGI_CD:
	case CMDQ_OP_CFGI_CD_ALL: {
		u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);

		if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))
			return -EIO;
		cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
		cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
		break;
	}
	default:
		return -EIO;
	}
	return 0;
}

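/*
 * Process a batch of invalidation commands from the guest: copy them in,
 * convert each one in place to target the host VMID/SIDs, and issue them to
 * the main command queue in chunks bounded by CMDQ_BATCH_ENTRIES. On return
 * array->entry_num reports how many commands were consumed.
 */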
int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
			       struct iommu_user_data_array *array)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	struct arm_smmu_device *smmu = vsmmu->smmu;
	struct arm_vsmmu_invalidation_cmd *last;
	struct arm_vsmmu_invalidation_cmd *cmds;
	struct arm_vsmmu_invalidation_cmd *cur;
	struct arm_vsmmu_invalidation_cmd *end;
	int ret;

	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;
	cur = cmds;
	end = cmds + array->entry_num;

	static_assert(sizeof(*cmds) == 2 * sizeof(u64));
	ret = iommu_copy_struct_from_full_user_array(
		cmds, sizeof(*cmds), array,
		IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3);
	if (ret)
		goto out;

	last = cmds;
	while (cur != end) {
		ret = arm_vsmmu_convert_user_cmd(vsmmu, cur);
		if (ret)
			goto out;

		/* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
		cur++;
		if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
			continue;

		/* FIXME always uses the main cmdq rather than trying to group by type */
		ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
						  cur - last, true);
		if (ret) {
			cur--;
			goto out;
		}
		last = cur;
	}
out:
	array->entry_num = cur - cmds;
	kfree(cmds);
	return ret;
}

static const struct iommufd_viommu_ops arm_vsmmu_ops = {
	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
	.cache_invalidate = arm_vsmmu_cache_invalidate,
};

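/*
 * Returning 0 here tells the iommufd core that no vSMMU can be created for
 * this device, either because the SMMU lacks the required features or the
 * requested vIOMMU type is not supported.
 */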
size_t arm_smmu_get_viommu_size(struct device *dev,
				enum iommu_viommu_type viommu_type)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = master->smmu;

	if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
		return 0;

	/*
	 * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
	 * defect is needed to determine if arm_vsmmu_cache_invalidate() needs
	 * any change to remove this.
	 */
	if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
		return 0;

	/*
	 * Must support some way to prevent the VM from bypassing the cache
	 * because VFIO currently does not do any cache maintenance. canwbs
	 * indicates the device is fully coherent and no cache maintenance is
	 * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make
	 * things non-coherent using the memattr, but No-Snoop behavior is not
	 * affected.
	 */
	if (!arm_smmu_master_canwbs(master) &&
	    !(smmu->features & ARM_SMMU_FEAT_S2FWB))
		return 0;

	if (viommu_type == IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
		return VIOMMU_STRUCT_SIZE(struct arm_vsmmu, core);

	if (!smmu->impl_ops || !smmu->impl_ops->get_viommu_size)
		return 0;
	return smmu->impl_ops->get_viommu_size(viommu_type);
}

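/*
 * Initialize a vSMMU against its S2 parent domain. Implementation-specific
 * vIOMMU types are handed off to impl_ops->vsmmu_init().
 */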
int arm_vsmmu_init(struct iommufd_viommu *viommu,
		   struct iommu_domain *parent_domain,
		   const struct iommu_user_data *user_data)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	struct arm_smmu_device *smmu =
		container_of(viommu->iommu_dev, struct arm_smmu_device, iommu);
	struct arm_smmu_domain *s2_parent = to_smmu_domain(parent_domain);

	if (s2_parent->smmu != smmu)
		return -EINVAL;

	vsmmu->smmu = smmu;
	vsmmu->s2_parent = s2_parent;
	/* FIXME Move VMID allocation from the S2 domain allocation to here */
	vsmmu->vmid = s2_parent->s2_cfg.vmid;

	if (viommu->type == IOMMU_VIOMMU_TYPE_ARM_SMMUV3) {
		viommu->ops = &arm_vsmmu_ops;
		return 0;
	}

	return smmu->impl_ops->vsmmu_init(vsmmu, user_data);
}

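/*
 * Forward a hardware event record to the guest's vEVENTQ, replacing the
 * physical Stream ID in the record with the reporting vmaster's vSID.
 */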
int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
{
	struct iommu_vevent_arm_smmuv3 vevt;
	int i;

	lockdep_assert_held(&vmaster->vsmmu->smmu->streams_mutex);

	vevt.evt[0] = cpu_to_le64((evt[0] & ~EVTQ_0_SID) |
				  FIELD_PREP(EVTQ_0_SID, vmaster->vsid));
	for (i = 1; i < EVTQ_ENT_DWORDS; i++)
		vevt.evt[i] = cpu_to_le64(evt[i]);

	return iommufd_viommu_report_event(&vmaster->vsmmu->core,
					   IOMMU_VEVENTQ_TYPE_ARM_SMMUV3, &vevt,
					   sizeof(vevt));
}

MODULE_IMPORT_NS("IOMMUFD");