xref: /linux/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c (revision 62622a8753fa6af3c104f9552863e6473b92fb31)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */

#include <uapi/linux/iommufd.h>

#include "arm-smmu-v3.h"

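/*
 * Report the SMMUv3 identification registers to userspace: IDR0-IDR5, IIDR and
 * AIDR are read directly from the device and returned through the iommufd HW
 * info query (IOMMU_GET_HW_INFO) so a VMM can model a matching vSMMU.
 */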
void *arm_smmu_hw_info(struct device *dev, u32 *length,
		       enum iommu_hw_info_type *type)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_hw_info_arm_smmuv3 *info;
	u32 __iomem *base_idr;
	unsigned int i;

	if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
	    *type != IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
		return ERR_PTR(-EOPNOTSUPP);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	base_idr = master->smmu->base + ARM_SMMU_IDR0;
	for (i = 0; i <= 5; i++)
		info->idr[i] = readl_relaxed(base_idr + i);
	info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
	info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);

	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3;

	return info;
}

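/*
 * Build the CD-table STE for a nested domain: start from the S2 STE of the
 * vSMMU's parent domain, then overlay the stage-1 fields of the user-provided
 * vSTE with CFG forced to the nested (S1 over S2) configuration. MEV is always
 * set so similar fault records are merged on the event queue.
 */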
static void arm_smmu_make_nested_cd_table_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	arm_smmu_make_s2_domain_ste(
		target, master, nested_domain->vsmmu->s2_parent, ats_enabled);

	target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
				      FIELD_PREP(STRTAB_STE_0_CFG,
						 STRTAB_STE_0_CFG_NESTED));
	target->data[0] |= nested_domain->ste[0] &
			   ~cpu_to_le64(STRTAB_STE_0_CFG);
	target->data[1] |= nested_domain->ste[1];
	/* Merge events for DoS mitigations on eventq */
	target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}

/*
 * Create a physical STE from the virtual STE that userspace provided when it
 * created the nested domain. Using the vSTE userspace can request:
 * - Non-valid STE
 * - Abort STE
 * - Bypass STE (install the S2, no CD table)
 * - CD table STE (install the S2 and the userspace CD table)
 */
static void arm_smmu_make_nested_domain_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	unsigned int cfg =
		FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));

	/*
	 * Userspace can request a non-valid STE through the nesting interface.
	 * We relay that into an abort physical STE with the intention that
	 * C_BAD_STE for this SID can be generated to userspace.
	 */
	if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
		cfg = STRTAB_STE_0_CFG_ABORT;

	switch (cfg) {
	case STRTAB_STE_0_CFG_S1_TRANS:
		arm_smmu_make_nested_cd_table_ste(target, master, nested_domain,
						  ats_enabled);
		break;
	case STRTAB_STE_0_CFG_BYPASS:
		arm_smmu_make_s2_domain_ste(target, master,
					    nested_domain->vsmmu->s2_parent,
					    ats_enabled);
		break;
	case STRTAB_STE_0_CFG_ABORT:
	default:
		arm_smmu_make_abort_ste(target);
		break;
	}
}

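/*
 * vmaster lifecycle: arm_smmu_attach_prepare_vmaster() looks up the virtual
 * StreamID the VMM assigned to this device on the vIOMMU and allocates a
 * vmaster to carry it; arm_smmu_attach_commit_vmaster() publishes it (freeing
 * any previous one) under streams_mutex so the event path can dereference it
 * safely; arm_smmu_master_clear_vmaster() drops it by committing an empty
 * state.
 */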
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
				    struct arm_smmu_nested_domain *nested_domain)
{
	struct arm_smmu_vmaster *vmaster;
	unsigned long vsid;
	int ret;

	iommu_group_mutex_assert(state->master->dev);

	ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
					 state->master->dev, &vsid);
	if (ret)
		return ret;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (!vmaster)
		return -ENOMEM;
	vmaster->vsmmu = nested_domain->vsmmu;
	vmaster->vsid = vsid;
	state->vmaster = vmaster;

	return 0;
}

void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
{
	struct arm_smmu_master *master = state->master;

	mutex_lock(&master->smmu->streams_mutex);
	kfree(master->vmaster);
	master->vmaster = state->vmaster;
	mutex_unlock(&master->smmu->streams_mutex);
}

void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
{
	struct arm_smmu_attach_state state = { .master = master };

	arm_smmu_attach_commit_vmaster(&state);
}

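/*
 * Attach a device to a nested domain. The nested domain's vSMMU must belong to
 * the same physical SMMU as the master, nesting is mutually exclusive with
 * SSID (PASID) use on the CD table, and the resulting STE is installed under
 * arm_smmu_asid_lock.
 */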
static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
				      struct device *dev)
{
	struct arm_smmu_nested_domain *nested_domain =
		to_smmu_nested_domain(domain);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_attach_state state = {
		.master = master,
		.old_domain = iommu_get_domain_for_dev(dev),
		.ssid = IOMMU_NO_PASID,
	};
	struct arm_smmu_ste ste;
	int ret;

	if (nested_domain->vsmmu->smmu != master->smmu)
		return -EINVAL;
	if (arm_smmu_ssids_in_use(&master->cd_table))
		return -EBUSY;

	mutex_lock(&arm_smmu_asid_lock);
	/*
	 * The VM has to control the actual ATS state at the PCI device because
	 * we forward the invalidations directly from the VM. If the VM doesn't
	 * think ATS is on it will not generate ATC flushes and the ATC will
	 * become incoherent. Since we can't access the actual virtual PCI ATS
	 * config bit here, base this off the EATS value in the STE. If EATS is
	 * set then the VM must generate ATC flushes.
	 */
	state.disable_ats = !nested_domain->enable_ats;
	ret = arm_smmu_attach_prepare(&state, domain);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		return ret;
	}

	arm_smmu_make_nested_domain_ste(&ste, master, nested_domain,
					state.ats_enabled);
	arm_smmu_install_ste_for_dev(master, &ste);
	arm_smmu_attach_commit(&state);
	mutex_unlock(&arm_smmu_asid_lock);
	return 0;
}

static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
{
	kfree(to_smmu_nested_domain(domain));
}

static const struct iommu_domain_ops arm_smmu_nested_ops = {
	.attach_dev = arm_smmu_attach_dev_nested,
	.free = arm_smmu_domain_nested_free,
};

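/*
 * Sanity-check a vSTE copied from userspace before accepting it into a nested
 * domain: only bits within the NESTING_ALLOWED masks and only the abort,
 * bypass and S1-translate configurations are permitted. The EATS field is
 * consumed (and cleared) here to decide whether ATS should be enabled at
 * attach time.
 */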
static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg,
				  bool *enable_ats)
{
	unsigned int eats;
	unsigned int cfg;

	if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) {
		memset(arg->ste, 0, sizeof(arg->ste));
		return 0;
	}

	/* EIO is reserved for invalid STE data. */
	if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) ||
	    (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED))
		return -EIO;

	cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0]));
	if (cfg != STRTAB_STE_0_CFG_ABORT && cfg != STRTAB_STE_0_CFG_BYPASS &&
	    cfg != STRTAB_STE_0_CFG_S1_TRANS)
		return -EIO;

	/*
	 * Only Full ATS or ATS UR is supported. The EATS field will be set by
	 * arm_smmu_make_nested_domain_ste().
	 */
	eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
	arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS);
	if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS)
		return -EIO;

	if (cfg == STRTAB_STE_0_CFG_S1_TRANS)
		*enable_ats = (eats == STRTAB_STE_1_EATS_TRANS);
	return 0;
}

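/*
 * Allocate an IOMMU_DOMAIN_NESTED object from the vSTE a VMM provides through
 * struct iommu_hwpt_arm_smmuv3. The vSTE is validated and stored; the physical
 * STE is only built at attach time by arm_smmu_make_nested_domain_ste().
 */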
static struct iommu_domain *
arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
			      const struct iommu_user_data *user_data)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	struct arm_smmu_nested_domain *nested_domain;
	struct iommu_hwpt_arm_smmuv3 arg;
	bool enable_ats = false;
	int ret;

	if (flags)
		return ERR_PTR(-EOPNOTSUPP);

	ret = iommu_copy_struct_from_user(&arg, user_data,
					  IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
	if (ret)
		return ERR_PTR(ret);

	ret = arm_smmu_validate_vste(&arg, &enable_ats);
	if (ret)
		return ERR_PTR(ret);

	nested_domain = kzalloc(sizeof(*nested_domain), GFP_KERNEL_ACCOUNT);
	if (!nested_domain)
		return ERR_PTR(-ENOMEM);

	nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
	nested_domain->domain.ops = &arm_smmu_nested_ops;
	nested_domain->enable_ats = enable_ats;
	nested_domain->vsmmu = vsmmu;
	nested_domain->ste[0] = arg.ste[0];
	nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);

	return &nested_domain->domain;
}

static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)
{
	struct arm_smmu_master *master;
	struct device *dev;
	int ret = 0;

	xa_lock(&vsmmu->core.vdevs);
	dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
	if (!dev) {
		ret = -EIO;
		goto unlock;
	}
	master = dev_iommu_priv_get(dev);

	/* At this moment, iommufd only supports PCI devices that have one SID */
	if (sid)
		*sid = master->streams[0].id;
unlock:
	xa_unlock(&vsmmu->core.vdevs);
	return ret;
}

/* This is basically iommu_viommu_arm_smmuv3_invalidate in u64 for conversion */
struct arm_vsmmu_invalidation_cmd {
	union {
		u64 cmd[2];
		struct iommu_viommu_arm_smmuv3_invalidate ucmd;
	};
};

/*
 * Convert, in place, the raw invalidation command into an internal format that
 * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
 * stored in CPU endian.
 *
 * Enforce the VMID or SID on the command.
 */
static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu,
				      struct arm_vsmmu_invalidation_cmd *cmd)
{
	/* Commands are le64 stored in u64 */
	cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
	cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);

	switch (cmd->cmd[0] & CMDQ_0_OP) {
	case CMDQ_OP_TLBI_NSNH_ALL:
		/* Convert to NH_ALL */
		cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
			      FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
		cmd->cmd[1] = 0;
		break;
	case CMDQ_OP_TLBI_NH_VA:
	case CMDQ_OP_TLBI_NH_VAA:
	case CMDQ_OP_TLBI_NH_ALL:
	case CMDQ_OP_TLBI_NH_ASID:
		cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
		cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
		break;
	case CMDQ_OP_ATC_INV:
	case CMDQ_OP_CFGI_CD:
	case CMDQ_OP_CFGI_CD_ALL: {
		u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);

		if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))
			return -EIO;
		cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
		cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
		break;
	}
	default:
		return -EIO;
	}
	return 0;
}

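/*
 * Handle a guest invalidation batch: copy the whole user array, rewrite each
 * command to use the host VMID/SID, and issue it to the main command queue in
 * chunks of up to CMDQ_BATCH_ENTRIES. On exit array->entry_num reports how
 * many commands were consumed, so on error userspace can tell how far the
 * batch got.
 */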
static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
				      struct iommu_user_data_array *array)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	struct arm_smmu_device *smmu = vsmmu->smmu;
	struct arm_vsmmu_invalidation_cmd *last;
	struct arm_vsmmu_invalidation_cmd *cmds;
	struct arm_vsmmu_invalidation_cmd *cur;
	struct arm_vsmmu_invalidation_cmd *end;
	int ret;

	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;
	cur = cmds;
	end = cmds + array->entry_num;

	static_assert(sizeof(*cmds) == 2 * sizeof(u64));
	ret = iommu_copy_struct_from_full_user_array(
		cmds, sizeof(*cmds), array,
		IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3);
	if (ret)
		goto out;

	last = cmds;
	while (cur != end) {
		ret = arm_vsmmu_convert_user_cmd(vsmmu, cur);
		if (ret)
			goto out;

		/* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
		cur++;
		if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
			continue;

		/* FIXME always uses the main cmdq rather than trying to group by type */
		ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
						  cur - last, true);
		if (ret) {
			cur--;
			goto out;
		}
		last = cur;
	}
out:
	array->entry_num = cur - cmds;
	kfree(cmds);
	return ret;
}

static const struct iommufd_viommu_ops arm_vsmmu_ops = {
	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
	.cache_invalidate = arm_vsmmu_cache_invalidate,
};

size_t arm_smmu_get_viommu_size(struct device *dev,
				enum iommu_viommu_type viommu_type)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = master->smmu;

	if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
		return 0;

	/*
	 * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
	 * defect is needed to determine if arm_vsmmu_cache_invalidate() needs
	 * any change to remove this.
	 */
	if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
		return 0;

	/*
	 * Must support some way to prevent the VM from bypassing the cache
	 * because VFIO currently does not do any cache maintenance. canwbs
	 * indicates the device is fully coherent and no cache maintenance is
	 * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make
	 * things non-coherent using the memattr, but No-Snoop behavior is not
	 * affected.
	 */
	if (!arm_smmu_master_canwbs(master) &&
	    !(smmu->features & ARM_SMMU_FEAT_S2FWB))
		return 0;

	if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
		return 0;

	return VIOMMU_STRUCT_SIZE(struct arm_vsmmu, core);
}

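/*
 * Initialise the vSMMU object for a VMM: the S2 parent domain must live on the
 * same physical SMMU as the vIOMMU, and its VMID is reused when forwarding
 * guest-issued TLB invalidations.
 */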
int arm_vsmmu_init(struct iommufd_viommu *viommu,
		   struct iommu_domain *parent_domain,
		   const struct iommu_user_data *user_data)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	struct arm_smmu_device *smmu =
		container_of(viommu->iommu_dev, struct arm_smmu_device, iommu);
	struct arm_smmu_domain *s2_parent = to_smmu_domain(parent_domain);

	if (s2_parent->smmu != smmu)
		return -EINVAL;

	vsmmu->smmu = smmu;
	vsmmu->s2_parent = s2_parent;
	/* FIXME Move VMID allocation from the S2 domain allocation to here */
	vsmmu->vmid = s2_parent->s2_cfg.vmid;

	viommu->ops = &arm_vsmmu_ops;
	return 0;
}

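/*
 * Forward an event record to the VMM through the vIOMMU's vEVENTQ, replacing
 * the physical StreamID in the record with the device's virtual StreamID so
 * the guest sees its own SID numbering.
 */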
int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
{
	struct iommu_vevent_arm_smmuv3 vevt;
	int i;

	lockdep_assert_held(&vmaster->vsmmu->smmu->streams_mutex);

	vevt.evt[0] = cpu_to_le64((evt[0] & ~EVTQ_0_SID) |
				  FIELD_PREP(EVTQ_0_SID, vmaster->vsid));
	for (i = 1; i < EVTQ_ENT_DWORDS; i++)
		vevt.evt[i] = cpu_to_le64(evt[i]);

	return iommufd_viommu_report_event(&vmaster->vsmmu->core,
					   IOMMU_VEVENTQ_TYPE_ARM_SMMUV3, &vevt,
					   sizeof(vevt));
}

MODULE_IMPORT_NS("IOMMUFD");