// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */

#include <uapi/linux/iommufd.h>

#include "arm-smmu-v3.h"

void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_hw_info_arm_smmuv3 *info;
	u32 __iomem *base_idr;
	unsigned int i;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	base_idr = master->smmu->base + ARM_SMMU_IDR0;
	for (i = 0; i <= 5; i++)
		info->idr[i] = readl_relaxed(base_idr + i);
	info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
	info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);

	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3;

	return info;
}
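
/*
 * Userspace consumption sketch (illustrative only, not part of this driver):
 * the info blob built above is returned through iommufd's IOMMU_GET_HW_INFO
 * ioctl. Assuming a device already bound to iommufd with id dev_id, a VMM
 * might do something like:
 *
 *	struct iommu_hw_info_arm_smmuv3 smmu_info = {};
 *	struct iommu_hw_info cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.data_len = sizeof(smmu_info),
 *		.data_uptr = (uintptr_t)&smmu_info,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd) &&
 *	    cmd.out_data_type == IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
 *		... build the vSMMU's emulated ID registers from
 *		    smmu_info.idr[], smmu_info.iidr and smmu_info.aidr ...
 *
 * masking out any IDR features it does not emulate before exposing the
 * registers to the VM.
 */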

static void arm_smmu_make_nested_cd_table_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	arm_smmu_make_s2_domain_ste(
		target, master, nested_domain->vsmmu->s2_parent, ats_enabled);

	target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
				      FIELD_PREP(STRTAB_STE_0_CFG,
						 STRTAB_STE_0_CFG_NESTED));
	target->data[0] |= nested_domain->ste[0] &
			   ~cpu_to_le64(STRTAB_STE_0_CFG);
	target->data[1] |= nested_domain->ste[1];
}

/*
 * Create a physical STE from the virtual STE that userspace provided when it
 * created the nested domain. Using the vSTE userspace can request:
 * - Non-valid STE
 * - Abort STE
 * - Bypass STE (install the S2, no CD table)
 * - CD table STE (install the S2 and the userspace CD table)
 */
static void arm_smmu_make_nested_domain_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	unsigned int cfg =
		FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));

	/*
	 * Userspace can request a non-valid STE through the nesting
	 * interface. We relay that into an abort physical STE so that a
	 * C_BAD_STE event for this SID can be forwarded to userspace.
	 */
	if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
		cfg = STRTAB_STE_0_CFG_ABORT;

	switch (cfg) {
	case STRTAB_STE_0_CFG_S1_TRANS:
		arm_smmu_make_nested_cd_table_ste(target, master, nested_domain,
						  ats_enabled);
		break;
	case STRTAB_STE_0_CFG_BYPASS:
		arm_smmu_make_s2_domain_ste(target, master,
					    nested_domain->vsmmu->s2_parent,
					    ats_enabled);
		break;
	case STRTAB_STE_0_CFG_ABORT:
	default:
		arm_smmu_make_abort_ste(target);
		break;
	}
}
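
/*
 * For reference, a minimal bypass vSTE as userspace might encode it (an
 * illustrative sketch only; the authoritative field layout is the SMMUv3
 * spec and the STRTAB_STE_* masks in arm-smmu-v3.h):
 *
 *	__le64 vste[2] = {
 *		cpu_to_le64(STRTAB_STE_0_V |
 *			    FIELD_PREP(STRTAB_STE_0_CFG,
 *				       STRTAB_STE_0_CFG_BYPASS)),
 *		0,
 *	};
 *
 * A CD table vSTE would instead carry STRTAB_STE_0_CFG_S1_TRANS plus the
 * guest's S1ContextPtr/S1CDMax/S1Fmt in vste[0] and the S1DSS/EATS controls
 * in vste[1]. Any bit outside the NESTING_ALLOWED masks is rejected by
 * arm_smmu_validate_vste() below.
 */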

static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
				      struct device *dev)
{
	struct arm_smmu_nested_domain *nested_domain =
		to_smmu_nested_domain(domain);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_attach_state state = {
		.master = master,
		.old_domain = iommu_get_domain_for_dev(dev),
		.ssid = IOMMU_NO_PASID,
	};
	struct arm_smmu_ste ste;
	int ret;

	if (nested_domain->vsmmu->smmu != master->smmu)
		return -EINVAL;
	if (arm_smmu_ssids_in_use(&master->cd_table))
		return -EBUSY;

	mutex_lock(&arm_smmu_asid_lock);
	/*
	 * The VM has to control the actual ATS state at the PCI device
	 * because we forward the invalidations directly from the VM. If the
	 * VM doesn't think ATS is on, it will not generate ATC flushes and
	 * the ATC will become incoherent. Since we can't access the actual
	 * virtual PCI ATS config bit here, base this off the EATS value in
	 * the STE. If EATS is set then the VM must generate ATC flushes.
	 */
	state.disable_ats = !nested_domain->enable_ats;
	ret = arm_smmu_attach_prepare(&state, domain);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		return ret;
	}

	arm_smmu_make_nested_domain_ste(&ste, master, nested_domain,
					state.ats_enabled);
	arm_smmu_install_ste_for_dev(master, &ste);
	arm_smmu_attach_commit(&state);
	mutex_unlock(&arm_smmu_asid_lock);
	return 0;
}

static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
{
	kfree(to_smmu_nested_domain(domain));
}

static const struct iommu_domain_ops arm_smmu_nested_ops = {
	.attach_dev = arm_smmu_attach_dev_nested,
	.free = arm_smmu_domain_nested_free,
};

static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg,
				  bool *enable_ats)
{
	unsigned int eats;
	unsigned int cfg;

	if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) {
		memset(arg->ste, 0, sizeof(arg->ste));
		return 0;
	}

	/* EIO is reserved for invalid STE data. */
	if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) ||
	    (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED))
		return -EIO;

	cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0]));
	if (cfg != STRTAB_STE_0_CFG_ABORT && cfg != STRTAB_STE_0_CFG_BYPASS &&
	    cfg != STRTAB_STE_0_CFG_S1_TRANS)
		return -EIO;

	/*
	 * Only Full ATS or ATS UR is supported. The EATS field will be set
	 * by arm_smmu_make_nested_domain_ste().
	 */
	eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
	arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS);
	if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS)
		return -EIO;

	if (cfg == STRTAB_STE_0_CFG_S1_TRANS)
		*enable_ats = (eats == STRTAB_STE_1_EATS_TRANS);
	return 0;
}

static struct iommu_domain *
arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
			      const struct iommu_user_data *user_data)
{
	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
	const u32 SUPPORTED_FLAGS = IOMMU_HWPT_FAULT_ID_VALID;
	struct arm_smmu_nested_domain *nested_domain;
	struct iommu_hwpt_arm_smmuv3 arg;
	bool enable_ats = false;
	int ret;

	/*
	 * Faults delivered to the nested domain are faults that originate in
	 * the S1 in the domain. The core code will match all PASIDs when
	 * delivering the fault due to user_pasid_table.
	 */
	if (flags & ~SUPPORTED_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	ret = iommu_copy_struct_from_user(&arg, user_data,
					  IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
	if (ret)
		return ERR_PTR(ret);

	ret = arm_smmu_validate_vste(&arg, &enable_ats);
	if (ret)
		return ERR_PTR(ret);

	nested_domain = kzalloc(sizeof(*nested_domain), GFP_KERNEL_ACCOUNT);
	if (!nested_domain)
		return ERR_PTR(-ENOMEM);

	nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
	nested_domain->domain.ops = &arm_smmu_nested_ops;
	nested_domain->enable_ats = enable_ats;
	nested_domain->vsmmu = vsmmu;
	nested_domain->ste[0] = arg.ste[0];
	nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);

	return &nested_domain->domain;
}
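
/*
 * Allocation path sketch from userspace (illustrative only): a nested domain
 * is created with the IOMMU_HWPT_ALLOC ioctl, parented to a vIOMMU object,
 * with the vSTE passed through the data pointer. Assuming ids dev_id and
 * viommu_id obtained earlier:
 *
 *	struct iommu_hwpt_arm_smmuv3 data = {
 *		.ste = { vste[0], vste[1] },
 *	};
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = viommu_id,
 *		.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3,
 *		.data_len = sizeof(data),
 *		.data_uptr = (uintptr_t)&data,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		... attach the device to cmd.out_hwpt_id ...
 */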

static const struct iommufd_viommu_ops arm_vsmmu_ops = {
	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
};

struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
				       struct iommu_domain *parent,
				       struct iommufd_ctx *ictx,
				       unsigned int viommu_type)
{
	struct arm_smmu_device *smmu =
		iommu_get_iommu_dev(dev, struct arm_smmu_device, iommu);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_domain *s2_parent = to_smmu_domain(parent);
	struct arm_vsmmu *vsmmu;

	if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
		return ERR_PTR(-EOPNOTSUPP);

	if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
		return ERR_PTR(-EOPNOTSUPP);

	if (s2_parent->smmu != master->smmu)
		return ERR_PTR(-EINVAL);

	/*
	 * Must support some way to prevent the VM from bypassing the cache
	 * because VFIO currently does not do any cache maintenance. canwbs
	 * indicates the device is fully coherent and no cache maintenance is
	 * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make
	 * things non-coherent using the memattr, but No-Snoop behavior is not
	 * affected.
	 */
	if (!arm_smmu_master_canwbs(master) &&
	    !(smmu->features & ARM_SMMU_FEAT_S2FWB))
		return ERR_PTR(-EOPNOTSUPP);

	vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core,
				     &arm_vsmmu_ops);
	if (IS_ERR(vsmmu))
		return ERR_CAST(vsmmu);

	vsmmu->smmu = smmu;
	vsmmu->s2_parent = s2_parent;
	/* FIXME Move VMID allocation from the S2 domain allocation to here */
	vsmmu->vmid = s2_parent->s2_cfg.vmid;

	return &vsmmu->core;
}
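
/*
 * vIOMMU creation sketch from userspace (illustrative only): the VMM first
 * allocates a nesting parent S2 hwpt with IOMMU_HWPT_ALLOC_NEST_PARENT, then
 * instantiates the vSMMU on top of it with the IOMMU_VIOMMU_ALLOC ioctl:
 *
 *	struct iommu_viommu_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
 *		.dev_id = dev_id,
 *		.hwpt_id = s2_hwpt_id,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &cmd))
 *		... use cmd.out_viommu_id as the pt_id for nested domains ...
 */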