Lines Matching +full:ats +full:- +full:supported

1 // SPDX-License-Identifier: GPL-2.0
8 #include "arm-smmu-v3.h"
19 return ERR_PTR(-ENOMEM); in arm_smmu_hw_info()
21 base_idr = master->smmu->base + ARM_SMMU_IDR0; in arm_smmu_hw_info()
23 info->idr[i] = readl_relaxed(base_idr + i); in arm_smmu_hw_info()
24 info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR); in arm_smmu_hw_info()
25 info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR); in arm_smmu_hw_info()
38 target, master, nested_domain->vsmmu->s2_parent, ats_enabled); in arm_smmu_make_nested_cd_table_ste()
40 target->data[0] = cpu_to_le64(STRTAB_STE_0_V | in arm_smmu_make_nested_cd_table_ste()
43 target->data[0] |= nested_domain->ste[0] & in arm_smmu_make_nested_cd_table_ste()
45 target->data[1] |= nested_domain->ste[1]; in arm_smmu_make_nested_cd_table_ste()
47 target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV); in arm_smmu_make_nested_cd_table_ste()
53 * - Non-valid STE
54 * - Abort STE
55 * - Bypass STE (install the S2, no CD table)
56 * - CD table STE (install the S2 and the userspace CD table)
63 FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0])); in arm_smmu_make_nested_domain_ste()
66 * Userspace can request a non-valid STE through the nesting interface. in arm_smmu_make_nested_domain_ste()
70 if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) in arm_smmu_make_nested_domain_ste()
80 nested_domain->vsmmu->s2_parent, in arm_smmu_make_nested_domain_ste()
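
The comment above lists the four vSTE shapes userspace may hand to the nesting interface (non-valid, abort, bypass, CD table), and the code then dispatches on the STE Config field. Below is a minimal, self-contained sketch of that decode, assuming the SMMUv3 STE layout (V in word-0 bit 0, Config in word-0 bits [3:1]); the macro and helper names are illustrative stand-ins, not the driver's own symbols:

    /* Classify a guest-provided STE word 0 into the cases listed above.
     * Field positions follow the SMMUv3 STE layout; names are local. */
    #include <stdint.h>
    #include <stdio.h>

    #define STE0_V        (1ULL << 0)       /* word 0, bit 0: valid        */
    #define STE0_CFG_MASK (0x7ULL << 1)     /* word 0, bits [3:1]: Config  */
    #define STE0_CFG(x)   (((x) & STE0_CFG_MASK) >> 1)

    enum vste_kind { VSTE_ABORT, VSTE_BYPASS, VSTE_CD_TABLE };

    enum vste_kind classify_vste(uint64_t ste0)
    {
            if (!(ste0 & STE0_V))           /* non-valid vSTE behaves as abort */
                    return VSTE_ABORT;

            switch (STE0_CFG(ste0)) {
            case 0x5:       /* S1 translate: install the S2 + guest CD table */
                    return VSTE_CD_TABLE;
            case 0x4:       /* S1 bypass: install the S2 only */
                    return VSTE_BYPASS;
            case 0x0:       /* abort */
            default:
                    return VSTE_ABORT;
            }
    }

    int main(void)
    {
            uint64_t guest_ste0 = (0x5ULL << 1) | STE0_V;
            printf("%d\n", classify_vste(guest_ste0));  /* 2 == VSTE_CD_TABLE */
            return 0;
    }
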
97 iommu_group_mutex_assert(state->master->dev); in arm_smmu_attach_prepare_vmaster()
99 ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core, in arm_smmu_attach_prepare_vmaster()
100 state->master->dev, &vsid); in arm_smmu_attach_prepare_vmaster()
106 return -ENOMEM; in arm_smmu_attach_prepare_vmaster()
107 vmaster->vsmmu = nested_domain->vsmmu; in arm_smmu_attach_prepare_vmaster()
108 vmaster->vsid = vsid; in arm_smmu_attach_prepare_vmaster()
109 state->vmaster = vmaster; in arm_smmu_attach_prepare_vmaster()
116 struct arm_smmu_master *master = state->master; in arm_smmu_attach_commit_vmaster()
118 mutex_lock(&master->smmu->streams_mutex); in arm_smmu_attach_commit_vmaster()
119 kfree(master->vmaster); in arm_smmu_attach_commit_vmaster()
120 master->vmaster = state->vmaster; in arm_smmu_attach_commit_vmaster()
121 mutex_unlock(&master->smmu->streams_mutex); in arm_smmu_attach_commit_vmaster()
145 if (nested_domain->vsmmu->smmu != master->smmu) in arm_smmu_attach_dev_nested()
146 return -EINVAL; in arm_smmu_attach_dev_nested()
147 if (arm_smmu_ssids_in_use(&master->cd_table)) in arm_smmu_attach_dev_nested()
148 return -EBUSY; in arm_smmu_attach_dev_nested()
152 * The VM has to control the actual ATS state at the PCI device because in arm_smmu_attach_dev_nested()
154 * think ATS is on it will not generate ATC flushes and the ATC will in arm_smmu_attach_dev_nested()
155 * become incoherent. Since we can't access the actual virtual PCI ATS in arm_smmu_attach_dev_nested()
159 state.disable_ats = !nested_domain->enable_ats; in arm_smmu_attach_dev_nested()
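
The comment above states the key ATS invariant for nesting: the guest owns the device's virtual ATS state, so the host may only enable ATS when the guest's vSTE asked for it, otherwise the guest never issues the ATC invalidations the ATC depends on. A small illustrative sketch of that gating follows, assuming the SMMUv3 EATS field lives in STE word 1 bits [29:28]; the struct and helper names are stand-ins, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    #define VSTE1_EATS_MASK (0x3ULL << 28)  /* STE word 1, bits [29:28]: EATS */
    #define VSTE1_EATS(x)   (((x) & VSTE1_EATS_MASK) >> 28)
    #define EATS_TRANS      0x1             /* full ATS requested by the guest */

    struct nested_dom {
            bool enable_ats;
    };

    /* Domain creation: remember whether the guest's vSTE asked for ATS. */
    void record_guest_ats(struct nested_dom *d, uint64_t vste1)
    {
            d->enable_ats = (VSTE1_EATS(vste1) == EATS_TRANS);
    }

    /* Attach: the host only turns ATS on when the guest believes it is on,
     * otherwise the guest will not send ATC flushes and the ATC goes stale. */
    bool host_may_enable_ats(const struct nested_dom *d, bool dev_supports_ats)
    {
            return dev_supports_ats && d->enable_ats;
    }
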
190 if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) { in arm_smmu_validate_vste()
191 memset(arg->ste, 0, sizeof(arg->ste)); in arm_smmu_validate_vste()
196 if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) || in arm_smmu_validate_vste()
197 (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED)) in arm_smmu_validate_vste()
198 return -EIO; in arm_smmu_validate_vste()
200 cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0])); in arm_smmu_validate_vste()
203 return -EIO; in arm_smmu_validate_vste()
206 * Only Full ATS or ATS UR is supported in arm_smmu_validate_vste()
209 eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1])); in arm_smmu_validate_vste()
210 arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS); in arm_smmu_validate_vste()
212 return -EIO; in arm_smmu_validate_vste()
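
For the "Only Full ATS or ATS UR is supported" rule above: the two-bit EATS encoding taken from the guest STE is accepted only as 0 (ATS Translation Requests answered with UR) or 1 (full ATS); the split-stage encoding is rejected. A tiny hedged sketch of that check, with locally defined stand-in constants rather than the driver's:

    #include <errno.h>
    #include <stdint.h>

    #define EATS_ABT    0x0  /* ATS Translation Requests get UR back ("ATS UR") */
    #define EATS_TRANS  0x1  /* full ATS */
    #define EATS_S1CHK  0x2  /* split-stage ATS; not accepted for nesting */

    int check_guest_eats(uint64_t eats)
    {
            if (eats != EATS_ABT && eats != EATS_TRANS)
                    return -EIO;
            return 0;
    }
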
230 return ERR_PTR(-EOPNOTSUPP); in arm_vsmmu_alloc_domain_nested()
243 return ERR_PTR(-ENOMEM); in arm_vsmmu_alloc_domain_nested()
245 nested_domain->domain.type = IOMMU_DOMAIN_NESTED; in arm_vsmmu_alloc_domain_nested()
246 nested_domain->domain.ops = &arm_smmu_nested_ops; in arm_vsmmu_alloc_domain_nested()
247 nested_domain->enable_ats = enable_ats; in arm_vsmmu_alloc_domain_nested()
248 nested_domain->vsmmu = vsmmu; in arm_vsmmu_alloc_domain_nested()
249 nested_domain->ste[0] = arg.ste[0]; in arm_vsmmu_alloc_domain_nested()
250 nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS); in arm_vsmmu_alloc_domain_nested()
252 return &nested_domain->domain; in arm_vsmmu_alloc_domain_nested()
261 xa_lock(&vsmmu->core.vdevs); in arm_vsmmu_vsid_to_sid()
262 dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid); in arm_vsmmu_vsid_to_sid()
264 ret = -EIO; in arm_vsmmu_vsid_to_sid()
271 *sid = master->streams[0].id; in arm_vsmmu_vsid_to_sid()
273 xa_unlock(&vsmmu->core.vdevs); in arm_vsmmu_vsid_to_sid()
296 cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]); in arm_vsmmu_convert_user_cmd()
297 cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]); in arm_vsmmu_convert_user_cmd()
299 switch (cmd->cmd[0] & CMDQ_0_OP) { in arm_vsmmu_convert_user_cmd()
302 cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL | in arm_vsmmu_convert_user_cmd()
303 FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid); in arm_vsmmu_convert_user_cmd()
304 cmd->cmd[1] = 0; in arm_vsmmu_convert_user_cmd()
310 cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID; in arm_vsmmu_convert_user_cmd()
311 cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid); in arm_vsmmu_convert_user_cmd()
316 u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]); in arm_vsmmu_convert_user_cmd()
319 return -EIO; in arm_vsmmu_convert_user_cmd()
320 cmd->cmd[0] &= ~CMDQ_CFGI_0_SID; in arm_vsmmu_convert_user_cmd()
321 cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid); in arm_vsmmu_convert_user_cmd()
325 return -EIO; in arm_vsmmu_convert_user_cmd()
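
The conversion above rewrites guest invalidation commands before they reach the host command queue: the opcode is kept, the VMID field is forced to the VMID the host assigned to this vSMMU, and CFGI stream IDs are translated from vSID to physical SID. A worked sketch of the VMID fix-up, assuming the SMMUv3 CMDQ layout (opcode in bits [7:0], VMID in bits [47:32]); the names here are local stand-ins:

    #include <stdint.h>

    #define CMD_OP_MASK    0xffULL                        /* bits [7:0]: opcode  */
    #define CMD_VMID_SHIFT 32
    #define CMD_VMID_MASK  (0xffffULL << CMD_VMID_SHIFT)  /* bits [47:32]: VMID  */

    /* Scope a guest TLBI command word 0 to the host-assigned VMID so a guest
     * can only ever invalidate its own stage-1 entries. */
    uint64_t fixup_guest_tlbi(uint64_t guest_cmd0, uint16_t host_vmid)
    {
            uint64_t cmd0 = guest_cmd0;

            cmd0 &= ~CMD_VMID_MASK;                        /* drop the guest VMID */
            cmd0 |= (uint64_t)host_vmid << CMD_VMID_SHIFT; /* insert host VMID    */
            return cmd0;
    }
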
334 struct arm_smmu_device *smmu = vsmmu->smmu; in arm_vsmmu_cache_invalidate()
341 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); in arm_vsmmu_cache_invalidate()
343 return -ENOMEM; in arm_vsmmu_cache_invalidate()
345 end = cmds + array->entry_num; in arm_vsmmu_cache_invalidate()
362 if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1) in arm_vsmmu_cache_invalidate()
366 ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd, in arm_vsmmu_cache_invalidate()
367 cur - last, true); in arm_vsmmu_cache_invalidate()
369 cur--; in arm_vsmmu_cache_invalidate()
375 array->entry_num = cur - cmds; in arm_vsmmu_cache_invalidate()
397 return ERR_PTR(-EOPNOTSUPP); in arm_vsmmu_alloc()
399 if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) in arm_vsmmu_alloc()
400 return ERR_PTR(-EOPNOTSUPP); in arm_vsmmu_alloc()
402 if (s2_parent->smmu != master->smmu) in arm_vsmmu_alloc()
403 return ERR_PTR(-EINVAL); in arm_vsmmu_alloc()
410 if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) in arm_vsmmu_alloc()
411 return ERR_PTR(-EOPNOTSUPP); in arm_vsmmu_alloc()
417 * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make in arm_vsmmu_alloc()
418 * things non-coherent using the memattr, but No-Snoop behavior is not in arm_vsmmu_alloc()
422 !(smmu->features & ARM_SMMU_FEAT_S2FWB)) in arm_vsmmu_alloc()
423 return ERR_PTR(-EOPNOTSUPP); in arm_vsmmu_alloc()
430 vsmmu->smmu = smmu; in arm_vsmmu_alloc()
431 vsmmu->s2_parent = s2_parent; in arm_vsmmu_alloc()
433 vsmmu->vmid = s2_parent->s2_cfg.vmid; in arm_vsmmu_alloc()
435 return &vsmmu->core; in arm_vsmmu_alloc()
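
The S2FWB comment above captures the coherence requirement for a vSMMU: either the device path is known to be coherent, or the SMMU must support S2FWB so stage 2 can force cacheable attributes and a guest stage-1 memattr or PCI No-Snoop cannot make DMA non-coherent. A minimal sketch of that gate as a predicate; the flag names are illustrative placeholders, not the driver's feature bits:

    #include <stdbool.h>

    /* Allow vSMMU creation only if coherence cannot be broken by the guest. */
    bool vsmmu_coherence_ok(bool dev_path_coherent, bool smmu_has_s2fwb)
    {
            return dev_path_coherent || smmu_has_s2fwb;
    }
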
443 lockdep_assert_held(&vmaster->vsmmu->smmu->streams_mutex); in arm_vmaster_report_event()
446 FIELD_PREP(EVTQ_0_SID, vmaster->vsid)); in arm_vmaster_report_event()
450 return iommufd_viommu_report_event(&vmaster->vsmmu->core, in arm_vmaster_report_event()