// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"

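/*
 * Object destroy callback for IOMMUFD_OBJ_VIOMMU. By the time this runs, all
 * vdevices are gone (each one held a users reference on the vIOMMU), so the
 * vdevs xarray is empty. Give the driver a chance to free its state, then
 * drop the HWPT reference taken at allocation time.
 */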
void iommufd_viommu_destroy(struct iommufd_object *obj)
{
	struct iommufd_viommu *viommu =
		container_of(obj, struct iommufd_viommu, obj);

	if (viommu->ops && viommu->ops->destroy)
		viommu->ops->destroy(viommu);
	refcount_dec(&viommu->hwpt->common.obj.users);
	xa_destroy(&viommu->vdevs);
}

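/*
 * Handler for IOMMU_VIOMMU_ALLOC: create a driver-allocated vIOMMU object on
 * top of a nesting-parent HWPT, backed by the physical IOMMU instance that
 * serves the given device.
 *
 * A minimal userspace sketch (illustrative only; assumes an open /dev/iommu
 * fd, a bound device id, and a HWPT allocated with
 * IOMMU_HWPT_ALLOC_NEST_PARENT):
 *
 *	struct iommu_viommu_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
 *		.dev_id = dev_id,
 *		.hwpt_id = nest_parent_hwpt_id,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &cmd))
 *		viommu_id = cmd.out_viommu_id;
 */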
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_viommu_alloc *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	const struct iommu_ops *ops;
	int rc;

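	/*
	 * No allocation flags are defined yet, and a vIOMMU is inherently
	 * driver-specific, so the default type is rejected too.
	 */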
	if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
		return -EOPNOTSUPP;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	ops = dev_iommu_ops(idev->dev);
	if (!ops->viommu_alloc) {
		rc = -EOPNOTSUPP;
		goto out_put_idev;
	}

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging)) {
		rc = PTR_ERR(hwpt_paging);
		goto out_put_idev;
	}

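	/* A vIOMMU can only be built on top of a nesting-parent HWPT */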
	if (!hwpt_paging->nest_parent) {
		rc = -EINVAL;
		goto out_put_hwpt;
	}

	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);
	if (IS_ERR(viommu)) {
		rc = PTR_ERR(viommu);
		goto out_put_hwpt;
	}

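	/*
	 * Driver allocation succeeded; initialize the core-owned fields. The
	 * vIOMMU holds a users reference on the parent HWPT for its whole
	 * lifetime, dropped in iommufd_viommu_destroy().
	 */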
	xa_init(&viommu->vdevs);
	viommu->type = cmd->type;
	viommu->ictx = ucmd->ictx;
	viommu->hwpt = hwpt_paging;
	refcount_inc(&viommu->hwpt->common.obj.users);
	INIT_LIST_HEAD(&viommu->veventqs);
	init_rwsem(&viommu->veventqs_rwsem);
	/*
	 * In the common case a physical IOMMU is not hot-pluggable, so no
	 * reference is taken here. A hot-pluggable IOMMU instance (if one
	 * exists) is responsible for its own refcounting.
	 */
	viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);

	cmd->out_viommu_id = viommu->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &viommu->obj);
	goto out_put_hwpt;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

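/*
 * Object destroy callback for IOMMUFD_OBJ_VDEVICE: unlink the vdevice from
 * the vIOMMU's xarray and drop the references taken at allocation time.
 */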
void iommufd_vdevice_destroy(struct iommufd_object *obj)
{
	struct iommufd_vdevice *vdev =
		container_of(obj, struct iommufd_vdevice, obj);
	struct iommufd_viommu *viommu = vdev->viommu;

	/*
	 * xa_cmpxchg() may fail here: if the alloc path's xa_cmpxchg() failed,
	 * the vdevice was never stored and this runs via the abort path.
	 */
	xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
	refcount_dec(&viommu->obj.users);
	put_device(vdev->dev);
}

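/*
 * Handler for IOMMU_VDEVICE_ALLOC: bind a physical device to a vIOMMU under a
 * per-vIOMMU virtual ID (e.g. a virtual Stream ID on ARM SMMUv3).
 *
 * A minimal userspace sketch (illustrative only; the virt_id encoding is
 * vIOMMU-type specific):
 *
 *	struct iommu_vdevice_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.viommu_id = viommu_id,
 *		.dev_id = dev_id,
 *		.virt_id = virt_id,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &cmd))
 *		vdevice_id = cmd.out_vdevice_id;
 */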
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_vdevice_alloc *cmd = ucmd->cmd;
	struct iommufd_vdevice *vdev, *curr;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	u64 virt_id = cmd->virt_id;
	int rc = 0;

	/* virt_id indexes an xarray */
	if (virt_id > ULONG_MAX)
		return -EINVAL;

	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
	if (IS_ERR(viommu))
		return PTR_ERR(viommu);

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_put_viommu;
	}

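	/* The device must be behind the same physical IOMMU as the vIOMMU */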
	if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	vdev = iommufd_object_alloc(ucmd->ictx, vdev, IOMMUFD_OBJ_VDEVICE);
	if (IS_ERR(vdev)) {
		rc = PTR_ERR(vdev);
		goto out_put_idev;
	}

	vdev->id = virt_id;
	vdev->dev = idev->dev;
	get_device(idev->dev);
	vdev->viommu = viommu;
	refcount_inc(&viommu->obj.users);

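	/* Publish the vdevice; only one vdevice may exist per virt_id */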
	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
	if (curr) {
		rc = xa_err(curr) ?: -EEXIST;
		goto out_abort;
	}

	cmd->out_vdevice_id = vdev->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
	goto out_put_idev;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
out_put_viommu:
	iommufd_put_object(ucmd->ictx, &viommu->obj);
	return rc;
}