1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2023 Intel Corporation.
4  */
5 #include <linux/vfio.h>
6 #include <linux/iommufd.h>
7 
8 #include "vfio.h"
9 
10 static dev_t device_devt;
11 
12 void vfio_init_device_cdev(struct vfio_device *device)
13 {
14 	device->device.devt = MKDEV(MAJOR(device_devt), device->index);
15 	cdev_init(&device->cdev, &vfio_device_fops);
16 	device->cdev.owner = THIS_MODULE;
17 }
18 
19 /*
20  * device access via the fd opened by this function is blocked until
21  * .open_device() is called successfully during BIND_IOMMUFD.
22  */
23 int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
24 {
25 	struct vfio_device *device = container_of(inode->i_cdev,
26 						  struct vfio_device, cdev);
27 	struct vfio_device_file *df;
28 	int ret;
29 
30 	/* Paired with the put in vfio_device_fops_release() */
31 	if (!vfio_device_try_get_registration(device))
32 		return -ENODEV;
33 
34 	df = vfio_allocate_device_file(device);
35 	if (IS_ERR(df)) {
36 		ret = PTR_ERR(df);
37 		goto err_put_registration;
38 	}
39 
40 	filep->private_data = df;
41 
42 	/*
43 	 * Use the pseudo fs inode on the device to link all mmaps
44 	 * to the same address space, allowing us to unmap all vmas
45 	 * associated to this device using unmap_mapping_range().
46 	 */
47 	filep->f_mapping = device->inode->i_mapping;
48 
49 	return 0;
50 
51 err_put_registration:
52 	vfio_device_put_registration(device);
53 	return ret;
54 }
55 
/*
 * Take a reference on the KVM currently associated with @df (if any)
 * and stash it in the device.  df->kvm_ref_lock serializes the read of
 * df->kvm against a concurrent update, so the reference is taken on a
 * stable pointer.
 */
static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
{
	spin_lock(&df->kvm_ref_lock);
	vfio_device_get_kvm_safe(df->device, df->kvm);
	spin_unlock(&df->kvm_ref_lock);
}
62 
/*
 * VFIO_DEVICE_BIND_IOMMUFD: bind a cdev fd to an iommufd context and
 * open the device.  On success the device's vfio_device_id is returned
 * in arg->out_devid and access through this fd is unblocked; any
 * failure fully unwinds so the fd remains in its pre-bind state.
 */
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_bind_iommufd bind;
	unsigned long minsz;
	int ret;

	/* out_devid is copied straight from df->devid below */
	static_assert(__same_type(arg->out_devid, df->devid));

	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);

	if (copy_from_user(&bind, arg, minsz))
		return -EFAULT;

	/* No flags are defined for this ioctl yet */
	if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
		return -EINVAL;

	/* BIND_IOMMUFD only allowed for cdev fds */
	if (df->group)
		return -EINVAL;

	/* cdev path and legacy group path are mutually exclusive */
	ret = vfio_device_block_group(device);
	if (ret)
		return ret;

	mutex_lock(&device->dev_set->lock);
	/* one device cannot be bound twice */
	if (df->access_granted) {
		ret = -EINVAL;
		goto out_unlock;
	}

	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
	if (IS_ERR(df->iommufd)) {
		ret = PTR_ERR(df->iommufd);
		df->iommufd = NULL;
		goto out_unlock;
	}

	/*
	 * Before the device open, get the KVM pointer currently
	 * associated with the device file (if there is) and obtain
	 * a reference.  This reference is held until device closed.
	 * Save the pointer in the device for use by drivers.
	 */
	vfio_df_get_kvm_safe(df);

	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;

	ret = copy_to_user(&arg->out_devid, &df->devid,
			   sizeof(df->devid)) ? -EFAULT : 0;
	if (ret)
		goto out_close_device;

	device->cdev_opened = true;
	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap
	 */
	smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);
	return 0;

	/* Error unwind in the reverse order of acquisition above */
out_close_device:
	vfio_df_close(df);
out_put_kvm:
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	df->iommufd = NULL;
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
	return ret;
}
140 
/*
 * Undo a successful vfio_df_ioctl_bind_iommufd(): close the device,
 * drop the KVM and iommufd references, and re-allow legacy group use.
 * A no-op if the fd never completed a bind.
 */
void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	/*
	 * In the time of close, there is no contention with another one
	 * changing this flag.  So read df->access_granted without lock
	 * and no smp_load_acquire() is ok.
	 */
	if (!df->access_granted)
		return;

	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	device->cdev_opened = false;
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
}
161 
/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT: attach the device (or, with
 * VFIO_DEVICE_ATTACH_PASID, one PASID of it) to the IOAS/HWPT named by
 * pt_id.  The possibly-updated pt_id is copied back to userspace; if
 * that copy fails the attach is rolled back.
 */
int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg)
{
	struct vfio_device_attach_iommufd_pt attach;
	struct vfio_device *device = df->device;
	unsigned long minsz, xend = 0;
	int ret;

	/* Base struct ends at pt_id; pasid is an optional extension */
	minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);

	if (copy_from_user(&attach, arg, minsz))
		return -EFAULT;

	if (attach.argsz < minsz)
		return -EINVAL;

	if (attach.flags & ~VFIO_DEVICE_ATTACH_PASID)
		return -EINVAL;

	if (attach.flags & VFIO_DEVICE_ATTACH_PASID) {
		if (!device->ops->pasid_attach_ioas)
			return -EOPNOTSUPP;
		xend = offsetofend(struct vfio_device_attach_iommufd_pt, pasid);
	}

	/* Pull in the extended fields the flags say userspace provided */
	if (xend) {
		if (attach.argsz < xend)
			return -EINVAL;

		if (copy_from_user((void *)&attach + minsz,
				   (void __user *)arg + minsz, xend - minsz))
			return -EFAULT;
	}

	mutex_lock(&device->dev_set->lock);
	if (attach.flags & VFIO_DEVICE_ATTACH_PASID)
		ret = device->ops->pasid_attach_ioas(device,
						     attach.pasid,
						     &attach.pt_id);
	else
		ret = device->ops->attach_ioas(device, &attach.pt_id);
	if (ret)
		goto out_unlock;

	/* Userspace never saw the result, so undo the attach */
	if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
		ret = -EFAULT;
		goto out_detach;
	}
	mutex_unlock(&device->dev_set->lock);

	return 0;

out_detach:
	device->ops->detach_ioas(device);
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	return ret;
}
220 
/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT: detach the device (or, with
 * VFIO_DEVICE_DETACH_PASID, one PASID of it) from its current
 * IOAS/HWPT.  The driver callbacks return void, so this cannot fail
 * past input validation.
 */
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg)
{
	struct vfio_device_detach_iommufd_pt detach;
	struct vfio_device *device = df->device;
	unsigned long minsz, xend = 0;

	/* Base struct ends at flags; pasid is an optional extension */
	minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);

	if (copy_from_user(&detach, arg, minsz))
		return -EFAULT;

	if (detach.argsz < minsz)
		return -EINVAL;

	if (detach.flags & ~VFIO_DEVICE_DETACH_PASID)
		return -EINVAL;

	if (detach.flags & VFIO_DEVICE_DETACH_PASID) {
		if (!device->ops->pasid_detach_ioas)
			return -EOPNOTSUPP;
		xend = offsetofend(struct vfio_device_detach_iommufd_pt, pasid);
	}

	/* Pull in the extended fields the flags say userspace provided */
	if (xend) {
		if (detach.argsz < xend)
			return -EINVAL;

		if (copy_from_user((void *)&detach + minsz,
				   (void __user *)arg + minsz, xend - minsz))
			return -EFAULT;
	}

	mutex_lock(&device->dev_set->lock);
	if (detach.flags & VFIO_DEVICE_DETACH_PASID)
		device->ops->pasid_detach_ioas(device, detach.pasid);
	else
		device->ops->detach_ioas(device);
	mutex_unlock(&device->dev_set->lock);

	return 0;
}
263 
/* Place vfio device nodes under /dev/vfio/devices/<dev_name> */
static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
}
268 
269 int vfio_cdev_init(struct class *device_class)
270 {
271 	device_class->devnode = vfio_device_devnode;
272 	return alloc_chrdev_region(&device_devt, 0,
273 				   MINORMASK + 1, "vfio-dev");
274 }
275 
/* Release the char dev region reserved by vfio_cdev_init() */
void vfio_cdev_cleanup(void)
{
	unregister_chrdev_region(device_devt, MINORMASK + 1);
}
280