/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

MODULE_IMPORT_NS("DMA_BUF");

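/*
 * dma-buf get_uuid callback: wait until the host has answered the
 * ASSIGN_UUID request for this object, then hand back the UUID.
 * Fails with -ENODEV if no UUID could be assigned.
 */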
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

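/*
 * Map the exported buffer for @attach->dev: VRAM objects take the
 * driver's own mapping path, everything else goes through the generic
 * GEM helper.
 */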
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

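/* Undo virtgpu_gem_map_dma_buf(), again special-casing VRAM objects. */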
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

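/*
 * Exporter ops: the standard dma_buf_ops plus the virtio-specific
 * device_attach and get_uuid hooks used for cross-device sharing.
 */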
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

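/*
 * Ask the host to assign a UUID to @bo so that other virtio devices can
 * identify the resource.  The answer arrives asynchronously; it updates
 * bo->uuid_state and wakes waiters in virtgpu_virtio_get_uuid().
 */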
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

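/*
 * Export a virtio-gpu object as a dma-buf.  Non-blob resources need a
 * host-assigned UUID to be shareable; blob resources are shareable only
 * if they were created with VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE.  In all
 * other cases uuid_state is set to STATE_ERR and get_uuid() on the
 * exported buffer will fail with -ENODEV.
 */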
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	/*
	 * The dma-buf holds references on both the device and the object;
	 * drm_gem_dmabuf_release() drops them again.
	 */
	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

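/*
 * Map the attached dma-buf and translate the resulting DMA scatterlist
 * into an array of virtio_gpu_mem_entry for the host.  Any pending
 * kernel fences on the buffer are waited on first.  The caller must
 * hold the dma-buf's reservation lock.
 */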
int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
			       unsigned int *nents,
			       struct virtio_gpu_object *bo,
			       struct dma_buf_attachment *attach)
{
	struct scatterlist *sl;
	struct sg_table *sgt;
	long i, ret;

	dma_resv_assert_held(attach->dmabuf->resv);

	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret <= 0)
		return ret < 0 ? ret : -ETIMEDOUT;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	*ents = kvmalloc_array(sgt->nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	*nents = sgt->nents;
	for_each_sgtable_dma_sg(sgt, sl, i) {
		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
		(*ents)[i].padding = 0;
	}

	bo->sgt = sgt;
	return 0;
}

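/*
 * Detach the imported pages from the host resource (fenced, so the host
 * has stopped using them before the exporter takes them back) and drop
 * the cached attachment mapping.
 */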
static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
{
	struct dma_buf_attachment *attach = bo->base.base.import_attach;

	dma_resv_assert_held(attach->dmabuf->resv);

	if (bo->created) {
		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		bo->sgt = NULL;
	}
}

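/* GEM free callback for objects created by virtgpu_gem_prime_import(). */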
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct dma_buf_attachment *attach = obj->import_attach;

	if (attach) {
		struct dma_buf *dmabuf = attach->dmabuf;

		dma_resv_lock(dmabuf->resv, NULL);
		virtgpu_dma_buf_unmap(bo);
		dma_resv_unlock(dmabuf->resv);

		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
	}

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

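/*
 * Turn an attached dma-buf into a guest blob resource: pin the buffer,
 * import its sg_table and create a VIRTGPU_BLOB_MEM_GUEST resource
 * backed by those pages.  The pin is dropped once the create command is
 * queued; move_notify tells us if the exporter moves the buffer later.
 */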
static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
				    struct virtio_gpu_object *bo,
				    struct dma_buf_attachment *attach)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object_params params = { 0 };
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret) {
		virtgpu_dma_buf_free_obj(&bo->base.base);
		return ret;
	}

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret)
		goto err_pin;

	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
	if (ret)
		goto err_import;

	params.blob = true;
	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	params.size = attach->dmabuf->size;

	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
					    ents, nents);
	bo->guest_blob = true;

	dma_buf_unpin(attach);
	dma_resv_unlock(resv);

	return 0;

err_import:
	dma_buf_unpin(attach);
err_pin:
	dma_resv_unlock(resv);
	virtgpu_dma_buf_free_obj(&bo->base.base);
	return ret;
}

static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
	.free = virtgpu_dma_buf_free_obj,
};

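/*
 * Importer move_notify callback: the exporter is about to move the
 * buffer, so detach the pages from the host resource and drop our
 * mapping.
 */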
static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	virtgpu_dma_buf_unmap(bo);
}

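/* Dynamic importer: allow P2P and get notified when the buffer moves. */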
static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = virtgpu_dma_buf_move_notify
};

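/*
 * Import a dma-buf.  A buffer we exported ourselves just gets another
 * reference on its GEM object.  With blob resource support (and no
 * virgl 3D mode) foreign buffers are attached dynamically and wrapped
 * in a guest blob resource; otherwise fall back to the generic PRIME
 * import path.
 */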
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct dma_buf_attachment *attach;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
		return drm_gem_prime_import(dev, buf);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	obj = &bo->base.base;
	obj->resv = buf->resv;
	obj->funcs = &virtgpu_gem_dma_buf_funcs;
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_dynamic_attach(buf, dev->dev,
					&virtgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		kfree(bo);
		return ERR_CAST(attach);
	}

	/* Hold a reference on the dma-buf for the lifetime of the import. */
	obj->import_attach = attach;
	get_dma_buf(buf);

	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
	if (ret < 0)
		return ERR_PTR(ret);

	return obj;
}

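/*
 * Generic sg_table import is not supported: drm_gem_prime_import() lands
 * here when the blob import path above is not taken, and the import then
 * fails with -ENODEV.
 */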
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}