// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
{
	struct vmw_resource *res;

	WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);

	xa_destroy(&vbo->detached_resources);
	WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		WARN_ON(vbo != res->guest_memory_bo);
		WARN_ON(!res->guest_memory_bo);
		if (res->guest_memory_bo) {
			/* Reserve and switch the backing mob. */
			mutex_lock(&res->dev_priv->cmdbuf_mutex);
			(void)vmw_resource_reserve(res, false, true);
			vmw_resource_mob_detach(res);
			if (res->dirty)
				res->func->dirty_free(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			res->guest_memory_bo = NULL;
			res->guest_memory_offset = 0;
			vmw_resource_unreserve(res, true, false, false, NULL,
					       0);
			mutex_unlock(&res->dev_priv->cmdbuf_mutex);
		}
		vmw_surface_unreference(&vbo->dumb_surface);
	}
	drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_release(vbo);
	WARN_ON(vbo->dirty);
	kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer into a placement and pin it.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it in.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
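
/*
 * Example (illustrative sketch, not called anywhere in this file): a
 * typical caller pins a buffer before emitting commands that reference it
 * and unpins it once submission is done; "dev_priv" and "buf" are assumed
 * locals of the caller:
 *
 *	int ret = vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, true);
 *
 *	if (ret)
 *		return ret;
 *	... emit commands referencing buf ...
 *	vmw_bo_unpin(dev_priv, buf, false);
 */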

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to the start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}
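
/*
 * Example (sketch): with the buffer pinned or reserved, the guest pointer
 * can be copied into a command payload; a VRAM placement resolves to
 * SVGA_GMR_FRAMEBUFFER plus a byte offset, anything else to a GMR id with
 * offset zero. "buf" and the command layout are assumptions:
 *
 *	SVGAGuestPtr ptr;
 *
 *	vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
 *	cmd->body.guestResult = ptr;	// hypothetical command layout
 */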

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
}

void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
{
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;

	atomic_inc(&vbo->map_count);

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
			  ret, bo->base.size, size);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
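
/*
 * Example (sketch): each successful map increments map_count, so callers
 * balance vmw_bo_map_and_cache() with vmw_bo_unmap(), and the buffer must
 * stay pinned or reserved while the address is in use. "data" and "size"
 * are assumed caller state:
 *
 *	void *virt = vmw_bo_map_and_cache(vbo);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	memcpy(virt, data, size);
 *	vmw_bo_unmap(vbo);
 */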

/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
	int map_count;

	if (vbo->map.bo == NULL)
		return;

	map_count = atomic_dec_return(&vbo->map_count);

	if (!map_count) {
		ttm_bo_kunmap(&vbo->map);
		vbo->map.bo = NULL;
	}
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Return: Zero on success, negative error code on error.
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false,
		.resv = params->resv,
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;
	xa_init(&vmw_bo->detached_resources);
	atomic_set(&vmw_bo->map_count, 0);

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx,
				   params->sg, params->resv, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	if (!params->keep_resv)
		ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}

int vmw_bo_create(struct vmw_private *vmw,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init will delete the *p_bo object if it fails
	 */
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	(*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}
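
/*
 * Example (sketch of a typical call; the field values are illustrative
 * assumptions, not requirements):
 *
 *	struct vmw_bo_params params = {
 *		.domain = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type = ttm_bo_type_kernel,
 *		.size = PAGE_SIZE,
 *		.pin = true,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret = vmw_bo_create(dev_priv, &params, &vbo);
 */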

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file it was
 * performed through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_bo *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs)) {
			atomic_dec(&vmw_bo->cpu_writers);
		}
		vmw_user_bo_unref(&vmw_bo);
	}

	return ret;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	return drm_gem_handle_delete(file_priv, arg->handle);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_bo should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem)
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = to_vmw_bo(gobj);

	return 0;
}
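
/*
 * Example (sketch): a successful lookup takes a reference, so callers pair
 * it with vmw_user_bo_unref() when done, as vmw_user_bo_synccpu_ioctl()
 * above does:
 *
 *	struct vmw_bo *vbo;
 *	int ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
 *
 *	if (ret)
 *		return ret;
 *	... use vbo ...
 *	vmw_user_bo_unref(&vbo);
 */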

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
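
/*
 * Example (sketch): after emitting commands that touch a reserved buffer,
 * fence it before unreserving; passing a NULL fence makes the function
 * insert one itself:
 *
 *	vmw_bo_fence_single(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */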

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}

static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
{
	if (desired & fallback & domain)
		return 0;

	if (desired & domain)
		return TTM_PL_FLAG_DESIRED;

	return TTM_PL_FLAG_FALLBACK;
}

static u32
set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
	u32 domain = desired | fallback;
	u32 n = 0;

	/*
	 * The placements are ordered according to our preferences
	 */
	if (domain & VMW_BO_DOMAIN_MOB) {
		pl[n].mem_type = VMW_PL_MOB;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_GMR) {
		pl[n].mem_type = VMW_PL_GMR;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_VRAM) {
		pl[n].mem_type = TTM_PL_VRAM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
					      desired, fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_SYS) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}

	WARN_ON(!n);
	if (!n) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	return n;
}

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}
}
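
/*
 * Example (sketch): callers pick a desired domain and a busy (fallback)
 * domain and then validate, as vmw_bo_pin_in_vram_or_gmr() does above:
 *
 *	vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
 *			     VMW_BO_DOMAIN_GMR);
 *	ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
 */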

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
		domain = VMW_BO_DOMAIN_MOB;

	vmw_bo_placement_set(bo, domain, domain);
}

int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}

void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	xa_erase(&vbo->detached_resources, (unsigned long)res);
}

struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
{
	unsigned long index;
	struct vmw_resource *res = NULL;
	struct vmw_surface *surf = NULL;
	struct rb_node *rb_itr = vbo->res_tree.rb_node;

	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		goto out;
	}

	xa_for_each(&vbo->detached_resources, index, res) {
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

	for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
	     rb_itr = rb_next(rb_itr)) {
		res = rb_entry(rb_itr, struct vmw_resource, mob_node);
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

out:
	if (res)
		surf = vmw_res_to_srf(res);
	return surf;
}

s32 vmw_bo_mobid(struct vmw_bo *vbo)
{
	WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
	return (s32)vbo->tbo.resource->start;
}