/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to look up and reserve an
 * object. This function must be called with struct ttm_device::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
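
/*
 * Usage sketch (illustrative only, not part of this file): after command
 * submission, a driver can bump a freshly used BO to the LRU tail so it is
 * considered for eviction last. The reservation lock must already be held.
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 */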

/**
 * ttm_bo_set_bulk_move - update the BO's bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BO's bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, greatly reducing the overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a BO.
 * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
 * their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
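
/*
 * Usage sketch (illustrative only): a driver can track all BOs belonging to
 * one VM in a single bulk move object, so the whole set is bumped on the LRU
 * with one call instead of one call per BO. "bulk" would typically live in a
 * driver-private VM structure.
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_set_bulk_move(bo, &bulk);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	... later, after command submission ...
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bo->bdev->lru_lock);
 */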

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Must be called with the BO reserved.
 * Releases GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it anymore. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/**
 * ttm_bo_cleanup_refs
 * If the bo is idle, remove it from the lru lists and unref it.
 * If it is not idle, block if possible.
 *
 * Must be called with the lru_lock and the reservation held; this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo: The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);

		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
					     interruptible,
					     30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&bo->bdev->lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
			/*
			 * We raced and lost: someone else holds the
			 * reservation now and is probably busy in
			 * ttm_bo_cleanup_memtype_use.
			 *
			 * Even if that's not the case, because we finished
			 * waiting any delayed destruction would succeed, so
			 * just return success here.
			 */
			spin_unlock(&bo->bdev->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret) {
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);
		return ret;
	}

	spin_unlock(&bo->bdev->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	return 0;
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort: if we fail to allocate memory for the
			 * fences, block for the BO to become idle
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(bo->base.resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

			/* Schedule the worker on the closest NUMA node. This
			 * improves performance since system memory might be
			 * cleared on free and that is best done on a CPU core
			 * close to it.
			 */
			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
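
/*
 * Usage sketch (illustrative only): the usual reference pattern around a
 * lookup; ttm_bo_get_unless_zero() fails once the BO is on its way to
 * destruction, and ttm_bo_put() drops the reference when done.
 *
 *	if (ttm_bo_get_unless_zero(bo)) {
 *		... use bo ...
 *		ttm_bo_put(bo);
 *	}
 */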

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_resource **mem,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
	hop_placement.placement = hop_placement.busy_placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
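
/*
 * Usage sketch (illustrative only): drivers either point their
 * ttm_device_funcs::eviction_valuable hook directly at this helper or wrap
 * it to veto eviction of special BOs first; "my_bo_needed_by_hw" is a
 * hypothetical driver predicate.
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_needed_by_hw(bo))
 *			return false;
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */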

/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * including the following cases:
 *
 * a. If it shares the same reservation object as ctx->resv, that reservation
 *    object is assumed to be locked already, so it is not locked again and
 *    true is returned directly when the operation allows reserved eviction;
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
					   struct ttm_operation_ctx *ctx,
					   const struct ttm_place *place,
					   bool *locked, bool *busy)
{
	bool ret = false;

	if (bo->pin_count) {
		*locked = false;
		if (busy)
			*busy = false;
		return false;
	}

	if (bo->base.resv == ctx->resv) {
		dma_resv_assert_held(bo->base.resv);
		if (ctx->allow_res_evict)
			ret = true;
		*locked = false;
		if (busy)
			*busy = false;
	} else {
		ret = dma_resv_trylock(bo->base.resv);
		*locked = ret;
		if (busy)
			*busy = !ret;
	}

	if (ret && place && (bo->resource->mem_type != place->mem_type ||
			     !bo->bdev->funcs->eviction_valuable(bo, place))) {
		ret = false;
		if (*locked) {
			dma_resv_unlock(bo->base.resv);
			*locked = false;
		}
	}

	return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
						ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

	/*
	 * TODO: It would be better to keep the BO locked until allocation is at
	 * least tried one more time, but that would mean a much larger rework
	 * of TTM.
	 */
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	return r == -EDEADLK ? -EBUSY : r;
}

int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	bool locked = false;
	int ret;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res) {
		bool busy;

		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
						    &locked, &busy)) {
			if (busy && !busy_bo && ticket !=
			    dma_resv_locking_ctx(res->bo->base.resv))
				busy_bo = res->bo;
			continue;
		}

		if (ttm_bo_get_unless_zero(res->bo)) {
			bo = res->bo;
			break;
		}
		if (locked)
			dma_resv_unlock(res->bo->base.resv);
	}

	if (!bo) {
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&bdev->lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&bdev->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);
	else
		ttm_bo_move_to_lru_tail_unlocked(bo);

	ttm_bo_put(bo);
	return ret;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	++bo->pin_count;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	--bo->pin_count;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
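
/*
 * Usage sketch (illustrative only): pinning a BO around a hardware access,
 * with the reservation lock held as both functions require.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	... program the hardware with the BO's address ...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */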

/*
 * Add the last move fence to the BO as a kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 struct ttm_resource *mem,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;
	struct ww_acquire_ctx *ticket;
	int ret;

	man = ttm_manager_type(bdev, place->mem_type);
	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_resource_alloc(bo, place, mem);
		if (likely(!ret))
			break;
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_resource_alloc(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	ret = -ENOMEM;
	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
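
/*
 * Usage sketch (illustrative only): allocating VRAM backing for a reserved
 * BO. The single-entry placement and the context values are assumptions
 * made for this example.
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_resource *mem;
 *	int ret;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &mem, &ctx);
 */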

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *mem;
	struct ttm_place hop;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Determine where to move the buffer.
	 *
	 * If the driver determines that the move needs an extra step, it
	 * returns -EMULTIHOP; the buffer is then moved to the temporary
	 * stop and the driver is called again to make the second hop.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		return ret;
bounce:
	ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
		if (ret)
			goto out;
		/* try and move to final place now. */
		goto bounce;
	}
out:
	if (ret)
		ttm_resource_free(bo, &mem);
	return ret;
}

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes the placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);

	/* Check whether we need to move the buffer. */
	if (bo->resource && ttm_resource_compat(bo->resource, placement))
		return 0;

	/* Moving of pinned BOs is forbidden */
	if (bo->pin_count)
		return -EINVAL;

	ret = ttm_bo_move_buffer(bo, placement, ctx);
	if (ret)
		return ret;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
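
/*
 * Usage sketch (illustrative only): moving a BO into the GTT domain; the
 * caller must hold the reservation lock across the call, and the placement
 * values are assumptions made for this example.
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_TT };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */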

/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
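
/*
 * Usage sketch (illustrative only): initializing a BO embedded in a larger
 * driver structure. Assumes bo->base.size was set up beforehand (e.g. by
 * drm_gem_object_init()); "my_bo_destroy" is a hypothetical driver callback
 * that frees the containing structure. On failure @destroy has already run,
 * so the BO must not be touched afterwards.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	ret = ttm_bo_init_reserved(bdev, bo, ttm_bo_type_device, &placement,
 *				   0, &ctx, NULL, NULL, my_bo_destroy);
 *	if (ret)
 *		return ret;
 *
 *	ttm_bo_unreserve(bo);
 */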

/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. The timeout used depends on the context.
 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
 * signal or zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
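
/*
 * Usage sketch (illustrative only): waiting for a BO to go idle before a
 * CPU access, returning -EBUSY instead of blocking when the caller asked
 * for a non-blocking path ("nonblock" is a hypothetical flag).
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = nonblock,
 *	};
 *
 *	ret = ttm_bo_wait_ctx(bo, &ctx);
 */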

int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags)
{
	struct ttm_place place;
	bool locked;
	long ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as the new placement to also cover the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	memset(&place, 0, sizeof(place));
	place.mem_type = bo->resource->mem_type;
	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
		return -EBUSY;

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
	    !ttm_bo_get_unless_zero(bo)) {
		if (locked)
			dma_resv_unlock(bo->base.resv);
		return -EBUSY;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret == -EBUSY ? -ENOSPC : ret;
	}

	/* TODO: Clean up the locking */
	spin_unlock(&bo->bdev->lru_lock);

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (unlikely(ret))
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (unlikely(ret != 0)) {
			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure the BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. The buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm))
		ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

	/*
	 * Unreserve without putting on the LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret == -EBUSY ? -ENOSPC : ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}