/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"
#include "ttm_bo_internal.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_device::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
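
/*
 * Example (illustrative sketch, not part of this file): a driver that has
 * just used @bo and wants to make it a less likely eviction candidate would
 * typically do, with the reservation already held:
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 */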

/**
 * ttm_bo_set_bulk_move - update the BO's bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BO's bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows many resources to be moved on the LRU
 * at once, resulting in much less overhead for maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
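
/*
 * Example (illustrative sketch, assuming a driver-side object that groups
 * BOs, e.g. a VM): all grouped BOs share one bulk move so their resources
 * can be bumped on the LRU together. After command submission the driver
 * moves the whole group to the LRU tail with a single call:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_set_bulk_move(bo, &bulk);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 */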

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_bo_populate(bo, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Called with the bo reserved.
 * Releases the GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * The bo::reserved lock is released by the caller.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it anymore. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences, block for the BO to become idle.
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(&bo->base._resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    bo->type == ttm_bo_type_sg ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

			/* Schedule the worker on the closest NUMA node. This
			 * improves performance since system memory might be
			 * cleared on free and that is best done on a CPU core
			 * close to it.
			 */
			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = 1;
	hop_placement.placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
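
/*
 * Example (illustrative sketch, not taken from a real driver): a driver can
 * wrap this default in its struct ttm_device_funcs to protect objects it
 * never wants evicted; mydrv_eviction_valuable, to_mydrv_bo() and the
 * is_scanout flag are hypothetical:
 *
 *	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
 *					    const struct ttm_place *place)
 *	{
 *		struct mydrv_bo *mbo = to_mydrv_bo(bo);
 *
 *		if (mbo->is_scanout)
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */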

/**
 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
 * @bdev: The ttm device.
 * @man: The manager whose bo to evict.
 * @ctx: The TTM operation ctx governing the eviction.
 *
 * Return: 0 if successful or the resource disappeared. Negative error code on error.
 */
int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_cursor cursor;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	unsigned int mem_type;
	int ret = 0;

	spin_lock(&bdev->lru_lock);
	ttm_resource_cursor_init(&cursor, man);
	res = ttm_resource_manager_first(&cursor);
	ttm_resource_cursor_fini(&cursor);
	if (!res) {
		ret = -ENOENT;
		goto out_no_ref;
	}
	bo = res->bo;
	if (!ttm_bo_get_unless_zero(bo))
		goto out_no_ref;
	mem_type = res->mem_type;
	spin_unlock(&bdev->lru_lock);
	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
	if (ret)
		goto out_no_lock;
	if (!bo->resource || bo->resource->mem_type != mem_type)
		goto out_bo_moved;

	if (bo->deleted) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (!ret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		ret = ttm_bo_evict(bo, ctx);
	}
out_bo_moved:
	dma_resv_unlock(bo->base.resv);
out_no_lock:
	ttm_bo_put(bo);
	return ret;

out_no_ref:
	spin_unlock(&bdev->lru_lock);
	return ret;
}

/**
 * struct ttm_bo_evict_walk - Parameters for the evict walk.
 */
struct ttm_bo_evict_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @place: The place passed to the resource allocation. */
	const struct ttm_place *place;
	/** @evictor: The buffer object we're trying to make room for. */
	struct ttm_buffer_object *evictor;
	/** @res: The allocated resource if any. */
	struct ttm_resource **res;
	/** @evicted: Number of successful evictions. */
	unsigned long evicted;

	/** @limit_pool: Which pool limit we should test against. */
	struct dmem_cgroup_pool_state *limit_pool;
	/** @try_low: Whether we should attempt to evict BOs with a low watermark threshold. */
	bool try_low;
	/** @hit_low: Set when we could not evict a BO while @try_low was false (first pass). */
	bool hit_low;
};

static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_bo_evict_walk *evict_walk =
		container_of(walk, typeof(*evict_walk), walk);
	s64 lret;

	if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
					      evict_walk->try_low, &evict_walk->hit_low))
		return 0;

	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
		return 0;

	if (bo->deleted) {
		lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
		if (!lret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		lret = ttm_bo_evict(bo, walk->arg.ctx);
	}

	if (lret)
		goto out;

	evict_walk->evicted++;
	if (evict_walk->res)
		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
					  evict_walk->res, NULL);
	if (lret == 0)
		return 1;
out:
	/* Errors that should terminate the walk. */
	if (lret == -ENOSPC)
		return -EBUSY;

	return lret;
}

static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
	.process_bo = ttm_bo_evict_cb,
};

static int ttm_bo_evict_alloc(struct ttm_device *bdev,
			      struct ttm_resource_manager *man,
			      const struct ttm_place *place,
			      struct ttm_buffer_object *evictor,
			      struct ttm_operation_ctx *ctx,
			      struct ww_acquire_ctx *ticket,
			      struct ttm_resource **res,
			      struct dmem_cgroup_pool_state *limit_pool)
{
	struct ttm_bo_evict_walk evict_walk = {
		.walk = {
			.ops = &ttm_evict_walk_ops,
			.arg = {
				.ctx = ctx,
				.ticket = ticket,
			}
		},
		.place = place,
		.evictor = evictor,
		.res = res,
		.limit_pool = limit_pool,
	};
	s64 lret;

	evict_walk.walk.arg.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);

	/* One more attempt if we hit the low limit? */
	if (!lret && evict_walk.hit_low) {
		evict_walk.try_low = true;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	}
	if (lret || !ticket)
		goto out;

	/* Reset low limit */
	evict_walk.try_low = evict_walk.hit_low = false;
	/* If ticket-locking, repeat while making progress. */
	evict_walk.walk.arg.trylock_only = false;

retry:
	do {
		/* The walk may clear the evict_walk.walk.ticket field */
		evict_walk.walk.arg.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);

	/* We hit the low limit? Try once more */
	if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
		evict_walk.try_low = true;
		goto retry;
	}
out:
	if (lret < 0)
		return lret;
	if (lret == 0)
		return -EBUSY;
	return 0;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	if (!bo->pin_count++ && bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (!--bo->pin_count && bo->resource) {
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
	}
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
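
/*
 * Example (illustrative sketch): pinning is done under the reservation lock
 * and must be balanced, so a driver mapping a BO for long-term CPU or
 * hardware access would typically do:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ttm_bo_pin(bo);
 *	ttm_bo_unreserve(bo);
 *
 *	...use the buffer, safe from eviction...
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ttm_bo_unpin(bo);
 *	ttm_bo_unreserve(bo);
 */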

/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/**
 * ttm_bo_alloc_resource - Allocate backing store for a BO
 *
 * @bo: Pointer to a struct ttm_buffer_object we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @ctx: if and how to sleep, lock buffers and alloc memory
 * @force_space: If we should evict buffers to force space
 * @res: The resulting struct ttm_resource.
 *
 * Allocates a resource for the buffer object pointed to by @bo, using the
 * placement flags in @placement, potentially evicting other buffer objects when
 * @force_space is true.
 * This function may sleep while waiting for resources to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == true).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_operation_ctx *ctx,
				 bool force_space,
				 struct ttm_resource **res)
{
	struct ttm_device *bdev = bo->bdev;
	struct ww_acquire_ctx *ticket;
	int i, ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct dmem_cgroup_pool_state *limit_pool = NULL;
		struct ttm_resource_manager *man;
		bool may_evict;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
		ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
		if (ret) {
			if (ret != -ENOSPC && ret != -EAGAIN) {
				dmem_cgroup_pool_state_put(limit_pool);
				return ret;
			}
			if (!may_evict) {
				dmem_cgroup_pool_state_put(limit_pool);
				continue;
			}

			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
						 ticket, res, limit_pool);
			dmem_cgroup_pool_state_put(limit_pool);
			if (ret == -EBUSY)
				continue;
			if (ret)
				return ret;
		}

		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, res);
			if (ret == -EBUSY)
				continue;

			return ret;
		}
		return 0;
	}

	return -ENOSPC;
}

/*
 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
 *
 * @bo: Pointer to a struct ttm_buffer_object we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @res: The resulting struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Tries both idle allocation and forceful eviction of buffers. See
 * ttm_bo_alloc_resource() for details.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **res,
		     struct ttm_operation_ctx *ctx)
{
	bool force_space = false;
	int ret;

	do {
		ret = ttm_bo_alloc_resource(bo, placement, ctx,
					    force_space, res);
		force_space = !force_space;
	} while (ret == -ENOSPC && force_space);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and the buffer is busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *res;
	struct ttm_place hop;
	bool force_space;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement)
		return ttm_bo_pipeline_gutting(bo);

	force_space = false;
	do {
		/* Check whether we need to move buffer. */
		if (bo->resource &&
		    ttm_resource_compatible(bo->resource, placement,
					    force_space))
			return 0;

		/* Moving of pinned BOs is forbidden */
		if (bo->pin_count)
			return -EINVAL;

		/*
		 * Determine where to move the buffer.
		 *
		 * If the driver determines that the move needs an extra step,
		 * it returns -EMULTIHOP; the buffer is then moved to the
		 * temporary stop and the driver is called again to make the
		 * second hop.
		 */
		ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
					    &res);
		force_space = !force_space;
		if (ret == -ENOSPC)
			continue;
		if (ret)
			return ret;

bounce:
		ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
		if (ret == -EMULTIHOP) {
			ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
			/* try and move to final place now. */
			if (!ret)
				goto bounce;
		}
		if (ret) {
			ttm_resource_free(bo, &res);
			return ret;
		}

	} while (ret && force_space);

	/* For backward compatibility with userspace */
	if (ret == -ENOSPC)
		return -ENOMEM;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
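
/*
 * Example (illustrative sketch): moving an already initialized BO into VRAM
 * under its reservation lock:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	ttm_bo_unreserve(bo);
 */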

/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref is
 * usually set to 1, but note that in some situations, other tasks may
 * already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
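
/*
 * Example (illustrative sketch; struct mydrv_bo and mydrv_bo_destroy are
 * hypothetical): embedding a ttm_buffer_object in a driver object and
 * initializing it. Note that bo->base.size must already be set up, e.g. by
 * initializing the embedded GEM object, before this call:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_place sys = { .mem_type = TTM_PL_SYSTEM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &sys,
 *	};
 *	struct mydrv_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *	int ret;
 *
 *	if (!mbo)
 *		return -ENOMEM;
 *
 *	...initialize mbo->bo.base (the GEM object) and its size...
 *
 *	ret = ttm_bo_init_reserved(bdev, &mbo->bo, ttm_bo_type_device,
 *				   &placement, 0, &ctx, NULL, NULL,
 *				   mydrv_bo_destroy);
 *	if (ret)
 *		return ret;
 *	ttm_bo_unreserve(&mbo->bo);
 *
 * On failure, @destroy has already freed mbo, so it must not be touched.
 */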

/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref is
 * usually set to 1, but note that in some situations, other tasks may
 * already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);
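
/*
 * Example (illustrative sketch, same hypothetical names as above): the
 * simpler entry point for callers that don't need the BO left reserved:
 *
 *	ret = ttm_bo_init_validate(bdev, &mbo->bo, ttm_bo_type_device,
 *				   &placement, 0, true, NULL, NULL,
 *				   mydrv_bo_destroy);
 */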

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. The timeout used depends on the context.
 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
 * signal or zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
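
/*
 * Example (illustrative sketch): the two common context configurations, a
 * blocking, signal-interruptible wait and a non-blocking poll that returns
 * -EBUSY while fences are outstanding:
 *
 *	struct ttm_operation_ctx block = { .interruptible = true };
 *	struct ttm_operation_ctx poll = { .no_wait_gpu = true };
 *
 *	ret = ttm_bo_wait_ctx(bo, &block);
 *	ret = ttm_bo_wait_ctx(bo, &poll);
 */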

/**
 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
 */
struct ttm_bo_swapout_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
	gfp_t gfp_flags;
	/** @hit_low: Whether we should attempt to swap out BOs with a low watermark threshold */
	/** @evict_low: Set if we could not swap out a BO on the first pass (@hit_low false) */
	bool hit_low, evict_low;
};

static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct ttm_bo_swapout_walk *swapout_walk =
		container_of(walk, typeof(*swapout_walk), walk);
	struct ttm_operation_ctx *ctx = walk->arg.ctx;
	s64 ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
		ret = -EBUSY;
		goto out;
	}

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
		ret = -EBUSY;
		goto out;
	}

	if (bo->deleted) {
		pgoff_t num_pages = bo->ttm->num_pages;

		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto out;

		ttm_bo_cleanup_memtype_use(bo);
		ret = num_pages;
		goto out;
	}

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
		if (ret)
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	ttm_bo_unmap_virtual(bo);
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm)) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);

		ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);

		spin_lock(&bo->bdev->lru_lock);
		if (ret)
			ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

out:
	/* Consider -ENOMEM and -ENOSPC non-fatal. */
	if (ret == -ENOMEM || ret == -ENOSPC)
		ret = -EBUSY;

	return ret;
}

const struct ttm_lru_walk_ops ttm_swap_ops = {
	.process_bo = ttm_bo_swapout_cb,
};

/**
 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
 * @bdev: The ttm device.
 * @ctx: The ttm_operation_ctx governing the swapout operation.
 * @man: The resource manager whose resources / buffer objects are
 * going to be swapped out.
 * @gfp_flags: The gfp flags used for shmem page allocations.
 * @target: The desired number of bytes to swap out.
 *
 * Return: The number of bytes actually swapped out, or negative error code
 * on error.
 */
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target)
{
	struct ttm_bo_swapout_walk swapout_walk = {
		.walk = {
			.ops = &ttm_swap_ops,
			.arg = {
				.ctx = ctx,
				.trylock_only = true,
			},
		},
		.gfp_flags = gfp_flags,
	};

	return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
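
/*
 * Example (illustrative sketch): a swapout-driving caller under memory
 * pressure, such as ttm_device_swapout(), asking TTM to push up to 1 MiB of
 * a manager's buffers out to shmem without blocking on GPU work:
 *
 *	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(bdev, TTM_PL_SYSTEM);
 *	s64 swapped;
 *
 *	swapped = ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, SZ_1M);
 */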

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}

/**
 * ttm_bo_populate() - Ensure that a buffer object has backing pages
 * @bo: The buffer object
 * @ctx: The ttm_operation_ctx governing the operation.
 *
 * For buffer objects in a memory type whose manager uses
 * struct ttm_tt for backing pages, ensure those backing pages
 * are present and with valid content. The bo's resource is also
 * placed on the correct LRU list if it was previously swapped
 * out.
 *
 * Return: 0 if successful, negative error code on failure.
 * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
 * is set to true.
 */
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *tt = bo->ttm;
	bool swapped;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (!tt)
		return 0;

	swapped = ttm_tt_is_swapped(tt);
	ret = ttm_tt_populate(bo->bdev, tt, ctx);
	if (ret)
		return ret;

	if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
	    bo->resource) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);

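/*
 * Example (illustrative sketch): a driver about to access a BO's pages from
 * the CPU, e.g. before building an sg table or mapping them, ensures the
 * backing pages exist while holding the reservation lock:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_populate(bo, &ctx);
 *	ttm_bo_unreserve(bo);
 */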