Lines Matching +full:io +full:- +full:backends
26 #include <linux/io-mapping.h>
27 #include <linux/iosys-map.h>
42 lockdep_assert_held(&cursor->man->bdev->lru_lock); in ttm_resource_cursor_clear_bulk()
44 cursor->bulk = NULL; in ttm_resource_cursor_clear_bulk()
45 list_del_init(&cursor->bulk_link); in ttm_resource_cursor_clear_bulk()
54 lockdep_assert_held(&cursor->man->bdev->lru_lock); in ttm_resource_cursor_move_bulk_tail()
56 if (WARN_ON_ONCE(bulk != cursor->bulk)) { in ttm_resource_cursor_move_bulk_tail()
57 list_del_init(&cursor->bulk_link); in ttm_resource_cursor_move_bulk_tail()
61 pos = &bulk->pos[cursor->mem_type][cursor->priority]; in ttm_resource_cursor_move_bulk_tail()
62 if (pos->last) in ttm_resource_cursor_move_bulk_tail()
63 list_move(&cursor->hitch.link, &pos->last->lru.link); in ttm_resource_cursor_move_bulk_tail()
72 list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link) in ttm_bulk_move_adjust_cursors()
81 list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link) in ttm_bulk_move_drop_cursors()
86 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
95 cursor->priority = 0; in ttm_resource_cursor_init()
96 cursor->man = man; in ttm_resource_cursor_init()
97 ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH); in ttm_resource_cursor_init()
98 INIT_LIST_HEAD(&cursor->bulk_link); in ttm_resource_cursor_init()
99 INIT_LIST_HEAD(&cursor->hitch.link); in ttm_resource_cursor_init()
103 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
112 lockdep_assert_held(&cursor->man->bdev->lru_lock); in ttm_resource_cursor_fini()
113 list_del_init(&cursor->hitch.link); in ttm_resource_cursor_fini()
118 * ttm_lru_bulk_move_init - initialize a bulk move structure
126 INIT_LIST_HEAD(&bulk->cursor_list); in ttm_lru_bulk_move_init()
131 * ttm_lru_bulk_move_fini - finalize a bulk move structure
141 spin_lock(&bdev->lru_lock); in ttm_lru_bulk_move_fini()
143 spin_unlock(&bdev->lru_lock); in ttm_lru_bulk_move_fini()
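For context, the usual lifecycle is that a driver embeds a struct ttm_lru_bulk_move (typically per VM), attaches buffers to it under their reservation lock, and tears it down once all buffers are detached again. A minimal sketch; the my_vm structure and surrounding names are hypothetical:

	struct my_vm {
		struct ttm_lru_bulk_move lru_bulk_move;	/* hypothetical per-VM embedding */
	};

	ttm_lru_bulk_move_init(&vm->lru_bulk_move);

	/* Attach a BO so its resources participate in the bulk move.
	 * Error handling for dma_resv_lock() is omitted here.
	 */
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
	dma_resv_unlock(bo->base.resv);

	/* ... later, detach all BOs with ttm_bo_set_bulk_move(bo, NULL), then: */
	ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);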
148 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
162 struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j]; in ttm_lru_bulk_move_tail()
165 if (!pos->first) in ttm_lru_bulk_move_tail()
168 lockdep_assert_held(&pos->first->bo->bdev->lru_lock); in ttm_lru_bulk_move_tail()
169 dma_resv_assert_held(pos->first->bo->base.resv); in ttm_lru_bulk_move_tail()
170 dma_resv_assert_held(pos->last->bo->base.resv); in ttm_lru_bulk_move_tail()
172 man = ttm_manager_type(pos->first->bo->bdev, i); in ttm_lru_bulk_move_tail()
173 list_bulk_move_tail(&man->lru[j], &pos->first->lru.link, in ttm_lru_bulk_move_tail()
174 &pos->last->lru.link); in ttm_lru_bulk_move_tail()
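After command submission a driver can bump every resource tracked by the bulk move in one step instead of walking individual BOs; roughly, with bdev and vm standing in for the driver's device and VM objects:

	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&bdev->lru_lock);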
184 return &bulk->pos[res->mem_type][res->bo->priority]; in ttm_lru_bulk_move_pos()
187 /* Return the previous resource on the list (skip over non-resource list items) */
190 struct ttm_lru_item *lru = &cur->lru; in ttm_lru_prev_res()
199 /* Return the next resource on the list (skip over non-resource list items) */
202 struct ttm_lru_item *lru = &cur->lru; in ttm_lru_next_res()
215 if (pos->last != res) { in ttm_lru_bulk_move_pos_tail()
216 if (pos->first == res) in ttm_lru_bulk_move_pos_tail()
217 pos->first = ttm_lru_next_res(res); in ttm_lru_bulk_move_pos_tail()
218 list_move(&res->lru.link, &pos->last->lru.link); in ttm_lru_bulk_move_pos_tail()
219 pos->last = res; in ttm_lru_bulk_move_pos_tail()
229 if (!pos->first) { in ttm_lru_bulk_move_add()
230 pos->first = res; in ttm_lru_bulk_move_add()
231 pos->last = res; in ttm_lru_bulk_move_add()
233 WARN_ON(pos->first->bo->base.resv != res->bo->base.resv); in ttm_lru_bulk_move_add()
244 if (unlikely(WARN_ON(!pos->first || !pos->last) || in ttm_lru_bulk_move_del()
245 (pos->first == res && pos->last == res))) { in ttm_lru_bulk_move_del()
246 pos->first = NULL; in ttm_lru_bulk_move_del()
247 pos->last = NULL; in ttm_lru_bulk_move_del()
248 } else if (pos->first == res) { in ttm_lru_bulk_move_del()
249 pos->first = ttm_lru_next_res(res); in ttm_lru_bulk_move_del()
250 } else if (pos->last == res) { in ttm_lru_bulk_move_del()
251 pos->last = ttm_lru_prev_res(res); in ttm_lru_bulk_move_del()
253 list_move(&res->lru.link, &pos->last->lru.link); in ttm_lru_bulk_move_del()
263 * swapped-out bo to VRAM. in ttm_resource_is_swapped()
265 if (bo->resource != res || !bo->ttm) in ttm_resource_is_swapped()
268 dma_resv_assert_held(bo->base.resv); in ttm_resource_is_swapped()
269 return ttm_tt_is_swapped(bo->ttm); in ttm_resource_is_swapped()
274 return bo->pin_count || ttm_resource_is_swapped(res, bo); in ttm_resource_unevictable()
281 if (bo->bulk_move && !ttm_resource_unevictable(res, bo)) in ttm_resource_add_bulk_move()
282 ttm_lru_bulk_move_add(bo->bulk_move, res); in ttm_resource_add_bulk_move()
289 if (bo->bulk_move && !ttm_resource_unevictable(res, bo)) in ttm_resource_del_bulk_move()
290 ttm_lru_bulk_move_del(bo->bulk_move, res); in ttm_resource_del_bulk_move()
296 struct ttm_buffer_object *bo = res->bo; in ttm_resource_move_to_lru_tail()
297 struct ttm_device *bdev = bo->bdev; in ttm_resource_move_to_lru_tail()
299 lockdep_assert_held(&bo->bdev->lru_lock); in ttm_resource_move_to_lru_tail()
302 list_move_tail(&res->lru.link, &bdev->unevictable); in ttm_resource_move_to_lru_tail()
304 } else if (bo->bulk_move) { in ttm_resource_move_to_lru_tail()
306 ttm_lru_bulk_move_pos(bo->bulk_move, res); in ttm_resource_move_to_lru_tail()
312 man = ttm_manager_type(bdev, res->mem_type); in ttm_resource_move_to_lru_tail()
313 list_move_tail(&res->lru.link, &man->lru[bo->priority]); in ttm_resource_move_to_lru_tail()
318 * ttm_resource_init - resource object constructor
331 res->start = 0; in ttm_resource_init()
332 res->size = bo->base.size; in ttm_resource_init()
333 res->mem_type = place->mem_type; in ttm_resource_init()
334 res->placement = place->flags; in ttm_resource_init()
335 res->bus.addr = NULL; in ttm_resource_init()
336 res->bus.offset = 0; in ttm_resource_init()
337 res->bus.is_iomem = false; in ttm_resource_init()
338 res->bus.caching = ttm_cached; in ttm_resource_init()
339 res->bo = bo; in ttm_resource_init()
341 man = ttm_manager_type(bo->bdev, place->mem_type); in ttm_resource_init()
342 spin_lock(&bo->bdev->lru_lock); in ttm_resource_init()
344 list_add_tail(&res->lru.link, &bo->bdev->unevictable); in ttm_resource_init()
346 list_add_tail(&res->lru.link, &man->lru[bo->priority]); in ttm_resource_init()
347 man->usage += res->size; in ttm_resource_init()
348 spin_unlock(&bo->bdev->lru_lock); in ttm_resource_init()
353 * ttm_resource_fini - resource destructor
357 * Should be used by resource manager backends to clean up the TTM resource
365 struct ttm_device *bdev = man->bdev; in ttm_resource_fini()
367 spin_lock(&bdev->lru_lock); in ttm_resource_fini()
368 list_del_init(&res->lru.link); in ttm_resource_fini()
369 man->usage -= res->size; in ttm_resource_fini()
370 spin_unlock(&bdev->lru_lock); in ttm_resource_fini()
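Taken together, a resource manager backend pairs ttm_resource_init() in its alloc() callback with ttm_resource_fini() in free(). A skeletal sketch; struct my_resource and the my_man_* names are illustrative only:

	struct my_resource {
		struct ttm_resource base;
		/* backend-private allocation state would live here */
	};

	static int my_man_alloc(struct ttm_resource_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource **res)
	{
		struct my_resource *my_res = kzalloc(sizeof(*my_res), GFP_KERNEL);

		if (!my_res)
			return -ENOMEM;

		ttm_resource_init(bo, place, &my_res->base);
		*res = &my_res->base;
		return 0;
	}

	static void my_man_free(struct ttm_resource_manager *man,
				struct ttm_resource *res)
	{
		ttm_resource_fini(man, res);
		kfree(container_of(res, struct my_resource, base));
	}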
380 ttm_manager_type(bo->bdev, place->mem_type); in ttm_resource_alloc()
384 if (man->cg) { in ttm_resource_alloc()
385 ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool); in ttm_resource_alloc()
390 ret = man->func->alloc(man, bo, place, res_ptr); in ttm_resource_alloc()
393 dmem_cgroup_uncharge(pool, bo->base.size); in ttm_resource_alloc()
397 (*res_ptr)->css = pool; in ttm_resource_alloc()
399 spin_lock(&bo->bdev->lru_lock); in ttm_resource_alloc()
401 spin_unlock(&bo->bdev->lru_lock); in ttm_resource_alloc()
414 spin_lock(&bo->bdev->lru_lock); in ttm_resource_free()
416 spin_unlock(&bo->bdev->lru_lock); in ttm_resource_free()
418 pool = (*res)->css; in ttm_resource_free()
419 man = ttm_manager_type(bo->bdev, (*res)->mem_type); in ttm_resource_free()
420 man->func->free(man, *res); in ttm_resource_free()
422 if (man->cg) in ttm_resource_free()
423 dmem_cgroup_uncharge(pool, bo->base.size); in ttm_resource_free()
428 * ttm_resource_intersects - test for intersection
450 man = ttm_manager_type(bdev, res->mem_type); in ttm_resource_intersects()
451 if (!place || !man->func->intersects) in ttm_resource_intersects()
454 return man->func->intersects(man, res, place, size); in ttm_resource_intersects()
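A typical caller asks whether an existing resource overlaps a candidate place before deciding to evict; a hedged sketch (note that the check above conservatively returns true when the backend provides no intersects() callback):

	/* Evict only if res actually overlaps the requested place. */
	if (ttm_resource_intersects(bdev, res, place, bo->base.size)) {
		/* ... queue res->bo for eviction ... */
	}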
458 * ttm_resource_compatible - check if resource is compatible with placement
470 struct ttm_buffer_object *bo = res->bo; in ttm_resource_compatible()
471 struct ttm_device *bdev = bo->bdev; in ttm_resource_compatible()
474 if (res->placement & TTM_PL_FLAG_TEMPORARY) in ttm_resource_compatible()
477 for (i = 0; i < placement->num_placement; i++) { in ttm_resource_compatible()
478 const struct ttm_place *place = &placement->placement[i]; in ttm_resource_compatible()
481 if (res->mem_type != place->mem_type) in ttm_resource_compatible()
484 if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED : in ttm_resource_compatible()
488 if (place->flags & TTM_PL_FLAG_CONTIGUOUS && in ttm_resource_compatible()
489 !(res->placement & TTM_PL_FLAG_CONTIGUOUS)) in ttm_resource_compatible()
492 man = ttm_manager_type(bdev, res->mem_type); in ttm_resource_compatible()
493 if (man->func->compatible && in ttm_resource_compatible()
494 !man->func->compatible(man, res, place, bo->base.size)) in ttm_resource_compatible()
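In practice this backs the "is a move needed at all?" check during validation; a minimal sketch:

	/* Nothing to do if the BO already sits in an acceptable placement. */
	if (ttm_resource_compatible(bo->resource, placement, false))
		return 0;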
505 spin_lock(&bo->bdev->lru_lock); in ttm_resource_set_bo()
506 res->bo = bo; in ttm_resource_set_bo()
507 spin_unlock(&bo->bdev->lru_lock); in ttm_resource_set_bo()
525 spin_lock_init(&man->move_lock); in ttm_resource_manager_init()
526 man->bdev = bdev; in ttm_resource_manager_init()
527 man->size = size; in ttm_resource_manager_init()
528 man->usage = 0; in ttm_resource_manager_init()
531 INIT_LIST_HEAD(&man->lru[i]); in ttm_resource_manager_init()
532 man->move = NULL; in ttm_resource_manager_init()
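A driver brings a manager online by initializing it, publishing it on the device, and marking it used. A sketch assuming a VRAM-style manager, where size is in the units the backend accounts in (bytes here, matching man->usage):

	ttm_resource_manager_init(man, bdev, vram_size);
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
	ttm_resource_manager_set_used(man, true);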
539 * @bdev: device to use
540 * @man: manager to use
561 spin_lock(&man->move_lock); in ttm_resource_manager_evict_all()
562 fence = dma_fence_get(man->move); in ttm_resource_manager_evict_all()
563 spin_unlock(&man->move_lock); in ttm_resource_manager_evict_all()
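The mirror-image teardown path disables the manager first so no new allocations race with the eviction pass; a sketch under the same VRAM-manager assumption as above:

	ttm_resource_manager_set_used(man, false);
	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;	/* resources still busy; abort teardown */
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, NULL);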
587 spin_lock(&man->bdev->lru_lock); in ttm_resource_manager_usage()
588 usage = man->usage; in ttm_resource_manager_usage()
589 spin_unlock(&man->bdev->lru_lock); in ttm_resource_manager_usage()
603 drm_printf(p, " use_type: %d\n", man->use_type); in ttm_resource_manager_debug()
604 drm_printf(p, " use_tt: %d\n", man->use_tt); in ttm_resource_manager_debug()
605 drm_printf(p, " size: %llu\n", man->size); in ttm_resource_manager_debug()
607 if (man->func->debug) in ttm_resource_manager_debug()
608 man->func->debug(man, p); in ttm_resource_manager_debug()
618 struct ttm_buffer_object *bo = next->bo; in ttm_resource_cursor_check_bulk()
620 lockdep_assert_held(&cursor->man->bdev->lru_lock); in ttm_resource_cursor_check_bulk()
621 bulk = bo->bulk_move; in ttm_resource_cursor_check_bulk()
623 if (cursor->bulk != bulk) { in ttm_resource_cursor_check_bulk()
625 list_move_tail(&cursor->bulk_link, &bulk->cursor_list); in ttm_resource_cursor_check_bulk()
626 cursor->mem_type = next->mem_type; in ttm_resource_cursor_check_bulk()
628 list_del_init(&cursor->bulk_link); in ttm_resource_cursor_check_bulk()
630 cursor->bulk = bulk; in ttm_resource_cursor_check_bulk()
635 * ttm_resource_manager_first() - Start iterating over the resources
647 struct ttm_resource_manager *man = cursor->man; in ttm_resource_manager_first()
652 lockdep_assert_held(&man->bdev->lru_lock); in ttm_resource_manager_first()
654 list_move(&cursor->hitch.link, &man->lru[cursor->priority]); in ttm_resource_manager_first()
659 * ttm_resource_manager_next() - Continue iterating over the resource manager
668 struct ttm_resource_manager *man = cursor->man; in ttm_resource_manager_next()
671 lockdep_assert_held(&man->bdev->lru_lock); in ttm_resource_manager_next()
674 lru = &cursor->hitch; in ttm_resource_manager_next()
675 list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) { in ttm_resource_manager_next()
678 list_move(&cursor->hitch.link, &lru->link); in ttm_resource_manager_next()
683 if (++cursor->priority >= TTM_MAX_BO_PRIORITY) in ttm_resource_manager_next()
686 list_move(&cursor->hitch.link, &man->lru[cursor->priority]); in ttm_resource_manager_next()
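Putting the cursor pieces together, an LRU walk under the lru_lock looks roughly like the following, assuming the cursor-based ttm_resource_manager_for_each_res() helper from ttm_resource.h:

	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	ttm_resource_cursor_init(&cursor, man);
	ttm_resource_manager_for_each_res(&cursor, res) {
		/* Inspect res; the hitch keeps the walk position stable
		 * even if the list mutates while the lock is dropped.
		 */
	}
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&bdev->lru_lock);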
694 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
721 while (i >= iter_io->cache.end) { in ttm_kmap_iter_iomap_map_local()
722 iter_io->cache.sg = iter_io->cache.sg ? in ttm_kmap_iter_iomap_map_local()
723 sg_next(iter_io->cache.sg) : iter_io->st->sgl; in ttm_kmap_iter_iomap_map_local()
724 iter_io->cache.i = iter_io->cache.end; in ttm_kmap_iter_iomap_map_local()
725 iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >> in ttm_kmap_iter_iomap_map_local()
727 iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) - in ttm_kmap_iter_iomap_map_local()
728 iter_io->start; in ttm_kmap_iter_iomap_map_local()
731 if (i < iter_io->cache.i) { in ttm_kmap_iter_iomap_map_local()
732 iter_io->cache.end = 0; in ttm_kmap_iter_iomap_map_local()
733 iter_io->cache.sg = NULL; in ttm_kmap_iter_iomap_map_local()
737 addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs + in ttm_kmap_iter_iomap_map_local()
738 (((resource_size_t)i - iter_io->cache.i) in ttm_kmap_iter_iomap_map_local()
746 io_mapping_unmap_local(map->vaddr_iomem); in ttm_kmap_iter_iomap_unmap_local()
756 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
761 * @start: Offset that should be subtracted from @st to make
762 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
772 iter_io->base.ops = &ttm_kmap_iter_io_ops; in ttm_kmap_iter_iomap_init()
773 iter_io->iomap = iomap; in ttm_kmap_iter_iomap_init()
774 iter_io->st = st; in ttm_kmap_iter_iomap_init()
775 iter_io->start = start; in ttm_kmap_iter_iomap_init()
776 memset(&iter_io->cache, 0, sizeof(iter_io->cache)); in ttm_kmap_iter_iomap_init()
778 return &iter_io->base; in ttm_kmap_iter_iomap_init()
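As a usage sketch, a driver-side copy helper could wrap an io_mapping plus sg_table like this; iomap, st and vram_start are placeholders, with vram_start chosen so that sg_dma_address(st->sgl) - vram_start == 0 at the start of the mapping:

	struct ttm_kmap_iter_iomap iter_io;
	struct ttm_kmap_iter *iter;

	iter = ttm_kmap_iter_iomap_init(&iter_io, iomap, st, vram_start);
	/* iter can now be handed to the memcpy move helpers. */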
783 * DOC: Linear io iterator
785 * This code should die in the not too near future. Best would be if we could
786 * make io-mapping use memremap for all io memory, and have memremap
787 * provide a kmap_local functionality. We could then strip a huge amount of
788 * code. These linear io iterators are implemented to mimic old functionality,
789 * and they don't use kmap_local semantics at all internally. Rather ioremap or
790 * friends, and at least on 32-bit they add global TLB flushes and points
791 * of failure.
801 *dmap = iter_io->dmap; in ttm_kmap_iter_linear_io_map_local()
811 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
816 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
817 * pointing at a linear chunk of io memory.
832 if (!mem->bus.is_iomem) { in ttm_kmap_iter_linear_io_init()
833 ret = -EINVAL; in ttm_kmap_iter_linear_io_init()
837 if (mem->bus.addr) { in ttm_kmap_iter_linear_io_init()
838 iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr); in ttm_kmap_iter_linear_io_init()
839 iter_io->needs_unmap = false; in ttm_kmap_iter_linear_io_init()
841 iter_io->needs_unmap = true; in ttm_kmap_iter_linear_io_init()
842 memset(&iter_io->dmap, 0, sizeof(iter_io->dmap)); in ttm_kmap_iter_linear_io_init()
843 if (mem->bus.caching == ttm_write_combined) in ttm_kmap_iter_linear_io_init()
844 iosys_map_set_vaddr_iomem(&iter_io->dmap, in ttm_kmap_iter_linear_io_init()
845 ioremap_wc(mem->bus.offset, in ttm_kmap_iter_linear_io_init()
846 mem->size)); in ttm_kmap_iter_linear_io_init()
847 else if (mem->bus.caching == ttm_cached) in ttm_kmap_iter_linear_io_init()
848 iosys_map_set_vaddr(&iter_io->dmap, in ttm_kmap_iter_linear_io_init()
849 memremap(mem->bus.offset, mem->size, in ttm_kmap_iter_linear_io_init()
855 if (iosys_map_is_null(&iter_io->dmap)) in ttm_kmap_iter_linear_io_init()
856 iosys_map_set_vaddr_iomem(&iter_io->dmap, in ttm_kmap_iter_linear_io_init()
857 ioremap(mem->bus.offset, in ttm_kmap_iter_linear_io_init()
858 mem->size)); in ttm_kmap_iter_linear_io_init()
860 if (iosys_map_is_null(&iter_io->dmap)) { in ttm_kmap_iter_linear_io_init()
861 ret = -ENOMEM; in ttm_kmap_iter_linear_io_init()
866 iter_io->base.ops = &ttm_kmap_iter_linear_io_ops; in ttm_kmap_iter_linear_io_init()
867 return &iter_io->base; in ttm_kmap_iter_linear_io_init()
876 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
889 if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) { in ttm_kmap_iter_linear_io_fini()
890 if (iter_io->dmap.is_iomem) in ttm_kmap_iter_linear_io_fini()
891 iounmap(iter_io->dmap.vaddr_iomem); in ttm_kmap_iter_linear_io_fini()
893 memunmap(iter_io->dmap.vaddr); in ttm_kmap_iter_linear_io_fini()
904 (struct ttm_resource_manager *)m->private; in ttm_resource_manager_show()
914 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
915 * resource manager.