Lines Matching +full:single +full:- +full:tt
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
48 if (mem->bus.offset || mem->bus.addr) in ttm_mem_io_reserve()
51 mem->bus.is_iomem = false; in ttm_mem_io_reserve()
52 if (!bdev->funcs->io_mem_reserve) in ttm_mem_io_reserve()
55 return bdev->funcs->io_mem_reserve(bdev, mem); in ttm_mem_io_reserve()
64 if (!mem->bus.offset && !mem->bus.addr) in ttm_mem_io_free()
67 if (bdev->funcs->io_mem_free) in ttm_mem_io_free()
68 bdev->funcs->io_mem_free(bdev, mem); in ttm_mem_io_free()
70 mem->bus.offset = 0; in ttm_mem_io_free()
71 mem->bus.addr = NULL; in ttm_mem_io_free()
75 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
82 * dma-fence if desired.
89 const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops; in ttm_move_memcpy()
90 const struct ttm_kmap_iter_ops *src_ops = src_iter->ops; in ttm_move_memcpy()
94 /* Single TTM move. NOP */ in ttm_move_memcpy()
95 if (dst_ops->maps_tt && src_ops->maps_tt) in ttm_move_memcpy()
101 dst_ops->map_local(dst_iter, &dst_map, i); in ttm_move_memcpy()
106 if (dst_ops->unmap_local) in ttm_move_memcpy()
107 dst_ops->unmap_local(dst_iter, &dst_map); in ttm_move_memcpy()
113 dst_ops->map_local(dst_iter, &dst_map, i); in ttm_move_memcpy()
114 src_ops->map_local(src_iter, &src_map, i); in ttm_move_memcpy()
118 if (src_ops->unmap_local) in ttm_move_memcpy()
119 src_ops->unmap_local(src_iter, &src_map); in ttm_move_memcpy()
120 if (dst_ops->unmap_local) in ttm_move_memcpy()
121 dst_ops->unmap_local(dst_iter, &dst_map); in ttm_move_memcpy()
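For context, here is a minimal sketch of driving ttm_move_memcpy() directly with the generic kmap iterators, much like ttm_bo_move_memcpy() (excerpted below) does internally. It assumes the source bo has a populated struct ttm_tt and that dst_mem is an io-mappable destination resource; it is an illustration, not code from this file.

        struct ttm_kmap_iter_linear_io io;
        struct ttm_kmap_iter_tt tt;
        struct ttm_kmap_iter *dst_iter, *src_iter;

        /* Destination iterator over the io-mapped target resource. */
        dst_iter = ttm_kmap_iter_linear_io_init(&io, bo->bdev, dst_mem);
        if (IS_ERR(dst_iter))
                return PTR_ERR(dst_iter);

        /* Source iterator over the populated ttm_tt pages; cannot fail. */
        src_iter = ttm_kmap_iter_tt_init(&tt, bo->ttm);

        ttm_move_memcpy(false, PFN_UP(dst_mem->size), dst_iter, src_iter);
        ttm_kmap_iter_linear_io_fini(&io, bo->bdev, dst_mem);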
135 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
136 * and update the (@bo)->mem placement flags. If unsuccessful, the old
146 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_memcpy()
148 ttm_manager_type(bo->bdev, dst_mem->mem_type); in ttm_bo_move_memcpy()
149 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_memcpy()
150 struct ttm_resource *src_mem = bo->resource; in ttm_bo_move_memcpy()
153 	struct ttm_kmap_iter_tt tt; in ttm_bo_move_memcpy()
161 return -EINVAL; in ttm_bo_move_memcpy()
163 src_man = ttm_manager_type(bdev, src_mem->mem_type); in ttm_bo_move_memcpy()
164 if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || in ttm_bo_move_memcpy()
165 dst_man->use_tt)) { in ttm_bo_move_memcpy()
172 if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt) in ttm_bo_move_memcpy()
173 dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm); in ttm_bo_move_memcpy()
178 if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt) in ttm_bo_move_memcpy()
179 src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm); in ttm_bo_move_memcpy()
185 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); in ttm_bo_move_memcpy()
186 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) in ttm_bo_move_memcpy()
187 ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter); in ttm_bo_move_memcpy()
189 if (!src_iter->ops->maps_tt) in ttm_bo_move_memcpy()
194 if (!dst_iter->ops->maps_tt) in ttm_bo_move_memcpy()
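In practice ttm_bo_move_memcpy() serves as the CPU fallback in a driver's ttm_device_funcs.move hook when no copy engine can handle the transfer. A hedged sketch (mydrv_bo_move is a hypothetical name, not from this file):

        static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                                 struct ttm_operation_ctx *ctx,
                                 struct ttm_resource *new_mem,
                                 struct ttm_place *hop)
        {
                /* No DMA engine for this transfer: let TTM copy with the CPU.
                 * On success the bo's resource has already been switched over. */
                return ttm_bo_move_memcpy(bo, ctx, new_mem);
        }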
206 dma_resv_fini(&fbo->base.base._resv); in ttm_transfered_destroy()
207 ttm_bo_put(fbo->bo); in ttm_transfered_destroy()
234 return -ENOMEM; in ttm_buffer_object_transfer()
236 fbo->base = *bo; in ttm_buffer_object_transfer()
244 drm_vma_node_reset(&fbo->base.base.vma_node); in ttm_buffer_object_transfer()
246 kref_init(&fbo->base.kref); in ttm_buffer_object_transfer()
247 fbo->base.destroy = &ttm_transfered_destroy; in ttm_buffer_object_transfer()
248 fbo->base.pin_count = 0; in ttm_buffer_object_transfer()
249 if (bo->type != ttm_bo_type_sg) in ttm_buffer_object_transfer()
250 fbo->base.base.resv = &fbo->base.base._resv; in ttm_buffer_object_transfer()
252 dma_resv_init(&fbo->base.base._resv); in ttm_buffer_object_transfer()
253 fbo->base.base.dev = NULL; in ttm_buffer_object_transfer()
254 ret = dma_resv_trylock(&fbo->base.base._resv); in ttm_buffer_object_transfer()
257 if (fbo->base.resource) { in ttm_buffer_object_transfer()
258 ttm_resource_set_bo(fbo->base.resource, &fbo->base); in ttm_buffer_object_transfer()
259 bo->resource = NULL; in ttm_buffer_object_transfer()
260 ttm_bo_set_bulk_move(&fbo->base, NULL); in ttm_buffer_object_transfer()
262 fbo->base.bulk_move = NULL; in ttm_buffer_object_transfer()
265 ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); in ttm_buffer_object_transfer()
272 fbo->bo = bo; in ttm_buffer_object_transfer()
274 ttm_bo_move_to_lru_tail_unlocked(&fbo->base); in ttm_buffer_object_transfer()
276 *new_obj = &fbo->base; in ttm_buffer_object_transfer()
296 man = ttm_manager_type(bo->bdev, res->mem_type); in ttm_io_prot()
297 caching = man->use_tt ? bo->ttm->caching : res->bus.caching; in ttm_io_prot()
308 struct ttm_resource *mem = bo->resource; in ttm_bo_ioremap()
310 if (bo->resource->bus.addr) { in ttm_bo_ioremap()
311 map->bo_kmap_type = ttm_bo_map_premapped; in ttm_bo_ioremap()
312 map->virtual = ((u8 *)bo->resource->bus.addr) + offset; in ttm_bo_ioremap()
314 resource_size_t res = bo->resource->bus.offset + offset; in ttm_bo_ioremap()
316 map->bo_kmap_type = ttm_bo_map_iomap; in ttm_bo_ioremap()
317 if (mem->bus.caching == ttm_write_combined) in ttm_bo_ioremap()
318 map->virtual = ioremap_wc(res, size); in ttm_bo_ioremap()
320 else if (mem->bus.caching == ttm_cached) in ttm_bo_ioremap()
321 map->virtual = ioremap_cache(res, size); in ttm_bo_ioremap()
324 map->virtual = ioremap(res, size); in ttm_bo_ioremap()
326 return (!map->virtual) ? -ENOMEM : 0; in ttm_bo_ioremap()
334 struct ttm_resource *mem = bo->resource; in ttm_bo_kmap_ttm()
339 struct ttm_tt *ttm = bo->ttm; in ttm_bo_kmap_ttm()
345 ret = ttm_tt_populate(bo->bdev, ttm, &ctx); in ttm_bo_kmap_ttm()
349 if (num_pages == 1 && ttm->caching == ttm_cached) { in ttm_bo_kmap_ttm()
351 * We're mapping a single page, and the desired in ttm_bo_kmap_ttm()
355 map->bo_kmap_type = ttm_bo_map_kmap; in ttm_bo_kmap_ttm()
356 map->page = ttm->pages[start_page]; in ttm_bo_kmap_ttm()
357 map->virtual = kmap(map->page); in ttm_bo_kmap_ttm()
364 map->bo_kmap_type = ttm_bo_map_vmap; in ttm_bo_kmap_ttm()
365 map->virtual = vmap(ttm->pages + start_page, num_pages, in ttm_bo_kmap_ttm()
368 return (!map->virtual) ? -ENOMEM : 0; in ttm_bo_kmap_ttm()
384 * -ENOMEM: Out of memory.
385 * -EINVAL: Invalid range.
394 map->virtual = NULL; in ttm_bo_kmap()
395 map->bo = bo; in ttm_bo_kmap()
396 if (num_pages > PFN_UP(bo->resource->size)) in ttm_bo_kmap()
397 return -EINVAL; in ttm_bo_kmap()
398 if ((start_page + num_pages) > PFN_UP(bo->resource->size)) in ttm_bo_kmap()
399 return -EINVAL; in ttm_bo_kmap()
401 ret = ttm_mem_io_reserve(bo->bdev, bo->resource); in ttm_bo_kmap()
404 if (!bo->resource->bus.is_iomem) { in ttm_bo_kmap()
423 if (!map->virtual) in ttm_bo_kunmap()
425 switch (map->bo_kmap_type) { in ttm_bo_kunmap()
427 iounmap(map->virtual); in ttm_bo_kunmap()
430 vunmap(map->virtual); in ttm_bo_kunmap()
433 kunmap(map->page); in ttm_bo_kunmap()
440 ttm_mem_io_free(map->bo->bdev, map->bo->resource); in ttm_bo_kunmap()
441 map->virtual = NULL; in ttm_bo_kunmap()
442 map->page = NULL; in ttm_bo_kunmap()
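A short usage sketch for the kmap/kunmap pair above, assuming the caller already holds the bo's reservation lock; the memset is only illustrative of CPU access through the returned mapping.

        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *vaddr;
        int ret;

        ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->resource->size), &map);
        if (ret)
                return ret;

        vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memset_io((void __iomem *)vaddr, 0, bo->resource->size);
        else
                memset(vaddr, 0, bo->resource->size);
        ttm_bo_kunmap(&map);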
457 * -ENOMEM: Out of memory.
458 * -EINVAL: Invalid range.
462 struct ttm_resource *mem = bo->resource; in ttm_bo_vmap()
465 dma_resv_assert_held(bo->base.resv); in ttm_bo_vmap()
467 ret = ttm_mem_io_reserve(bo->bdev, mem); in ttm_bo_vmap()
471 if (mem->bus.is_iomem) { in ttm_bo_vmap()
474 if (mem->bus.addr) in ttm_bo_vmap()
475 vaddr_iomem = (void __iomem *)mem->bus.addr; in ttm_bo_vmap()
476 else if (mem->bus.caching == ttm_write_combined) in ttm_bo_vmap()
477 vaddr_iomem = ioremap_wc(mem->bus.offset, in ttm_bo_vmap()
478 bo->base.size); in ttm_bo_vmap()
480 else if (mem->bus.caching == ttm_cached) in ttm_bo_vmap()
481 vaddr_iomem = ioremap_cache(mem->bus.offset, in ttm_bo_vmap()
482 bo->base.size); in ttm_bo_vmap()
485 vaddr_iomem = ioremap(mem->bus.offset, bo->base.size); in ttm_bo_vmap()
488 return -ENOMEM; in ttm_bo_vmap()
497 struct ttm_tt *ttm = bo->ttm; in ttm_bo_vmap()
501 ret = ttm_tt_populate(bo->bdev, ttm, &ctx); in ttm_bo_vmap()
510 vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot); in ttm_bo_vmap()
512 return -ENOMEM; in ttm_bo_vmap()
531 struct ttm_resource *mem = bo->resource; in ttm_bo_vunmap()
533 dma_resv_assert_held(bo->base.resv); in ttm_bo_vunmap()
538 if (!map->is_iomem) in ttm_bo_vunmap()
539 vunmap(map->vaddr); in ttm_bo_vunmap()
540 else if (!mem->bus.addr) in ttm_bo_vunmap()
541 iounmap(map->vaddr_iomem); in ttm_bo_vunmap()
544 ttm_mem_io_free(bo->bdev, bo->resource); in ttm_bo_vunmap()
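The vmap/vunmap pair maps the whole object into a struct iosys_map, so callers can use the iosys_map_*() helpers without caring whether the backing store is iomem or system memory. A sketch, assuming the reservation lock is held as asserted above:

        struct iosys_map map;
        int ret;

        ret = ttm_bo_vmap(bo, &map);
        if (ret)
                return ret;

        /* Clear the buffer; iosys_map_memset() picks the right accessor. */
        iosys_map_memset(&map, 0, 0, bo->base.size);
        ttm_bo_vunmap(bo, &map);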
553 ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, in ttm_bo_wait_free_node()
556 return -EBUSY; in ttm_bo_wait_free_node()
562 ttm_resource_free(bo, &bo->resource); in ttm_bo_wait_free_node()
585 dma_resv_add_fence(&ghost_obj->base._resv, fence, in ttm_bo_move_to_ghost()
595 ghost_obj->ttm = NULL; in ttm_bo_move_to_ghost()
597 bo->ttm = NULL; in ttm_bo_move_to_ghost()
599 dma_resv_unlock(&ghost_obj->base._resv); in ttm_bo_move_to_ghost()
607 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_pipeline_evict()
610 from = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_move_pipeline_evict()
616 spin_lock(&from->move_lock); in ttm_bo_move_pipeline_evict()
617 if (!from->move || dma_fence_is_later(fence, from->move)) { in ttm_bo_move_pipeline_evict()
618 dma_fence_put(from->move); in ttm_bo_move_pipeline_evict()
619 from->move = dma_fence_get(fence); in ttm_bo_move_pipeline_evict()
621 spin_unlock(&from->move_lock); in ttm_bo_move_pipeline_evict()
623 ttm_resource_free(bo, &bo->resource); in ttm_bo_move_pipeline_evict()
627 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
648 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_accel_cleanup()
649 struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_move_accel_cleanup()
650 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); in ttm_bo_move_accel_cleanup()
653 dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); in ttm_bo_move_accel_cleanup()
655 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); in ttm_bo_move_accel_cleanup()
656 else if (!from->use_tt && pipeline) in ttm_bo_move_accel_cleanup()
659 ret = ttm_bo_wait_free_node(bo, man->use_tt); in ttm_bo_move_accel_cleanup()
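On the driver side, ttm_bo_move_accel_cleanup() is normally called right after a hardware blit has been queued and has returned a fence. A sketch of that call site (fence, evict and new_mem come from the driver's move path and are assumptions here):

        ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
                                        true /* pipeline */, new_mem);
        dma_fence_put(fence);
        if (ret)
                return ret;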
671 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
682 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_sync_cleanup()
683 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); in ttm_bo_move_sync_cleanup()
686 ret = ttm_bo_wait_free_node(bo, man->use_tt); in ttm_bo_move_sync_cleanup()
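The synchronous variant fits when the driver has already waited for its copy to complete, so there is no fence left to track. A minimal sketch (mydrv_copy_and_wait() is hypothetical):

        ret = mydrv_copy_and_wait(bo, new_mem); /* hypothetical blocking blit */
        if (ret)
                return ret;
        ttm_bo_move_sync_cleanup(bo, new_mem);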
695 * ttm_bo_pipeline_gutting - purge the contents of a bo
712 if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) { in ttm_bo_pipeline_gutting()
713 if (!bo->ttm) { in ttm_bo_pipeline_gutting()
719 ttm_tt_unpopulate(bo->bdev, bo->ttm); in ttm_bo_pipeline_gutting()
720 if (bo->type == ttm_bo_type_device) in ttm_bo_pipeline_gutting()
721 ttm_tt_mark_for_clear(bo->ttm); in ttm_bo_pipeline_gutting()
723 ttm_resource_free(bo, &bo->resource); in ttm_bo_pipeline_gutting()
732 * to avoid leaking sensitive information to user-space. in ttm_bo_pipeline_gutting()
735 ttm = bo->ttm; in ttm_bo_pipeline_gutting()
736 bo->ttm = NULL; in ttm_bo_pipeline_gutting()
738 swap(bo->ttm, ttm); in ttm_bo_pipeline_gutting()
746 ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv); in ttm_bo_pipeline_gutting()
749 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, in ttm_bo_pipeline_gutting()
753 dma_resv_unlock(&ghost->base._resv); in ttm_bo_pipeline_gutting()
755 bo->ttm = ttm; in ttm_bo_pipeline_gutting()
759 ttm_tt_destroy(bo->bdev, ttm); in ttm_bo_pipeline_gutting()
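For reference, the caller side of ttm_bo_pipeline_gutting() in recent kernels is ttm_bo_validate(), where an empty placement list is interpreted as "drop the backing store". A sketch of that call site, under that assumption:

        /* In ttm_bo_validate(): no placement given, purge the bo's contents. */
        if (!placement->num_placement)
                return ttm_bo_pipeline_gutting(bo);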