Lines Matching +full:gfx +full:- +full:mem
29 #include <linux/dma-fence-array.h>
32 #include <linux/dma-buf.h>
68 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
89 #define START(node) ((node)->start)
90 #define LAST(node) ((node)->last)
99 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
115 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
130 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
145 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
148 if (vm->pasid) { in amdgpu_vm_set_pasid()
149 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
153 vm->pasid = 0; in amdgpu_vm_set_pasid()
157 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
162 vm->pasid = pasid; in amdgpu_vm_set_pasid()
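amdgpu_vm_set_pasid() keeps vm->pasid and the pasid lookup xarray in sync: it returns early when the pasid is unchanged, erases any old entry before installing the new one, and pasid == 0 simply removes the mapping. A minimal sketch of the same xarray pattern, with hypothetical names (demo_pasids, demo_set_pasid), not the driver code:

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(demo_pasids, XA_FLAGS_LOCK_IRQ);

/* Bind @obj to @pasid, or unbind it when @pasid == 0 (sketch). */
static int demo_set_pasid(void *obj, u32 *cur_pasid, u32 pasid)
{
	int r;

	if (*cur_pasid == pasid)
		return 0;

	if (*cur_pasid) {
		r = xa_err(xa_erase_irq(&demo_pasids, *cur_pasid));
		if (r < 0)
			return r;
		*cur_pasid = 0;
	}
	if (pasid) {
		r = xa_err(xa_store_irq(&demo_pasids, pasid, obj,
					GFP_KERNEL));
		if (r < 0)
			return r;
		*cur_pasid = pasid;
	}
	return 0;
}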
170 * amdgpu_vm_bo_evicted - vm_bo is evicted
179 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted()
180 struct amdgpu_bo *bo = vm_bo->bo; in amdgpu_vm_bo_evicted()
182 vm_bo->moved = true; in amdgpu_vm_bo_evicted()
183 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
184 if (bo->tbo.type == ttm_bo_type_kernel) in amdgpu_vm_bo_evicted()
185 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
187 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
188 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
191 * amdgpu_vm_bo_moved - vm_bo is moved
200 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
201 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
202 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
206 * amdgpu_vm_bo_idle - vm_bo is idle
215 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
216 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
217 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
218 vm_bo->moved = false; in amdgpu_vm_bo_idle()
222 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
231 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
232 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
233 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
237 * amdgpu_vm_bo_relocated - vm_bo is relocated
246 if (vm_bo->bo->parent) { in amdgpu_vm_bo_relocated()
247 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
248 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
249 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
256 * amdgpu_vm_bo_done - vm_bo is done
265 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
266 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
267 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
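The six helpers above (evicted, moved, idle, invalidated, relocated, done) implement the per-BO state machine: a vm_bo's state is simply the vm list its vm_status node is linked into, and almost every transition is a list_move under vm->status_lock. The shared shape, as a sketch:

/* Sketch of the pattern the helpers above share; the real helpers
 * additionally pick head vs. tail for kernel BOs and maintain the
 * "moved" flag. */
static void vm_bo_set_state(struct amdgpu_vm_bo_base *vm_bo,
			    struct list_head *state_list)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, state_list);
	spin_unlock(&vm_bo->vm->status_lock);
}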
271 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
281 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
282 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
283 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
284 vm_bo->moved = true; in amdgpu_vm_bo_reset_state_machine()
285 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
286 struct amdgpu_bo *bo = vm_bo->bo; in amdgpu_vm_bo_reset_state_machine()
288 vm_bo->moved = true; in amdgpu_vm_bo_reset_state_machine()
289 if (!bo || bo->tbo.type != ttm_bo_type_kernel) in amdgpu_vm_bo_reset_state_machine()
290 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
291 else if (bo->parent) in amdgpu_vm_bo_reset_state_machine()
292 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
294 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
298 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
310 base->vm = vm; in amdgpu_vm_bo_base_init()
311 base->bo = bo; in amdgpu_vm_bo_base_init()
312 base->next = NULL; in amdgpu_vm_bo_base_init()
313 INIT_LIST_HEAD(&base->vm_status); in amdgpu_vm_bo_base_init()
317 base->next = bo->vm_bo; in amdgpu_vm_bo_base_init()
318 bo->vm_bo = base; in amdgpu_vm_bo_base_init()
320 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
323 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
325 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
326 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) in amdgpu_vm_bo_base_init()
331 if (bo->preferred_domains & in amdgpu_vm_bo_base_init()
332 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)) in amdgpu_vm_bo_base_init()
344 * amdgpu_vm_lock_pd - lock PD in drm_exec
356 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
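amdgpu_vm_lock_pd() only calls drm_exec_prepare_obj() on the root PD, so it has to run inside a caller-driven drm_exec retry loop. A hedged sketch of that calling pattern (on kernels before the three-argument drm_exec_init(), drop the trailing 0):

struct drm_exec exec;
int r;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
	/* Reserves two fence slots on the PD reservation object. */
	r = amdgpu_vm_lock_pd(vm, &exec, 2);
	drm_exec_retry_on_contention(&exec);
	if (r)
		goto out;
}
/* ... PD is locked here; queue page table updates ... */
out:
	drm_exec_fini(&exec);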
361 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
372 spin_lock(&adev->mman.bdev.lru_lock); in amdgpu_vm_move_to_lru_tail()
373 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
374 spin_unlock(&adev->mman.bdev.lru_lock); in amdgpu_vm_move_to_lru_tail()
383 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
384 adev->vm_manager.vm_pte_scheds, in amdgpu_vm_init_entities()
385 adev->vm_manager.vm_pte_num_scheds, NULL); in amdgpu_vm_init_entities()
389 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
390 adev->vm_manager.vm_pte_scheds, in amdgpu_vm_init_entities()
391 adev->vm_manager.vm_pte_num_scheds, NULL); in amdgpu_vm_init_entities()
394 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
401 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
402 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
406 * amdgpu_vm_generation - return the page table re-generation counter
410 * Returns a page table re-generation token to allow checking if submissions
416 uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32; in amdgpu_vm_generation()
421 result += vm->generation; in amdgpu_vm_generation()
422 /* Add one if the page tables will be re-generated on next CS */ in amdgpu_vm_generation()
423 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
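The token therefore packs the VRAM-lost counter into the upper 32 bits and the per-VM generation into the lower half, plus one when the delayed entity carries an error (meaning the page tables will be rebuilt on the next CS). Callers snapshot it and treat any change as "start over"; a sketch:

uint64_t gen = amdgpu_vm_generation(adev, vm);

/* ... prepare a submission against the current page tables ... */

if (amdgpu_vm_generation(adev, vm) != gen)
	return -EAGAIN;	/* reset or re-generation raced with us */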
430 * amdgpu_vm_validate_pt_bos - validate the page table BOs
451 if (drm_sched_entity_error(&vm->delayed)) { in amdgpu_vm_validate_pt_bos()
452 ++vm->generation; in amdgpu_vm_validate_pt_bos()
460 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
461 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate_pt_bos()
462 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate_pt_bos()
465 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
467 bo = bo_base->bo; in amdgpu_vm_validate_pt_bos()
479 if (bo->tbo.type != ttm_bo_type_kernel) { in amdgpu_vm_validate_pt_bos()
482 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate_pt_bos()
485 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
487 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
490 vm->evicting = false; in amdgpu_vm_validate_pt_bos()
497 * amdgpu_vm_ready - check VM is ready for updates
512 ret = !vm->evicting; in amdgpu_vm_ready()
515 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
516 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
517 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
523 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
538 /* Compute has a VM bug for GFX version < 7. in amdgpu_vm_check_compute_bug()
539 Compute has a VM bug for GFX 8 MEC firmware version < 673. */ in amdgpu_vm_check_compute_bug()
540 if (ip_block->version->major <= 7) in amdgpu_vm_check_compute_bug()
542 else if (ip_block->version->major == 8) in amdgpu_vm_check_compute_bug()
543 if (adev->gfx.mec_fw_version < 673) in amdgpu_vm_check_compute_bug()
547 for (i = 0; i < adev->num_rings; i++) { in amdgpu_vm_check_compute_bug()
548 ring = adev->rings[i]; in amdgpu_vm_check_compute_bug()
549 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) in amdgpu_vm_check_compute_bug()
551 ring->has_compute_vm_bug = has_compute_vm_bug; in amdgpu_vm_check_compute_bug()
553 ring->has_compute_vm_bug = false; in amdgpu_vm_check_compute_bug()
558 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
569 struct amdgpu_device *adev = ring->adev; in amdgpu_vm_need_pipeline_sync()
570 unsigned vmhub = ring->vm_hub; in amdgpu_vm_need_pipeline_sync()
571 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vm_need_pipeline_sync()
573 if (job->vmid == 0) in amdgpu_vm_need_pipeline_sync()
576 if (job->vm_needs_flush || ring->has_compute_vm_bug) in amdgpu_vm_need_pipeline_sync()
579 if (ring->funcs->emit_gds_switch && job->gds_switch_needed) in amdgpu_vm_need_pipeline_sync()
582 if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid])) in amdgpu_vm_need_pipeline_sync()
589 * amdgpu_vm_flush - hardware flush the vm
603 struct amdgpu_device *adev = ring->adev; in amdgpu_vm_flush()
604 unsigned vmhub = ring->vm_hub; in amdgpu_vm_flush()
605 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vm_flush()
606 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; in amdgpu_vm_flush()
607 bool spm_update_needed = job->spm_update_needed; in amdgpu_vm_flush()
608 bool gds_switch_needed = ring->funcs->emit_gds_switch && in amdgpu_vm_flush()
609 job->gds_switch_needed; in amdgpu_vm_flush()
610 bool vm_flush_needed = job->vm_needs_flush; in amdgpu_vm_flush()
623 mutex_lock(&id_mgr->lock); in amdgpu_vm_flush()
624 if (id->pasid != job->pasid || !id->pasid_mapping || in amdgpu_vm_flush()
625 !dma_fence_is_signaled(id->pasid_mapping)) in amdgpu_vm_flush()
627 mutex_unlock(&id_mgr->lock); in amdgpu_vm_flush()
629 gds_switch_needed &= !!ring->funcs->emit_gds_switch; in amdgpu_vm_flush()
630 vm_flush_needed &= !!ring->funcs->emit_vm_flush && in amdgpu_vm_flush()
631 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; in amdgpu_vm_flush()
632 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && in amdgpu_vm_flush()
633 ring->funcs->emit_wreg; in amdgpu_vm_flush()
639 if (ring->funcs->init_cond_exec) in amdgpu_vm_flush()
646 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); in amdgpu_vm_flush()
647 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); in amdgpu_vm_flush()
651 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); in amdgpu_vm_flush()
653 if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid) in amdgpu_vm_flush()
654 adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); in amdgpu_vm_flush()
656 if (!ring->is_mes_queue && ring->funcs->emit_gds_switch && in amdgpu_vm_flush()
658 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, in amdgpu_vm_flush()
659 job->gds_size, job->gws_base, in amdgpu_vm_flush()
660 job->gws_size, job->oa_base, in amdgpu_vm_flush()
661 job->oa_size); in amdgpu_vm_flush()
671 mutex_lock(&id_mgr->lock); in amdgpu_vm_flush()
672 dma_fence_put(id->last_flush); in amdgpu_vm_flush()
673 id->last_flush = dma_fence_get(fence); in amdgpu_vm_flush()
674 id->current_gpu_reset_count = in amdgpu_vm_flush()
675 atomic_read(&adev->gpu_reset_counter); in amdgpu_vm_flush()
676 mutex_unlock(&id_mgr->lock); in amdgpu_vm_flush()
680 mutex_lock(&id_mgr->lock); in amdgpu_vm_flush()
681 id->pasid = job->pasid; in amdgpu_vm_flush()
682 dma_fence_put(id->pasid_mapping); in amdgpu_vm_flush()
683 id->pasid_mapping = dma_fence_get(fence); in amdgpu_vm_flush()
684 mutex_unlock(&id_mgr->lock); in amdgpu_vm_flush()
688 if (ring->funcs->patch_cond_exec) in amdgpu_vm_flush()
692 if (ring->funcs->emit_switch_buffer) { in amdgpu_vm_flush()
701 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
720 for (base = bo->vm_bo; base; base = base->next) { in amdgpu_vm_bo_find()
721 if (base->vm != vm) in amdgpu_vm_bo_find()
730 * amdgpu_vm_map_gart - Resolve gart mapping of addr
757 * amdgpu_vm_update_pdes - make sure that all directories are valid
777 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
778 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
779 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
785 return -ENODEV; in amdgpu_vm_update_pdes()
792 r = vm->update_funcs->prepare(¶ms, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_update_pdes()
798 flush_tlb_needed |= entry->moved; in amdgpu_vm_update_pdes()
805 r = vm->update_funcs->commit(¶ms, &vm->last_update); in amdgpu_vm_update_pdes()
810 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
824 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
836 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
841 * amdgpu_vm_update_range - update a range in the vm page table
878 return -ENODEV; in amdgpu_vm_update_range()
882 r = -ENOMEM; in amdgpu_vm_update_range()
887 * heavy-weight flush TLB unconditionally. in amdgpu_vm_update_range()
889 flush_tlb |= adev->gmc.xgmi.num_physical_nodes && in amdgpu_vm_update_range()
914 if (vm->evicting) { in amdgpu_vm_update_range()
915 r = -EBUSY; in amdgpu_vm_update_range()
919 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
922 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
923 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
927 r = vm->update_funcs->prepare(¶ms, resv, sync_mode); in amdgpu_vm_update_range()
932 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor); in amdgpu_vm_update_range()
953 pages_addr[idx - 1] + PAGE_SIZE)) in amdgpu_vm_update_range()
957 count--; in amdgpu_vm_update_range()
985 r = vm->update_funcs->commit(¶ms, fence); in amdgpu_vm_update_range()
988 tlb_cb->vm = vm; in amdgpu_vm_update_range()
990 !dma_fence_add_callback(*fence, &tlb_cb->cb, in amdgpu_vm_update_range()
992 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_update_range()
993 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_update_range()
995 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); in amdgpu_vm_update_range()
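The sequence counter is bumped from the fence callback once the update job completes; when dma_fence_add_callback() reports the fence already signaled (or no fence exists), the callback runs inline, as above. Consumers just compare snapshots via the amdgpu_vm_tlb_seq() inline from amdgpu_vm.h; a sketch:

uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);

/* ... page table updates may complete here ... */

if (amdgpu_vm_tlb_seq(vm) != tlb_seq) {
	/* Mappings changed since the snapshot: a VM flush is needed. */
}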
1012 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_get_memory()
1013 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_get_memory()
1022 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_get_memory()
1023 !dma_resv_trylock(bo->tbo.base.resv)) in amdgpu_vm_bo_get_memory()
1027 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_get_memory()
1028 dma_resv_unlock(bo->tbo.base.resv); in amdgpu_vm_bo_get_memory()
1036 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1037 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) in amdgpu_vm_get_memory()
1040 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) in amdgpu_vm_get_memory()
1043 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) in amdgpu_vm_get_memory()
1046 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) in amdgpu_vm_get_memory()
1049 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) in amdgpu_vm_get_memory()
1052 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) in amdgpu_vm_get_memory()
1054 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1058 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1067 * 0 for success, -EINVAL for failure.
1072 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_update()
1073 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update()
1076 struct ttm_resource *mem; in amdgpu_vm_bo_update() local
1086 mem = NULL; in amdgpu_vm_bo_update()
1087 resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_bo_update()
1089 struct drm_gem_object *obj = &bo->tbo.base; in amdgpu_vm_bo_update()
1091 resv = bo->tbo.base.resv; in amdgpu_vm_bo_update()
1092 if (obj->import_attach && bo_va->is_xgmi) { in amdgpu_vm_bo_update()
1093 struct dma_buf *dma_buf = obj->import_attach->dmabuf; in amdgpu_vm_bo_update()
1094 struct drm_gem_object *gobj = dma_buf->priv; in amdgpu_vm_bo_update()
1097 if (abo->tbo.resource && in amdgpu_vm_bo_update()
1098 abo->tbo.resource->mem_type == TTM_PL_VRAM) in amdgpu_vm_bo_update()
1101 mem = bo->tbo.resource; in amdgpu_vm_bo_update()
1102 if (mem && (mem->mem_type == TTM_PL_TT || in amdgpu_vm_bo_update()
1103 mem->mem_type == AMDGPU_PL_PREEMPT)) in amdgpu_vm_bo_update()
1104 pages_addr = bo->tbo.ttm->dma_address; in amdgpu_vm_bo_update()
1110 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); in amdgpu_vm_bo_update()
1115 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_vm_bo_update()
1116 vram_base = bo_adev->vm_manager.vram_base_offset; in amdgpu_vm_bo_update()
1117 uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0; in amdgpu_vm_bo_update()
1124 if (clear || (bo && bo->tbo.base.resv == in amdgpu_vm_bo_update()
1125 vm->root.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1126 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1128 last_update = &bo_va->last_pt_update; in amdgpu_vm_bo_update()
1130 if (!clear && bo_va->base.moved) { in amdgpu_vm_bo_update()
1132 list_splice_init(&bo_va->valids, &bo_va->invalids); in amdgpu_vm_bo_update()
1134 } else if (bo_va->cleared != clear) { in amdgpu_vm_bo_update()
1135 list_splice_init(&bo_va->valids, &bo_va->invalids); in amdgpu_vm_bo_update()
1138 list_for_each_entry(mapping, &bo_va->invalids, list) { in amdgpu_vm_bo_update()
1141 /* normally, bo_va->flags only contains the READABLE and WRITEABLE bits; go here in amdgpu_vm_bo_update()
1144 if (!(mapping->flags & AMDGPU_PTE_READABLE)) in amdgpu_vm_bo_update()
1146 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) in amdgpu_vm_bo_update()
1155 !uncached, resv, mapping->start, mapping->last, in amdgpu_vm_bo_update()
1156 update_flags, mapping->offset, in amdgpu_vm_bo_update()
1157 vram_base, mem, pages_addr, in amdgpu_vm_bo_update()
1167 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
1168 uint32_t mem_type = bo->tbo.resource->mem_type; in amdgpu_vm_bo_update()
1170 if (!(bo->preferred_domains & in amdgpu_vm_bo_update()
1172 amdgpu_vm_bo_evicted(&bo_va->base); in amdgpu_vm_bo_update()
1174 amdgpu_vm_bo_idle(&bo_va->base); in amdgpu_vm_bo_update()
1176 amdgpu_vm_bo_done(&bo_va->base); in amdgpu_vm_bo_update()
1179 list_splice_init(&bo_va->invalids, &bo_va->valids); in amdgpu_vm_bo_update()
1180 bo_va->cleared = clear; in amdgpu_vm_bo_update()
1181 bo_va->base.moved = false; in amdgpu_vm_bo_update()
1184 list_for_each_entry(mapping, &bo_va->valids, list) in amdgpu_vm_bo_update()
1192 * amdgpu_vm_update_prt_state - update the global PRT state
1201 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); in amdgpu_vm_update_prt_state()
1202 enable = !!atomic_read(&adev->vm_manager.num_prt_users); in amdgpu_vm_update_prt_state()
1203 adev->gmc.gmc_funcs->set_prt(adev, enable); in amdgpu_vm_update_prt_state()
1204 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); in amdgpu_vm_update_prt_state()
1208 * amdgpu_vm_prt_get - add a PRT user
1214 if (!adev->gmc.gmc_funcs->set_prt) in amdgpu_vm_prt_get()
1217 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) in amdgpu_vm_prt_get()
1222 * amdgpu_vm_prt_put - drop a PRT user
1228 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) in amdgpu_vm_prt_put()
1233 * amdgpu_vm_prt_cb - callback for updating the PRT status
1242 amdgpu_vm_prt_put(cb->adev); in amdgpu_vm_prt_cb()
1247 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1257 if (!adev->gmc.gmc_funcs->set_prt) in amdgpu_vm_add_prt_cb()
1268 cb->adev = adev; in amdgpu_vm_add_prt_cb()
1269 if (!fence || dma_fence_add_callback(fence, &cb->cb, in amdgpu_vm_add_prt_cb()
1271 amdgpu_vm_prt_cb(fence, &cb->cb); in amdgpu_vm_add_prt_cb()
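PRT is reference counted: set_prt() is driven by whether num_prt_users is non-zero (read under prt_lock so hardware updates stay ordered), and the final put is deferred behind the fence protecting the last PRT mapping. The idiom at the end, generically (demo_cb is a hypothetical callback with the dma_fence_func_t signature):

/* If the fence is NULL or already signaled, dma_fence_add_callback()
 * returns an error and we run the callback inline instead. */
if (!fence || dma_fence_add_callback(fence, &cb->cb, demo_cb))
	demo_cb(fence, &cb->cb);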
1276 * amdgpu_vm_free_mapping - free a mapping
1290 if (mapping->flags & AMDGPU_PTE_PRT) in amdgpu_vm_free_mapping()
1296 * amdgpu_vm_prt_fini - finish all prt mappings
1305 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1317 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1335 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_clear_freed()
1341 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1342 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1344 list_del(&mapping->list); in amdgpu_vm_clear_freed()
1346 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
1347 mapping->start < AMDGPU_GMC_HOLE_START) in amdgpu_vm_clear_freed()
1351 resv, mapping->start, mapping->last, in amdgpu_vm_clear_freed()
1373 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1395 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1396 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1397 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1399 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1405 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1408 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1409 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1411 resv = bo_va->base.bo->tbo.base.resv; in amdgpu_vm_handle_moved()
1412 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1415 if (!adev->debug_vm && dma_resv_trylock(resv)) { in amdgpu_vm_handle_moved()
1434 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1436 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1442 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1463 WARN_ON_ONCE(!vm->is_compute_context); in amdgpu_vm_flush_compute_tlb()
1470 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) in amdgpu_vm_flush_compute_tlb()
1473 if (adev->family == AMDGPU_FAMILY_AI || in amdgpu_vm_flush_compute_tlb()
1474 adev->family == AMDGPU_FAMILY_RV) in amdgpu_vm_flush_compute_tlb()
1478 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, in amdgpu_vm_flush_compute_tlb()
1487 * amdgpu_vm_bo_add - add a bo to a specific vm
1511 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1513 bo_va->ref_count = 1; in amdgpu_vm_bo_add()
1514 bo_va->last_pt_update = dma_fence_get_stub(); in amdgpu_vm_bo_add()
1515 INIT_LIST_HEAD(&bo_va->valids); in amdgpu_vm_bo_add()
1516 INIT_LIST_HEAD(&bo_va->invalids); in amdgpu_vm_bo_add()
1521 dma_resv_assert_held(bo->tbo.base.resv); in amdgpu_vm_bo_add()
1523 bo_va->is_xgmi = true; in amdgpu_vm_bo_add()
1533 * amdgpu_vm_bo_insert_map - insert a new mapping
1545 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map()
1546 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_insert_map()
1548 mapping->bo_va = bo_va; in amdgpu_vm_bo_insert_map()
1549 list_add(&mapping->list, &bo_va->invalids); in amdgpu_vm_bo_insert_map()
1550 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1552 if (mapping->flags & AMDGPU_PTE_PRT) in amdgpu_vm_bo_insert_map()
1555 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
1556 !bo_va->base.moved) { in amdgpu_vm_bo_insert_map()
1557 amdgpu_vm_bo_moved(&bo_va->base); in amdgpu_vm_bo_insert_map()
1563 * amdgpu_vm_bo_map - map bo inside a vm
1585 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_map()
1586 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map()
1591 return -EINVAL; in amdgpu_vm_bo_map()
1593 return -EINVAL; in amdgpu_vm_bo_map()
1596 eaddr = saddr + size - 1; in amdgpu_vm_bo_map()
1598 (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) in amdgpu_vm_bo_map()
1599 return -EINVAL; in amdgpu_vm_bo_map()
1604 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1607 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " in amdgpu_vm_bo_map()
1608 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, in amdgpu_vm_bo_map()
1609 tmp->start, tmp->last + 1); in amdgpu_vm_bo_map()
1610 return -EINVAL; in amdgpu_vm_bo_map()
1615 return -ENOMEM; in amdgpu_vm_bo_map()
1617 mapping->start = saddr; in amdgpu_vm_bo_map()
1618 mapping->last = eaddr; in amdgpu_vm_bo_map()
1619 mapping->offset = offset; in amdgpu_vm_bo_map()
1620 mapping->flags = flags; in amdgpu_vm_bo_map()
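amdgpu_vm_bo_map() rejects unaligned or out-of-range requests (the range must fit inside the BO and below max_pfn) and any overlap with an existing mapping in the interval tree, then fills in the new mapping and hands it to amdgpu_vm_bo_insert_map(). A typical call, as a sketch with illustrative values:

/* Map the whole BO read/write at GPU VA 0x400000 (sketch; address,
 * size and offset must be GPU-page aligned and in range). */
r = amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0, amdgpu_bo_size(bo),
		     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
if (r)
	return r;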
1628 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1651 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_replace_map()
1657 return -EINVAL; in amdgpu_vm_bo_replace_map()
1659 return -EINVAL; in amdgpu_vm_bo_replace_map()
1662 eaddr = saddr + size - 1; in amdgpu_vm_bo_replace_map()
1664 (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) in amdgpu_vm_bo_replace_map()
1665 return -EINVAL; in amdgpu_vm_bo_replace_map()
1670 return -ENOMEM; in amdgpu_vm_bo_replace_map()
1672 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1681 mapping->start = saddr; in amdgpu_vm_bo_replace_map()
1682 mapping->last = eaddr; in amdgpu_vm_bo_replace_map()
1683 mapping->offset = offset; in amdgpu_vm_bo_replace_map()
1684 mapping->flags = flags; in amdgpu_vm_bo_replace_map()
1692 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1710 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap()
1715 list_for_each_entry(mapping, &bo_va->valids, list) { in amdgpu_vm_bo_unmap()
1716 if (mapping->start == saddr) in amdgpu_vm_bo_unmap()
1720 if (&mapping->list == &bo_va->valids) { in amdgpu_vm_bo_unmap()
1723 list_for_each_entry(mapping, &bo_va->invalids, list) { in amdgpu_vm_bo_unmap()
1724 if (mapping->start == saddr) in amdgpu_vm_bo_unmap()
1728 if (&mapping->list == &bo_va->invalids) in amdgpu_vm_bo_unmap()
1729 return -ENOENT; in amdgpu_vm_bo_unmap()
1732 list_del(&mapping->list); in amdgpu_vm_bo_unmap()
1733 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1734 mapping->bo_va = NULL; in amdgpu_vm_bo_unmap()
1738 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1741 bo_va->last_pt_update); in amdgpu_vm_bo_unmap()
1747 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1767 eaddr = saddr + size - 1; in amdgpu_vm_bo_clear_mappings()
1774 return -ENOMEM; in amdgpu_vm_bo_clear_mappings()
1775 INIT_LIST_HEAD(&before->list); in amdgpu_vm_bo_clear_mappings()
1780 return -ENOMEM; in amdgpu_vm_bo_clear_mappings()
1782 INIT_LIST_HEAD(&after->list); in amdgpu_vm_bo_clear_mappings()
1785 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
1788 if (tmp->start < saddr) { in amdgpu_vm_bo_clear_mappings()
1789 before->start = tmp->start; in amdgpu_vm_bo_clear_mappings()
1790 before->last = saddr - 1; in amdgpu_vm_bo_clear_mappings()
1791 before->offset = tmp->offset; in amdgpu_vm_bo_clear_mappings()
1792 before->flags = tmp->flags; in amdgpu_vm_bo_clear_mappings()
1793 before->bo_va = tmp->bo_va; in amdgpu_vm_bo_clear_mappings()
1794 list_add(&before->list, &tmp->bo_va->invalids); in amdgpu_vm_bo_clear_mappings()
1798 if (tmp->last > eaddr) { in amdgpu_vm_bo_clear_mappings()
1799 after->start = eaddr + 1; in amdgpu_vm_bo_clear_mappings()
1800 after->last = tmp->last; in amdgpu_vm_bo_clear_mappings()
1801 after->offset = tmp->offset; in amdgpu_vm_bo_clear_mappings()
1802 after->offset += (after->start - tmp->start) << PAGE_SHIFT; in amdgpu_vm_bo_clear_mappings()
1803 after->flags = tmp->flags; in amdgpu_vm_bo_clear_mappings()
1804 after->bo_va = tmp->bo_va; in amdgpu_vm_bo_clear_mappings()
1805 list_add(&after->list, &tmp->bo_va->invalids); in amdgpu_vm_bo_clear_mappings()
1808 list_del(&tmp->list); in amdgpu_vm_bo_clear_mappings()
1809 list_add(&tmp->list, &removed); in amdgpu_vm_bo_clear_mappings()
1816 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
1817 list_del(&tmp->list); in amdgpu_vm_bo_clear_mappings()
1819 if (tmp->start < saddr) in amdgpu_vm_bo_clear_mappings()
1820 tmp->start = saddr; in amdgpu_vm_bo_clear_mappings()
1821 if (tmp->last > eaddr) in amdgpu_vm_bo_clear_mappings()
1822 tmp->last = eaddr; in amdgpu_vm_bo_clear_mappings()
1824 tmp->bo_va = NULL; in amdgpu_vm_bo_clear_mappings()
1825 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
1830 if (!list_empty(&before->list)) { in amdgpu_vm_bo_clear_mappings()
1831 struct amdgpu_bo *bo = before->bo_va->base.bo; in amdgpu_vm_bo_clear_mappings()
1833 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
1834 if (before->flags & AMDGPU_PTE_PRT) in amdgpu_vm_bo_clear_mappings()
1837 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_clear_mappings()
1838 !before->bo_va->base.moved) in amdgpu_vm_bo_clear_mappings()
1839 amdgpu_vm_bo_moved(&before->bo_va->base); in amdgpu_vm_bo_clear_mappings()
1845 if (!list_empty(&after->list)) { in amdgpu_vm_bo_clear_mappings()
1846 struct amdgpu_bo *bo = after->bo_va->base.bo; in amdgpu_vm_bo_clear_mappings()
1848 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
1849 if (after->flags & AMDGPU_PTE_PRT) in amdgpu_vm_bo_clear_mappings()
1852 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_clear_mappings()
1853 !after->bo_va->base.moved) in amdgpu_vm_bo_clear_mappings()
1854 amdgpu_vm_bo_moved(&after->bo_va->base); in amdgpu_vm_bo_clear_mappings()
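When the cleared range punches a hole in an existing mapping, the two remnants keep addressing the right parts of the BO: before covers [tmp->start, saddr - 1] with the original offset, and after covers [eaddr + 1, tmp->last] with its offset advanced by the skipped pages. For example, clearing pages [3, 4] out of a mapping of pages [1, 8] with offset 0 leaves before = pages [1, 2] at offset 0 and after = pages [5, 8] at offset (5 - 1) << PAGE_SHIFT = 0x4000.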
1863 * amdgpu_vm_bo_lookup_mapping - find mapping by address
1877 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
1881 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1895 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
1897 if (mapping->bo_va && mapping->bo_va->base.bo) { in amdgpu_vm_bo_trace_cs()
1900 bo = mapping->bo_va->base.bo; in amdgpu_vm_bo_trace_cs()
1901 if (dma_resv_locking_ctx(bo->tbo.base.resv) != in amdgpu_vm_bo_trace_cs()
1911 * amdgpu_vm_bo_del - remove a bo from a specific vm
1916 * Remove @bo_va->bo from the requested vm.
1924 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_del()
1925 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del()
1928 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
1931 dma_resv_assert_held(bo->tbo.base.resv); in amdgpu_vm_bo_del()
1932 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_del()
1933 ttm_bo_set_bulk_move(&bo->tbo, NULL); in amdgpu_vm_bo_del()
1935 for (base = &bo_va->base.bo->vm_bo; *base; in amdgpu_vm_bo_del()
1936 base = &(*base)->next) { in amdgpu_vm_bo_del()
1937 if (*base != &bo_va->base) in amdgpu_vm_bo_del()
1940 *base = bo_va->base.next; in amdgpu_vm_bo_del()
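The removal loop above walks bo->vm_bo with a pointer to the next pointer, so unlinking the head and an interior node is the same assignment. The idiom in isolation, as a self-contained sketch:

struct node {
	struct node *next;
};

/* Unlink @victim from the singly linked list at *@head (sketch). */
static void demo_unlink(struct node **head, struct node *victim)
{
	struct node **p;

	for (p = head; *p; p = &(*p)->next) {
		if (*p == victim) {
			*p = victim->next;
			break;
		}
	}
}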
1945 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
1946 list_del(&bo_va->base.vm_status); in amdgpu_vm_bo_del()
1947 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
1949 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { in amdgpu_vm_bo_del()
1950 list_del(&mapping->list); in amdgpu_vm_bo_del()
1951 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1952 mapping->bo_va = NULL; in amdgpu_vm_bo_del()
1954 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
1956 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { in amdgpu_vm_bo_del()
1957 list_del(&mapping->list); in amdgpu_vm_bo_del()
1958 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1960 bo_va->last_pt_update); in amdgpu_vm_bo_del()
1963 dma_fence_put(bo_va->last_pt_update); in amdgpu_vm_bo_del()
1965 if (bo && bo_va->is_xgmi) in amdgpu_vm_bo_del()
1972 * amdgpu_vm_evictable - check if we can evict a VM
1980 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; in amdgpu_vm_evictable()
1983 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
1987 if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) in amdgpu_vm_evictable()
1991 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
1995 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
1996 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2000 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2001 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2006 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2020 if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) in amdgpu_vm_bo_invalidate()
2021 bo = bo->parent; in amdgpu_vm_bo_invalidate()
2023 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { in amdgpu_vm_bo_invalidate()
2024 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate()
2026 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
2031 if (bo_base->moved) in amdgpu_vm_bo_invalidate()
2033 bo_base->moved = true; in amdgpu_vm_bo_invalidate()
2035 if (bo->tbo.type == ttm_bo_type_kernel) in amdgpu_vm_bo_invalidate()
2037 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
2045 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2060 return (bits - 9); in amdgpu_vm_get_block_size()
2066 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2079 unsigned int max_size = 1 << (max_bits - 30); in amdgpu_vm_adjust_size()
2084 if (amdgpu_vm_size != -1) { in amdgpu_vm_adjust_size()
2087 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", in amdgpu_vm_adjust_size()
2099 * - Need to map system memory and VRAM from all GPUs in amdgpu_vm_adjust_size()
2100 * - VRAM from other GPUs not known here in amdgpu_vm_adjust_size()
2101 * - Assume VRAM <= system memory in amdgpu_vm_adjust_size()
2102 * - On GFX8 and older, VM space can be segmented for in amdgpu_vm_adjust_size()
2104 * - Need to allow room for fragmentation, guard pages etc. in amdgpu_vm_adjust_size()
2112 (1 << 30) - 1) >> 30; in amdgpu_vm_adjust_size()
2117 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; in amdgpu_vm_adjust_size()
2119 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); in amdgpu_vm_adjust_size()
2120 if (amdgpu_vm_block_size != -1) in amdgpu_vm_adjust_size()
2121 tmp >>= amdgpu_vm_block_size - 9; in amdgpu_vm_adjust_size()
2122 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; in amdgpu_vm_adjust_size()
2123 adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp); in amdgpu_vm_adjust_size()
2124 switch (adev->vm_manager.num_level) { in amdgpu_vm_adjust_size()
2126 adev->vm_manager.root_level = AMDGPU_VM_PDB2; in amdgpu_vm_adjust_size()
2129 adev->vm_manager.root_level = AMDGPU_VM_PDB1; in amdgpu_vm_adjust_size()
2132 adev->vm_manager.root_level = AMDGPU_VM_PDB0; in amdgpu_vm_adjust_size()
2135 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); in amdgpu_vm_adjust_size()
2138 if (amdgpu_vm_block_size != -1) in amdgpu_vm_adjust_size()
2139 adev->vm_manager.block_size = in amdgpu_vm_adjust_size()
2141 - AMDGPU_GPU_PAGE_SHIFT in amdgpu_vm_adjust_size()
2142 - 9 * adev->vm_manager.num_level); in amdgpu_vm_adjust_size()
2143 else if (adev->vm_manager.num_level > 1) in amdgpu_vm_adjust_size()
2144 adev->vm_manager.block_size = 9; in amdgpu_vm_adjust_size()
2146 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); in amdgpu_vm_adjust_size()
2148 if (amdgpu_vm_fragment_size == -1) in amdgpu_vm_adjust_size()
2149 adev->vm_manager.fragment_size = fragment_size_default; in amdgpu_vm_adjust_size()
2151 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; in amdgpu_vm_adjust_size()
2153 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", in amdgpu_vm_adjust_size()
2154 vm_size, adev->vm_manager.num_level + 1, in amdgpu_vm_adjust_size()
2155 adev->vm_manager.block_size, in amdgpu_vm_adjust_size()
2156 adev->vm_manager.fragment_size); in amdgpu_vm_adjust_size()
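The shift by 18 converts GiB to 4 KiB GPU pages (30 - 12). For example, vm_size = 256 yields max_pfn = 256 << 18 = 0x4000000 pages, exactly 256 GiB of address space; roundup_pow_of_two() of that value then drives the num_level and block_size computation above.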
2160 * amdgpu_vm_wait_idle - wait for the VM to become idle
2167 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2173 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2177 * amdgpu_vm_init - initialize a vm instance
2195 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2197 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2198 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2199 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2200 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2201 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2202 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2203 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2204 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2205 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2206 INIT_LIST_HEAD(&vm->pt_freed); in amdgpu_vm_init()
2207 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); in amdgpu_vm_init()
2208 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2214 vm->pte_support_ats = false; in amdgpu_vm_init()
2215 vm->is_compute_context = false; in amdgpu_vm_init()
2217 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2221 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2222 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2223 !amdgpu_gmc_vram_full_visible(&adev->gmc)), in amdgpu_vm_init()
2226 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2227 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2229 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2231 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2232 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2233 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2234 vm->generation = 0; in amdgpu_vm_init()
2236 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2237 vm->evicting = false; in amdgpu_vm_init()
2239 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2244 root_bo = amdgpu_bo_ref(&root->bo); in amdgpu_vm_init()
2247 amdgpu_bo_unref(&root->shadow); in amdgpu_vm_init()
2252 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2253 r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); in amdgpu_vm_init()
2261 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2268 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2272 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2273 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2280 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2285 * This only works on GFX VMs that don't have any BOs added and no
2289 * - use_cpu_for_update
2290 * - pte_supports_ats
2296 * 0 for success, -errno for errors.
2300 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); in amdgpu_vm_make_compute()
2303 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2310 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
2313 r = -EINVAL; in amdgpu_vm_make_compute()
2317 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
2318 r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo), in amdgpu_vm_make_compute()
2325 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2328 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2329 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2330 !amdgpu_gmc_vram_full_visible(&adev->gmc)), in amdgpu_vm_make_compute()
2333 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2335 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2340 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2346 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2349 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2350 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2351 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2354 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); in amdgpu_vm_make_compute()
2359 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2364 * amdgpu_vm_release_compute - release a compute vm
2374 vm->is_compute_context = false; in amdgpu_vm_release_compute()
2378 * amdgpu_vm_fini - tear down a vm instance
2389 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; in amdgpu_vm_fini()
2396 flush_work(&vm->pt_free_work); in amdgpu_vm_fini()
2398 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2401 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2402 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2403 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2405 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2406 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2407 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2409 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2410 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { in amdgpu_vm_fini()
2415 list_del(&mapping->list); in amdgpu_vm_fini()
2422 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2426 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2427 dev_err(adev->dev, "still active bo inside vm\n"); in amdgpu_vm_fini()
2430 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2434 list_del(&mapping->list); in amdgpu_vm_fini()
2438 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2441 if (vm->reserved_vmid[i]) { in amdgpu_vm_fini()
2443 vm->reserved_vmid[i] = false; in amdgpu_vm_fini()
2450 * amdgpu_vm_manager_init - init the VM manager
2463 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || in amdgpu_vm_manager_init()
2464 adev->asic_type == CHIP_NAVI10 || in amdgpu_vm_manager_init()
2465 adev->asic_type == CHIP_NAVI14); in amdgpu_vm_manager_init()
2468 adev->vm_manager.fence_context = in amdgpu_vm_manager_init()
2471 adev->vm_manager.seqno[i] = 0; in amdgpu_vm_manager_init()
2473 spin_lock_init(&adev->vm_manager.prt_lock); in amdgpu_vm_manager_init()
2474 atomic_set(&adev->vm_manager.num_prt_users, 0); in amdgpu_vm_manager_init()
2480 if (amdgpu_vm_update_mode == -1) { in amdgpu_vm_manager_init()
2484 if (amdgpu_gmc_vram_full_visible(&adev->gmc) && in amdgpu_vm_manager_init()
2486 adev->vm_manager.vm_update_mode = in amdgpu_vm_manager_init()
2489 adev->vm_manager.vm_update_mode = 0; in amdgpu_vm_manager_init()
2491 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; in amdgpu_vm_manager_init()
2493 adev->vm_manager.vm_update_mode = 0; in amdgpu_vm_manager_init()
2496 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ); in amdgpu_vm_manager_init()
2500 * amdgpu_vm_manager_fini - cleanup VM manager
2508 WARN_ON(!xa_empty(&adev->vm_manager.pasids)); in amdgpu_vm_manager_fini()
2509 xa_destroy(&adev->vm_manager.pasids); in amdgpu_vm_manager_fini()
2515 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2522 * 0 for success, -errno for errors.
2528 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_vm_ioctl()
2531 if (args->in.flags) in amdgpu_vm_ioctl()
2532 return -EINVAL; in amdgpu_vm_ioctl()
2534 switch (args->in.op) { in amdgpu_vm_ioctl()
2537 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2539 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; in amdgpu_vm_ioctl()
2544 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2546 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; in amdgpu_vm_ioctl()
2550 return -EINVAL; in amdgpu_vm_ioctl()
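Userspace reaches this through the DRM_AMDGPU_VM ioctl; libdrm wraps the two ops as amdgpu_vm_reserve_vmid() and amdgpu_vm_unreserve_vmid(). A hedged usage sketch (flags must currently be zero):

/* Sketch: reserve a dedicated VMID on the GFX hub for this device,
 * run the workload, then release it. */
if (amdgpu_vm_reserve_vmid(dev, 0) == 0) {
	/* ... submissions that rely on the reserved VMID ... */
	amdgpu_vm_unreserve_vmid(dev, 0);
}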
2557 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2569 xa_lock_irqsave(&adev->vm_manager.pasids, flags); in amdgpu_vm_get_task_info()
2571 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_task_info()
2573 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
2575 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); in amdgpu_vm_get_task_info()
2579 * amdgpu_vm_set_task_info - Sets VMs task info.
2585 if (vm->task_info.pid) in amdgpu_vm_set_task_info()
2588 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
2589 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
2591 if (current->group_leader->mm != current->mm) in amdgpu_vm_set_task_info()
2594 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2595 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()
2599 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2602 * @vmid: VMID, only used for GFX 9.4.3.
2604 * GFX 9.4.3.
2622 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2623 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2625 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2626 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2630 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2648 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2649 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2650 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2652 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2661 * combination to force a no-retry-fault in amdgpu_vm_handle_fault()
2667 value = adev->dummy_page_addr; in amdgpu_vm_handle_fault()
2676 r = dma_resv_reserve_fences(root->tbo.base.resv, 1); in amdgpu_vm_handle_fault()
2702 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2726 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2728 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2729 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
2731 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
2737 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2738 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
2740 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
2746 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2747 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
2749 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
2755 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2756 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
2758 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
2764 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2765 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
2767 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
2773 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2774 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
2776 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
2778 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2797 * amdgpu_vm_update_fault_cache - update cached fault info.
2815 xa_lock_irqsave(&adev->vm_manager.pasids, flags); in amdgpu_vm_update_fault_cache()
2817 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_update_fault_cache()
2821 * only update if status is non-0. in amdgpu_vm_update_fault_cache()
2824 vm->fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
2825 vm->fault_info.status = status; in amdgpu_vm_update_fault_cache()
2827 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; in amdgpu_vm_update_fault_cache()
2828 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
2829 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT; in amdgpu_vm_update_fault_cache()
2831 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; in amdgpu_vm_update_fault_cache()
2832 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
2833 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT; in amdgpu_vm_update_fault_cache()
2835 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; in amdgpu_vm_update_fault_cache()
2836 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
2837 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT; in amdgpu_vm_update_fault_cache()
2842 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); in amdgpu_vm_update_fault_cache()
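The cached vmhub value packs the hub type into the low bits and the hub instance above AMDGPU_VMHUB_IDX_SHIFT. Decoding it, as a sketch (the *_MASK names are assumed from the uapi header and should be double-checked):

/* Sketch: split fault_info.vmhub back into hub type and instance. */
u32 type = vm->fault_info.vmhub & AMDGPU_VMHUB_TYPE_MASK;
u32 inst = (vm->fault_info.vmhub & AMDGPU_VMHUB_IDX_MASK) >>
	   AMDGPU_VMHUB_IDX_SHIFT;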