29 #include <linux/dma-fence-array.h>
32 #include <linux/dma-buf.h>
69 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
100 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
116 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
131 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
146 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
149 if (vm->pasid) { in amdgpu_vm_set_pasid()
150 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
154 vm->pasid = 0; in amdgpu_vm_set_pasid()
158 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
163 vm->pasid = pasid; in amdgpu_vm_set_pasid()
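
The xarray maintained above gives an irq-safe pasid -> VM mapping. A minimal sketch of the reverse lookup it enables, assuming only what the fragment shows (the driver's own helper, amdgpu_vm_get_vm_from_pasid, appears further down in this listing; pasid_to_vm is an illustrative name):

	/* Sketch: look up the VM registered for a pasid. The pasids xarray
	 * is initialized with XA_FLAGS_LOCK_IRQ, hence the irq-safe lock. */
	static struct amdgpu_vm *pasid_to_vm(struct amdgpu_device *adev, u32 pasid)
	{
		struct amdgpu_vm *vm;
		unsigned long flags;

		xa_lock_irqsave(&adev->vm_manager.pasids, flags);
		vm = xa_load(&adev->vm_manager.pasids, pasid);
		xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);

		return vm;
	}
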
171 * amdgpu_vm_bo_evicted - vm_bo is evicted
180 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted()
181 struct amdgpu_bo *bo = vm_bo->bo; in amdgpu_vm_bo_evicted()
183 vm_bo->moved = true; in amdgpu_vm_bo_evicted()
184 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
185 if (bo->tbo.type == ttm_bo_type_kernel) in amdgpu_vm_bo_evicted()
186 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
188 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
189 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
192 * amdgpu_vm_bo_moved - vm_bo is moved
201 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
202 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
203 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
207 * amdgpu_vm_bo_idle - vm_bo is idle
216 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
217 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
218 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
219 vm_bo->moved = false; in amdgpu_vm_bo_idle()
223 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
232 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
233 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
234 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
238 * amdgpu_vm_bo_evicted_user - user vm_bo is evicted
247 vm_bo->moved = true; in amdgpu_vm_bo_evicted_user()
248 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
249 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); in amdgpu_vm_bo_evicted_user()
250 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
254 * amdgpu_vm_bo_relocated - vm_bo is relocated
263 if (vm_bo->bo->parent) { in amdgpu_vm_bo_relocated()
264 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
265 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
266 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
273 * amdgpu_vm_bo_done - vm_bo is done
282 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
283 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
284 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
288 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
298 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
299 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
300 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
301 vm_bo->moved = true; in amdgpu_vm_bo_reset_state_machine()
302 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
303 struct amdgpu_bo *bo = vm_bo->bo; in amdgpu_vm_bo_reset_state_machine()
305 vm_bo->moved = true; in amdgpu_vm_bo_reset_state_machine()
306 if (!bo || bo->tbo.type != ttm_bo_type_kernel) in amdgpu_vm_bo_reset_state_machine()
307 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
308 else if (bo->parent) in amdgpu_vm_bo_reset_state_machine()
309 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
311 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
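
Taken together, the helpers above implement a small state machine over the per-VM status lists; a hedged summary, reconstructed from this listing rather than quoted from the source:

	/*
	 * evicted / evicted_user --validate--> moved (user BOs) or relocated (PTs)
	 * moved / relocated ------update-----> idle (kernel BOs) or done (user BOs)
	 * idle / done ---------invalidate----> invalidated, or back to evicted
	 *
	 * Every transition is a list_move() of vm_bo->vm_status under
	 * vm->status_lock; amdgpu_vm_bo_reset_state_machine() forces all
	 * done/idle BOs back onto the dirty lists after a reset.
	 */
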
315 * amdgpu_vm_update_shared - helper to update shared memory stat
324 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_shared()
325 struct amdgpu_bo *bo = base->bo; in amdgpu_vm_update_shared()
330 spin_lock(&vm->status_lock); in amdgpu_vm_update_shared()
331 shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); in amdgpu_vm_update_shared()
332 if (base->shared != shared) { in amdgpu_vm_update_shared()
333 base->shared = shared; in amdgpu_vm_update_shared()
335 vm->stats[bo_memtype].drm.shared += size; in amdgpu_vm_update_shared()
336 vm->stats[bo_memtype].drm.private -= size; in amdgpu_vm_update_shared()
338 vm->stats[bo_memtype].drm.shared -= size; in amdgpu_vm_update_shared()
339 vm->stats[bo_memtype].drm.private += size; in amdgpu_vm_update_shared()
342 spin_unlock(&vm->status_lock); in amdgpu_vm_update_shared()
346 * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
356 for (base = bo->vm_bo; base; base = base->next) in amdgpu_vm_bo_update_shared()
361 * amdgpu_vm_update_stats_locked - helper to update normal memory stat
364 * be bo->tbo.resource
365 * @sign: if we should add (+1) or subtract (-1) from the stat
373 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_stats_locked()
374 struct amdgpu_bo *bo = base->bo; in amdgpu_vm_update_stats_locked()
378 /* For drm-total- and drm-shared-, BOs are accounted by their preferred in amdgpu_vm_update_stats_locked()
381 if (base->shared) in amdgpu_vm_update_stats_locked()
382 vm->stats[bo_memtype].drm.shared += size; in amdgpu_vm_update_stats_locked()
384 vm->stats[bo_memtype].drm.private += size; in amdgpu_vm_update_stats_locked()
386 if (res && res->mem_type < __AMDGPU_PL_NUM) { in amdgpu_vm_update_stats_locked()
387 uint32_t res_memtype = res->mem_type; in amdgpu_vm_update_stats_locked()
389 vm->stats[res_memtype].drm.resident += size; in amdgpu_vm_update_stats_locked()
393 if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) in amdgpu_vm_update_stats_locked()
394 vm->stats[res_memtype].drm.purgeable += size; in amdgpu_vm_update_stats_locked()
395 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype))) in amdgpu_vm_update_stats_locked()
396 vm->stats[bo_memtype].evicted += size; in amdgpu_vm_update_stats_locked()
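
The @sign parameter exists so that a BO move can be accounted as one subtract/add pair under a single lock hold; a minimal sketch of that pairing, mirroring amdgpu_vm_bo_move() further down (new_res is a hypothetical name for the destination resource):

	spin_lock(&vm->status_lock);
	/* retire the stats of the placement the BO is leaving ... */
	amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
	/* ... and charge the placement it is moving to */
	amdgpu_vm_update_stats_locked(bo_base, new_res, +1);
	spin_unlock(&vm->status_lock);
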
401 * amdgpu_vm_update_stats - helper to update normal memory stat
404 * be bo->tbo.resource
405 * @sign: if we should add (+1) or subtract (-1) from the stat
412 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_stats()
414 spin_lock(&vm->status_lock); in amdgpu_vm_update_stats()
416 spin_unlock(&vm->status_lock); in amdgpu_vm_update_stats()
420 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
432 base->vm = vm; in amdgpu_vm_bo_base_init()
433 base->bo = bo; in amdgpu_vm_bo_base_init()
434 base->next = NULL; in amdgpu_vm_bo_base_init()
435 INIT_LIST_HEAD(&base->vm_status); in amdgpu_vm_bo_base_init()
439 base->next = bo->vm_bo; in amdgpu_vm_bo_base_init()
440 bo->vm_bo = base; in amdgpu_vm_bo_base_init()
442 spin_lock(&vm->status_lock); in amdgpu_vm_bo_base_init()
443 base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); in amdgpu_vm_bo_base_init()
444 amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1); in amdgpu_vm_bo_base_init()
445 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_base_init()
450 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
452 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
453 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) in amdgpu_vm_bo_base_init()
458 if (bo->preferred_domains & in amdgpu_vm_bo_base_init()
459 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)) in amdgpu_vm_bo_base_init()
471 * amdgpu_vm_lock_pd - lock PD in drm_exec
483 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
488 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
499 spin_lock(&adev->mman.bdev.lru_lock); in amdgpu_vm_move_to_lru_tail()
500 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
501 spin_unlock(&adev->mman.bdev.lru_lock); in amdgpu_vm_move_to_lru_tail()
510 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
511 adev->vm_manager.vm_pte_scheds, in amdgpu_vm_init_entities()
512 adev->vm_manager.vm_pte_num_scheds, NULL); in amdgpu_vm_init_entities()
516 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
517 adev->vm_manager.vm_pte_scheds, in amdgpu_vm_init_entities()
518 adev->vm_manager.vm_pte_num_scheds, NULL); in amdgpu_vm_init_entities()
521 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
528 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
529 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
533 * amdgpu_vm_generation - return the page table re-generation counter
537 * Returns a page table re-generation token to allow checking if submissions
543 uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32; in amdgpu_vm_generation()
548 result += lower_32_bits(vm->generation); in amdgpu_vm_generation()
549 /* Add one if the page tables will be re-generated on next CS */ in amdgpu_vm_generation()
550 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
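
A sketch of how the 64-bit token is assembled from the pieces visible above (illustrative only, with the three inputs passed in explicitly):

	static u64 generation_token(u32 vram_lost_counter, u64 vm_generation,
				    bool delayed_entity_errored)
	{
		u64 result = (u64)vram_lost_counter << 32;	/* upper 32 bits */

		result += lower_32_bits(vm_generation);		/* lower 32 bits */
		if (delayed_entity_errored)
			result += 1;	/* tables re-generated on next CS */

		return result;
	}
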
557 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
565 * Validate the page table BOs and per-VM BOs on command submission if
582 if (vm->generation != new_vm_generation) { in amdgpu_vm_validate()
583 vm->generation = new_vm_generation; in amdgpu_vm_validate()
591 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
592 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate()
593 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate()
596 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
598 bo = bo_base->bo; in amdgpu_vm_validate()
604 if (bo->tbo.type != ttm_bo_type_kernel) { in amdgpu_vm_validate()
607 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate()
610 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
612 while (ticket && !list_empty(&vm->evicted_user)) { in amdgpu_vm_validate()
613 bo_base = list_first_entry(&vm->evicted_user, in amdgpu_vm_validate()
616 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
618 bo = bo_base->bo; in amdgpu_vm_validate()
620 if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) { in amdgpu_vm_validate()
625 pr_warn_ratelimited("pid %d\n", ti->pid); in amdgpu_vm_validate()
629 return -EINVAL; in amdgpu_vm_validate()
638 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
640 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
643 vm->evicting = false; in amdgpu_vm_validate()
650 * amdgpu_vm_ready - check VM is ready for updates
665 ret = !vm->evicting; in amdgpu_vm_ready()
668 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
669 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
670 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
676 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
691 /* Compute has a VM bug for GFX versions <= 7, and for in amdgpu_vm_check_compute_bug()
692 GFX 8 with MEC firmware versions < 673. */ in amdgpu_vm_check_compute_bug()
693 if (ip_block->version->major <= 7) in amdgpu_vm_check_compute_bug()
695 else if (ip_block->version->major == 8) in amdgpu_vm_check_compute_bug()
696 if (adev->gfx.mec_fw_version < 673) in amdgpu_vm_check_compute_bug()
700 for (i = 0; i < adev->num_rings; i++) { in amdgpu_vm_check_compute_bug()
701 ring = adev->rings[i]; in amdgpu_vm_check_compute_bug()
702 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) in amdgpu_vm_check_compute_bug()
704 ring->has_compute_vm_bug = has_compute_vm_bug; in amdgpu_vm_check_compute_bug()
706 ring->has_compute_vm_bug = false; in amdgpu_vm_check_compute_bug()
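
The version checks above reduce to a small predicate; a sketch under the assumption that gfx_major is ip_block->version->major for the GFX IP and mec_fw_version is adev->gfx.mec_fw_version:

	static bool gfx_compute_has_vm_bug(unsigned int gfx_major, u32 mec_fw_version)
	{
		if (gfx_major <= 7)		/* all compute on GFX <= 7 */
			return true;
		if (gfx_major == 8 && mec_fw_version < 673)	/* old GFX8 MEC fw */
			return true;
		return false;
	}
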
711 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
722 struct amdgpu_device *adev = ring->adev; in amdgpu_vm_need_pipeline_sync()
723 unsigned vmhub = ring->vm_hub; in amdgpu_vm_need_pipeline_sync()
724 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vm_need_pipeline_sync()
726 if (job->vmid == 0) in amdgpu_vm_need_pipeline_sync()
729 if (job->vm_needs_flush || ring->has_compute_vm_bug) in amdgpu_vm_need_pipeline_sync()
732 if (ring->funcs->emit_gds_switch && job->gds_switch_needed) in amdgpu_vm_need_pipeline_sync()
735 if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid])) in amdgpu_vm_need_pipeline_sync()
742 * amdgpu_vm_flush - hardware flush the vm
756 struct amdgpu_device *adev = ring->adev; in amdgpu_vm_flush()
757 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id]; in amdgpu_vm_flush()
758 unsigned vmhub = ring->vm_hub; in amdgpu_vm_flush()
759 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vm_flush()
760 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; in amdgpu_vm_flush()
761 bool spm_update_needed = job->spm_update_needed; in amdgpu_vm_flush()
762 bool gds_switch_needed = ring->funcs->emit_gds_switch && in amdgpu_vm_flush()
763 job->gds_switch_needed; in amdgpu_vm_flush()
764 bool vm_flush_needed = job->vm_needs_flush; in amdgpu_vm_flush()
778 mutex_lock(&id_mgr->lock); in amdgpu_vm_flush()
779 if (id->pasid != job->pasid || !id->pasid_mapping || in amdgpu_vm_flush()
780 !dma_fence_is_signaled(id->pasid_mapping)) in amdgpu_vm_flush()
782 mutex_unlock(&id_mgr->lock); in amdgpu_vm_flush()
784 gds_switch_needed &= !!ring->funcs->emit_gds_switch; in amdgpu_vm_flush()
785 vm_flush_needed &= !!ring->funcs->emit_vm_flush && in amdgpu_vm_flush()
786 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; in amdgpu_vm_flush()
787 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && in amdgpu_vm_flush()
788 ring->funcs->emit_wreg; in amdgpu_vm_flush()
790 cleaner_shader_needed = adev->gfx.enable_cleaner_shader && in amdgpu_vm_flush()
791 ring->funcs->emit_cleaner_shader && job->base.s_fence && in amdgpu_vm_flush()
792 &job->base.s_fence->scheduled == isolation->spearhead; in amdgpu_vm_flush()
799 if (ring->funcs->init_cond_exec) in amdgpu_vm_flush()
801 ring->cond_exe_gpu_addr); in amdgpu_vm_flush()
807 ring->funcs->emit_cleaner_shader(ring); in amdgpu_vm_flush()
810 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); in amdgpu_vm_flush()
811 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); in amdgpu_vm_flush()
815 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); in amdgpu_vm_flush()
817 if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid) in amdgpu_vm_flush()
818 adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid); in amdgpu_vm_flush()
820 if (!ring->is_mes_queue && ring->funcs->emit_gds_switch && in amdgpu_vm_flush()
822 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, in amdgpu_vm_flush()
823 job->gds_size, job->gws_base, in amdgpu_vm_flush()
824 job->gws_size, job->oa_base, in amdgpu_vm_flush()
825 job->oa_size); in amdgpu_vm_flush()
835 mutex_lock(&id_mgr->lock); in amdgpu_vm_flush()
836 dma_fence_put(id->last_flush); in amdgpu_vm_flush()
837 id->last_flush = dma_fence_get(fence); in amdgpu_vm_flush()
838 id->current_gpu_reset_count = in amdgpu_vm_flush()
839 atomic_read(&adev->gpu_reset_counter); in amdgpu_vm_flush()
840 mutex_unlock(&id_mgr->lock); in amdgpu_vm_flush()
844 mutex_lock(&id_mgr->lock); in amdgpu_vm_flush()
845 id->pasid = job->pasid; in amdgpu_vm_flush()
846 dma_fence_put(id->pasid_mapping); in amdgpu_vm_flush()
847 id->pasid_mapping = dma_fence_get(fence); in amdgpu_vm_flush()
848 mutex_unlock(&id_mgr->lock); in amdgpu_vm_flush()
857 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_vm_flush()
858 dma_fence_put(isolation->spearhead); in amdgpu_vm_flush()
859 isolation->spearhead = dma_fence_get(fence); in amdgpu_vm_flush()
860 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_vm_flush()
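
The last three blocks all use the same reference-swap idiom for a fence pointer; the generic form, with slot, lock and new_fence as placeholders:

	/* Replace the fence stored in *slot under the lock guarding it:
	 * drop the reference to the old fence, take one on the new. */
	mutex_lock(lock);
	dma_fence_put(*slot);
	*slot = dma_fence_get(new_fence);
	mutex_unlock(lock);
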
867 if (ring->funcs->emit_switch_buffer) { in amdgpu_vm_flush()
877 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
896 for (base = bo->vm_bo; base; base = base->next) { in amdgpu_vm_bo_find()
897 if (base->vm != vm) in amdgpu_vm_bo_find()
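
The loop walks the singly linked bo->vm_bo chain; its elided body presumably recovers the containing bo_va once the matching VM is found, along these lines:

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;
		/* base is embedded in struct amdgpu_bo_va as ->base */
		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;	/* this BO has no bo_va in this VM */
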
906 * amdgpu_vm_map_gart - Resolve gart mapping of addr
933 * amdgpu_vm_update_pdes - make sure that all directories are valid
953 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
954 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
955 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
961 return -ENODEV; in amdgpu_vm_update_pdes()
968 r = vm->update_funcs->prepare(¶ms, NULL); in amdgpu_vm_update_pdes()
974 flush_tlb_needed |= entry->moved; in amdgpu_vm_update_pdes()
981 r = vm->update_funcs->commit(¶ms, &vm->last_update); in amdgpu_vm_update_pdes()
986 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
1000 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1012 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
1017 * amdgpu_vm_tlb_flush - prepare TLB flush
1030 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_tlb_flush()
1032 tlb_cb->vm = vm; in amdgpu_vm_tlb_flush()
1034 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); in amdgpu_vm_tlb_flush()
1038 if (!dma_fence_add_callback(*fence, &tlb_cb->cb, in amdgpu_vm_tlb_flush()
1040 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_tlb_flush()
1041 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_tlb_flush()
1043 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); in amdgpu_vm_tlb_flush()
1047 if (!params->unlocked && vm->is_compute_context) { in amdgpu_vm_tlb_flush()
1048 amdgpu_vm_tlb_fence_create(params->adev, vm, fence); in amdgpu_vm_tlb_flush()
1051 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence, in amdgpu_vm_tlb_flush()
1057 * amdgpu_vm_update_range - update a range in the vm page table
1094 return -ENODEV; in amdgpu_vm_update_range()
1099 return -ENOMEM; in amdgpu_vm_update_range()
1103 * heavy-weight flush TLB unconditionally. in amdgpu_vm_update_range()
1105 flush_tlb |= adev->gmc.xgmi.num_physical_nodes && in amdgpu_vm_update_range()
1124 if (vm->evicting) { in amdgpu_vm_update_range()
1125 r = -EBUSY; in amdgpu_vm_update_range()
1129 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
1132 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
1133 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
1137 r = vm->update_funcs->prepare(¶ms, sync); in amdgpu_vm_update_range()
1142 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor); in amdgpu_vm_update_range()
1163 pages_addr[idx - 1] + PAGE_SIZE)) in amdgpu_vm_update_range()
1167 count--; in amdgpu_vm_update_range()
1195 r = vm->update_funcs->commit(¶ms, fence); in amdgpu_vm_update_range()
1216 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1217 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); in amdgpu_vm_get_memory()
1218 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1222 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1231 * 0 for success, -EINVAL for failure.
1236 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_update()
1237 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update()
1241 struct ttm_resource *mem; in amdgpu_vm_bo_update() local
1251 mem = NULL; in amdgpu_vm_bo_update()
1256 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_bo_update()
1261 r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv); in amdgpu_vm_bo_update()
1266 mem = NULL; in amdgpu_vm_bo_update()
1271 struct drm_gem_object *obj = &bo->tbo.base; in amdgpu_vm_bo_update()
1273 if (obj->import_attach && bo_va->is_xgmi) { in amdgpu_vm_bo_update()
1274 struct dma_buf *dma_buf = obj->import_attach->dmabuf; in amdgpu_vm_bo_update()
1275 struct drm_gem_object *gobj = dma_buf->priv; in amdgpu_vm_bo_update()
1278 if (abo->tbo.resource && in amdgpu_vm_bo_update()
1279 abo->tbo.resource->mem_type == TTM_PL_VRAM) in amdgpu_vm_bo_update()
1282 mem = bo->tbo.resource; in amdgpu_vm_bo_update()
1283 if (mem && (mem->mem_type == TTM_PL_TT || in amdgpu_vm_bo_update()
1284 mem->mem_type == AMDGPU_PL_PREEMPT)) in amdgpu_vm_bo_update()
1285 pages_addr = bo->tbo.ttm->dma_address; in amdgpu_vm_bo_update()
1288 r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, in amdgpu_vm_bo_update()
1297 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); in amdgpu_vm_bo_update()
1302 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_vm_bo_update()
1303 vram_base = bo_adev->vm_manager.vram_base_offset; in amdgpu_vm_bo_update()
1304 uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0; in amdgpu_vm_bo_update()
1312 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1314 last_update = &bo_va->last_pt_update; in amdgpu_vm_bo_update()
1316 if (!clear && bo_va->base.moved) { in amdgpu_vm_bo_update()
1318 list_splice_init(&bo_va->valids, &bo_va->invalids); in amdgpu_vm_bo_update()
1320 } else if (bo_va->cleared != clear) { in amdgpu_vm_bo_update()
1321 list_splice_init(&bo_va->valids, &bo_va->invalids); in amdgpu_vm_bo_update()
1324 list_for_each_entry(mapping, &bo_va->invalids, list) { in amdgpu_vm_bo_update()
1327 /* normally, bo_va->flags only contains the READABLE and WRITEABLE bits here in amdgpu_vm_bo_update()
1330 if (!(mapping->flags & AMDGPU_PTE_READABLE)) in amdgpu_vm_bo_update()
1332 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) in amdgpu_vm_bo_update()
1341 !uncached, &sync, mapping->start, in amdgpu_vm_bo_update()
1342 mapping->last, update_flags, in amdgpu_vm_bo_update()
1343 mapping->offset, vram_base, mem, in amdgpu_vm_bo_update()
1354 if (bo->tbo.resource && in amdgpu_vm_bo_update()
1355 !(bo->preferred_domains & in amdgpu_vm_bo_update()
1356 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))) in amdgpu_vm_bo_update()
1357 amdgpu_vm_bo_evicted(&bo_va->base); in amdgpu_vm_bo_update()
1359 amdgpu_vm_bo_idle(&bo_va->base); in amdgpu_vm_bo_update()
1361 amdgpu_vm_bo_done(&bo_va->base); in amdgpu_vm_bo_update()
1364 list_splice_init(&bo_va->invalids, &bo_va->valids); in amdgpu_vm_bo_update()
1365 bo_va->cleared = clear; in amdgpu_vm_bo_update()
1366 bo_va->base.moved = false; in amdgpu_vm_bo_update()
1369 list_for_each_entry(mapping, &bo_va->valids, list) in amdgpu_vm_bo_update()
1379 * amdgpu_vm_update_prt_state - update the global PRT state
1388 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); in amdgpu_vm_update_prt_state()
1389 enable = !!atomic_read(&adev->vm_manager.num_prt_users); in amdgpu_vm_update_prt_state()
1390 adev->gmc.gmc_funcs->set_prt(adev, enable); in amdgpu_vm_update_prt_state()
1391 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); in amdgpu_vm_update_prt_state()
1395 * amdgpu_vm_prt_get - add a PRT user
1401 if (!adev->gmc.gmc_funcs->set_prt) in amdgpu_vm_prt_get()
1404 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) in amdgpu_vm_prt_get()
1409 * amdgpu_vm_prt_put - drop a PRT user
1415 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) in amdgpu_vm_prt_put()
1420 * amdgpu_vm_prt_cb - callback for updating the PRT status
1429 amdgpu_vm_prt_put(cb->adev); in amdgpu_vm_prt_cb()
1434 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1444 if (!adev->gmc.gmc_funcs->set_prt) in amdgpu_vm_add_prt_cb()
1455 cb->adev = adev; in amdgpu_vm_add_prt_cb()
1456 if (!fence || dma_fence_add_callback(fence, &cb->cb, in amdgpu_vm_add_prt_cb()
1458 amdgpu_vm_prt_cb(fence, &cb->cb); in amdgpu_vm_add_prt_cb()
1463 * amdgpu_vm_free_mapping - free a mapping
1477 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev)) in amdgpu_vm_free_mapping()
1483 * amdgpu_vm_prt_fini - finish all prt mappings
1492 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1504 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1533 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_clear_freed()
1538 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1539 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1541 list_del(&mapping->list); in amdgpu_vm_clear_freed()
1544 &sync, mapping->start, mapping->last, in amdgpu_vm_clear_freed()
1567 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1589 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1590 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1591 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1593 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1599 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1602 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1603 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1605 resv = bo_va->base.bo->tbo.base.resv; in amdgpu_vm_handle_moved()
1606 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1609 if (!adev->debug_vm && dma_resv_trylock(resv)) { in amdgpu_vm_handle_moved()
1632 if (vm->is_compute_context && in amdgpu_vm_handle_moved()
1633 bo_va->base.bo->tbo.base.import_attach && in amdgpu_vm_handle_moved()
1634 (!bo_va->base.bo->tbo.resource || in amdgpu_vm_handle_moved()
1635 bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) in amdgpu_vm_handle_moved()
1636 amdgpu_vm_bo_evicted_user(&bo_va->base); in amdgpu_vm_handle_moved()
1638 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1640 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1646 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1667 WARN_ON_ONCE(!vm->is_compute_context); in amdgpu_vm_flush_compute_tlb()
1674 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) in amdgpu_vm_flush_compute_tlb()
1677 if (adev->family == AMDGPU_FAMILY_AI || in amdgpu_vm_flush_compute_tlb()
1678 adev->family == AMDGPU_FAMILY_RV) in amdgpu_vm_flush_compute_tlb()
1682 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, in amdgpu_vm_flush_compute_tlb()
1691 * amdgpu_vm_bo_add - add a bo to a specific vm
1715 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1717 bo_va->ref_count = 1; in amdgpu_vm_bo_add()
1718 bo_va->last_pt_update = dma_fence_get_stub(); in amdgpu_vm_bo_add()
1719 INIT_LIST_HEAD(&bo_va->valids); in amdgpu_vm_bo_add()
1720 INIT_LIST_HEAD(&bo_va->invalids); in amdgpu_vm_bo_add()
1725 dma_resv_assert_held(bo->tbo.base.resv); in amdgpu_vm_bo_add()
1727 bo_va->is_xgmi = true; in amdgpu_vm_bo_add()
1737 * amdgpu_vm_bo_insert_map - insert a new mapping
1749 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map()
1750 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_insert_map()
1752 mapping->bo_va = bo_va; in amdgpu_vm_bo_insert_map()
1753 list_add(&mapping->list, &bo_va->invalids); in amdgpu_vm_bo_insert_map()
1754 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1756 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev)) in amdgpu_vm_bo_insert_map()
1759 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved) in amdgpu_vm_bo_insert_map()
1760 amdgpu_vm_bo_moved(&bo_va->base); in amdgpu_vm_bo_insert_map()
1777 return -EINVAL; in amdgpu_vm_verify_parameters()
1782 return -EINVAL; in amdgpu_vm_verify_parameters()
1786 return -EINVAL; in amdgpu_vm_verify_parameters()
1789 lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; in amdgpu_vm_verify_parameters()
1790 if (lpfn >= adev->vm_manager.max_pfn) in amdgpu_vm_verify_parameters()
1791 return -EINVAL; in amdgpu_vm_verify_parameters()
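
A worked example of the last-page check (assuming AMDGPU_GPU_PAGE_SHIFT == 12, i.e. 4 KiB GPU pages):

	/* saddr = 0x10000, size = 0x3000:
	 *   lpfn = (0x10000 + 0x3000 - 1) >> 12 = 0x12fff >> 12 = 0x12
	 * the request is rejected unless lpfn < adev->vm_manager.max_pfn. */
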
1797 * amdgpu_vm_bo_map - map bo inside a vm
1819 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_map()
1820 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map()
1829 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; in amdgpu_vm_bo_map()
1831 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1834 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " in amdgpu_vm_bo_map()
1835 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, in amdgpu_vm_bo_map()
1836 tmp->start, tmp->last + 1); in amdgpu_vm_bo_map()
1837 return -EINVAL; in amdgpu_vm_bo_map()
1842 return -ENOMEM; in amdgpu_vm_bo_map()
1844 mapping->start = saddr; in amdgpu_vm_bo_map()
1845 mapping->last = eaddr; in amdgpu_vm_bo_map()
1846 mapping->offset = offset; in amdgpu_vm_bo_map()
1847 mapping->flags = flags; in amdgpu_vm_bo_map()
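
Under the same 4 KiB page assumption, the byte-to-page conversion for the interval stored above works out as follows (assuming saddr has already been converted to GPU pages on an elided line):

	/* saddr = 0x10000 bytes = 0x10 pages, size = 0x3000 bytes:
	 *   eaddr = 0x10 + (0x3000 - 1) / 0x1000 = 0x10 + 2 = 0x12
	 * so mapping->start = 0x10 and mapping->last = 0x12, inclusive. */
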
1855 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1878 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_replace_map()
1889 return -ENOMEM; in amdgpu_vm_bo_replace_map()
1891 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1898 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; in amdgpu_vm_bo_replace_map()
1900 mapping->start = saddr; in amdgpu_vm_bo_replace_map()
1901 mapping->last = eaddr; in amdgpu_vm_bo_replace_map()
1902 mapping->offset = offset; in amdgpu_vm_bo_replace_map()
1903 mapping->flags = flags; in amdgpu_vm_bo_replace_map()
1911 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1929 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap()
1934 list_for_each_entry(mapping, &bo_va->valids, list) { in amdgpu_vm_bo_unmap()
1935 if (mapping->start == saddr) in amdgpu_vm_bo_unmap()
1939 if (&mapping->list == &bo_va->valids) { in amdgpu_vm_bo_unmap()
1942 list_for_each_entry(mapping, &bo_va->invalids, list) { in amdgpu_vm_bo_unmap()
1943 if (mapping->start == saddr) in amdgpu_vm_bo_unmap()
1947 if (&mapping->list == &bo_va->invalids) in amdgpu_vm_bo_unmap()
1948 return -ENOENT; in amdgpu_vm_bo_unmap()
1951 list_del(&mapping->list); in amdgpu_vm_bo_unmap()
1952 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1953 mapping->bo_va = NULL; in amdgpu_vm_bo_unmap()
1957 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1960 bo_va->last_pt_update); in amdgpu_vm_bo_unmap()
1966 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1992 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; in amdgpu_vm_bo_clear_mappings()
1997 return -ENOMEM; in amdgpu_vm_bo_clear_mappings()
1998 INIT_LIST_HEAD(&before->list); in amdgpu_vm_bo_clear_mappings()
2003 return -ENOMEM; in amdgpu_vm_bo_clear_mappings()
2005 INIT_LIST_HEAD(&after->list); in amdgpu_vm_bo_clear_mappings()
2008 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
2011 if (tmp->start < saddr) { in amdgpu_vm_bo_clear_mappings()
2012 before->start = tmp->start; in amdgpu_vm_bo_clear_mappings()
2013 before->last = saddr - 1; in amdgpu_vm_bo_clear_mappings()
2014 before->offset = tmp->offset; in amdgpu_vm_bo_clear_mappings()
2015 before->flags = tmp->flags; in amdgpu_vm_bo_clear_mappings()
2016 before->bo_va = tmp->bo_va; in amdgpu_vm_bo_clear_mappings()
2017 list_add(&before->list, &tmp->bo_va->invalids); in amdgpu_vm_bo_clear_mappings()
2021 if (tmp->last > eaddr) { in amdgpu_vm_bo_clear_mappings()
2022 after->start = eaddr + 1; in amdgpu_vm_bo_clear_mappings()
2023 after->last = tmp->last; in amdgpu_vm_bo_clear_mappings()
2024 after->offset = tmp->offset; in amdgpu_vm_bo_clear_mappings()
2025 after->offset += (after->start - tmp->start) << PAGE_SHIFT; in amdgpu_vm_bo_clear_mappings()
2026 after->flags = tmp->flags; in amdgpu_vm_bo_clear_mappings()
2027 after->bo_va = tmp->bo_va; in amdgpu_vm_bo_clear_mappings()
2028 list_add(&after->list, &tmp->bo_va->invalids); in amdgpu_vm_bo_clear_mappings()
2031 list_del(&tmp->list); in amdgpu_vm_bo_clear_mappings()
2032 list_add(&tmp->list, &removed); in amdgpu_vm_bo_clear_mappings()
2039 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
2040 list_del(&tmp->list); in amdgpu_vm_bo_clear_mappings()
2042 if (tmp->start < saddr) in amdgpu_vm_bo_clear_mappings()
2043 tmp->start = saddr; in amdgpu_vm_bo_clear_mappings()
2044 if (tmp->last > eaddr) in amdgpu_vm_bo_clear_mappings()
2045 tmp->last = eaddr; in amdgpu_vm_bo_clear_mappings()
2047 tmp->bo_va = NULL; in amdgpu_vm_bo_clear_mappings()
2048 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
2053 if (!list_empty(&before->list)) { in amdgpu_vm_bo_clear_mappings()
2054 struct amdgpu_bo *bo = before->bo_va->base.bo; in amdgpu_vm_bo_clear_mappings()
2056 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
2057 if (before->flags & AMDGPU_PTE_PRT_FLAG(adev)) in amdgpu_vm_bo_clear_mappings()
2061 !before->bo_va->base.moved) in amdgpu_vm_bo_clear_mappings()
2062 amdgpu_vm_bo_moved(&before->bo_va->base); in amdgpu_vm_bo_clear_mappings()
2068 if (!list_empty(&after->list)) { in amdgpu_vm_bo_clear_mappings()
2069 struct amdgpu_bo *bo = after->bo_va->base.bo; in amdgpu_vm_bo_clear_mappings()
2071 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
2072 if (after->flags & AMDGPU_PTE_PRT_FLAG(adev)) in amdgpu_vm_bo_clear_mappings()
2076 !after->bo_va->base.moved) in amdgpu_vm_bo_clear_mappings()
2077 amdgpu_vm_bo_moved(&after->bo_va->base); in amdgpu_vm_bo_clear_mappings()
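
A worked example of the split performed above: clearing the page range [0x140, 0x17f] out of an existing mapping [0x100, 0x1ff] yields

	/* before: start = 0x100, last = 0x13f (offset unchanged)
	 * after:  start = 0x180, last = 0x1ff,
	 *         offset advanced by (0x180 - 0x100) << PAGE_SHIFT
	 * the overlapped middle [0x140, 0x17f] is clamped and queued on
	 * vm->freed for later PTE clearing. */
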
2086 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2100 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
2104 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2118 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2120 if (mapping->bo_va && mapping->bo_va->base.bo) { in amdgpu_vm_bo_trace_cs()
2123 bo = mapping->bo_va->base.bo; in amdgpu_vm_bo_trace_cs()
2124 if (dma_resv_locking_ctx(bo->tbo.base.resv) != in amdgpu_vm_bo_trace_cs()
2134 * amdgpu_vm_bo_del - remove a bo from a specific vm
2139 * Remove @bo_va->bo from the requested vm.
2147 struct amdgpu_bo *bo = bo_va->base.bo; in amdgpu_vm_bo_del()
2148 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del()
2151 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
2154 dma_resv_assert_held(bo->tbo.base.resv); in amdgpu_vm_bo_del()
2156 ttm_bo_set_bulk_move(&bo->tbo, NULL); in amdgpu_vm_bo_del()
2158 for (base = &bo_va->base.bo->vm_bo; *base; in amdgpu_vm_bo_del()
2159 base = &(*base)->next) { in amdgpu_vm_bo_del()
2160 if (*base != &bo_va->base) in amdgpu_vm_bo_del()
2163 amdgpu_vm_update_stats(*base, bo->tbo.resource, -1); in amdgpu_vm_bo_del()
2164 *base = bo_va->base.next; in amdgpu_vm_bo_del()
2169 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
2170 list_del(&bo_va->base.vm_status); in amdgpu_vm_bo_del()
2171 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
2173 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { in amdgpu_vm_bo_del()
2174 list_del(&mapping->list); in amdgpu_vm_bo_del()
2175 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2176 mapping->bo_va = NULL; in amdgpu_vm_bo_del()
2178 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
2180 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { in amdgpu_vm_bo_del()
2181 list_del(&mapping->list); in amdgpu_vm_bo_del()
2182 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2184 bo_va->last_pt_update); in amdgpu_vm_bo_del()
2187 dma_fence_put(bo_va->last_pt_update); in amdgpu_vm_bo_del()
2189 if (bo && bo_va->is_xgmi) in amdgpu_vm_bo_del()
2196 * amdgpu_vm_evictable - check if we can evict a VM
2204 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; in amdgpu_vm_evictable()
2207 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
2211 if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) in amdgpu_vm_evictable()
2215 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
2219 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
2220 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2224 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2225 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2230 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2241 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { in amdgpu_vm_bo_invalidate()
2242 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate()
2249 if (bo_base->moved) in amdgpu_vm_bo_invalidate()
2251 bo_base->moved = true; in amdgpu_vm_bo_invalidate()
2253 if (bo->tbo.type == ttm_bo_type_kernel) in amdgpu_vm_bo_invalidate()
2263 * amdgpu_vm_bo_move - handle BO move
2276 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { in amdgpu_vm_bo_move()
2277 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_move()
2279 spin_lock(&vm->status_lock); in amdgpu_vm_bo_move()
2280 amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1); in amdgpu_vm_bo_move()
2282 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_move()
2289 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2304 return (bits - 9); in amdgpu_vm_get_block_size()
2310 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2323 unsigned int max_size = 1 << (max_bits - 30); in amdgpu_vm_adjust_size()
2328 if (amdgpu_vm_size != -1) { in amdgpu_vm_adjust_size()
2331 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", in amdgpu_vm_adjust_size()
2343 * - Need to map system memory and VRAM from all GPUs in amdgpu_vm_adjust_size()
2344 * - VRAM from other GPUs not known here in amdgpu_vm_adjust_size()
2345 * - Assume VRAM <= system memory in amdgpu_vm_adjust_size()
2346 * - On GFX8 and older, VM space can be segmented for in amdgpu_vm_adjust_size()
2348 * - Need to allow room for fragmentation, guard pages etc. in amdgpu_vm_adjust_size()
2356 (1 << 30) - 1) >> 30; in amdgpu_vm_adjust_size()
2361 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; in amdgpu_vm_adjust_size()
2363 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); in amdgpu_vm_adjust_size()
2364 if (amdgpu_vm_block_size != -1) in amdgpu_vm_adjust_size()
2365 tmp >>= amdgpu_vm_block_size - 9; in amdgpu_vm_adjust_size()
2366 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; in amdgpu_vm_adjust_size()
2367 adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp); in amdgpu_vm_adjust_size()
2368 switch (adev->vm_manager.num_level) { in amdgpu_vm_adjust_size()
2370 adev->vm_manager.root_level = AMDGPU_VM_PDB2; in amdgpu_vm_adjust_size()
2373 adev->vm_manager.root_level = AMDGPU_VM_PDB1; in amdgpu_vm_adjust_size()
2376 adev->vm_manager.root_level = AMDGPU_VM_PDB0; in amdgpu_vm_adjust_size()
2379 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); in amdgpu_vm_adjust_size()
2382 if (amdgpu_vm_block_size != -1) in amdgpu_vm_adjust_size()
2383 adev->vm_manager.block_size = in amdgpu_vm_adjust_size()
2385 - AMDGPU_GPU_PAGE_SHIFT in amdgpu_vm_adjust_size()
2386 - 9 * adev->vm_manager.num_level); in amdgpu_vm_adjust_size()
2387 else if (adev->vm_manager.num_level > 1) in amdgpu_vm_adjust_size()
2388 adev->vm_manager.block_size = 9; in amdgpu_vm_adjust_size()
2390 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); in amdgpu_vm_adjust_size()
2392 if (amdgpu_vm_fragment_size == -1) in amdgpu_vm_adjust_size()
2393 adev->vm_manager.fragment_size = fragment_size_default; in amdgpu_vm_adjust_size()
2395 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; in amdgpu_vm_adjust_size()
2397 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", in amdgpu_vm_adjust_size()
2398 vm_size, adev->vm_manager.num_level + 1, in amdgpu_vm_adjust_size()
2399 adev->vm_manager.block_size, in amdgpu_vm_adjust_size()
2400 adev->vm_manager.fragment_size); in amdgpu_vm_adjust_size()
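
A worked example of the level computation, for a 256 TiB address space with amdgpu_vm_block_size left at its default of -1:

	/* vm_size = 262144 GiB:
	 *   max_pfn = 262144 << 18 = 2^36 (4 KiB pages)
	 *   tmp = roundup_pow_of_two(2^36) = 2^36, fls64(2^36) = 37
	 *   num_level = DIV_ROUND_UP(37 - 1, 9) - 1 = 4 - 1 = 3
	 * i.e. three page-directory levels below the root. */
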
2404 * amdgpu_vm_wait_idle - wait for the VM to become idle
2411 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2417 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2433 xa_lock_irqsave(&adev->vm_manager.pasids, flags); in amdgpu_vm_get_vm_from_pasid()
2434 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_vm_from_pasid()
2435 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); in amdgpu_vm_get_vm_from_pasid()
2441 * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2449 kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info); in amdgpu_vm_put_task_info()
2453 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2466 ti = vm->task_info; in amdgpu_vm_get_task_info_vm()
2467 kref_get(&vm->task_info->refcount); in amdgpu_vm_get_task_info_vm()
2474 * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2491 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL); in amdgpu_vm_create_task_info()
2492 if (!vm->task_info) in amdgpu_vm_create_task_info()
2493 return -ENOMEM; in amdgpu_vm_create_task_info()
2495 kref_init(&vm->task_info->refcount); in amdgpu_vm_create_task_info()
2500 * amdgpu_vm_set_task_info - Sets VMs task info.
2506 if (!vm->task_info) in amdgpu_vm_set_task_info()
2509 if (vm->task_info->pid == current->pid) in amdgpu_vm_set_task_info()
2512 vm->task_info->pid = current->pid; in amdgpu_vm_set_task_info()
2513 get_task_comm(vm->task_info->task_name, current); in amdgpu_vm_set_task_info()
2515 if (current->group_leader->mm != current->mm) in amdgpu_vm_set_task_info()
2518 vm->task_info->tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2519 get_task_comm(vm->task_info->process_name, current->group_leader); in amdgpu_vm_set_task_info()
2523 * amdgpu_vm_init - initialize a vm instance
2541 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2543 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2544 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2545 INIT_LIST_HEAD(&vm->evicted_user); in amdgpu_vm_init()
2546 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2547 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2548 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2549 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2550 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2551 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2552 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2553 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2559 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in amdgpu_vm_init()
2561 vm->is_compute_context = false; in amdgpu_vm_init()
2563 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2567 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2568 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2569 !amdgpu_gmc_vram_full_visible(&adev->gmc)), in amdgpu_vm_init()
2572 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2573 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2575 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2577 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2578 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2579 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2580 vm->generation = amdgpu_vm_generation(adev, NULL); in amdgpu_vm_init()
2582 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2583 vm->evicting = false; in amdgpu_vm_init()
2584 vm->tlb_fence_context = dma_fence_context_alloc(1); in amdgpu_vm_init()
2586 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2591 root_bo = amdgpu_bo_ref(&root->bo); in amdgpu_vm_init()
2598 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2599 r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); in amdgpu_vm_init()
2611 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2618 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2622 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2623 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2624 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_init()
2631 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2636 * This only works on GFX VMs that don't have any BOs added and no
2640 * - use_cpu_for_update
2641 * - pte_supports_ats
2647 * 0 for success, -errno for errors.
2653 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2658 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2661 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2662 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2663 !amdgpu_gmc_vram_full_visible(&adev->gmc)), in amdgpu_vm_make_compute()
2666 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2668 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2673 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2679 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2682 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2683 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2684 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2687 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2694 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) && in amdgpu_vm_stats_is_zero()
2695 vm->stats[i].evicted == 0)) in amdgpu_vm_stats_is_zero()
2702 * amdgpu_vm_fini - tear down a vm instance
2713 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; in amdgpu_vm_fini()
2720 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2723 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2724 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2725 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2727 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2728 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2729 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2731 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2732 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) { in amdgpu_vm_fini()
2737 list_del(&mapping->list); in amdgpu_vm_fini()
2744 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2748 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2749 dev_err(adev->dev, "still active bo inside vm\n"); in amdgpu_vm_fini()
2752 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2756 list_del(&mapping->list); in amdgpu_vm_fini()
2760 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2763 if (vm->reserved_vmid[i]) { in amdgpu_vm_fini()
2765 vm->reserved_vmid[i] = false; in amdgpu_vm_fini()
2769 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_fini()
2772 struct amdgpu_task_info *ti = vm->task_info; in amdgpu_vm_fini()
2774 dev_warn(adev->dev, in amdgpu_vm_fini()
2775 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n", in amdgpu_vm_fini()
2776 ti->process_name, ti->pid, ti->task_name, ti->tgid); in amdgpu_vm_fini()
2779 amdgpu_vm_put_task_info(vm->task_info); in amdgpu_vm_fini()
2783 * amdgpu_vm_manager_init - init the VM manager
2796 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || in amdgpu_vm_manager_init()
2797 adev->asic_type == CHIP_NAVI10 || in amdgpu_vm_manager_init()
2798 adev->asic_type == CHIP_NAVI14); in amdgpu_vm_manager_init()
2801 adev->vm_manager.fence_context = in amdgpu_vm_manager_init()
2804 adev->vm_manager.seqno[i] = 0; in amdgpu_vm_manager_init()
2806 spin_lock_init(&adev->vm_manager.prt_lock); in amdgpu_vm_manager_init()
2807 atomic_set(&adev->vm_manager.num_prt_users, 0); in amdgpu_vm_manager_init()
2813 if (amdgpu_vm_update_mode == -1) { in amdgpu_vm_manager_init()
2817 if (amdgpu_gmc_vram_full_visible(&adev->gmc) && in amdgpu_vm_manager_init()
2819 adev->vm_manager.vm_update_mode = in amdgpu_vm_manager_init()
2822 adev->vm_manager.vm_update_mode = 0; in amdgpu_vm_manager_init()
2824 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; in amdgpu_vm_manager_init()
2826 adev->vm_manager.vm_update_mode = 0; in amdgpu_vm_manager_init()
2829 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ); in amdgpu_vm_manager_init()
2833 * amdgpu_vm_manager_fini - cleanup VM manager
2841 WARN_ON(!xa_empty(&adev->vm_manager.pasids)); in amdgpu_vm_manager_fini()
2842 xa_destroy(&adev->vm_manager.pasids); in amdgpu_vm_manager_fini()
2848 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2855 * 0 for success, -errno for errors.
2861 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_vm_ioctl()
2864 if (args->in.flags) in amdgpu_vm_ioctl()
2865 return -EINVAL; in amdgpu_vm_ioctl()
2867 switch (args->in.op) { in amdgpu_vm_ioctl()
2870 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2872 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; in amdgpu_vm_ioctl()
2877 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2879 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; in amdgpu_vm_ioctl()
2883 return -EINVAL; in amdgpu_vm_ioctl()
2890 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2894 * @vmid: VMID, only used for GFX 9.4.3.
2896 * GFX 9.4.3.
2914 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2915 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2917 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2918 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2922 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2940 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2941 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2942 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2944 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); in amdgpu_vm_handle_fault()
2953 * combination to force a no-retry-fault in amdgpu_vm_handle_fault()
2959 value = adev->dummy_page_addr; in amdgpu_vm_handle_fault()
2968 r = dma_resv_reserve_fences(root->tbo.base.resv, 1); in amdgpu_vm_handle_fault()
2994 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3018 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
3020 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3021 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
3023 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
3029 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3030 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
3032 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
3038 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3039 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
3041 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
3047 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3048 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
3050 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
3056 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3057 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
3059 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
3065 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3066 if (!bo_va->base.bo) in amdgpu_debugfs_vm_bo_info()
3068 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); in amdgpu_debugfs_vm_bo_info()
3070 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
3089 * amdgpu_vm_update_fault_cache - update cached fault info.
3107 xa_lock_irqsave(&adev->vm_manager.pasids, flags); in amdgpu_vm_update_fault_cache()
3109 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_update_fault_cache()
3113 * only update if status is non-zero. in amdgpu_vm_update_fault_cache()
3116 vm->fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
3117 vm->fault_info.status = status; in amdgpu_vm_update_fault_cache()
3122 adev->vm_manager.fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
3123 adev->vm_manager.fault_info.vmhub = vmhub; in amdgpu_vm_update_fault_cache()
3124 adev->vm_manager.fault_info.status = status; in amdgpu_vm_update_fault_cache()
3127 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; in amdgpu_vm_update_fault_cache()
3128 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3129 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT; in amdgpu_vm_update_fault_cache()
3131 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; in amdgpu_vm_update_fault_cache()
3132 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3133 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT; in amdgpu_vm_update_fault_cache()
3135 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; in amdgpu_vm_update_fault_cache()
3136 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3137 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT; in amdgpu_vm_update_fault_cache()
3142 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); in amdgpu_vm_update_fault_cache()
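
A worked example of the vmhub packing above (the numeric values of AMDGPU_VMHUB_IDX_SHIFT and the *_START bases are not shown in this listing):

	/* a fault on MMHUB0 instance 1, vmhub = AMDGPU_MMHUB0_START + 1:
	 *   fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0 |
	 *                      (1 << AMDGPU_VMHUB_IDX_SHIFT)
	 * the hub type lives in the low bits, the instance index within
	 * that type in the shifted bits. */
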
3146 * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3156 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv; in amdgpu_vm_is_bo_always_valid()