Lines Matching full:vm

119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 struct amdgpu_vm *vm; member
130 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
133 * @vm: amdgpu_vm pointer
134 * @pasid: the pasid the VM is using on this GPU
136 	 * Set the pasid this VM is using on this GPU; this can also be used to remove the
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
145 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
148 if (vm->pasid) { in amdgpu_vm_set_pasid()
149 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
153 vm->pasid = 0; in amdgpu_vm_set_pasid()
157 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
162 vm->pasid = pasid; in amdgpu_vm_set_pasid()
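
Taken together, the amdgpu_vm_set_pasid() fragments above describe a simple invariant: at most one xarray entry per VM, keyed by pasid. A minimal sketch of that pattern, reconstructed from the matched lines (the GFP flag and error paths fall outside the matches, so treat this as illustrative rather than the exact upstream body):

static int example_set_pasid(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, u32 pasid)
{
	int r;

	if (vm->pasid == pasid)		/* already mapped, nothing to do */
		return 0;

	if (vm->pasid) {		/* drop the stale mapping first */
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;
		vm->pasid = 0;
	}

	if (pasid) {			/* install the new mapping */
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));	/* GFP_KERNEL assumed */
		if (r < 0)
			return r;
		vm->pasid = pasid;
	}

	return 0;
}
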
174 * State for PDs/PTs and per VM BOs which are not at the location they should
179 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
183 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
185 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
187 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
188 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
195 * State for per VM BOs which are moved, but that change is not yet reflected
200 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
201 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
202 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
210 * State for PDs/PTs and per VM BOs which have gone through the state machine
215 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
216 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
217 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
231 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
232 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
233 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
247 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
248 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
249 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
265 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
266 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
267 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
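
Every transition helper above (evicted, moved, idle, invalidated, relocated, done) follows the same shape: take vm->status_lock, move the vm_status list node onto the target per-VM list, drop the lock. A condensed sketch of that shared pattern (the target-list parameter is illustrative):

static void example_move_to_state(struct amdgpu_vm_bo_base *vm_bo,
				  struct list_head *target)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, target);	/* e.g. &vm_bo->vm->moved */
	spin_unlock(&vm_bo->vm->status_lock);
}
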
272 * @vm: the VM which state machine to reset
274 	 * Move all vm_bo objects in the VM into a state where they will be updated
277 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) in amdgpu_vm_bo_reset_state_machine() argument
281 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
282 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
283 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
285 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
290 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
292 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
294 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
298 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
300 * @base: base structure for tracking BO usage in a VM
301 * @vm: vm to which bo is to be added
308 struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_bo_base_init() argument
310 base->vm = vm; in amdgpu_vm_bo_base_init()
320 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
323 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
325 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
336 * we checked all the prerequisites, but it looks like this per vm bo in amdgpu_vm_bo_base_init()
338 * is validated on next vm use to avoid fault. in amdgpu_vm_bo_base_init()
346 * @vm: vm providing the BOs
350 * Lock the VM root PD in the DRM execution context.
352 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, in amdgpu_vm_lock_pd() argument
355 /* We need at least two fences for the VM PD/PT updates */ in amdgpu_vm_lock_pd()
356 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
364 * @vm: vm providing the BOs
370 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
373 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
379 struct amdgpu_vm *vm) in amdgpu_vm_init_entities() argument
383 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
389 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
394 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
399 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm) in amdgpu_vm_fini_entities() argument
401 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
402 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
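
The entity fragments above pair two scheduler entities per VM, one for immediate and one for delayed page-table updates. A hedged sketch of the init/unwind pairing they imply (the vm_pte_scheds and vm_pte_num_scheds field names are assumptions based on the surrounding amdgpu code, not shown in the matches):

static int example_init_entities(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	int r;

	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		drm_sched_entity_destroy(&vm->immediate);	/* unwind */

	return r;
}
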
408 * @vm: optional VM to check, might be NULL
411 * are still valid to use this VM. The VM parameter might be NULL in which case
414 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_generation() argument
418 if (!vm) in amdgpu_vm_generation()
421 result += vm->generation; in amdgpu_vm_generation()
423 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
433 * @vm: vm providing the BOs
442 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate_pt_bos() argument
451 if (drm_sched_entity_error(&vm->delayed)) { in amdgpu_vm_validate_pt_bos()
452 ++vm->generation; in amdgpu_vm_validate_pt_bos()
453 amdgpu_vm_bo_reset_state_machine(vm); in amdgpu_vm_validate_pt_bos()
454 amdgpu_vm_fini_entities(vm); in amdgpu_vm_validate_pt_bos()
455 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_validate_pt_bos()
460 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
461 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate_pt_bos()
462 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate_pt_bos()
465 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
482 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate_pt_bos()
485 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
487 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
489 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate_pt_bos()
490 vm->evicting = false; in amdgpu_vm_validate_pt_bos()
491 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate_pt_bos()
497 * amdgpu_vm_ready - check VM is ready for updates
499 * @vm: VM to check
501 * Check if all VM PDs/PTs are ready for updates
504 * True if VM is not evicting.
506 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
511 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_ready()
512 ret = !vm->evicting; in amdgpu_vm_ready()
513 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_ready()
515 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
516 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
517 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
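
amdgpu_vm_ready() combines two independent checks, each under its own lock. Reconstructed from the matched lines (the final combination of the two results is an assumption, since the return statement is not among the matches):

bool example_vm_ready(struct amdgpu_vm *vm)
{
	bool ret, empty;

	amdgpu_vm_eviction_lock(vm);
	ret = !vm->evicting;			/* not currently evicting */
	amdgpu_vm_eviction_unlock(vm);

	spin_lock(&vm->status_lock);
	empty = list_empty(&vm->evicted);	/* nothing left to validate */
	spin_unlock(&vm->status_lock);

	return ret && empty;
}
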
523 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
538 /* Compute has a VM bug for GFX version < 7. in amdgpu_vm_check_compute_bug()
539 	 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */ in amdgpu_vm_check_compute_bug()
589 * amdgpu_vm_flush - hardware flush the vm
595 * Emit a VM flush when it is necessary.
701 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
703 * @vm: requested vm
706 * Find @bo inside the requested vm.
707 	 * Search inside the @bo's vm list for the requested vm
715 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
721 if (base->vm != vm) in amdgpu_vm_bo_find()
760 * @vm: requested vm
769 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
777 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
778 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
779 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
789 params.vm = vm; in amdgpu_vm_update_pdes()
792 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_update_pdes()
805 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
810 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
828 * Increments the tlb sequence to make sure that future CS execute a VM flush.
836 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
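
The tlb_seq fragment above sits in a standard dma_fence callback: recover the containing structure from the embedded dma_fence_cb, bump the sequence so future command submissions flush the VM, then free the container. A hedged sketch (the container type name and the kfree are assumptions; only the atomic64_inc appears in the matches):

struct example_tlb_seq_cb {
	struct amdgpu_vm *vm;
	struct dma_fence_cb cb;
};

static void example_tlb_seq_cb(struct dma_fence *fence,
			       struct dma_fence_cb *cb)
{
	struct example_tlb_seq_cb *tlb_cb;

	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
	atomic64_inc(&tlb_cb->vm->tlb_seq);	/* force a VM flush on next CS */
	kfree(tlb_cb);
}
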
841 * amdgpu_vm_update_range - update a range in the vm page table
844 * @vm: the VM to update the range
864 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_update_range() argument
899 params.vm = vm; in amdgpu_vm_update_range()
905 /* Implicitly sync to command submissions in the same VM before in amdgpu_vm_update_range()
913 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_update_range()
914 if (vm->evicting) { in amdgpu_vm_update_range()
919 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
922 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
923 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
927 r = vm->update_funcs->prepare(&params, resv, sync_mode); in amdgpu_vm_update_range()
985 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_update_range()
988 tlb_cb->vm = vm; in amdgpu_vm_update_range()
992 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_update_range()
993 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_update_range()
1004 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_update_range()
1012 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_get_memory() local
1022 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_get_memory()
1027 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_get_memory()
1031 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, in amdgpu_vm_get_memory() argument
1036 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1037 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) in amdgpu_vm_get_memory()
1040 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) in amdgpu_vm_get_memory()
1043 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) in amdgpu_vm_get_memory()
1046 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) in amdgpu_vm_get_memory()
1049 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) in amdgpu_vm_get_memory()
1052 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) in amdgpu_vm_get_memory()
1054 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1058 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1061 * @bo_va: requested BO and VM object
1073 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1087 resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_bo_update()
1125 vm->root.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1126 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1154 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, in amdgpu_vm_bo_update()
1167 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
1279 * @vm: requested vm
1286 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1299 * @vm: requested vm
1301 	 * Register a cleanup callback to disable PRT support after the VM dies.
1303 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1305 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1320 * @vm: requested vm
1332 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1335 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_clear_freed()
1341 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1342 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1346 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
1350 r = amdgpu_vm_update_range(adev, vm, false, false, true, false, in amdgpu_vm_clear_freed()
1354 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1376 * @vm: requested vm
1377 * @ticket: optional reservation ticket used to reserve the VM
1387 struct amdgpu_vm *vm, in amdgpu_vm_handle_moved() argument
1395 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1396 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1397 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1399 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1401 	/* Per VM BOs never need to be cleared in the page tables */ in amdgpu_vm_handle_moved()
1405 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1408 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1409 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1412 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1434 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1436 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1442 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1445 * @vm: requested vm
1449 * Flush TLB if needed for a compute VM.
1455 struct amdgpu_vm *vm, in amdgpu_vm_flush_compute_tlb() argument
1459 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); in amdgpu_vm_flush_compute_tlb()
1463 WARN_ON_ONCE(!vm->is_compute_context); in amdgpu_vm_flush_compute_tlb()
1470 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) in amdgpu_vm_flush_compute_tlb()
1478 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, in amdgpu_vm_flush_compute_tlb()
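
The atomic64_xchg() line above implements a flush-once-per-sequence gate: only the first caller to observe a new TLB sequence performs the PASID flush, later callers see their own value come back and skip it. In isolation the pattern looks like this (the trailing flush arguments are assumptions, as the matched line is truncated):

	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);

	/* Swap in the new sequence; if it was already recorded, another
	 * caller has flushed for this sequence and we can skip it. */
	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
		return 0;

	r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
					   all_hub, inst);	/* args assumed */
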
1487 * amdgpu_vm_bo_add - add a bo to a specific vm
1490 * @vm: requested vm
1493 * Add @bo into the requested vm.
1494 * Add @bo to the list of bos associated with the vm
1502 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
1511 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1545 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
1550 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1555 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
1563 * amdgpu_vm_bo_map - map bo inside a vm
1572 	 * Add a mapping of the BO at the specified addr into the VM.
1586 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
1604 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1628 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1637 	 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1672 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1692 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1698 	 * Remove a mapping of the BO at the specified addr from the VM.
1710 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
1733 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1738 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1740 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
1750 * @vm: VM structure to use
1760 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
1785 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
1816 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
1825 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
1833 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
1837 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_clear_mappings()
1848 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
1852 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_clear_mappings()
1865 * @vm: the requested VM
1874 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
1877 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
1883 * @vm: the requested vm
1888 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
1895 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
1911 * amdgpu_vm_bo_del - remove a bo from a specific vm
1916 * Remove @bo_va->bo from the requested vm.
1925 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del() local
1928 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
1932 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_del()
1945 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
1947 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
1951 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1954 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
1958 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1959 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_del()
1972 * amdgpu_vm_evictable - check if we can evict a VM
1974 * @bo: A page table of the VM.
1976 * Check if it is possible to evict a VM.
1982 /* Page tables of a destroyed VM can go away immediately */ in amdgpu_vm_evictable()
1983 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
1986 /* Don't evict VM page tables while they are busy */ in amdgpu_vm_evictable()
1991 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
1994 /* Don't evict VM page tables while they are updated */ in amdgpu_vm_evictable()
1995 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
1996 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2000 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2001 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2024 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2026 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
2037 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
2045 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2047 * @vm_size: VM size
2050 * VM page table as power of two
2066 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2069 * @min_vm_size: the minimum vm size in GB if it's set auto
2083 /* adjust vm size first */ in amdgpu_vm_adjust_size()
2087 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", in amdgpu_vm_adjust_size()
2095 /* Optimal VM size depends on the amount of physical in amdgpu_vm_adjust_size()
2102 * - On GFX8 and older, VM space can be segmented for in amdgpu_vm_adjust_size()
2108 * VM size with the given page table size. in amdgpu_vm_adjust_size()
2137 	/* block size depends on vm size and hw setup */ in amdgpu_vm_adjust_size()
2153 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", in amdgpu_vm_adjust_size()
2160 * amdgpu_vm_wait_idle - wait for the VM to become idle
2162 * @vm: VM object to wait for
2163 * @timeout: timeout to wait for VM to become idle
2165 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2167 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2173 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
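
amdgpu_vm_wait_idle() chains two waits: first on the root PD's reservation object, then on the last unlocked-update fence. A sketch assembled from the matched lines (the dma_resv usage class and the early-out are assumptions, as those arguments fall outside the matches):

long example_wait_idle(struct amdgpu_vm *vm, long timeout)
{
	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
					DMA_RESV_USAGE_BOOKKEEP, /* assumed */
					true, timeout);
	if (timeout <= 0)
		return timeout;

	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}
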
2177 * amdgpu_vm_init - initialize a vm instance
2180 * @vm: requested vm
2183 * Init @vm fields.
2188 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_init() argument
2195 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2197 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2198 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2199 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2200 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2201 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2202 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2203 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2204 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2205 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2206 INIT_LIST_HEAD(&vm->pt_freed); in amdgpu_vm_init()
2207 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); in amdgpu_vm_init()
2208 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2210 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_init()
2214 vm->pte_support_ats = false; in amdgpu_vm_init()
2215 vm->is_compute_context = false; in amdgpu_vm_init()
2217 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2220 DRM_DEBUG_DRIVER("VM update mode is %s\n", in amdgpu_vm_init()
2221 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2222 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2224 "CPU update of VM recommended only for large BAR system\n"); in amdgpu_vm_init()
2226 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2227 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2229 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2231 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2232 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2233 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2234 vm->generation = 0; in amdgpu_vm_init()
2236 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2237 vm->evicting = false; in amdgpu_vm_init()
2239 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2252 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2257 r = amdgpu_vm_pt_clear(adev, vm, root, false); in amdgpu_vm_init()
2261 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2267 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_init()
2268 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2272 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2273 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2274 amdgpu_vm_fini_entities(vm); in amdgpu_vm_init()
2280 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2283 * @vm: requested vm
2288 * Changes the following VM parameters:
2298 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
2303 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2310 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
2312 if (!amdgpu_vm_pt_is_root_clean(adev, vm)) { in amdgpu_vm_make_compute()
2317 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
2318 r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo), in amdgpu_vm_make_compute()
2324 /* Update VM state */ in amdgpu_vm_make_compute()
2325 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2327 DRM_DEBUG_DRIVER("VM update mode is %s\n", in amdgpu_vm_make_compute()
2328 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2329 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2331 "CPU update of VM recommended only for large BAR system\n"); in amdgpu_vm_make_compute()
2333 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2335 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2340 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2341 r = amdgpu_vm_pt_map_tables(adev, vm); in amdgpu_vm_make_compute()
2346 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2349 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2350 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2351 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2353 /* Free the shadow bo for compute VM */ in amdgpu_vm_make_compute()
2354 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); in amdgpu_vm_make_compute()
2359 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2364 * amdgpu_vm_release_compute - release a compute vm
2366 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2369 	 * pasid from vm. Compute should stop using the vm after this call.
2371 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
2373 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_release_compute()
2374 vm->is_compute_context = false; in amdgpu_vm_release_compute()
2378 * amdgpu_vm_fini - tear down a vm instance
2381 * @vm: requested vm
2383 * Tear down @vm.
2384 * Unbind the VM and remove all bos from the vm bo list
2386 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2394 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2396 flush_work(&vm->pt_free_work); in amdgpu_vm_fini()
2398 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2400 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
2401 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2402 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2403 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2405 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2406 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2407 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2409 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2411 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2416 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2419 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_fini()
2422 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2424 amdgpu_vm_fini_entities(vm); in amdgpu_vm_fini()
2426 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2427 dev_err(adev->dev, "still active bo inside vm\n"); in amdgpu_vm_fini()
2430 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2438 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2441 if (vm->reserved_vmid[i]) { in amdgpu_vm_fini()
2443 vm->reserved_vmid[i] = false; in amdgpu_vm_fini()
2450 * amdgpu_vm_manager_init - init the VM manager
2454 * Initialize the VM manager structures
2477 * Compute VM tables will be updated by CPU in amdgpu_vm_manager_init()
2482 * avoid using CPU for VM table updates in amdgpu_vm_manager_init()
2500 * amdgpu_vm_manager_fini - cleanup VM manager
2504 * Cleanup the VM manager and free resources.
2515 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2537 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2539 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; in amdgpu_vm_ioctl()
2544 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2546 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; in amdgpu_vm_ioctl()
2560 * @pasid: PASID identifier for VM
2566 struct amdgpu_vm *vm; in amdgpu_vm_get_task_info() local
2571 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_task_info()
2572 if (vm) in amdgpu_vm_get_task_info()
2573 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
2581 * @vm: vm for which to set the info
2583 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
2585 if (vm->task_info.pid) in amdgpu_vm_set_task_info()
2588 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
2589 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
2594 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2595 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()
2599 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2601 * @pasid: PASID of the VM
2608 * Try to gracefully handle a VM fault. Return true if the fault was handled and
2619 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
2623 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2624 if (vm) { in amdgpu_vm_handle_fault()
2625 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2626 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2647 /* Double check that the VM still exists */ in amdgpu_vm_handle_fault()
2649 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2650 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2651 vm = NULL; in amdgpu_vm_handle_fault()
2653 if (!vm) in amdgpu_vm_handle_fault()
2682 r = amdgpu_vm_update_range(adev, vm, true, false, false, false, in amdgpu_vm_handle_fault()
2687 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
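
amdgpu_vm_handle_fault() looks the VM up by PASID twice: once to take a reference on the root BO, and again after the unlocked fault handling to confirm the same VM is still registered. The revalidation step, sketched with the xarray lock taken explicitly (the lock/irqsave calls are assumptions; only xa_load and the root comparison appear in the matches):

	unsigned long irqflags;

	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);	/* assumed */
	vm = xa_load(&adev->vm_manager.pasids, pasid);
	if (vm && vm->root.bo != root)
		vm = NULL;	/* PASID was reused for a different VM */
	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
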
2702 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2704 * @vm: Requested VM for printing BO info
2707 * Print BO information in debugfs file for the VM
2709 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
2726 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2728 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2737 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2746 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2755 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2764 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2773 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2778 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2799 * @pasid: PASID of the VM
2812 struct amdgpu_vm *vm; in amdgpu_vm_update_fault_cache() local
2817 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_update_fault_cache()
2823 if (vm && status) { in amdgpu_vm_update_fault_cache()
2824 vm->fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
2825 vm->fault_info.status = status; in amdgpu_vm_update_fault_cache()
2827 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; in amdgpu_vm_update_fault_cache()
2828 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
2831 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; in amdgpu_vm_update_fault_cache()
2832 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
2835 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; in amdgpu_vm_update_fault_cache()
2836 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()