Lines Matching full:vm

45 	/** @vm: VM bound to this slot. NULL if no VM is bound. */
46 struct panthor_vm *vm; member
78 * We use this list to pick a VM to evict when all slots are
89 /** @vm: VMs management fields */
91 /** @vm.lock: Lock protecting access to list. */
94 /** @vm.list: List containing all VMs. */
97 /** @vm.reset_in_progress: True if a reset is in progress. */
100 /** @vm.wq: Workqueue used for the VM_BIND queues. */
102 } vm; member
106 * struct panthor_vm_pool - VM pool object
109 /** @xa: Array used for VM handle tracking. */
134 * struct panthor_vm_op_ctx - VM operation context
136 * With VM operations potentially taking place in a dma-signaling path, we
156 * After a VM operation, there might be free pages left in this array.
174 /** @va: Virtual range targeted by the VM operation. */
184 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
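The op-context comments at lines 136 and 156 explain why this structure exists: VM operations can run in a dma-fence signaling path, where allocating memory is forbidden, so page-table pages are reserved up front and only consumed later (see alloc_pt() at line 438). Below is a minimal sketch of that reserve-then-consume pattern; the struct and helper names are made up for illustration, only the rsvd_page_tables idea comes from the listing.

/*
 * Sketch only: reserve worst-case page-table pages outside the
 * signaling path, then pop them where allocation is not allowed.
 */
struct pt_reservation {
	void **pages;
	u32 count;	/* number of reserved pages */
	u32 ptr;	/* next page to hand out */
};

static int reserve_page_tables(struct pt_reservation *rsvd, u32 count)
{
	rsvd->pages = kcalloc(count, sizeof(*rsvd->pages), GFP_KERNEL);
	if (!rsvd->pages)
		return -ENOMEM;

	for (rsvd->count = 0; rsvd->count < count; rsvd->count++) {
		rsvd->pages[rsvd->count] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!rsvd->pages[rsvd->count])
			return -ENOMEM;
	}
	rsvd->ptr = 0;
	return 0;
}

/* Called from the signaling path: never allocates, only consumes. */
static void *pop_page_table(struct pt_reservation *rsvd)
{
	return rsvd->ptr < rsvd->count ? rsvd->pages[rsvd->ptr++] : NULL;
}

Any pages still left in the array once the operation completes are returned or freed afterwards, which is what the comment at line 156 alludes to.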
221 * struct panthor_vm - VM object
223 * A VM is an object representing a GPU (or MCU) virtual address space.
226 * the VM.
228 * Except for the MCU VM, which is managed by the kernel, all other VMs are
256 * There's currently one bind queue per VM. It doesn't make sense to
257 * allow more given the VM operations are serialized anyway.
274 * @op_lock: Lock used to serialize operations on a VM.
283 * @op_ctx: The context attached to the currently executing VM operation.
295 * For the MCU VM, this is managing the VA range that's used to map
318 * @as.id: ID of the address space this VM is bound to.
320 * A value of -1 means the VM is inactive/not bound.
324 /** @as.active_cnt: Number of active users of this VM. */
328 * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
340 * @heaps.pool: The heap pool attached to this VM.
342 * Will stay NULL until someone creates a heap context on this VM.
350 /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
353 /** @for_mcu: True if this is the MCU VM. */
357 * @destroyed: True if the VM was destroyed.
359 * No further bind requests should be queued to a destroyed VM.
364 * @unusable: True if the VM has turned unusable because something
372 * Instead, we should just flag the VM as unusable, and fail any
373 * further request targeting this VM.
375 * We also provide a way to query a VM state, so userspace can destroy
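The comments at lines 357-375 describe two terminal states: destroyed (no further bind requests accepted) and unusable (an async operation failed half-way, so the mappings can no longer be trusted). A hedged sketch of how a submission path is expected to honour the unusable flag, using the panthor_vm_is_unusable() helper listed at line 678; the enclosing function is illustrative.

/* Sketch: refuse new work on a VM flagged unusable by a failed async
 * VM_BIND; userspace is expected to destroy and recreate the VM. */
static int queue_work_on_vm(struct panthor_vm *vm)
{
	if (panthor_vm_is_unusable(vm))
		return -EINVAL;

	/* ... normal submission path ... */
	return 0;
}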
393 * struct panthor_vm_bind_job - VM bind job
402 /** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
405 /** @vm: VM targeted by the VM operation. */
406 struct panthor_vm *vm; member
431 * done to allow asynchronous VM operations.
438 struct panthor_vm *vm = cookie; in alloc_pt() local
442 if (unlikely(!vm->root_page_table)) { in alloc_pt()
445 drm_WARN_ON(&vm->ptdev->base, vm->op_ctx); in alloc_pt()
446 p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev), in alloc_pt()
449 vm->root_page_table = page; in alloc_pt()
456 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in alloc_pt()
459 /* We must have some op_ctx attached to the VM and it must have at least one in alloc_pt()
462 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) || in alloc_pt()
463 drm_WARN_ON(&vm->ptdev->base, in alloc_pt()
464 vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count)) in alloc_pt()
467 page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++]; in alloc_pt()
490 struct panthor_vm *vm = cookie; in free_pt() local
492 if (unlikely(vm->root_page_table == data)) { in free_pt()
494 vm->root_page_table = NULL; in free_pt()
498 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in free_pt()
596 static int mmu_hw_do_operation(struct panthor_vm *vm, in mmu_hw_do_operation() argument
599 struct panthor_device *ptdev = vm->ptdev; in mmu_hw_do_operation()
603 ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op); in mmu_hw_do_operation()
662 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
663 * @vm: VM to check.
665 * Return: true if the VM has unhandled faults, false otherwise.
667 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm) in panthor_vm_has_unhandled_faults() argument
669 return vm->unhandled_fault; in panthor_vm_has_unhandled_faults()
673 * panthor_vm_is_unusable() - Check if the VM is still usable
674 * @vm: VM to check.
676 * Return: true if the VM is unusable, false otherwise.
678 bool panthor_vm_is_unusable(struct panthor_vm *vm) in panthor_vm_is_unusable() argument
680 return vm->unusable; in panthor_vm_is_unusable()
683 static void panthor_vm_release_as_locked(struct panthor_vm *vm) in panthor_vm_release_as_locked() argument
685 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_release_as_locked()
689 if (drm_WARN_ON(&ptdev->base, vm->as.id < 0)) in panthor_vm_release_as_locked()
692 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_release_as_locked()
693 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_release_as_locked()
694 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_release_as_locked()
695 list_del_init(&vm->as.lru_node); in panthor_vm_release_as_locked()
696 vm->as.id = -1; in panthor_vm_release_as_locked()
700 * panthor_vm_active() - Flag a VM as active
701 * @vm: VM to flag as active.
703 * Assigns an address space to a VM so it can be used by the GPU/MCU.
707 int panthor_vm_active(struct panthor_vm *vm) in panthor_vm_active() argument
709 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_active()
711 struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg; in panthor_vm_active()
718 if (refcount_inc_not_zero(&vm->as.active_cnt)) in panthor_vm_active()
723 if (refcount_inc_not_zero(&vm->as.active_cnt)) in panthor_vm_active()
726 as = vm->as.id; in panthor_vm_active()
738 if (vm->for_mcu) { in panthor_vm_active()
762 vm->as.id = as; in panthor_vm_active()
764 ptdev->mmu->as.slots[as].vm = vm; in panthor_vm_active()
775 /* If the VM is re-activated, we clear the fault. */ in panthor_vm_active()
776 vm->unhandled_fault = false; in panthor_vm_active()
787 ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr); in panthor_vm_active()
791 refcount_set(&vm->as.active_cnt, 1); in panthor_vm_active()
792 list_del_init(&vm->as.lru_node); in panthor_vm_active()
804 * panthor_vm_idle() - Flag a VM idle
805 * @vm: VM to flag as idle.
807 * When we know the GPU is done with the VM (no more jobs to process),
808 * we can relinquish the AS slot attached to this VM, if any.
810 * We don't release the slot immediately, but instead place the VM in
811 * the LRU list, so it can be evicted if another VM needs an AS slot.
816 void panthor_vm_idle(struct panthor_vm *vm) in panthor_vm_idle() argument
818 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_idle()
820 if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock)) in panthor_vm_idle()
823 if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node))) in panthor_vm_idle()
824 list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list); in panthor_vm_idle()
826 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_idle()
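panthor_vm_active() (line 707) and panthor_vm_idle() (line 816) bracket GPU use of a VM: activation pins an address-space slot, reusing the current one or evicting the least recently used idle VM, while idling drops the active count and parks the VM on the LRU list without giving the slot up. A hedged sketch of the expected pairing; the job-execution wrapper itself is illustrative.

/* Sketch: hold an AS slot only while the GPU actually needs the VM. */
static int run_job_on_vm(struct panthor_vm *vm)
{
	int ret = panthor_vm_active(vm);	/* may evict an idle VM's slot */

	if (ret)
		return ret;

	/* ... submit work that addresses memory through panthor_vm_as(vm) ... */

	panthor_vm_idle(vm);	/* slot is kept but becomes evictable (LRU) */
	return 0;
}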
830 u32 panthor_vm_page_size(struct panthor_vm *vm) in panthor_vm_page_size() argument
832 const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); in panthor_vm_page_size()
838 static void panthor_vm_stop(struct panthor_vm *vm) in panthor_vm_stop() argument
840 drm_sched_stop(&vm->sched, NULL); in panthor_vm_stop()
843 static void panthor_vm_start(struct panthor_vm *vm) in panthor_vm_start() argument
845 drm_sched_start(&vm->sched, 0); in panthor_vm_start()
849 * panthor_vm_as() - Get the AS slot attached to a VM
850 * @vm: VM to get the AS slot of.
852 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
854 int panthor_vm_as(struct panthor_vm *vm) in panthor_vm_as() argument
856 return vm->as.id; in panthor_vm_as()
880 static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size) in panthor_vm_flush_range() argument
882 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_flush_range()
885 if (vm->as.id < 0) in panthor_vm_flush_range()
892 ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT); in panthor_vm_flush_range()
899 * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
900 * @vm: VM whose cache to flush
904 int panthor_vm_flush_all(struct panthor_vm *vm) in panthor_vm_flush_all() argument
906 return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range); in panthor_vm_flush_all()
909 static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) in panthor_vm_unmap_pages() argument
911 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_unmap_pages()
912 struct io_pgtable_ops *ops = vm->pgtbl_ops; in panthor_vm_unmap_pages()
915 drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); in panthor_vm_unmap_pages()
928 panthor_vm_flush_range(vm, iova, offset + unmapped_sz); in panthor_vm_unmap_pages()
934 return panthor_vm_flush_range(vm, iova, size); in panthor_vm_unmap_pages()
938 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, in panthor_vm_map_pages() argument
941 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_map_pages()
944 struct io_pgtable_ops *ops = vm->pgtbl_ops; in panthor_vm_map_pages()
966 vm->as.id, iova, &paddr, len); in panthor_vm_map_pages()
986 panthor_vm_unmap_pages(vm, start_iova, in panthor_vm_map_pages()
998 return panthor_vm_flush_range(vm, start_iova, iova - start_iova); in panthor_vm_map_pages()
1021 * @vm: VM to allocate a region on.
1028 * need to be mapped to the userspace VM, in the region reserved for kernel
1036 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, in panthor_vm_alloc_va() argument
1039 ssize_t vm_pgsz = panthor_vm_page_size(vm); in panthor_vm_alloc_va()
1048 mutex_lock(&vm->mm_lock); in panthor_vm_alloc_va()
1052 ret = drm_mm_reserve_node(&vm->mm, va_node); in panthor_vm_alloc_va()
1054 ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size, in panthor_vm_alloc_va()
1056 0, vm->kernel_auto_va.start, in panthor_vm_alloc_va()
1057 vm->kernel_auto_va.end, in panthor_vm_alloc_va()
1060 mutex_unlock(&vm->mm_lock); in panthor_vm_alloc_va()
1067 * @vm: VM to free the region on.
1070 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node) in panthor_vm_free_va() argument
1072 mutex_lock(&vm->mm_lock); in panthor_vm_free_va()
1074 mutex_unlock(&vm->mm_lock); in panthor_vm_free_va()
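panthor_vm_alloc_va() (line 1036) either reserves the exact VA passed in or, for automatic allocations, picks a hole in the kernel auto-VA window; panthor_vm_free_va() (line 1070) returns the node. A hedged usage sketch for a kernel-internal mapping: PANTHOR_VM_KERNEL_AUTO_VA is assumed to be the driver's "pick an address for me" marker, and error handling is trimmed.

/* Sketch: carve a 64 KiB kernel VA range out of the VM, use it, free it. */
struct drm_mm_node va_node = {};
int ret;

ret = panthor_vm_alloc_va(vm, PANTHOR_VM_KERNEL_AUTO_VA, SZ_64K, &va_node);
if (ret)
	return ret;

/* ... map a kernel-owned BO at va_node.start ... */

panthor_vm_free_va(vm, &va_node);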
1080 struct drm_gpuvm *vm = vm_bo->vm; in panthor_vm_bo_put() local
1085 * Same goes for the VM, since we take the VM resv lock. in panthor_vm_bo_put()
1088 drm_gpuvm_get(vm); in panthor_vm_bo_put()
1097 dma_resv_lock(drm_gpuvm_resv(vm), NULL); in panthor_vm_bo_put()
1101 dma_resv_unlock(drm_gpuvm_resv(vm)); in panthor_vm_bo_put()
1109 drm_gpuvm_put(vm); in panthor_vm_bo_put()
1114 struct panthor_vm *vm) in panthor_vm_cleanup_op_ctx() argument
1200 struct panthor_vm *vm, in panthor_vm_prepare_map_op_ctx() argument
1222 /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */ in panthor_vm_prepare_map_op_ctx()
1224 bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm)) in panthor_vm_prepare_map_op_ctx()
1257 preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base); in panthor_vm_prepare_map_op_ctx()
1267 * pre-allocated BO if the <BO,VM> association exists. Given we in panthor_vm_prepare_map_op_ctx()
1269 * be called immediately, and we have to hold the VM resv lock when in panthor_vm_prepare_map_op_ctx()
1272 dma_resv_lock(panthor_vm_resv(vm), NULL); in panthor_vm_prepare_map_op_ctx()
1276 dma_resv_unlock(panthor_vm_resv(vm)); in panthor_vm_prepare_map_op_ctx()
1278 /* If a vm_bo for this <VM,BO> combination exists, it already in panthor_vm_prepare_map_op_ctx()
1316 dma_resv_lock(panthor_vm_resv(vm), NULL); in panthor_vm_prepare_map_op_ctx()
1318 dma_resv_unlock(panthor_vm_resv(vm)); in panthor_vm_prepare_map_op_ctx()
1323 panthor_vm_cleanup_op_ctx(op_ctx, vm); in panthor_vm_prepare_map_op_ctx()
1328 struct panthor_vm *vm, in panthor_vm_prepare_unmap_op_ctx() argument
1375 panthor_vm_cleanup_op_ctx(op_ctx, vm); in panthor_vm_prepare_unmap_op_ctx()
1380 struct panthor_vm *vm) in panthor_vm_prepare_sync_only_op_ctx() argument
1389 * @vm: VM to look into.
1402 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset) in panthor_vm_get_bo_for_va() argument
1408 /* Take the VM lock to prevent concurrent map/unmap operations. */ in panthor_vm_get_bo_for_va()
1409 mutex_lock(&vm->op_lock); in panthor_vm_get_bo_for_va()
1410 gpuva = drm_gpuva_find_first(&vm->base, va, 1); in panthor_vm_get_bo_for_va()
1417 mutex_unlock(&vm->op_lock); in panthor_vm_get_bo_for_va()
1438 /* If the task VM size is smaller than the GPU VA range, pick this in panthor_vm_create_get_user_va_range()
1444 /* If the GPU VA range is smaller than the task VM size, we in panthor_vm_create_get_user_va_range()
1495 * panthor_vm_pool_create_vm() - Create a VM
1497 * @pool: The VM pool to create this VM on.
1498 * @args: VM creation args.
1500 * Return: a positive VM ID on success, a negative error code otherwise.
1507 struct panthor_vm *vm; in panthor_vm_pool_create_vm() local
1515 vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range, in panthor_vm_pool_create_vm()
1517 if (IS_ERR(vm)) in panthor_vm_pool_create_vm()
1518 return PTR_ERR(vm); in panthor_vm_pool_create_vm()
1520 ret = xa_alloc(&pool->xa, &id, vm, in panthor_vm_pool_create_vm()
1524 panthor_vm_put(vm); in panthor_vm_pool_create_vm()
1532 static void panthor_vm_destroy(struct panthor_vm *vm) in panthor_vm_destroy() argument
1534 if (!vm) in panthor_vm_destroy()
1537 vm->destroyed = true; in panthor_vm_destroy()
1539 mutex_lock(&vm->heaps.lock); in panthor_vm_destroy()
1540 panthor_heap_pool_destroy(vm->heaps.pool); in panthor_vm_destroy()
1541 vm->heaps.pool = NULL; in panthor_vm_destroy()
1542 mutex_unlock(&vm->heaps.lock); in panthor_vm_destroy()
1544 drm_WARN_ON(&vm->ptdev->base, in panthor_vm_destroy()
1545 panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range)); in panthor_vm_destroy()
1546 panthor_vm_put(vm); in panthor_vm_destroy()
1550 * panthor_vm_pool_destroy_vm() - Destroy a VM.
1551 * @pool: VM pool.
1552 * @handle: VM handle.
1554 * This function doesn't free the VM object or its resources, it just kills
1560 * The VM resources are freed when the last reference on the VM object is
1567 struct panthor_vm *vm; in panthor_vm_pool_destroy_vm() local
1569 vm = xa_erase(&pool->xa, handle); in panthor_vm_pool_destroy_vm()
1571 panthor_vm_destroy(vm); in panthor_vm_pool_destroy_vm()
1573 return vm ? 0 : -EINVAL; in panthor_vm_pool_destroy_vm()
1577 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1578 * @pool: VM pool to check.
1579 * @handle: Handle of the VM to retrieve.
1581 * Return: A valid pointer if the VM exists, NULL otherwise.
1586 struct panthor_vm *vm; in panthor_vm_pool_get_vm() local
1589 vm = panthor_vm_get(xa_load(&pool->xa, handle)); in panthor_vm_pool_get_vm()
1592 return vm; in panthor_vm_pool_get_vm()
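panthor_vm_pool_get_vm() (line 1586) turns a userspace VM handle into a referenced VM object (or NULL), and the reference must be dropped with panthor_vm_put() (line 1891). A hedged sketch of the usual lookup pattern in an ioctl handler; the handler itself is illustrative, pfile->vms is the per-file pool shown at line 1612.

/* Sketch: resolve a userspace VM handle, operate, drop the reference. */
static int do_vm_ioctl(struct panthor_file *pfile, u32 handle)
{
	struct panthor_vm *vm = panthor_vm_pool_get_vm(pfile->vms, handle);
	int ret = 0;

	if (!vm)
		return -EINVAL;

	/* ... use vm ... */

	panthor_vm_put(vm);
	return ret;
}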
1596 * panthor_vm_pool_destroy() - Destroy a VM pool.
1606 struct panthor_vm *vm; in panthor_vm_pool_destroy() local
1612 xa_for_each(&pfile->vms->xa, i, vm) in panthor_vm_pool_destroy()
1613 panthor_vm_destroy(vm); in panthor_vm_pool_destroy()
1620 * panthor_vm_pool_create() - Create a VM pool
1718 if (ptdev->mmu->as.slots[as].vm) in panthor_mmu_irq_handler()
1719 ptdev->mmu->as.slots[as].vm->unhandled_fault = true; in panthor_mmu_irq_handler()
1748 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_suspend() local
1750 if (vm) { in panthor_mmu_suspend()
1752 panthor_vm_release_as_locked(vm); in panthor_mmu_suspend()
1784 * don't get asked to do a VM operation while the GPU is down.
1791 struct panthor_vm *vm; in panthor_mmu_pre_reset() local
1795 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1796 ptdev->mmu->vm.reset_in_progress = true; in panthor_mmu_pre_reset()
1797 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) in panthor_mmu_pre_reset()
1798 panthor_vm_stop(vm); in panthor_mmu_pre_reset()
1799 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1811 struct panthor_vm *vm; in panthor_mmu_post_reset() local
1822 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_post_reset() local
1824 if (vm) in panthor_mmu_post_reset()
1825 panthor_vm_release_as_locked(vm); in panthor_mmu_post_reset()
1833 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1834 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in panthor_mmu_post_reset()
1835 panthor_vm_start(vm); in panthor_mmu_post_reset()
1837 ptdev->mmu->vm.reset_in_progress = false; in panthor_mmu_post_reset()
1838 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1843 struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base); in panthor_vm_free() local
1844 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_free()
1846 mutex_lock(&vm->heaps.lock); in panthor_vm_free()
1847 if (drm_WARN_ON(&ptdev->base, vm->heaps.pool)) in panthor_vm_free()
1848 panthor_heap_pool_destroy(vm->heaps.pool); in panthor_vm_free()
1849 mutex_unlock(&vm->heaps.lock); in panthor_vm_free()
1850 mutex_destroy(&vm->heaps.lock); in panthor_vm_free()
1852 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1853 list_del(&vm->node); in panthor_vm_free()
1859 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_free()
1860 panthor_vm_start(vm); in panthor_vm_free()
1861 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1863 drm_sched_entity_destroy(&vm->entity); in panthor_vm_free()
1864 drm_sched_fini(&vm->sched); in panthor_vm_free()
1867 if (vm->as.id >= 0) { in panthor_vm_free()
1871 panthor_mmu_as_disable(ptdev, vm->as.id); in panthor_vm_free()
1875 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_free()
1876 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_free()
1877 list_del(&vm->as.lru_node); in panthor_vm_free()
1881 free_io_pgtable_ops(vm->pgtbl_ops); in panthor_vm_free()
1883 drm_mm_takedown(&vm->mm); in panthor_vm_free()
1884 kfree(vm); in panthor_vm_free()
1888 * panthor_vm_put() - Release a reference on a VM
1889 * @vm: VM to release the reference on. Can be NULL.
1891 void panthor_vm_put(struct panthor_vm *vm) in panthor_vm_put() argument
1893 drm_gpuvm_put(vm ? &vm->base : NULL); in panthor_vm_put()
1897 * panthor_vm_get() - Get a VM reference
1898 * @vm: VM to get the reference on. Can be NULL.
1900 * Return: @vm value.
1902 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm) in panthor_vm_get() argument
1904 if (vm) in panthor_vm_get()
1905 drm_gpuvm_get(&vm->base); in panthor_vm_get()
1907 return vm; in panthor_vm_get()
1911 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1912 * @vm: VM to query the heap pool on.
1915 * Heap pools are per-VM. This function allows one to retrieve the heap pool
1916 * attached to a VM.
1924 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create) in panthor_vm_get_heap_pool() argument
1928 mutex_lock(&vm->heaps.lock); in panthor_vm_get_heap_pool()
1929 if (!vm->heaps.pool && create) { in panthor_vm_get_heap_pool()
1930 if (vm->destroyed) in panthor_vm_get_heap_pool()
1933 pool = panthor_heap_pool_create(vm->ptdev, vm); in panthor_vm_get_heap_pool()
1936 vm->heaps.pool = panthor_heap_pool_get(pool); in panthor_vm_get_heap_pool()
1938 pool = panthor_heap_pool_get(vm->heaps.pool); in panthor_vm_get_heap_pool()
1942 mutex_unlock(&vm->heaps.lock); in panthor_vm_get_heap_pool()
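panthor_vm_get_heap_pool() (line 1924) is create-on-demand: the pool only comes into existence on the first call with create=true, is refused on a destroyed VM, and every successful call returns an extra reference. A hedged usage sketch; panthor_heap_pool_put() is assumed to be the matching release helper.

/* Sketch: lazily create/look up the per-VM heap pool, then release it. */
struct panthor_heap_pool *pool = panthor_vm_get_heap_pool(vm, true);

if (IS_ERR(pool))
	return PTR_ERR(pool);

/* ... create or grow tiler heap contexts in the pool ... */

panthor_heap_pool_put(pool);	/* assumed counterpart of the _get() above */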
1949 * heaps over all the heap pools in a VM
1953 * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
1958 struct panthor_vm *vm; in panthor_vm_heaps_sizes() local
1965 xa_for_each(&pfile->vms->xa, i, vm) { in panthor_vm_heaps_sizes()
1966 size_t size = panthor_heap_pool_size(vm->heaps.pool); in panthor_vm_heaps_sizes()
1968 if (vm->as.id >= 0) in panthor_vm_heaps_sizes()
2016 static void panthor_vma_link(struct panthor_vm *vm, in panthor_vma_link() argument
2024 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); in panthor_vma_link()
2028 static void panthor_vma_unlink(struct panthor_vm *vm, in panthor_vma_unlink() argument
2043 list_add_tail(&vma->node, &vm->op_ctx->returned_vmas); in panthor_vma_unlink()
2059 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_map() local
2060 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; in panthor_gpuva_sm_step_map()
2069 ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags), in panthor_gpuva_sm_step_map()
2078 drm_gpuva_map(&vm->base, &vma->base, &op->map); in panthor_gpuva_sm_step_map()
2079 panthor_vma_link(vm, vma, op_ctx->map.vm_bo); in panthor_gpuva_sm_step_map()
2088 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_remap() local
2089 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; in panthor_gpuva_sm_step_remap()
2095 ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range); in panthor_gpuva_sm_step_remap()
2119 panthor_vma_link(vm, prev_vma, in panthor_gpuva_sm_step_remap()
2124 panthor_vma_link(vm, next_vma, in panthor_gpuva_sm_step_remap()
2128 panthor_vma_unlink(vm, unmap_vma); in panthor_gpuva_sm_step_remap()
2136 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_unmap() local
2139 ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr, in panthor_gpuva_sm_step_unmap()
2141 if (drm_WARN_ON(&vm->ptdev->base, ret)) in panthor_gpuva_sm_step_unmap()
2145 panthor_vma_unlink(vm, unmap_vma); in panthor_gpuva_sm_step_unmap()
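The three sm_step callbacks above (lines 2059, 2088 and 2136) are the driver's hooks into the drm_gpuvm split/merge state machine: drm_gpuvm_sm_map()/drm_gpuvm_sm_unmap(), invoked from panthor_vm_exec_op() (line 2176), walk the affected VA range and call one hook per resulting map, remap or unmap step. A hedged sketch of how such hooks are typically registered; the table below mirrors what the driver is expected to pass to drm_gpuvm_init() (line 2398), but take its exact contents as illustrative.

/* Sketch: route drm_gpuvm state-machine steps to the panthor callbacks. */
static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
	.vm_free	= panthor_vm_free,
	.sm_step_map	= panthor_gpuva_sm_step_map,
	.sm_step_remap	= panthor_gpuva_sm_step_remap,
	.sm_step_unmap	= panthor_gpuva_sm_step_unmap,
};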
2157 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2158 * @vm: VM to get the dma_resv of.
2162 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm) in panthor_vm_resv() argument
2164 return drm_gpuvm_resv(&vm->base); in panthor_vm_resv()
2167 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm) in panthor_vm_root_gem() argument
2169 if (!vm) in panthor_vm_root_gem()
2172 return vm->base.r_obj; in panthor_vm_root_gem()
2176 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, in panthor_vm_exec_op() argument
2185 mutex_lock(&vm->op_lock); in panthor_vm_exec_op()
2186 vm->op_ctx = op; in panthor_vm_exec_op()
2189 if (vm->unusable) { in panthor_vm_exec_op()
2194 ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range, in panthor_vm_exec_op()
2199 ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); in panthor_vm_exec_op()
2208 vm->unusable = true; in panthor_vm_exec_op()
2210 vm->op_ctx = NULL; in panthor_vm_exec_op()
2211 mutex_unlock(&vm->op_lock); in panthor_vm_exec_op()
2224 * drm_sched finished fence, but we also flag the VM as unusable, because in panthor_vm_bind_run_job()
2225 * a failure in the async VM_BIND results in an inconsistent state. VM needs in panthor_vm_bind_run_job()
2229 ret = panthor_vm_exec_op(job->vm, &job->ctx, true); in panthor_vm_bind_run_job()
2242 panthor_vm_cleanup_op_ctx(&job->ctx, job->vm); in panthor_vm_bind_job_release()
2243 panthor_vm_put(job->vm); in panthor_vm_bind_job_release()
2288 * panthor_vm_create() - Create a VM
2290 * @for_mcu: True if this is the FW MCU VM.
2310 .submit_wq = ptdev->mmu->vm.wq, in panthor_vm_create()
2315 .name = "panthor-vm-bind", in panthor_vm_create()
2320 struct panthor_vm *vm; in panthor_vm_create() local
2323 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in panthor_vm_create()
2324 if (!vm) in panthor_vm_create()
2327 /* We allocate a dummy GEM for the VM. */ in panthor_vm_create()
2334 mutex_init(&vm->heaps.lock); in panthor_vm_create()
2335 vm->for_mcu = for_mcu; in panthor_vm_create()
2336 vm->ptdev = ptdev; in panthor_vm_create()
2337 mutex_init(&vm->op_lock); in panthor_vm_create()
2348 mutex_init(&vm->mm_lock); in panthor_vm_create()
2349 drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size); in panthor_vm_create()
2350 vm->kernel_auto_va.start = auto_kernel_va_start; in panthor_vm_create()
2351 vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1; in panthor_vm_create()
2353 INIT_LIST_HEAD(&vm->node); in panthor_vm_create()
2354 INIT_LIST_HEAD(&vm->as.lru_node); in panthor_vm_create()
2355 vm->as.id = -1; in panthor_vm_create()
2356 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_create()
2369 vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm); in panthor_vm_create()
2370 if (!vm->pgtbl_ops) { in panthor_vm_create()
2375 ret = drm_sched_init(&vm->sched, &sched_args); in panthor_vm_create()
2379 sched = &vm->sched; in panthor_vm_create()
2380 ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL); in panthor_vm_create()
2384 mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair; in panthor_vm_create()
2385 vm->memattr = mair_to_memattr(mair, ptdev->coherent); in panthor_vm_create()
2387 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2388 list_add_tail(&vm->node, &ptdev->mmu->vm.list); in panthor_vm_create()
2391 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_create()
2392 panthor_vm_stop(vm); in panthor_vm_create()
2393 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2398 drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM", in panthor_vm_create()
2402 return vm; in panthor_vm_create()
2405 drm_sched_fini(&vm->sched); in panthor_vm_create()
2408 free_io_pgtable_ops(vm->pgtbl_ops); in panthor_vm_create()
2411 drm_mm_takedown(&vm->mm); in panthor_vm_create()
2415 kfree(vm); in panthor_vm_create()
2421 struct panthor_vm *vm, in panthor_vm_bind_prepare_op_ctx() argument
2425 ssize_t vm_pgsz = panthor_vm_page_size(vm); in panthor_vm_bind_prepare_op_ctx()
2436 ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm, in panthor_vm_bind_prepare_op_ctx()
2452 return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size); in panthor_vm_bind_prepare_op_ctx()
2467 panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm); in panthor_vm_bind_prepare_op_ctx()
2486 * @vm: VM targeted by the VM_BIND job.
2487 * @op: VM operation data.
2493 struct panthor_vm *vm, in panthor_vm_bind_job_create() argument
2499 if (!vm) in panthor_vm_bind_job_create()
2502 if (vm->destroyed || vm->unusable) in panthor_vm_bind_job_create()
2509 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx); in panthor_vm_bind_job_create()
2517 job->vm = panthor_vm_get(vm); in panthor_vm_bind_job_create()
2519 ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm); in panthor_vm_bind_job_create()
2535 * Locks and prepare the VM resv.
2547 /* Acquire the VM lock and reserve a slot for this VM bind job. */ in panthor_vm_bind_job_prepare_resvs()
2548 ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1); in panthor_vm_bind_job_prepare_resvs()
2573 drm_gpuvm_resv_add_fence(&job->vm->base, exec, in panthor_vm_bind_job_update_resvs()
2579 void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec, in panthor_vm_update_resvs() argument
2584 drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage); in panthor_vm_update_resvs()
2590 * @vm: VM targeted by the VM operation.
2591 * @op: Data describing the VM operation.
2596 struct panthor_vm *vm, in panthor_vm_bind_exec_sync_op() argument
2609 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx); in panthor_vm_bind_exec_sync_op()
2613 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_bind_exec_sync_op()
2614 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_bind_exec_sync_op()
2620 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
2621 * @vm: VM to map the GEM to.
2634 int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo, in panthor_vm_map_bo_range() argument
2640 ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags); in panthor_vm_map_bo_range()
2644 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_map_bo_range()
2645 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_map_bo_range()
2652 * @vm: VM to unmap the region from.
2661 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size) in panthor_vm_unmap_range() argument
2666 ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size); in panthor_vm_unmap_range()
2670 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_unmap_range()
2671 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_unmap_range()
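panthor_vm_map_bo_range() (line 2634) and panthor_vm_unmap_range() (line 2661) are the synchronous kernel-internal entry points: each builds an op context, runs it through panthor_vm_exec_op() and cleans up. A hedged sketch of mapping a kernel BO and tearing the mapping down again; the va value and the flags argument are illustrative.

/* Sketch: synchronously map a BO range into a VM, then unmap it. */
int ret;

ret = panthor_vm_map_bo_range(vm, bo, 0, bo->base.base.size, va, 0);
if (ret)
	return ret;

/* ... the GPU/MCU can now access [va, va + size) ... */

ret = panthor_vm_unmap_range(vm, va, bo->base.base.size);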
2677 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
2679 * @vm: VM targeted by the GPU job.
2682 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
2684 * need to reserve a slot on all BOs mapped to a VM and update this slot with
2689 int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm, in panthor_vm_prepare_mapped_bos_resvs() argument
2694 /* Acquire the VM lock and reserve a slot for this GPU job. */ in panthor_vm_prepare_mapped_bos_resvs()
2695 ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count); in panthor_vm_prepare_mapped_bos_resvs()
2699 return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count); in panthor_vm_prepare_mapped_bos_resvs()
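panthor_vm_prepare_mapped_bos_resvs() (line 2689) reserves fence slots on the VM resv and on every buffer mapped into it, and is meant to be called from inside a drm_exec locking loop so contention can be retried. A hedged sketch of that loop; the flags and slot count are illustrative.

/* Sketch: lock the VM and all its mapped BOs, reserving one fence slot each. */
struct drm_exec exec;
int ret;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
	ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
	drm_exec_retry_on_contention(&exec);
	if (ret)
		break;
}

/* ... attach the job fence with panthor_vm_update_resvs() (line 2579) ... */

drm_exec_fini(&exec);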
2716 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_unplug() local
2718 if (vm) { in panthor_mmu_unplug()
2720 panthor_vm_release_as_locked(vm); in panthor_mmu_unplug()
2753 INIT_LIST_HEAD(&mmu->vm.list); in panthor_mmu_init()
2754 ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock); in panthor_mmu_init()
2769 mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0); in panthor_mmu_init()
2770 if (!mmu->vm.wq) in panthor_mmu_init()
2782 return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); in panthor_mmu_init()
2786 static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m) in show_vm_gpuvas() argument
2790 mutex_lock(&vm->op_lock); in show_vm_gpuvas()
2791 ret = drm_debugfs_gpuva_info(m, &vm->base); in show_vm_gpuvas()
2792 mutex_unlock(&vm->op_lock); in show_vm_gpuvas()
2803 struct panthor_vm *vm; in show_each_vm() local
2806 mutex_lock(&ptdev->mmu->vm.lock); in show_each_vm()
2807 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in show_each_vm()
2808 ret = show(vm, m); in show_each_vm()
2814 mutex_unlock(&ptdev->mmu->vm.lock); in show_each_vm()