Lines Matching full:mem

79 		struct kgd_mem *mem)  in kfd_mem_is_attached()  argument
83 list_for_each_entry(entry, &mem->attachments, list) in kfd_mem_is_attached()
116 uint64_t mem; in amdgpu_amdkfd_gpuvm_init_mem_limits() local
122 mem = si.totalram - si.totalhigh; in amdgpu_amdkfd_gpuvm_init_mem_limits()
123 mem *= si.mem_unit; in amdgpu_amdkfd_gpuvm_init_mem_limits()
126 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6); in amdgpu_amdkfd_gpuvm_init_mem_limits()
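
Aside on the arithmetic above: max_system_mem_limit = mem - (mem >> 6) keeps 63/64 (about 98.4%) of low system RAM, reserving the remaining 1/64 for the rest of the system. The kernel's si_meminfo() fills the same struct sysinfo that userspace gets from sysinfo(2), so the computation can be mirrored in a small runnable sketch:

/* Userspace sketch of the max_system_mem_limit arithmetic matched above:
 * sysinfo(2) exposes the same totalram/totalhigh/mem_unit fields the
 * kernel reads via si_meminfo(). */
#include <stdint.h>
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;
	uint64_t mem, limit;

	if (sysinfo(&si))
		return 1;

	mem = (uint64_t)(si.totalram - si.totalhigh);	/* low memory */
	mem *= si.mem_unit;				/* in bytes */

	/* Keep 63/64 (~98.4%) of low RAM as the system memory limit. */
	limit = mem - (mem >> 6);
	printf("low RAM %llu bytes, limit %llu bytes\n",
	       (unsigned long long)mem, (unsigned long long)limit);
	return 0;
}
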
257 "adev reference can't be null when alloc mem flags vram is set"); in amdgpu_amdkfd_unreserve_mem_limit()
308 * @mem: BO of peer device that is being DMA mapped. Provides parameters
314 struct kgd_mem *mem, struct amdgpu_bo **bo_out) in create_dmamap_sg_bo() argument
320 ret = amdgpu_bo_reserve(mem->bo, false); in create_dmamap_sg_bo()
324 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) in create_dmamap_sg_bo()
325 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT | in create_dmamap_sg_bo()
328 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, in create_dmamap_sg_bo()
330 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0); in create_dmamap_sg_bo()
332 amdgpu_bo_unreserve(mem->bo); in create_dmamap_sg_bo()
340 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); in create_dmamap_sg_bo()
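
The create_dmamap_sg_bo() matches show the pattern: reserve the original BO, create an SG BO that shares its reservation object (mem->bo->tbo.base.resv), unreserve, and pin the original via amdgpu_bo_ref() as the new BO's parent. A loose userspace analogue of the shared-lock-plus-parent-reference idea (everything below is a stand-in, not driver API):

#include <pthread.h>
#include <stdlib.h>

/* Loose userspace analogue: the child shares the parent's lock (standing
 * in for the shared reservation object) and bumps a refcount so the
 * parent outlives it, like amdgpu_bo_ref(mem->bo) above. */
struct parent {
	pthread_mutex_t resv;
	int refs;
};

struct child {
	struct parent *parent;
	pthread_mutex_t *resv;	/* same lock object as the parent's */
};

static struct child *create_child(struct parent *p)
{
	struct child *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	p->refs++;			/* pin the parent */
	c->parent = p;
	c->resv = &p->resv;		/* share, don't copy, the lock */
	return c;
}

int main(void)
{
	struct parent p = { .resv = PTHREAD_MUTEX_INITIALIZER, .refs = 1 };
	struct child *c = create_child(&p);

	if (!c)
		return 1;
	c->parent->refs--;		/* drop the pin before freeing */
	free(c);
	return 0;
}
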
497 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) in get_pte_flags() argument
502 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) in get_pte_flags()
504 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) in get_pte_flags()
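
The two matched conditionals in get_pte_flags() are a straight bitmask translation from KFD allocation flags to PTE permission bits. A minimal standalone sketch of the pattern (the constants are made up for illustration; the real values come from the KFD UAPI and the amdgpu PTE definitions):

/* Illustration only: these constants are placeholders, not the real
 * KFD_IOC_ALLOC_MEM_FLAGS_* / AMDGPU_PTE_* values. */
#include <stdint.h>
#include <stdio.h>

#define ALLOC_FLAGS_WRITABLE	(1u << 0)
#define ALLOC_FLAGS_EXECUTABLE	(1u << 1)

#define PTE_READABLE	(1u << 0)
#define PTE_WRITEABLE	(1u << 1)
#define PTE_EXECUTABLE	(1u << 2)

static uint64_t get_pte_flags(uint32_t alloc_flags)
{
	uint64_t pte_flags = PTE_READABLE;	/* assumed readable baseline */

	if (alloc_flags & ALLOC_FLAGS_WRITABLE)
		pte_flags |= PTE_WRITEABLE;
	if (alloc_flags & ALLOC_FLAGS_EXECUTABLE)
		pte_flags |= PTE_EXECUTABLE;

	return pte_flags;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)
	       get_pte_flags(ALLOC_FLAGS_WRITABLE | ALLOC_FLAGS_EXECUTABLE));
	return 0;
}
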
543 kfd_mem_dmamap_userptr(struct kgd_mem *mem, in kfd_mem_dmamap_userptr() argument
547 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_userptr()
552 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; in kfd_mem_dmamap_userptr()
611 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
636 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmamap_sg_bo() argument
649 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); in kfd_mem_dmamap_sg_bo()
655 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_sg_bo()
657 dma_addr = mem->bo->tbo.sg->sgl->dma_address; in kfd_mem_dmamap_sg_bo()
658 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
661 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmamap_sg_bo()
667 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
685 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, in kfd_mem_dmamap_sg_bo()
691 kfd_mem_dmamap_attachment(struct kgd_mem *mem, in kfd_mem_dmamap_attachment() argument
698 return kfd_mem_dmamap_userptr(mem, attachment); in kfd_mem_dmamap_attachment()
702 return kfd_mem_dmamap_sg_bo(mem, attachment); in kfd_mem_dmamap_attachment()
710 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, in kfd_mem_dmaunmap_userptr() argument
714 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_userptr()
744 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
759 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmaunmap_sg_bo() argument
776 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_sg_bo()
787 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, in kfd_mem_dmaunmap_attachment() argument
794 kfd_mem_dmaunmap_userptr(mem, attachment); in kfd_mem_dmaunmap_attachment()
800 kfd_mem_dmaunmap_sg_bo(mem, attachment); in kfd_mem_dmaunmap_attachment()
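
kfd_mem_dmamap_attachment() and kfd_mem_dmaunmap_attachment() are thin dispatchers that route to the userptr or SG-BO handler matched above based on the attachment type. The shape, as a standalone sketch (the enum and the early return for a same-device "shared" attachment are assumptions of the sketch, not quoted driver code):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's attachment types and handlers. */
enum attachment_type { ATTACH_SHARED, ATTACH_USERPTR, ATTACH_SG_BO };

struct attachment { enum attachment_type type; };

static int dmamap_userptr(struct attachment *a) { (void)a; puts("map userptr"); return 0; }
static int dmamap_sg_bo(struct attachment *a)   { (void)a; puts("map sg bo");   return 0; }

static int dmamap_attachment(struct attachment *a)
{
	switch (a->type) {
	case ATTACH_SHARED:		/* same device: nothing to DMA-map */
		return 0;
	case ATTACH_USERPTR:
		return dmamap_userptr(a);
	case ATTACH_SG_BO:
		return dmamap_sg_bo(a);
	}
	return -1;			/* unknown attachment type */
}

int main(void)
{
	struct attachment a = { .type = ATTACH_USERPTR };
	return dmamap_attachment(&a);
}
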
807 static int kfd_mem_export_dmabuf(struct kgd_mem *mem) in kfd_mem_export_dmabuf() argument
809 if (!mem->dmabuf) { in kfd_mem_export_dmabuf()
814 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_export_dmabuf()
816 mem->gem_handle, in kfd_mem_export_dmabuf()
817 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_export_dmabuf()
825 mem->dmabuf = dmabuf; in kfd_mem_export_dmabuf()
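
kfd_mem_export_dmabuf() creates the dmabuf only on first use (if (!mem->dmabuf)) and caches it in mem->dmabuf; the exporter amdgpu_amdkfd_gpuvm_export_dmabuf() further down takes mem->lock around the call and then grabs its own reference with get_dma_buf(). A minimal sketch of that lazy-init-under-a-caller-held-lock idiom, with placeholder types:

#include <pthread.h>
#include <stdlib.h>

/* Placeholder types; the point is the if (!cached) create-and-cache step
 * done while the caller holds the object lock. */
struct handle { int id; };

struct obj {
	pthread_mutex_t lock;
	struct handle *dmabuf;	/* created on first export, then reused */
};

static int export_handle(struct obj *o)
{
	/* Caller holds o->lock, as the exporter holds mem->lock. */
	if (!o->dmabuf) {
		o->dmabuf = calloc(1, sizeof(*o->dmabuf));
		if (!o->dmabuf)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int ret;

	pthread_mutex_lock(&o.lock);
	ret = export_handle(&o);	/* first call creates the handle */
	pthread_mutex_unlock(&o.lock);

	free(o.dmabuf);
	return ret;
}
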
832 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach_dmabuf() argument
838 ret = kfd_mem_export_dmabuf(mem); in kfd_mem_attach_dmabuf()
842 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); in kfd_mem_attach_dmabuf()
865 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach() argument
868 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_attach()
869 unsigned long bo_size = mem->bo->tbo.base.size; in kfd_mem_attach()
870 uint64_t va = mem->va; in kfd_mem_attach()
891 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || in kfd_mem_attach()
892 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || in kfd_mem_attach()
893 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { in kfd_mem_attach()
894 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) in kfd_mem_attach()
910 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || in kfd_mem_attach()
911 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
912 (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
919 bo[i] = mem->bo; in kfd_mem_attach()
926 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in kfd_mem_attach()
929 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
933 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { in kfd_mem_attach()
934 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || in kfd_mem_attach()
935 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), in kfd_mem_attach()
938 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
942 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || in kfd_mem_attach()
943 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { in kfd_mem_attach()
945 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); in kfd_mem_attach()
975 attachment[i]->pte_flags = get_pte_flags(adev, mem); in kfd_mem_attach()
977 list_add(&attachment[i]->list, &mem->attachments); in kfd_mem_attach()
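
Taken together, the kfd_mem_attach() matches encode a strategy choice: reuse the original BO when the device can access it directly or the DMA mapping can be shared; build a per-device SG BO for userptr and doorbell/MMIO memory; otherwise import GTT/VRAM BOs through dmabuf. A condensed, standalone decision function (the predicate fields are stubs for the real checks matched above):

#include <stdbool.h>
#include <stdio.h>

/* Stub predicates standing in for the conditions matched above. */
struct bo_info {
	bool same_device;	/* adev == bo_adev, no MMIO remap */
	bool is_userptr;	/* amdgpu_ttm_tt_get_usermm(...) != NULL */
	bool is_sg;		/* doorbell/MMIO SG BO */
	bool is_gtt;		/* mem->domain == AMDGPU_GEM_DOMAIN_GTT */
	bool reuse_dmamap;	/* peer can reuse the original DMA mapping */
};

enum attach_strategy { REUSE_BO, DMAMAP_SG_BO, IMPORT_DMABUF };

static enum attach_strategy pick_strategy(const struct bo_info *b)
{
	if (b->same_device ||
	    ((b->is_userptr || b->is_gtt) && b->reuse_dmamap))
		return REUSE_BO;	/* share the existing BO and mapping */
	if (b->is_userptr || b->is_sg)
		return DMAMAP_SG_BO;	/* per-device SG copy of the pages */
	return IMPORT_DMABUF;		/* GTT/VRAM peer: go through dmabuf */
}

int main(void)
{
	struct bo_info b = { .is_userptr = true };
	printf("strategy %d\n", pick_strategy(&b));
	return 0;
}
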
1015 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, in add_kgd_mem_to_kfd_bo_list() argument
1021 list_add_tail(&mem->validate_list, in add_kgd_mem_to_kfd_bo_list()
1024 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); in add_kgd_mem_to_kfd_bo_list()
1028 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, in remove_kgd_mem_from_kfd_bo_list() argument
1032 list_del(&mem->validate_list); in remove_kgd_mem_from_kfd_bo_list()
1048 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, in init_user_pages() argument
1051 struct amdkfd_process_info *process_info = mem->process_info; in init_user_pages()
1052 struct amdgpu_bo *bo = mem->bo; in init_user_pages()
1080 mem->invalid++; in init_user_pages()
1097 amdgpu_bo_placement_from_domain(bo, mem->domain); in init_user_pages()
1135 * @mem: KFD BO structure.
1139 static int reserve_bo_and_vm(struct kgd_mem *mem, in reserve_bo_and_vm() argument
1143 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_vm()
1149 ctx->sync = &mem->sync; in reserve_bo_and_vm()
1172 * @mem: KFD BO structure.
1180 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, in reserve_bo_and_cond_vms() argument
1185 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_cond_vms()
1188 ctx->sync = &mem->sync; in reserve_bo_and_cond_vms()
1192 list_for_each_entry(entry, &mem->attachments, list) { in reserve_bo_and_cond_vms()
1242 static void unmap_bo_from_gpuvm(struct kgd_mem *mem, in unmap_bo_from_gpuvm() argument
1257 static int update_gpuvm_pte(struct kgd_mem *mem, in update_gpuvm_pte() argument
1265 ret = kfd_mem_dmamap_attachment(mem, entry); in update_gpuvm_pte()
1279 static int map_bo_to_gpuvm(struct kgd_mem *mem, in map_bo_to_gpuvm() argument
1299 ret = update_gpuvm_pte(mem, entry, sync); in map_bo_to_gpuvm()
1308 unmap_bo_from_gpuvm(mem, entry, sync); in map_bo_to_gpuvm()
1309 kfd_mem_dmaunmap_attachment(mem, entry); in map_bo_to_gpuvm()
1682 void *drm_priv, struct kgd_mem **mem, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() argument
1747 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1748 if (!*mem) { in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1752 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1753 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1754 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1760 if ((*mem)->aql_queue) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1764 (*mem)->alloc_flags = flags; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1766 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1776 va, (*mem)->aql_queue ? size << 1 : size, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1791 ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1799 bo->kfd_bo = *mem; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1800 (*mem)->bo = bo; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1804 (*mem)->va = va; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1805 (*mem)->domain = domain; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1806 (*mem)->mapped_to_gpu_memory = 0; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1807 (*mem)->process_info = avm->process_info; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1809 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1813 ret = init_user_pages(*mem, user_addr, criu_resume); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1844 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1845 drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1849 /* Don't unreserve system mem limit twice */ in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1854 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1858 kfree(*mem); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
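
The allocation path builds the kgd_mem in order (kzalloc, INIT_LIST_HEAD, mutex_init, amdgpu_sync_create, GEM handle creation, BO-list insertion) and its error path unwinds in reverse (remove from list, delete handle, destroy mutex, kfree). A userspace sketch of the same construct-then-unwind discipline, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct kmem {
	pthread_mutex_t lock;
	char *bo;		/* stands in for the backing object */
};

/* Construct in order; on failure, unwind only what was already built. */
static int alloc_mem(struct kmem **out, size_t size)
{
	struct kmem *m = calloc(1, sizeof(*m));

	if (!m)
		return -1;
	if (pthread_mutex_init(&m->lock, NULL))
		goto err_free;

	m->bo = malloc(size);
	if (!m->bo)
		goto err_mutex;

	*out = m;
	return 0;

err_mutex:
	pthread_mutex_destroy(&m->lock);
err_free:
	free(m);
	return -1;
}

int main(void)
{
	struct kmem *m;

	if (alloc_mem(&m, 4096))
		return 1;
	free(m->bo);
	pthread_mutex_destroy(&m->lock);
	free(m);
	return 0;
}
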
1868 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu() argument
1871 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1872 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1873 bool use_release_notifier = (mem->bo->kfd_bo == mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1880 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1883 if (mem->alloc_flags & in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1886 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1889 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1890 is_imported = mem->is_imported; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1891 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1892 /* lock is not needed after this, since mem is unused and will in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1898 mem->va, bo_size); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1904 list_del(&mem->validate_list); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1908 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1909 amdgpu_hmm_unregister(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1911 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1915 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1919 amdgpu_amdkfd_remove_eviction_fence(mem->bo, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1921 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1922 mem->va + bo_size * (1 + mem->aql_queue)); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1925 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1926 kfd_mem_dmaunmap_attachment(mem, entry); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1933 amdgpu_sync_free(&mem->sync); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1938 if (mem->bo->tbo.sg) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1939 sg_free_table(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1940 kfree(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1949 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1951 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1958 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1959 drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1960 if (mem->dmabuf) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1961 dma_buf_put(mem->dmabuf); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1962 mem->dmabuf = NULL; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1964 mutex_destroy(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1967 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1970 drm_gem_object_put(&mem->bo->tbo.base); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1977 kfree(mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1983 struct amdgpu_device *adev, struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu() argument
1995 bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2005 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2012 mutex_lock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2013 is_invalid_userptr = !!mem->invalid; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2014 mutex_unlock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2017 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2019 domain = mem->domain; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2023 mem->va, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2024 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2027 if (!kfd_mem_is_attached(avm, mem)) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2028 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2033 ret = reserve_bo_and_vm(mem, avm, &ctx); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2050 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2057 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2071 mem->mapped_to_gpu_memory++; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2073 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2083 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2084 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2088 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_dmaunmap_mem() argument
2096 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2098 ret = amdgpu_bo_reserve(mem->bo, true); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2102 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2109 kfd_mem_dmaunmap_attachment(mem, entry); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2112 amdgpu_bo_unreserve(mem->bo); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2114 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2120 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu() argument
2123 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2128 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2130 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2144 mem->va, in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2145 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2148 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2155 unmap_bo_from_gpuvm(mem, entry, ctx.sync); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2158 mem->mapped_to_gpu_memory--; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2160 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2166 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2171 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) in amdgpu_amdkfd_gpuvm_sync_memory() argument
2178 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2179 amdgpu_sync_clone(&mem->sync, &sync); in amdgpu_amdkfd_gpuvm_sync_memory()
2180 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
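
amdgpu_amdkfd_gpuvm_sync_memory() clones mem->sync while holding mem->lock and drops the lock before waiting, so a potentially long fence wait never happens under the lock. The snapshot-then-wait idiom in miniature (placeholder types; usleep stands in for the fence wait):

#include <pthread.h>
#include <string.h>
#include <unistd.h>

struct obj {
	pthread_mutex_t lock;
	int pending[8];		/* stands in for the fences in mem->sync */
	int npending;
};

static void sync_obj(struct obj *o)
{
	int snap[8], n;

	/* Snapshot the fence list under the lock... */
	pthread_mutex_lock(&o->lock);
	n = o->npending;
	memcpy(snap, o->pending, sizeof(snap[0]) * n);
	pthread_mutex_unlock(&o->lock);

	/* ...then do the potentially long wait without holding it. */
	for (int i = 0; i < n; i++)
		usleep(snap[i]);	/* placeholder for a fence wait */
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .pending = { 10, 20 }, .npending = 2 };

	sync_obj(&o);
	return 0;
}
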
2237 * @mem: Buffer object to be mapped for CPU access
2248 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() argument
2252 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2259 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2280 bo, mem->process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2287 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2295 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2302 * @mem: Buffer object to be unmapped for CPU access
2308 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() argument
2310 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel()
2319 struct kfd_vm_fault_info *mem) in amdgpu_amdkfd_gpuvm_get_vm_fault_info() argument
2322 *mem = *adev->gmc.vm_fault_info; in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2333 struct kgd_mem **mem, uint64_t *size, in import_obj_create() argument
2346 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in import_obj_create()
2347 if (!*mem) in import_obj_create()
2360 INIT_LIST_HEAD(&(*mem)->attachments); in import_obj_create()
2361 mutex_init(&(*mem)->lock); in import_obj_create()
2363 (*mem)->alloc_flags = in import_obj_create()
2370 (*mem)->dmabuf = dma_buf; in import_obj_create()
2371 (*mem)->bo = bo; in import_obj_create()
2372 (*mem)->va = va; in import_obj_create()
2373 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ? in import_obj_create()
2376 (*mem)->mapped_to_gpu_memory = 0; in import_obj_create()
2377 (*mem)->process_info = avm->process_info; in import_obj_create()
2378 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); in import_obj_create()
2379 amdgpu_sync_create(&(*mem)->sync); in import_obj_create()
2380 (*mem)->is_imported = true; in import_obj_create()
2385 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, in import_obj_create()
2394 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in import_obj_create()
2397 kfree(*mem); in import_obj_create()
2403 struct kgd_mem **mem, uint64_t *size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd() argument
2420 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2425 (*mem)->gem_handle = handle; in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2436 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_export_dmabuf() argument
2441 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2442 ret = kfd_mem_export_dmabuf(mem); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2446 get_dma_buf(mem->dmabuf); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2447 *dma_buf = mem->dmabuf; in amdgpu_amdkfd_gpuvm_export_dmabuf()
2449 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2464 unsigned long cur_seq, struct kgd_mem *mem) in amdgpu_amdkfd_evict_userptr() argument
2466 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_evict_userptr()
2478 mem->invalid++; in amdgpu_amdkfd_evict_userptr()
2503 struct kgd_mem *mem, *tmp_mem; in update_invalid_user_pages() local
2512 list_for_each_entry_safe(mem, tmp_mem, in update_invalid_user_pages()
2515 if (mem->invalid) in update_invalid_user_pages()
2516 list_move_tail(&mem->validate_list, in update_invalid_user_pages()
2520 list_for_each_entry(mem, &process_info->userptr_inval_list, in update_invalid_user_pages()
2522 invalid = mem->invalid; in update_invalid_user_pages()
2529 bo = mem->bo; in update_invalid_user_pages()
2531 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); in update_invalid_user_pages()
2532 mem->range = NULL; in update_invalid_user_pages()
2557 &mem->range); in update_invalid_user_pages()
2579 if (mem->invalid != invalid) { in update_invalid_user_pages()
2583 /* set mem valid if mem has hmm range associated */ in update_invalid_user_pages()
2584 if (mem->range) in update_invalid_user_pages()
2585 mem->invalid = 0; in update_invalid_user_pages()
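
update_invalid_user_pages() samples mem->invalid before re-fetching the user pages and compares it again afterwards: a changed counter means an MMU-notifier invalidation raced with the update, and only a still-stable counter lets mem->invalid be cleared. The same optimistic generation-counter pattern, reduced to C11 atomics for a standalone sketch (the driver serializes with its notifier_lock instead):

#include <stdatomic.h>
#include <stdio.h>

/* Generation counter bumped by the invalidation side (the MMU-notifier
 * role in the driver); the update side detects races by re-reading it. */
static atomic_uint invalid;

static int revalidate(void)
{
	unsigned int seen = atomic_load(&invalid);

	/* ... re-fetch / re-pin the user pages here ... */

	if (atomic_load(&invalid) != seen)
		return -1;	/* invalidated mid-flight: caller retries */
	return 0;		/* stable: safe to clear the invalid state */
}

int main(void)
{
	while (revalidate())	/* retry until a stable snapshot */
		;
	puts("pages valid");
	return 0;
}
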
2606 struct kgd_mem *mem, *tmp_mem; in validate_invalid_user_pages() local
2625 list_for_each_entry(mem, &process_info->userptr_inval_list, in validate_invalid_user_pages()
2629 gobj = &mem->bo->tbo.base; in validate_invalid_user_pages()
2642 list_for_each_entry_safe(mem, tmp_mem, in validate_invalid_user_pages()
2647 bo = mem->bo; in validate_invalid_user_pages()
2651 amdgpu_bo_placement_from_domain(bo, mem->domain); in validate_invalid_user_pages()
2665 list_for_each_entry(attachment, &mem->attachments, list) { in validate_invalid_user_pages()
2669 kfd_mem_dmaunmap_attachment(mem, attachment); in validate_invalid_user_pages()
2670 ret = update_gpuvm_pte(mem, attachment, &sync); in validate_invalid_user_pages()
2675 mem->invalid++; in validate_invalid_user_pages()
2699 struct kgd_mem *mem, *tmp_mem; in confirm_valid_user_pages_locked() local
2702 list_for_each_entry_safe(mem, tmp_mem, in confirm_valid_user_pages_locked()
2707 /* keep mem without hmm range at userptr_inval_list */ in confirm_valid_user_pages_locked()
2708 if (!mem->range) in confirm_valid_user_pages_locked()
2711 /* Only check mem with hmm range associated */ in confirm_valid_user_pages_locked()
2713 mem->bo->tbo.ttm, mem->range); in confirm_valid_user_pages_locked()
2715 mem->range = NULL; in confirm_valid_user_pages_locked()
2717 WARN(!mem->invalid, "Invalid BO not marked invalid"); in confirm_valid_user_pages_locked()
2722 if (mem->invalid) { in confirm_valid_user_pages_locked()
2728 list_move_tail(&mem->validate_list, in confirm_valid_user_pages_locked()
2858 struct kgd_mem *mem; in amdgpu_amdkfd_gpuvm_restore_process_bos() local
2883 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2887 gobj = &mem->bo->tbo.base; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2903 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2906 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2907 uint32_t domain = mem->domain; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2933 list_for_each_entry(attachment, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
2940 kfd_mem_dmaunmap_attachment(mem, attachment); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2941 ret = update_gpuvm_pte(mem, attachment, &sync_obj); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3015 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
3016 if (mem->bo->tbo.pin_count) in amdgpu_amdkfd_gpuvm_restore_process_bos()
3019 dma_resv_add_fence(mem->bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3041 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) in amdgpu_amdkfd_add_gws_to_process() argument
3050 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_add_gws_to_process()
3051 if (!*mem) in amdgpu_amdkfd_add_gws_to_process()
3054 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3055 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_add_gws_to_process()
3056 (*mem)->bo = amdgpu_bo_ref(gws_bo); in amdgpu_amdkfd_add_gws_to_process()
3057 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; in amdgpu_amdkfd_add_gws_to_process()
3058 (*mem)->process_info = process_info; in amdgpu_amdkfd_add_gws_to_process()
3059 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); in amdgpu_amdkfd_add_gws_to_process()
3060 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3064 mutex_lock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3087 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3095 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3096 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3097 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); in amdgpu_amdkfd_add_gws_to_process()
3099 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3100 kfree(*mem); in amdgpu_amdkfd_add_gws_to_process()
3101 *mem = NULL; in amdgpu_amdkfd_add_gws_to_process()
3105 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) in amdgpu_amdkfd_remove_gws_from_process() argument
3109 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; in amdgpu_amdkfd_remove_gws_from_process()
3129 kfree(mem); in amdgpu_amdkfd_remove_gws_from_process()
3153 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem) in amdgpu_amdkfd_bo_mapped_to_dev() argument
3157 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_bo_mapped_to_dev()
3170 seq_printf(m, "System mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()
3173 seq_printf(m, "TTM mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()