Lines matching "gfx", "-", "mem" in drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 // SPDX-License-Identifier: MIT
3 * Copyright 2014-2018 Advanced Micro Devices, Inc.
23 #include <linux/dma-buf.h>
73 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
78 struct kgd_mem *mem) in kfd_mem_is_attached() argument
82 list_for_each_entry(entry, &mem->attachments, list) in kfd_mem_is_attached()
83 if (entry->bo_va->base.vm == avm) in kfd_mem_is_attached()
90 * reuse_dmamap() - Check whether adev can share the original
104 return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) || in reuse_dmamap()
105 (adev->dev->iommu_group == bo_adev->dev->iommu_group); in reuse_dmamap()
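/*
 * Editor's sketch (not from the file): how reuse_dmamap() feeds the
 * attachment-type decision in kfd_mem_attach() further down. If both
 * devices see system RAM identically, the exporter's DMA mapping is
 * shared; otherwise a per-device SG BO must be built.
 */
	if (reuse_dmamap(adev, bo_adev))
		attachment[i]->type = KFD_MEM_ATT_SHARED;  /* reuse original mapping */
	else
		attachment[i]->type = KFD_MEM_ATT_USERPTR; /* new DMA mapping needed */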
109  * System (TTM + userptr) memory - 63/64th System RAM
110 * TTM memory - 3/8th System RAM
115 uint64_t mem; in amdgpu_amdkfd_gpuvm_init_mem_limits() local
121 mem = si.totalram - si.totalhigh; in amdgpu_amdkfd_gpuvm_init_mem_limits()
122 mem *= si.mem_unit; in amdgpu_amdkfd_gpuvm_init_mem_limits()
125 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6); in amdgpu_amdkfd_gpuvm_init_mem_limits()
129 kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT; in amdgpu_amdkfd_gpuvm_init_mem_limits()
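/*
 * Editor's sketch of the limit arithmetic above as a standalone helper;
 * the small-system halving branch is an assumption consistent with the
 * AMDGPU_RESERVE_MEM_LIMIT subtraction shown in the excerpt.
 */
static uint64_t system_mem_limit_sketch(uint64_t total_ram, uint64_t reserve)
{
	uint64_t limit = total_ram - (total_ram >> 6);	/* keep 63/64 of RAM */

	if (limit < 2 * reserve)
		limit >>= 1;		/* tiny systems: halve instead */
	else
		limit -= reserve;	/* normal case: fixed kernel reserve */
	return limit;
}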
156 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
167 * returns -ENOMEM in case of error, ZERO otherwise
175 uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0); in amdgpu_amdkfd_reserve_mem_limit()
194 * For GFX 9.4.3, get the VRAM size from XCP structs in amdgpu_amdkfd_reserve_mem_limit()
197 return -EINVAL; in amdgpu_amdkfd_reserve_mem_limit()
200 if (adev->apu_prefer_gtt) { in amdgpu_amdkfd_reserve_mem_limit()
210 return -ENOMEM; in amdgpu_amdkfd_reserve_mem_limit()
223 (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > in amdgpu_amdkfd_reserve_mem_limit()
224 vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) { in amdgpu_amdkfd_reserve_mem_limit()
225 ret = -ENOMEM; in amdgpu_amdkfd_reserve_mem_limit()
235 adev->kfd.vram_used[xcp_id] += vram_needed; in amdgpu_amdkfd_reserve_mem_limit()
236 adev->kfd.vram_used_aligned[xcp_id] += in amdgpu_amdkfd_reserve_mem_limit()
237 adev->apu_prefer_gtt ? in amdgpu_amdkfd_reserve_mem_limit()
255 kfd_mem_limit.system_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
256 kfd_mem_limit.ttm_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
259 "adev reference can't be null when alloc mem flags vram is set"); in amdgpu_amdkfd_unreserve_mem_limit()
264 adev->kfd.vram_used[xcp_id] -= size; in amdgpu_amdkfd_unreserve_mem_limit()
265 if (adev->apu_prefer_gtt) { in amdgpu_amdkfd_unreserve_mem_limit()
266 adev->kfd.vram_used_aligned[xcp_id] -= size; in amdgpu_amdkfd_unreserve_mem_limit()
267 kfd_mem_limit.system_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
268 kfd_mem_limit.ttm_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
270 adev->kfd.vram_used_aligned[xcp_id] -= in amdgpu_amdkfd_unreserve_mem_limit()
275 kfd_mem_limit.system_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
282 WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0, in amdgpu_amdkfd_unreserve_mem_limit()
295 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_amdkfd_release_notify()
296 u32 alloc_flags = bo->kfd_bo->alloc_flags; in amdgpu_amdkfd_release_notify()
300 bo->xcp_id); in amdgpu_amdkfd_release_notify()
302 kfree(bo->kfd_bo); in amdgpu_amdkfd_release_notify()
306  * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information in create_dmamap_sg_bo()
310 * @mem: BO of peer device that is being DMA mapped. Provides parameters
316 struct kgd_mem *mem, struct amdgpu_bo **bo_out) in create_dmamap_sg_bo() argument
322 ret = amdgpu_bo_reserve(mem->bo, false); in create_dmamap_sg_bo()
326 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) in create_dmamap_sg_bo()
327 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT | in create_dmamap_sg_bo()
330 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, in create_dmamap_sg_bo()
332 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0); in create_dmamap_sg_bo()
334 amdgpu_bo_unreserve(mem->bo); in create_dmamap_sg_bo()
338 return -EINVAL; in create_dmamap_sg_bo()
342 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); in create_dmamap_sg_bo()
346 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
353  * NOTE: Must be called with BO reserved, i.e. bo->tbo.base.resv lock held.
361 return -EINVAL; in amdgpu_amdkfd_remove_eviction_fence()
367 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, in amdgpu_amdkfd_remove_eviction_fence()
374 * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
384 struct dma_resv *resv = &bo->tbo.base._resv; in amdgpu_amdkfd_remove_all_eviction_fences()
395 dma_resv_replace_fences(resv, fence->context, stub, in amdgpu_amdkfd_remove_all_eviction_fences()
407 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), in amdgpu_amdkfd_bo_validate()
409 return -EINVAL; in amdgpu_amdkfd_bo_validate()
412 if (bo->tbo.pin_count) in amdgpu_amdkfd_bo_validate()
417 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_amdkfd_bo_validate()
440 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1); in amdgpu_amdkfd_bo_validate_and_fence()
444 dma_resv_add_fence(bo->tbo.base.resv, fence, in amdgpu_amdkfd_bo_validate_and_fence()
455 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); in amdgpu_amdkfd_validate_vm_bo()
458 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
468 struct amdgpu_bo *pd = vm->root.bo; in vm_validate_pt_pd_bos()
469 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in vm_validate_pt_pd_bos()
479 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); in vm_validate_pt_pd_bos()
486 struct amdgpu_bo *pd = vm->root.bo; in vm_update_pds()
487 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in vm_update_pds()
494 return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL); in vm_update_pds()
497 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) in get_pte_flags() argument
502 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) in get_pte_flags()
504 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) in get_pte_flags()
511 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
534 sg_dma_address(sg->sgl) = addr; in create_sg_table()
535 sg->sgl->length = size; in create_sg_table()
537 sg->sgl->dma_length = size; in create_sg_table()
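/*
 * Editor's reconstruction sketch of create_sg_table() from the fragments
 * above: a one-entry sg_table wrapping an already DMA-mapped range. The
 * kmalloc/sg_alloc_table scaffolding is assumed; the three assignments
 * are taken verbatim from the excerpt.
 */
static struct sg_table *create_sg_table_sketch(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}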
543 kfd_mem_dmamap_userptr(struct kgd_mem *mem, in kfd_mem_dmamap_userptr() argument
547 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_userptr()
550 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmamap_userptr()
551 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmamap_userptr()
552 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; in kfd_mem_dmamap_userptr()
553 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmamap_userptr()
556 if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) in kfd_mem_dmamap_userptr()
557 return -EINVAL; in kfd_mem_dmamap_userptr()
559 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); in kfd_mem_dmamap_userptr()
560 if (unlikely(!ttm->sg)) in kfd_mem_dmamap_userptr()
561 return -ENOMEM; in kfd_mem_dmamap_userptr()
564 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, in kfd_mem_dmamap_userptr()
565 ttm->num_pages, 0, in kfd_mem_dmamap_userptr()
566 (u64)ttm->num_pages << PAGE_SHIFT, in kfd_mem_dmamap_userptr()
571 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmamap_userptr()
576 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_userptr()
583 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmamap_userptr()
586 sg_free_table(ttm->sg); in kfd_mem_dmamap_userptr()
588 kfree(ttm->sg); in kfd_mem_dmamap_userptr()
589 ttm->sg = NULL; in kfd_mem_dmamap_userptr()
597 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmamap_dmabuf()
600 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_dmabuf()
604 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
605 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
617 * - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
619 * - Signal TTM to mark memory pointed to by requesting device's BO as GPU
624 * - Mapping of DOORBELL or MMIO BO of same or peer device
625  * - Validating an evicted DOORBELL or MMIO BO on device seeking access in kfd_mem_dmamap_sg_bo()
627 * Return: ZERO if successful, NON-ZERO otherwise
630 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmamap_sg_bo() argument
634 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmamap_sg_bo()
635 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmamap_sg_bo()
636 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmamap_sg_bo()
643 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); in kfd_mem_dmamap_sg_bo()
644 if (unlikely(ttm->sg)) { in kfd_mem_dmamap_sg_bo()
645 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio); in kfd_mem_dmamap_sg_bo()
646 return -EINVAL; in kfd_mem_dmamap_sg_bo()
649 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_sg_bo()
651 dma_addr = mem->bo->tbo.sg->sgl->dma_address; in kfd_mem_dmamap_sg_bo()
652 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
654 dma_addr = dma_map_resource(adev->dev, dma_addr, in kfd_mem_dmamap_sg_bo()
655 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmamap_sg_bo()
656 ret = dma_mapping_error(adev->dev, dma_addr); in kfd_mem_dmamap_sg_bo()
661 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
662 if (unlikely(!ttm->sg)) { in kfd_mem_dmamap_sg_bo()
663 ret = -ENOMEM; in kfd_mem_dmamap_sg_bo()
668 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_sg_bo()
675 sg_free_table(ttm->sg); in kfd_mem_dmamap_sg_bo()
676 kfree(ttm->sg); in kfd_mem_dmamap_sg_bo()
677 ttm->sg = NULL; in kfd_mem_dmamap_sg_bo()
679 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, in kfd_mem_dmamap_sg_bo()
685 kfd_mem_dmamap_attachment(struct kgd_mem *mem, in kfd_mem_dmamap_attachment() argument
688 switch (attachment->type) { in kfd_mem_dmamap_attachment()
692 return kfd_mem_dmamap_userptr(mem, attachment); in kfd_mem_dmamap_attachment()
696 return kfd_mem_dmamap_sg_bo(mem, attachment); in kfd_mem_dmamap_attachment()
700 return -EINVAL; in kfd_mem_dmamap_attachment()
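/*
 * Editor's reconstruction of the full dispatch: the case labels are the
 * attachment types assigned in kfd_mem_attach() further down; only the
 * USERPTR and SG arms appear verbatim in the excerpt.
 */
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;			/* exporter's mapping is reused */
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	case KFD_MEM_ATT_SG:
		return kfd_mem_dmamap_sg_bo(mem, attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;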
704 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, in kfd_mem_dmaunmap_userptr() argument
708 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_userptr()
711 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmaunmap_userptr()
712 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmaunmap_userptr()
713 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmaunmap_userptr()
715 if (unlikely(!ttm->sg)) in kfd_mem_dmaunmap_userptr()
719 (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmaunmap_userptr()
721 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmaunmap_userptr()
722 sg_free_table(ttm->sg); in kfd_mem_dmaunmap_userptr()
723 kfree(ttm->sg); in kfd_mem_dmaunmap_userptr()
724 ttm->sg = NULL; in kfd_mem_dmaunmap_userptr()
730 /* This is a no-op. We don't want to trigger eviction fences when in kfd_mem_dmaunmap_dmabuf()
737 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
738 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
742 * - Signal TTM to mark memory pointed to by BO as GPU inaccessible
743 * - Free SG Table that is used to encapsulate DMA mapped memory of
753 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmaunmap_sg_bo() argument
757 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmaunmap_sg_bo()
758 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmaunmap_sg_bo()
759 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmaunmap_sg_bo()
762 if (unlikely(!ttm->sg)) { in kfd_mem_dmaunmap_sg_bo()
768 (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmaunmap_sg_bo()
770 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_sg_bo()
772 dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address, in kfd_mem_dmaunmap_sg_bo()
773 ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmaunmap_sg_bo()
774 sg_free_table(ttm->sg); in kfd_mem_dmaunmap_sg_bo()
775 kfree(ttm->sg); in kfd_mem_dmaunmap_sg_bo()
776 ttm->sg = NULL; in kfd_mem_dmaunmap_sg_bo()
777 bo->tbo.sg = NULL; in kfd_mem_dmaunmap_sg_bo()
781 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, in kfd_mem_dmaunmap_attachment() argument
784 switch (attachment->type) { in kfd_mem_dmaunmap_attachment()
788 kfd_mem_dmaunmap_userptr(mem, attachment); in kfd_mem_dmaunmap_attachment()
794 kfd_mem_dmaunmap_sg_bo(mem, attachment); in kfd_mem_dmaunmap_attachment()
801 static int kfd_mem_export_dmabuf(struct kgd_mem *mem) in kfd_mem_export_dmabuf() argument
803 if (!mem->dmabuf) { in kfd_mem_export_dmabuf()
807 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_export_dmabuf()
808 dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file, in kfd_mem_export_dmabuf()
809 mem->gem_handle, in kfd_mem_export_dmabuf()
810 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_export_dmabuf()
814 mem->dmabuf = dmabuf; in kfd_mem_export_dmabuf()
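/*
 * Editor's note (sketch): the PRIME export above can fail, so the elided
 * error check between drm_gem_prime_handle_to_dmabuf() and the cached
 * assignment would look like this.
 */
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);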
821 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach_dmabuf() argument
827 ret = kfd_mem_export_dmabuf(mem); in kfd_mem_attach_dmabuf()
831 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); in kfd_mem_attach_dmabuf()
836 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; in kfd_mem_attach_dmabuf()
841 /* kfd_mem_attach - Add a BO to a VM
850 * 3. Determine ASIC-specific PTE flags
854 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach() argument
857 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_attach()
858 unsigned long bo_size = mem->bo->tbo.base.size; in kfd_mem_attach()
859 uint64_t va = mem->va; in kfd_mem_attach()
868 return -EINVAL; in kfd_mem_attach()
879 if ((adev != bo_adev && !adev->apu_prefer_gtt) && in kfd_mem_attach()
880 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || in kfd_mem_attach()
881 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || in kfd_mem_attach()
882 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { in kfd_mem_attach()
883 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) in kfd_mem_attach()
886 return -EINVAL; in kfd_mem_attach()
892 ret = -ENOMEM; in kfd_mem_attach()
896 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, in kfd_mem_attach()
899 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || in kfd_mem_attach()
900 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
901 (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
907 attachment[i]->type = KFD_MEM_ATT_SHARED; in kfd_mem_attach()
908 bo[i] = mem->bo; in kfd_mem_attach()
909 drm_gem_object_get(&bo[i]->tbo.base); in kfd_mem_attach()
912 attachment[i]->type = KFD_MEM_ATT_SHARED; in kfd_mem_attach()
914 drm_gem_object_get(&bo[i]->tbo.base); in kfd_mem_attach()
915 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in kfd_mem_attach()
916 /* Create an SG BO to DMA-map userptrs on other GPUs */ in kfd_mem_attach()
917 attachment[i]->type = KFD_MEM_ATT_USERPTR; in kfd_mem_attach()
918 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
922 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { in kfd_mem_attach()
923 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || in kfd_mem_attach()
924 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), in kfd_mem_attach()
926 attachment[i]->type = KFD_MEM_ATT_SG; in kfd_mem_attach()
927 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
931 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || in kfd_mem_attach()
932 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { in kfd_mem_attach()
933 attachment[i]->type = KFD_MEM_ATT_DMABUF; in kfd_mem_attach()
934 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); in kfd_mem_attach()
940 ret = -EINVAL; in kfd_mem_attach()
954 ++bo_va->ref_count; in kfd_mem_attach()
955 attachment[i]->bo_va = bo_va; in kfd_mem_attach()
957 if (unlikely(!attachment[i]->bo_va)) { in kfd_mem_attach()
958 ret = -ENOMEM; in kfd_mem_attach()
963 attachment[i]->va = va; in kfd_mem_attach()
964 attachment[i]->pte_flags = get_pte_flags(adev, mem); in kfd_mem_attach()
965 attachment[i]->adev = adev; in kfd_mem_attach()
966 list_add(&attachment[i]->list, &mem->attachments); in kfd_mem_attach()
974 for (; i >= 0; i--) { in kfd_mem_attach()
977 if (attachment[i]->bo_va) { in kfd_mem_attach()
979 if (--attachment[i]->bo_va->ref_count == 0) in kfd_mem_attach()
980 amdgpu_vm_bo_del(adev, attachment[i]->bo_va); in kfd_mem_attach()
982 list_del(&attachment[i]->list); in kfd_mem_attach()
985 drm_gem_object_put(&bo[i]->tbo.base); in kfd_mem_attach()
993 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_detach()
996 attachment->va, attachment); in kfd_mem_detach()
997 if (--attachment->bo_va->ref_count == 0) in kfd_mem_detach()
998 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va); in kfd_mem_detach()
999 drm_gem_object_put(&bo->tbo.base); in kfd_mem_detach()
1000 list_del(&attachment->list); in kfd_mem_detach()
1004 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, in add_kgd_mem_to_kfd_bo_list() argument
1008 mutex_lock(&process_info->lock); in add_kgd_mem_to_kfd_bo_list()
1010 list_add_tail(&mem->validate_list, in add_kgd_mem_to_kfd_bo_list()
1011 &process_info->userptr_valid_list); in add_kgd_mem_to_kfd_bo_list()
1013 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); in add_kgd_mem_to_kfd_bo_list()
1014 mutex_unlock(&process_info->lock); in add_kgd_mem_to_kfd_bo_list()
1017 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, in remove_kgd_mem_from_kfd_bo_list() argument
1020 mutex_lock(&process_info->lock); in remove_kgd_mem_from_kfd_bo_list()
1021 list_del(&mem->validate_list); in remove_kgd_mem_from_kfd_bo_list()
1022 mutex_unlock(&process_info->lock); in remove_kgd_mem_from_kfd_bo_list()
1032 * Takes the process_info->lock to protect against concurrent restore
1037 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, in init_user_pages() argument
1040 struct amdkfd_process_info *process_info = mem->process_info; in init_user_pages()
1041 struct amdgpu_bo *bo = mem->bo; in init_user_pages()
1046 mutex_lock(&process_info->lock); in init_user_pages()
1048 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); in init_user_pages()
1068 mutex_lock(&process_info->notifier_lock); in init_user_pages()
1069 mem->invalid++; in init_user_pages()
1070 mutex_unlock(&process_info->notifier_lock); in init_user_pages()
1071 mutex_unlock(&process_info->lock); in init_user_pages()
1075 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range); in init_user_pages()
1077 if (ret == -EAGAIN) in init_user_pages()
1089 amdgpu_bo_placement_from_domain(bo, mem->domain); in init_user_pages()
1090 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in init_user_pages()
1096 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); in init_user_pages()
1101 mutex_unlock(&process_info->lock); in init_user_pages()
1126 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
1127 * @mem: KFD BO structure.
1131 static int reserve_bo_and_vm(struct kgd_mem *mem, in reserve_bo_and_vm() argument
1135 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_vm()
1140 ctx->n_vms = 1; in reserve_bo_and_vm()
1141 ctx->sync = &mem->sync; in reserve_bo_and_vm()
1142 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); in reserve_bo_and_vm()
1143 drm_exec_until_all_locked(&ctx->exec) { in reserve_bo_and_vm()
1144 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); in reserve_bo_and_vm()
1145 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_vm()
1149 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); in reserve_bo_and_vm()
1150 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_vm()
1158 drm_exec_fini(&ctx->exec); in reserve_bo_and_vm()
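/*
 * Editor's sketch of the generic drm_exec pattern used above, reduced to
 * a single object: drm_exec_retry_on_contention() restarts the loop after
 * a ww-mutex rollback, and the trailing fini drops all locks.
 */
	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	/* ...use the locked object, then... */
	drm_exec_fini(&exec);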
1163 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
1164 * @mem: KFD BO structure.
1172 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, in reserve_bo_and_cond_vms() argument
1177 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_cond_vms()
1180 ctx->sync = &mem->sync; in reserve_bo_and_cond_vms()
1181 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT | in reserve_bo_and_cond_vms()
1183 drm_exec_until_all_locked(&ctx->exec) { in reserve_bo_and_cond_vms()
1184 ctx->n_vms = 0; in reserve_bo_and_cond_vms()
1185 list_for_each_entry(entry, &mem->attachments, list) { in reserve_bo_and_cond_vms()
1186 if ((vm && vm != entry->bo_va->base.vm) || in reserve_bo_and_cond_vms()
1187 (entry->is_mapped != map_type in reserve_bo_and_cond_vms()
1191 ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm, in reserve_bo_and_cond_vms()
1192 &ctx->exec, 2); in reserve_bo_and_cond_vms()
1193 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_cond_vms()
1196 ++ctx->n_vms; in reserve_bo_and_cond_vms()
1199 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); in reserve_bo_and_cond_vms()
1200 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_cond_vms()
1208 drm_exec_fini(&ctx->exec); in reserve_bo_and_cond_vms()
1213 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1228 ret = amdgpu_sync_wait(ctx->sync, intr); in unreserve_bo_and_vms()
1230 drm_exec_fini(&ctx->exec); in unreserve_bo_and_vms()
1231 ctx->sync = NULL; in unreserve_bo_and_vms()
1235 static int unmap_bo_from_gpuvm(struct kgd_mem *mem, in unmap_bo_from_gpuvm() argument
1239 struct amdgpu_bo_va *bo_va = entry->bo_va; in unmap_bo_from_gpuvm()
1240 struct amdgpu_device *adev = entry->adev; in unmap_bo_from_gpuvm()
1241 struct amdgpu_vm *vm = bo_va->base.vm; in unmap_bo_from_gpuvm()
1243 if (bo_va->queue_refcount) { in unmap_bo_from_gpuvm()
1244 pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount); in unmap_bo_from_gpuvm()
1245 return -EBUSY; in unmap_bo_from_gpuvm()
1248 (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va); in unmap_bo_from_gpuvm()
1250 (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); in unmap_bo_from_gpuvm()
1252 (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL); in unmap_bo_from_gpuvm()
1257 static int update_gpuvm_pte(struct kgd_mem *mem, in update_gpuvm_pte() argument
1261 struct amdgpu_bo_va *bo_va = entry->bo_va; in update_gpuvm_pte()
1262 struct amdgpu_device *adev = entry->adev; in update_gpuvm_pte()
1265 ret = kfd_mem_dmamap_attachment(mem, entry); in update_gpuvm_pte()
1276 return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL); in update_gpuvm_pte()
1279 static int map_bo_to_gpuvm(struct kgd_mem *mem, in map_bo_to_gpuvm() argument
1287 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, in map_bo_to_gpuvm()
1288 amdgpu_bo_size(entry->bo_va->base.bo), in map_bo_to_gpuvm()
1289 entry->pte_flags); in map_bo_to_gpuvm()
1292 entry->va, ret); in map_bo_to_gpuvm()
1299 ret = update_gpuvm_pte(mem, entry, sync); in map_bo_to_gpuvm()
1308 unmap_bo_from_gpuvm(mem, entry, sync); in map_bo_to_gpuvm()
1309 kfd_mem_dmaunmap_attachment(mem, entry); in map_bo_to_gpuvm()
1319 list_for_each_entry(peer_vm, &process_info->vm_list_head, in process_validate_vms()
1335 list_for_each_entry(peer_vm, &process_info->vm_list_head, in process_sync_pds_resv()
1337 struct amdgpu_bo *pd = peer_vm->root.bo; in process_sync_pds_resv()
1339 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, in process_sync_pds_resv()
1355 list_for_each_entry(peer_vm, &process_info->vm_list_head, in process_update_pds()
1374 return -ENOMEM; in init_kfd_vm()
1376 mutex_init(&info->lock); in init_kfd_vm()
1377 mutex_init(&info->notifier_lock); in init_kfd_vm()
1378 INIT_LIST_HEAD(&info->vm_list_head); in init_kfd_vm()
1379 INIT_LIST_HEAD(&info->kfd_bo_list); in init_kfd_vm()
1380 INIT_LIST_HEAD(&info->userptr_valid_list); in init_kfd_vm()
1381 INIT_LIST_HEAD(&info->userptr_inval_list); in init_kfd_vm()
1383 info->eviction_fence = in init_kfd_vm()
1385 current->mm, in init_kfd_vm()
1387 if (!info->eviction_fence) { in init_kfd_vm()
1389 ret = -ENOMEM; in init_kfd_vm()
1393 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); in init_kfd_vm()
1394 INIT_DELAYED_WORK(&info->restore_userptr_work, in init_kfd_vm()
1400 vm->process_info = *process_info; in init_kfd_vm()
1403 ret = amdgpu_bo_reserve(vm->root.bo, true); in init_kfd_vm()
1411 ret = amdgpu_bo_sync_wait(vm->root.bo, in init_kfd_vm()
1415 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1); in init_kfd_vm()
1418 dma_resv_add_fence(vm->root.bo->tbo.base.resv, in init_kfd_vm()
1419 &vm->process_info->eviction_fence->base, in init_kfd_vm()
1421 amdgpu_bo_unreserve(vm->root.bo); in init_kfd_vm()
1424 mutex_lock(&vm->process_info->lock); in init_kfd_vm()
1425 list_add_tail(&vm->vm_list_node, in init_kfd_vm()
1426 &(vm->process_info->vm_list_head)); in init_kfd_vm()
1427 vm->process_info->n_vms++; in init_kfd_vm()
1429 *ef = dma_fence_get(&vm->process_info->eviction_fence->base); in init_kfd_vm()
1430 mutex_unlock(&vm->process_info->lock); in init_kfd_vm()
1437 amdgpu_bo_unreserve(vm->root.bo); in init_kfd_vm()
1439 vm->process_info = NULL; in init_kfd_vm()
1441 dma_fence_put(&info->eviction_fence->base); in init_kfd_vm()
1443 put_pid(info->pid); in init_kfd_vm()
1445 mutex_destroy(&info->lock); in init_kfd_vm()
1446 mutex_destroy(&info->notifier_lock); in init_kfd_vm()
1453  * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1457 * - USERPTR BOs are UNPINNABLE and will return error
1458 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1461 * Return: ZERO if successful in pinning, Non-Zero in case of error.
1471 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) { in amdgpu_amdkfd_gpuvm_pin_bo()
1476 if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) { in amdgpu_amdkfd_gpuvm_pin_bo()
1480 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_amdkfd_gpuvm_pin_bo()
1482 pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret); in amdgpu_amdkfd_gpuvm_pin_bo()
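/*
 * Editor's usage sketch (assumed, with GTT as an example domain): pinning
 * happens under BO reservation, after any CONTIGUOUS-VRAM migration shown
 * above.
 */
	ret = amdgpu_bo_reserve(bo, false);
	if (!ret) {
		ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
		amdgpu_bo_unreserve(bo);
	}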
1499  * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1502  * - Is an illegal request for USERPTR BOs and is ignored
1503 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1526 if (avm->process_info) in amdgpu_amdkfd_gpuvm_acquire_process_vm()
1527 return -EINVAL; in amdgpu_amdkfd_gpuvm_acquire_process_vm()
1547 struct amdkfd_process_info *process_info = vm->process_info; in amdgpu_amdkfd_gpuvm_destroy_cb()
1553 mutex_lock(&process_info->lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1554 process_info->n_vms--; in amdgpu_amdkfd_gpuvm_destroy_cb()
1555 list_del(&vm->vm_list_node); in amdgpu_amdkfd_gpuvm_destroy_cb()
1556 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1558 vm->process_info = NULL; in amdgpu_amdkfd_gpuvm_destroy_cb()
1560 /* Release per-process resources when last compute VM is destroyed */ in amdgpu_amdkfd_gpuvm_destroy_cb()
1561 if (!process_info->n_vms) { in amdgpu_amdkfd_gpuvm_destroy_cb()
1562 WARN_ON(!list_empty(&process_info->kfd_bo_list)); in amdgpu_amdkfd_gpuvm_destroy_cb()
1563 WARN_ON(!list_empty(&process_info->userptr_valid_list)); in amdgpu_amdkfd_gpuvm_destroy_cb()
1564 WARN_ON(!list_empty(&process_info->userptr_inval_list)); in amdgpu_amdkfd_gpuvm_destroy_cb()
1566 dma_fence_put(&process_info->eviction_fence->base); in amdgpu_amdkfd_gpuvm_destroy_cb()
1567 cancel_delayed_work_sync(&process_info->restore_userptr_work); in amdgpu_amdkfd_gpuvm_destroy_cb()
1568 put_pid(process_info->pid); in amdgpu_amdkfd_gpuvm_destroy_cb()
1569 mutex_destroy(&process_info->lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1570 mutex_destroy(&process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1578 struct amdgpu_bo *pd = avm->root.bo; in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1579 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1581 if (adev->asic_type < CHIP_VEGA10) in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1582 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1583 return avm->pd_phys_addr; in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1590 mutex_lock(&pinfo->lock); in amdgpu_amdkfd_block_mmu_notifications()
1591 WRITE_ONCE(pinfo->block_mmu_notifications, true); in amdgpu_amdkfd_block_mmu_notifications()
1592 mutex_unlock(&pinfo->lock); in amdgpu_amdkfd_block_mmu_notifications()
1600 mutex_lock(&pinfo->lock); in amdgpu_amdkfd_criu_resume()
1602 mutex_lock(&pinfo->notifier_lock); in amdgpu_amdkfd_criu_resume()
1603 pinfo->evicted_bos++; in amdgpu_amdkfd_criu_resume()
1604 mutex_unlock(&pinfo->notifier_lock); in amdgpu_amdkfd_criu_resume()
1605 if (!READ_ONCE(pinfo->block_mmu_notifications)) { in amdgpu_amdkfd_criu_resume()
1606 ret = -EINVAL; in amdgpu_amdkfd_criu_resume()
1609 WRITE_ONCE(pinfo->block_mmu_notifications, false); in amdgpu_amdkfd_criu_resume()
1611 &pinfo->restore_userptr_work, 0); in amdgpu_amdkfd_criu_resume()
1614 mutex_unlock(&pinfo->lock); in amdgpu_amdkfd_criu_resume()
1624 uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0); in amdgpu_amdkfd_get_available_memory()
1630 - adev->kfd.vram_used_aligned[xcp_id] in amdgpu_amdkfd_get_available_memory()
1631 - atomic64_read(&adev->vram_pin_size) in amdgpu_amdkfd_get_available_memory()
1632 - reserved_for_pt in amdgpu_amdkfd_get_available_memory()
1633 - reserved_for_ras; in amdgpu_amdkfd_get_available_memory()
1635 if (adev->apu_prefer_gtt) { in amdgpu_amdkfd_get_available_memory()
1638 kfd_mem_limit.max_system_mem_limit - in amdgpu_amdkfd_get_available_memory()
1641 ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit - in amdgpu_amdkfd_get_available_memory()
1661 void *drm_priv, struct kgd_mem **mem, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() argument
1673 int8_t xcp_id = -1; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1683 if (adev->apu_prefer_gtt) { in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1696 xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1697 0 : fpriv->xcp_id; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1708 return -EINVAL; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1714 return -EINVAL; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1717 return -ENOMEM; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1719 return -EINVAL; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1730 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1731 if (!*mem) { in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1732 ret = -ENOMEM; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1735 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1736 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1737 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1743 if ((*mem)->aql_queue) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1747 (*mem)->alloc_flags = flags; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1749 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1759 va, (*mem)->aql_queue ? size << 1 : size, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1769 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1774 ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1779 bo->tbo.sg = sg; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1780 bo->tbo.ttm->sg = sg; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1782 bo->kfd_bo = *mem; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1783 (*mem)->bo = bo; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1785 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1787 (*mem)->va = va; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1788 (*mem)->domain = domain; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1789 (*mem)->mapped_to_gpu_memory = 0; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1790 (*mem)->process_info = avm->process_info; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1792 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1796 ret = init_user_pages(*mem, user_addr, criu_resume); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1806 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1807 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1809 mutex_lock(&avm->process_info->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1810 if (avm->process_info->eviction_fence && in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1811 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1813 &avm->process_info->eviction_fence->base); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1814 mutex_unlock(&avm->process_info->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1827 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1828 drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1830 drm_vma_node_revoke(&gobj->vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1832 /* Don't unreserve system mem limit twice */ in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1837 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1838 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1842 kfree(*mem); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1852 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu() argument
1855 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1856 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1857 bool use_release_notifier = (mem->bo->kfd_bo == mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1864 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1867 if (mem->alloc_flags & in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1870 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1873 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1874 is_imported = mem->is_imported; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1875 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1876 /* lock is not needed after this, since mem is unused and will in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1882 mem->va, bo_size); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1883 return -EBUSY; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1887 mutex_lock(&process_info->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1888 list_del(&mem->validate_list); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1889 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1892 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1893 amdgpu_hmm_unregister(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1894 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1895 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1896 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1899 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1903 amdgpu_amdkfd_remove_eviction_fence(mem->bo, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1904 process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1905 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1906 mem->va + bo_size * (1 + mem->aql_queue)); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1909 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1910 kfd_mem_dmaunmap_attachment(mem, entry); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1917 amdgpu_sync_free(&mem->sync); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1922 if (mem->bo->tbo.sg) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1923 sg_free_table(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1924 kfree(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1933 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1934 (adev->apu_prefer_gtt && in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1935 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1942 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1943 drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1944 if (mem->dmabuf) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1945 dma_buf_put(mem->dmabuf); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1946 mem->dmabuf = NULL; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1948 mutex_destroy(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1951 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1954 drm_gem_object_put(&mem->bo->tbo.base); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1961 kfree(mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1967 struct amdgpu_device *adev, struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu() argument
1979 bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1982 return -EINVAL; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1989 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1995 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1996 mutex_lock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1997 is_invalid_userptr = !!mem->invalid; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1998 mutex_unlock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2001 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2003 domain = mem->domain; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2004 bo_size = bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2006 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2007 mem->va, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2008 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2011 if (!kfd_mem_is_attached(avm, mem)) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2012 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2017 ret = reserve_bo_and_vm(mem, avm, &ctx); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2026 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2027 bo->tbo.resource->mem_type == TTM_PL_SYSTEM) in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2034 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2035 if (entry->bo_va->base.vm != avm || entry->is_mapped) in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2038 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2039 entry->va, entry->va + bo_size, entry); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2041 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2054 entry->is_mapped = true; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2055 mem->mapped_to_gpu_memory++; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2057 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2067 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2068 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2072 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_dmaunmap_mem() argument
2080 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2082 ret = amdgpu_bo_reserve(mem->bo, true); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2086 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2087 if (entry->bo_va->base.vm != vm) in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2089 if (entry->bo_va->base.bo->tbo.ttm && in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2090 !entry->bo_va->base.bo->tbo.ttm->sg) in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2093 kfd_mem_dmaunmap_attachment(mem, entry); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2096 amdgpu_bo_unreserve(mem->bo); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2098 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2104 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu() argument
2107 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2112 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2114 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2119 ret = -EINVAL; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2127 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2128 mem->va, in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2129 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2132 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2133 if (entry->bo_va->base.vm != avm || !entry->is_mapped) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2136 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2137 entry->va, entry->va + bo_size, entry); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2139 ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2143 entry->is_mapped = false; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2145 mem->mapped_to_gpu_memory--; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2147 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2153 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2158 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) in amdgpu_amdkfd_gpuvm_sync_memory() argument
2165 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2166 amdgpu_sync_clone(&mem->sync, &sync); in amdgpu_amdkfd_gpuvm_sync_memory()
2167 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
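/*
 * Editor's reconstruction sketch of the whole sync helper: the pending
 * fences are cloned under mem->lock, then waited on outside it so the
 * wait cannot block other users of the BO.
 */
int amdgpu_amdkfd_gpuvm_sync_memory_sketch(struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}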
2175 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2198 ret = amdgpu_ttm_alloc_gart(&bo->tbo); in amdgpu_amdkfd_map_gtt_bo_to_gart()
2205 bo, bo->vm_bo->vm->process_info->eviction_fence); in amdgpu_amdkfd_map_gtt_bo_to_gart()
2222 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
2224 * @mem: Buffer object to be mapped for CPU access
2235 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() argument
2239 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2241 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2243 return -EINVAL; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2246 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2267 bo, mem->process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2274 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2282 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2287 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
2289 * @mem: Buffer object to be unmapped for CPU access
2295 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() argument
2297 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel()
2306 struct kfd_vm_fault_info *mem) in amdgpu_amdkfd_gpuvm_get_vm_fault_info() argument
2308 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2309 *mem = *adev->gmc.vm_fault_info; in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2311 atomic_set(&adev->gmc.vm_fault_info_updated, 0); in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2320 struct kgd_mem **mem, uint64_t *size, in import_obj_create() argument
2328 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | in import_obj_create()
2331 return -EINVAL; in import_obj_create()
2333 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in import_obj_create()
2334 if (!*mem) in import_obj_create()
2335 return -ENOMEM; in import_obj_create()
2337 ret = drm_vma_node_allow(&obj->vma_node, drm_priv); in import_obj_create()
2347 INIT_LIST_HEAD(&(*mem)->attachments); in import_obj_create()
2348 mutex_init(&(*mem)->lock); in import_obj_create()
2350 (*mem)->alloc_flags = in import_obj_create()
2351 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? in import_obj_create()
2357 (*mem)->dmabuf = dma_buf; in import_obj_create()
2358 (*mem)->bo = bo; in import_obj_create()
2359 (*mem)->va = va; in import_obj_create()
2360 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && in import_obj_create()
2361 !adev->apu_prefer_gtt ? in import_obj_create()
2364 (*mem)->mapped_to_gpu_memory = 0; in import_obj_create()
2365 (*mem)->process_info = avm->process_info; in import_obj_create()
2366 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); in import_obj_create()
2367 amdgpu_sync_create(&(*mem)->sync); in import_obj_create()
2368 (*mem)->is_imported = true; in import_obj_create()
2370 mutex_lock(&avm->process_info->lock); in import_obj_create()
2371 if (avm->process_info->eviction_fence && in import_obj_create()
2372 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) in import_obj_create()
2373 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, in import_obj_create()
2374 &avm->process_info->eviction_fence->base); in import_obj_create()
2375 mutex_unlock(&avm->process_info->lock); in import_obj_create()
2382 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in import_obj_create()
2383 drm_vma_node_revoke(&obj->vma_node, drm_priv); in import_obj_create()
2385 kfree(*mem); in import_obj_create()
2391 struct kgd_mem **mem, uint64_t *size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd() argument
2398 ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2402 obj = drm_gem_object_lookup(adev->kfd.client.file, handle); in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2404 ret = -EINVAL; in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2408 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2413 (*mem)->gem_handle = handle; in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2420 drm_gem_handle_delete(adev->kfd.client.file, handle); in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2424 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_export_dmabuf() argument
2429 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2430 ret = kfd_mem_export_dmabuf(mem); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2434 get_dma_buf(mem->dmabuf); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2435 *dma_buf = mem->dmabuf; in amdgpu_amdkfd_gpuvm_export_dmabuf()
2437 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
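/*
 * Editor's usage sketch: the export returns an extra dma_buf reference
 * (taken via get_dma_buf() above) that the caller must drop.
 */
	struct dma_buf *buf;

	if (!amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &buf)) {
		/* ...hand buf to the importing device... */
		dma_buf_put(buf);	/* drop the reference taken for us */
	}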
2452 unsigned long cur_seq, struct kgd_mem *mem) in amdgpu_amdkfd_evict_userptr() argument
2454 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_evict_userptr()
2460 if (READ_ONCE(process_info->block_mmu_notifications)) in amdgpu_amdkfd_evict_userptr()
2463 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_evict_userptr()
2466 mem->invalid++; in amdgpu_amdkfd_evict_userptr()
2467 if (++process_info->evicted_bos == 1) { in amdgpu_amdkfd_evict_userptr()
2469 r = kgd2kfd_quiesce_mm(mni->mm, in amdgpu_amdkfd_evict_userptr()
2472 if (r && r != -ESRCH) in amdgpu_amdkfd_evict_userptr()
2475 if (r != -ESRCH) in amdgpu_amdkfd_evict_userptr()
2477 &process_info->restore_userptr_work, in amdgpu_amdkfd_evict_userptr()
2480 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_evict_userptr()
2494 struct kgd_mem *mem, *tmp_mem; in update_invalid_user_pages() local
2500 mutex_lock(&process_info->notifier_lock); in update_invalid_user_pages()
2503 list_for_each_entry_safe(mem, tmp_mem, in update_invalid_user_pages()
2504 &process_info->userptr_valid_list, in update_invalid_user_pages()
2506 if (mem->invalid) in update_invalid_user_pages()
2507 list_move_tail(&mem->validate_list, in update_invalid_user_pages()
2508 &process_info->userptr_inval_list); in update_invalid_user_pages()
2511 list_for_each_entry(mem, &process_info->userptr_inval_list, in update_invalid_user_pages()
2513 invalid = mem->invalid; in update_invalid_user_pages()
2520 bo = mem->bo; in update_invalid_user_pages()
2522 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); in update_invalid_user_pages()
2523 mem->range = NULL; in update_invalid_user_pages()
2528 mutex_unlock(&process_info->notifier_lock); in update_invalid_user_pages()
2533 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) { in update_invalid_user_pages()
2535 return -EAGAIN; in update_invalid_user_pages()
2537 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in update_invalid_user_pages()
2542 return -EAGAIN; in update_invalid_user_pages()
2547 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, in update_invalid_user_pages()
2548 &mem->range); in update_invalid_user_pages()
2552 /* Return -EFAULT bad address error as success. It will in update_invalid_user_pages()
2557 * Return other error -EBUSY or -ENOMEM to retry restore in update_invalid_user_pages()
2559 if (ret != -EFAULT) in update_invalid_user_pages()
2565 mutex_lock(&process_info->notifier_lock); in update_invalid_user_pages()
2570 if (mem->invalid != invalid) { in update_invalid_user_pages()
2571 ret = -EAGAIN; in update_invalid_user_pages()
2574 /* set mem valid if mem has hmm range associated */ in update_invalid_user_pages()
2575 if (mem->range) in update_invalid_user_pages()
2576 mem->invalid = 0; in update_invalid_user_pages()
2580 mutex_unlock(&process_info->notifier_lock); in update_invalid_user_pages()
2597 struct kgd_mem *mem, *tmp_mem; in validate_invalid_user_pages() local
2607 list_for_each_entry(peer_vm, &process_info->vm_list_head, in validate_invalid_user_pages()
2616 list_for_each_entry(mem, &process_info->userptr_inval_list, in validate_invalid_user_pages()
2620 gobj = &mem->bo->tbo.base; in validate_invalid_user_pages()
2633 list_for_each_entry_safe(mem, tmp_mem, in validate_invalid_user_pages()
2634 &process_info->userptr_inval_list, in validate_invalid_user_pages()
2638 bo = mem->bo; in validate_invalid_user_pages()
2641 if (bo->tbo.ttm->pages[0]) { in validate_invalid_user_pages()
2642 amdgpu_bo_placement_from_domain(bo, mem->domain); in validate_invalid_user_pages()
2643 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in validate_invalid_user_pages()
2656 list_for_each_entry(attachment, &mem->attachments, list) { in validate_invalid_user_pages()
2657 if (!attachment->is_mapped) in validate_invalid_user_pages()
2660 kfd_mem_dmaunmap_attachment(mem, attachment); in validate_invalid_user_pages()
2661 ret = update_gpuvm_pte(mem, attachment, &sync); in validate_invalid_user_pages()
2665 mutex_lock(&process_info->notifier_lock); in validate_invalid_user_pages()
2666 mem->invalid++; in validate_invalid_user_pages()
2667 mutex_unlock(&process_info->notifier_lock); in validate_invalid_user_pages()
2690 struct kgd_mem *mem, *tmp_mem; in confirm_valid_user_pages_locked() local
2693 list_for_each_entry_safe(mem, tmp_mem, in confirm_valid_user_pages_locked()
2694 &process_info->userptr_inval_list, in confirm_valid_user_pages_locked()
2698 /* keep mem without hmm range at userptr_inval_list */ in confirm_valid_user_pages_locked()
2699 if (!mem->range) in confirm_valid_user_pages_locked()
2702 /* Only check mem with hmm range associated */ in confirm_valid_user_pages_locked()
2704 mem->bo->tbo.ttm, mem->range); in confirm_valid_user_pages_locked()
2706 mem->range = NULL; in confirm_valid_user_pages_locked()
2708 WARN(!mem->invalid, "Invalid BO not marked invalid"); in confirm_valid_user_pages_locked()
2709 ret = -EAGAIN; in confirm_valid_user_pages_locked()
2713 if (mem->invalid) { in confirm_valid_user_pages_locked()
2715 ret = -EAGAIN; in confirm_valid_user_pages_locked()
2719 list_move_tail(&mem->validate_list, in confirm_valid_user_pages_locked()
2720 &process_info->userptr_valid_list); in confirm_valid_user_pages_locked()
2742 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2743 evicted_bos = process_info->evicted_bos; in amdgpu_amdkfd_restore_userptr_worker()
2744 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2749 usertask = get_pid_task(process_info->pid, PIDTYPE_PID); in amdgpu_amdkfd_restore_userptr_worker()
2758 mutex_lock(&process_info->lock); in amdgpu_amdkfd_restore_userptr_worker()
2766 if (!list_empty(&process_info->userptr_inval_list)) { in amdgpu_amdkfd_restore_userptr_worker()
2775 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2776 if (process_info->evicted_bos != evicted_bos) in amdgpu_amdkfd_restore_userptr_worker()
2784 process_info->evicted_bos = evicted_bos = 0; in amdgpu_amdkfd_restore_userptr_worker()
2794 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2796 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_restore_userptr_worker()
2801 &process_info->restore_userptr_work, in amdgpu_amdkfd_restore_userptr_worker()
2814 /* protected by process_info->lock */); in replace_eviction_fence()
2827 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2833 * should be called when the Process is still valid. BO restore involves -
2849 struct kgd_mem *mem; in amdgpu_amdkfd_gpuvm_restore_process_bos() local
2859 mutex_lock(&process_info->lock); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2863 list_for_each_entry(peer_vm, &process_info->vm_list_head, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2876 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2880 gobj = &mem->bo->tbo.base; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2893 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2896 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2897 uint32_t domain = mem->domain; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2914 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2937 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2941 list_for_each_entry(attachment, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
2942 if (!attachment->is_mapped) in amdgpu_amdkfd_gpuvm_restore_process_bos()
2945 kfd_mem_dmaunmap_attachment(mem, attachment); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2946 ret = update_gpuvm_pte(mem, attachment, &sync_obj); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2955 list_for_each_entry(peer_vm, &process_info->vm_list_head, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2958 peer_vm->root.bo->tbo.bdev); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2996 if (dma_fence_is_signaled(&process_info->eviction_fence->base)) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
2999 process_info->eviction_fence->base.context, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3000 process_info->eviction_fence->mm, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3005 ret = -ENOMEM; in amdgpu_amdkfd_gpuvm_restore_process_bos()
3008 dma_fence_put(&process_info->eviction_fence->base); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3009 process_info->eviction_fence = new_fence; in amdgpu_amdkfd_gpuvm_restore_process_bos()
3010 replace_eviction_fence(ef, dma_fence_get(&new_fence->base)); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3012 WARN_ONCE(*ef != &process_info->eviction_fence->base, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3017 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
3018 if (mem->bo->tbo.pin_count) in amdgpu_amdkfd_gpuvm_restore_process_bos()
3021 dma_resv_add_fence(mem->bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3022 &process_info->eviction_fence->base, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3026 list_for_each_entry(peer_vm, &process_info->vm_list_head, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3028 struct amdgpu_bo *bo = peer_vm->root.bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
3030 dma_resv_add_fence(bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3031 &process_info->eviction_fence->base, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3039 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3043 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) in amdgpu_amdkfd_add_gws_to_process() argument
3050 return -EINVAL; in amdgpu_amdkfd_add_gws_to_process()
3052 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_add_gws_to_process()
3053 if (!*mem) in amdgpu_amdkfd_add_gws_to_process()
3054 return -ENOMEM; in amdgpu_amdkfd_add_gws_to_process()
3056 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3057 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_add_gws_to_process()
3058 (*mem)->bo = amdgpu_bo_ref(gws_bo); in amdgpu_amdkfd_add_gws_to_process()
3059 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; in amdgpu_amdkfd_add_gws_to_process()
3060 (*mem)->process_info = process_info; in amdgpu_amdkfd_add_gws_to_process()
3061 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); in amdgpu_amdkfd_add_gws_to_process()
3062 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3066 mutex_lock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3082 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); in amdgpu_amdkfd_add_gws_to_process()
3085 dma_resv_add_fence(gws_bo->tbo.base.resv, in amdgpu_amdkfd_add_gws_to_process()
3086 &process_info->eviction_fence->base, in amdgpu_amdkfd_add_gws_to_process()
3089 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3097 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3098 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3099 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); in amdgpu_amdkfd_add_gws_to_process()
3101 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3102 kfree(*mem); in amdgpu_amdkfd_add_gws_to_process()
3103 *mem = NULL; in amdgpu_amdkfd_add_gws_to_process()
3107 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) in amdgpu_amdkfd_remove_gws_from_process() argument
3111 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; in amdgpu_amdkfd_remove_gws_from_process()
3112 struct amdgpu_bo *gws_bo = kgd_mem->bo; in amdgpu_amdkfd_remove_gws_from_process()
3126 process_info->eviction_fence); in amdgpu_amdkfd_remove_gws_from_process()
3128 amdgpu_sync_free(&kgd_mem->sync); in amdgpu_amdkfd_remove_gws_from_process()
3130 mutex_destroy(&kgd_mem->lock); in amdgpu_amdkfd_remove_gws_from_process()
3131 kfree(mem); in amdgpu_amdkfd_remove_gws_from_process()
3135 /* Returns GPU-specific tiling mode information */
3139 config->gb_addr_config = adev->gfx.config.gb_addr_config; in amdgpu_amdkfd_get_tile_config()
3140 config->tile_config_ptr = adev->gfx.config.tile_mode_array; in amdgpu_amdkfd_get_tile_config()
3141 config->num_tile_configs = in amdgpu_amdkfd_get_tile_config()
3142 ARRAY_SIZE(adev->gfx.config.tile_mode_array); in amdgpu_amdkfd_get_tile_config()
3143 config->macro_tile_config_ptr = in amdgpu_amdkfd_get_tile_config()
3144 adev->gfx.config.macrotile_mode_array; in amdgpu_amdkfd_get_tile_config()
3145 config->num_macro_tile_configs = in amdgpu_amdkfd_get_tile_config()
3146 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); in amdgpu_amdkfd_get_tile_config()
3149 config->num_banks = adev->gfx.config.num_banks; in amdgpu_amdkfd_get_tile_config()
3150 config->num_ranks = adev->gfx.config.num_ranks; in amdgpu_amdkfd_get_tile_config()
3155 bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem) in amdgpu_amdkfd_bo_mapped_to_dev() argument
3160 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_bo_mapped_to_dev()
3161 if (entry->is_mapped && entry->bo_va->base.vm == vm) in amdgpu_amdkfd_bo_mapped_to_dev()
3173 seq_printf(m, "System mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()
3176 seq_printf(m, "TTM mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()
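/*
 * Editor's reconstruction sketch of the debugfs dump around the two
 * seq_printf() calls above; the spinlock bracketing is an assumption
 * consistent with the mem_limit_lock initialized by this file.
 */
int kfd_debugfs_kfd_mem_limits_sketch(struct seq_file *m, void *data)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);
	seq_printf(m, "System mem used %lldM out of %lluM\n",
		   (kfd_mem_limit.system_mem_used >> 20),
		   (kfd_mem_limit.max_system_mem_limit >> 20));
	seq_printf(m, "TTM mem used %lldM out of %lluM\n",
		   (kfd_mem_limit.ttm_mem_used >> 20),
		   (kfd_mem_limit.max_ttm_mem_limit >> 20));
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return 0;
}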