Lines Matching +full:proc +full:- +full:id
30 #include <linux/amd-iommu.h>
58 /* Ordered, single-threaded workqueue for restoring evicted
61 * their BOs and result in a live-lock situation where processes
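The comment fragment above (lines 58-61) explains why the restore workqueue (kfd_restore_wq, used at line 1599 below) has to be ordered and single-threaded: two restore jobs running concurrently could keep evicting each other's BOs and live-lock. A minimal sketch of creating such a queue with alloc_ordered_workqueue(); the queue name and the WQ_FREEZABLE flag here are illustrative assumptions, not the driver's exact call.

/* Sketch: an ordered workqueue runs at most one work item at a time,
 * in queueing order, so two restore jobs can never run concurrently
 * and evict each other's BOs. Name and flags are illustrative. */
#include <linux/workqueue.h>

static struct workqueue_struct *restore_wq;     /* hypothetical name */

static int example_create_restore_wq(void)
{
        restore_wq = alloc_ordered_workqueue("example_restore_wq", WQ_FREEZABLE);
        if (!restore_wq)
                return -ENOMEM;
        return 0;
}

static void example_destroy_restore_wq(void)
{
        destroy_workqueue(restore_wq);
}
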
114 pdd = workarea->pdd; in kfd_sdma_activity_worker()
117 dqm = pdd->dev->dqm; in kfd_sdma_activity_worker()
118 qpd = &pdd->qpd; in kfd_sdma_activity_worker()
125 * we loop over all SDMA queues and get their counts from user-space. in kfd_sdma_activity_worker()
131 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list, in kfd_sdma_activity_worker()
137 * from the qpd->queues_list. in kfd_sdma_activity_worker()
138 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted. in kfd_sdma_activity_worker()
150 list_for_each_entry(q, &qpd->queues_list, list) { in kfd_sdma_activity_worker()
151 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) && in kfd_sdma_activity_worker()
152 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI)) in kfd_sdma_activity_worker()
161 INIT_LIST_HEAD(&sdma_q->list); in kfd_sdma_activity_worker()
162 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr; in kfd_sdma_activity_worker()
163 sdma_q->queue_id = q->properties.queue_id; in kfd_sdma_activity_worker()
164 list_add_tail(&sdma_q->list, &sdma_q_list.list); in kfd_sdma_activity_worker()
169 * qpd->queues_list. Return the past activity count as the total sdma in kfd_sdma_activity_worker()
173 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter; in kfd_sdma_activity_worker()
183 mm = get_task_mm(pdd->process->lead_thread); in kfd_sdma_activity_worker()
191 ret = read_sdma_queue_counter(sdma_q->rptr, &val); in kfd_sdma_activity_worker()
193 pr_debug("Failed to read SDMA queue active counter for queue id: %d", in kfd_sdma_activity_worker()
194 sdma_q->queue_id); in kfd_sdma_activity_worker()
196 sdma_q->sdma_val = val; in kfd_sdma_activity_worker()
197 workarea->sdma_activity_counter += val; in kfd_sdma_activity_worker()
210 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter; in kfd_sdma_activity_worker()
212 list_for_each_entry(q, &qpd->queues_list, list) { in kfd_sdma_activity_worker()
216 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) && in kfd_sdma_activity_worker()
217 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI)) in kfd_sdma_activity_worker()
221 if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) && in kfd_sdma_activity_worker()
222 (sdma_q->queue_id == q->properties.queue_id)) { in kfd_sdma_activity_worker()
223 list_del(&sdma_q->list); in kfd_sdma_activity_worker()
234 * from qpd->queues_list during SDMA usage read. Subtract the SDMA in kfd_sdma_activity_worker()
238 workarea->sdma_activity_counter -= sdma_q->sdma_val; in kfd_sdma_activity_worker()
239 list_del(&sdma_q->list); in kfd_sdma_activity_worker()
247 list_del(&sdma_q->list); in kfd_sdma_activity_worker()
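The matched lines above come from kfd_sdma_activity_worker(), which implements the scheme described in the comment at lines 125-138: snapshot the process's SDMA queues under the DQM lock, drop the lock and read each queue's read pointer from user space with the lead thread's mm attached, then reconcile against queues destroyed in the meantime. A condensed sketch of that pattern follows; the types and helpers (sdma_snapshot, read_user_counter) are simplified stand-ins, not the driver's definitions.

/* Condensed sketch of the snapshot / read / reconcile pattern used by
 * kfd_sdma_activity_worker(). Types and helpers are stand-ins. */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

struct sdma_snapshot {
        struct list_head list;
        u64 __user *rptr;       /* user-space read pointer of the queue */
        unsigned int queue_id;
        u64 val;
};

static int read_user_counter(u64 __user *rptr, u64 *val)
{
        /* The real helper reads the queue's read pointer from user memory. */
        return copy_from_user(val, rptr, sizeof(*val)) ? -EFAULT : 0;
}

static u64 example_sdma_activity(struct list_head *snap_list,
                                 struct task_struct *lead_thread)
{
        struct sdma_snapshot *s;
        struct mm_struct *mm;
        u64 total = 0;

        /* Step 2: with the DQM lock dropped, attach the process mm and
         * read each snapshotted counter from user space. */
        mm = get_task_mm(lead_thread);
        if (!mm)
                return 0;

        kthread_use_mm(mm);
        list_for_each_entry(s, snap_list, list) {
                if (!read_user_counter(s->rptr, &s->val))
                        total += s->val;
        }
        kthread_unuse_mm(mm);
        mmput(mm);

        /* Step 3 (not shown): retake the lock and subtract the values of
         * snapshotted queues that were destroyed meanwhile, since their
         * activity is already folded into the past activity counter. */
        return total;
}
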
253 * @kfd_get_cu_occupancy() - Collect number of waves in-flight on this device
271 struct kfd_process *proc = NULL; in kfd_get_cu_occupancy() local
275 dev = pdd->dev; in kfd_get_cu_occupancy()
276 if (dev->kfd2kgd->get_cu_occupancy == NULL) in kfd_get_cu_occupancy()
277 return -EINVAL; in kfd_get_cu_occupancy()
280 proc = pdd->process; in kfd_get_cu_occupancy()
281 if (pdd->qpd.queue_count == 0) { in kfd_get_cu_occupancy()
282 pr_debug("Gpu-Id: %d has no active queues for process %d\n", in kfd_get_cu_occupancy()
283 dev->id, proc->pasid); in kfd_get_cu_occupancy()
290 dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt, in kfd_get_cu_occupancy()
294 cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; in kfd_get_cu_occupancy()
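The arithmetic at line 294 is ceiling division: the wave count returned by the get_cu_occupancy KGD callback is rounded up to a whole number of compute units. The same computation written with the usual kernel helper, as a sketch rather than the driver's code:

/* Line 294 rounds waves up to whole CUs; equivalently: */
#include <linux/kernel.h>

static inline int waves_to_cus(int wave_cnt, int max_waves_per_cu)
{
        return DIV_ROUND_UP(wave_cnt, max_waves_per_cu);
}
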
301 if (strcmp(attr->name, "pasid") == 0) { in kfd_procfs_show()
305 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid); in kfd_procfs_show()
306 } else if (strncmp(attr->name, "vram_", 5) == 0) { in kfd_procfs_show()
309 return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); in kfd_procfs_show()
310 } else if (strncmp(attr->name, "sdma_", 5) == 0) { in kfd_procfs_show()
330 return -EINVAL; in kfd_procfs_show()
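kfd_procfs_show() above dispatches on the attribute name: "pasid" prints the process PASID, "vram_<gpuid>" the VRAM usage in bytes, and "sdma_<gpuid>" the SDMA activity counter. These attributes appear as small read-only files under the per-process KFD sysfs directory; a minimal user-space reader, with the path and pid treated as assumptions rather than taken from this listing:

/* Minimal user-space reader for one of the per-process KFD files.
 * The path and pid below are illustrative assumptions. */
#include <stdio.h>

int main(void)
{
        char buf[64];
        FILE *f = fopen("/sys/class/kfd/kfd/proc/1234/pasid", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("pasid: %s", buf);
        fclose(f);
        return 0;
}
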
359 &kfd_device->kobj, "proc"); in kfd_procfs_init()
361 pr_warn("Could not create procfs proc folder"); in kfd_procfs_init()
381 if (!strcmp(attr->name, "size")) in kfd_procfs_queue_show()
383 q->properties.queue_size); in kfd_procfs_queue_show()
384 else if (!strcmp(attr->name, "type")) in kfd_procfs_queue_show()
385 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type); in kfd_procfs_queue_show()
386 else if (!strcmp(attr->name, "gpuid")) in kfd_procfs_queue_show()
387 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id); in kfd_procfs_queue_show()
397 if (strcmp(attr->name, "evicted_ms") == 0) { in kfd_procfs_stats_show()
403 evict_jiffies = atomic64_read(&pdd->evict_duration_counter); in kfd_procfs_stats_show()
411 } else if (strcmp(attr->name, "cu_occupancy") == 0) { in kfd_procfs_stats_show()
466 struct kfd_process *proc; in kfd_procfs_add_queue() local
469 if (!q || !q->process) in kfd_procfs_add_queue()
470 return -EINVAL; in kfd_procfs_add_queue()
471 proc = q->process; in kfd_procfs_add_queue()
473 /* Create proc/<pid>/queues/<queue id> folder */ in kfd_procfs_add_queue()
474 if (!proc->kobj_queues) in kfd_procfs_add_queue()
475 return -EFAULT; in kfd_procfs_add_queue()
476 ret = kobject_init_and_add(&q->kobj, &procfs_queue_type, in kfd_procfs_add_queue()
477 proc->kobj_queues, "%u", q->properties.queue_id); in kfd_procfs_add_queue()
479 pr_warn("Creating proc/<pid>/queues/%u failed", in kfd_procfs_add_queue()
480 q->properties.queue_id); in kfd_procfs_add_queue()
481 kobject_put(&q->kobj); in kfd_procfs_add_queue()
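Note the error handling at line 481: after kobject_init_and_add() fails, the kobject is dropped with kobject_put() rather than freed directly, so the ktype's release callback still runs and owns the memory. A minimal sketch of that idiom with a hypothetical example_ktype:

/* Sketch of the kobject_init_and_add()/kobject_put() error idiom used
 * above; example_ktype and its release function are hypothetical. */
#include <linux/kobject.h>
#include <linux/slab.h>

struct example_obj {
        struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
        .release = example_release,
        .sysfs_ops = &kobj_sysfs_ops,
};

static int example_add(struct kobject *parent, unsigned int id)
{
        struct example_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
        int ret;

        if (!o)
                return -ENOMEM;
        ret = kobject_init_and_add(&o->kobj, &example_ktype, parent, "%u", id);
        if (ret)
                kobject_put(&o->kobj);  /* frees via example_release() */
        return ret;
}
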
494 return -EINVAL; in kfd_sysfs_create_file()
496 attr->name = name; in kfd_sysfs_create_file()
497 attr->mode = KFD_SYSFS_FILE_MODE; in kfd_sysfs_create_file()
500 ret = sysfs_create_file(p->kobj, attr); in kfd_sysfs_create_file()
512 return -EINVAL; in kfd_procfs_add_sysfs_stats()
514 if (!p->kobj) in kfd_procfs_add_sysfs_stats()
515 return -EFAULT; in kfd_procfs_add_sysfs_stats()
519 * - proc/<pid>/stats_<gpuid>/ in kfd_procfs_add_sysfs_stats()
520 * - proc/<pid>/stats_<gpuid>/evicted_ms in kfd_procfs_add_sysfs_stats()
521 * - proc/<pid>/stats_<gpuid>/cu_occupancy in kfd_procfs_add_sysfs_stats()
523 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_procfs_add_sysfs_stats()
527 "stats_%u", pdd->dev->id); in kfd_procfs_add_sysfs_stats()
530 return -ENOMEM; in kfd_procfs_add_sysfs_stats()
534 p->kobj, in kfd_procfs_add_sysfs_stats()
538 pr_warn("Creating KFD proc/stats_%s folder failed", in kfd_procfs_add_sysfs_stats()
544 pdd->kobj_stats = kobj_stats; in kfd_procfs_add_sysfs_stats()
545 pdd->attr_evict.name = "evicted_ms"; in kfd_procfs_add_sysfs_stats()
546 pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE; in kfd_procfs_add_sysfs_stats()
547 sysfs_attr_init(&pdd->attr_evict); in kfd_procfs_add_sysfs_stats()
548 ret = sysfs_create_file(kobj_stats, &pdd->attr_evict); in kfd_procfs_add_sysfs_stats()
551 (int)pdd->dev->id); in kfd_procfs_add_sysfs_stats()
554 if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) { in kfd_procfs_add_sysfs_stats()
555 pdd->attr_cu_occupancy.name = "cu_occupancy"; in kfd_procfs_add_sysfs_stats()
556 pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE; in kfd_procfs_add_sysfs_stats()
557 sysfs_attr_init(&pdd->attr_cu_occupancy); in kfd_procfs_add_sysfs_stats()
559 &pdd->attr_cu_occupancy); in kfd_procfs_add_sysfs_stats()
562 pdd->attr_cu_occupancy.name, in kfd_procfs_add_sysfs_stats()
563 (int)pdd->dev->id); in kfd_procfs_add_sysfs_stats()
577 return -EINVAL; in kfd_procfs_add_sysfs_files()
579 if (!p->kobj) in kfd_procfs_add_sysfs_files()
580 return -EFAULT; in kfd_procfs_add_sysfs_files()
584 * - proc/<pid>/vram_<gpuid> in kfd_procfs_add_sysfs_files()
585 * - proc/<pid>/sdma_<gpuid> in kfd_procfs_add_sysfs_files()
587 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_procfs_add_sysfs_files()
588 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u", in kfd_procfs_add_sysfs_files()
589 pdd->dev->id); in kfd_procfs_add_sysfs_files()
590 ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename); in kfd_procfs_add_sysfs_files()
592 pr_warn("Creating vram usage for gpu id %d failed", in kfd_procfs_add_sysfs_files()
593 (int)pdd->dev->id); in kfd_procfs_add_sysfs_files()
595 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u", in kfd_procfs_add_sysfs_files()
596 pdd->dev->id); in kfd_procfs_add_sysfs_files()
597 ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename); in kfd_procfs_add_sysfs_files()
599 pr_warn("Creating sdma usage for gpu id %d failed", in kfd_procfs_add_sysfs_files()
600 (int)pdd->dev->id); in kfd_procfs_add_sysfs_files()
611 kobject_del(&q->kobj); in kfd_procfs_del_queue()
612 kobject_put(&q->kobj); in kfd_procfs_del_queue()
624 return -ENOMEM; in kfd_process_create_wq()
645 struct kfd_dev *dev = pdd->dev; in kfd_process_free_gpuvm()
647 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm); in kfd_process_free_gpuvm()
648 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL); in kfd_process_free_gpuvm()
651 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
655 * not need to take p->mutex.
661 struct kfd_dev *kdev = pdd->dev; in kfd_process_alloc_gpuvm()
666 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, in kfd_process_alloc_gpuvm()
667 pdd->vm, &mem, NULL, flags); in kfd_process_alloc_gpuvm()
671 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm); in kfd_process_alloc_gpuvm()
675 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true); in kfd_process_alloc_gpuvm()
683 * We do not need to take p->mutex, because the process is just in kfd_process_alloc_gpuvm()
694 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, in kfd_process_alloc_gpuvm()
712 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL); in kfd_process_alloc_gpuvm()
718 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
726 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_device_reserve_ib_mem()
734 if (qpd->ib_kaddr || !qpd->ib_base) in kfd_process_device_reserve_ib_mem()
738 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, in kfd_process_device_reserve_ib_mem()
743 qpd->ib_kaddr = kaddr; in kfd_process_device_reserve_ib_mem()
754 if (!thread->mm) in kfd_create_process()
755 return ERR_PTR(-EINVAL); in kfd_create_process()
758 if (thread->group_leader->mm != thread->mm) in kfd_create_process()
759 return ERR_PTR(-EINVAL); in kfd_create_process()
786 process->kobj = kfd_alloc_struct(process->kobj); in kfd_create_process()
787 if (!process->kobj) { in kfd_create_process()
791 ret = kobject_init_and_add(process->kobj, &procfs_type, in kfd_create_process()
793 (int)process->lead_thread->pid); in kfd_create_process()
796 kobject_put(process->kobj); in kfd_create_process()
800 process->attr_pasid.name = "pasid"; in kfd_create_process()
801 process->attr_pasid.mode = KFD_SYSFS_FILE_MODE; in kfd_create_process()
802 sysfs_attr_init(&process->attr_pasid); in kfd_create_process()
803 ret = sysfs_create_file(process->kobj, &process->attr_pasid); in kfd_create_process()
806 (int)process->lead_thread->pid); in kfd_create_process()
808 process->kobj_queues = kobject_create_and_add("queues", in kfd_create_process()
809 process->kobj); in kfd_create_process()
810 if (!process->kobj_queues) in kfd_create_process()
811 pr_warn("Creating KFD proc/queues folder failed"); in kfd_create_process()
816 (int)process->lead_thread->pid); in kfd_create_process()
821 (int)process->lead_thread->pid); in kfd_create_process()
825 kref_get(&process->ref); in kfd_create_process()
835 if (!thread->mm) in kfd_get_process()
836 return ERR_PTR(-EINVAL); in kfd_get_process()
839 if (thread->group_leader->mm != thread->mm) in kfd_get_process()
840 return ERR_PTR(-EINVAL); in kfd_get_process()
844 return ERR_PTR(-EINVAL); in kfd_get_process()
855 if (process->mm == mm) in find_process_by_mm()
867 p = find_process_by_mm(thread->mm); in find_process()
875 kref_put(&p->ref, kfd_process_ref_release); in kfd_unref_process()
880 struct kfd_process *p = pdd->process; in kfd_process_device_free_bos()
882 int id; in kfd_process_device_free_bos() local
888 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in kfd_process_device_free_bos()
891 list_for_each_entry(peer_pdd, &p->per_device_data, in kfd_process_device_free_bos()
893 if (!peer_pdd->vm) in kfd_process_device_free_bos()
896 peer_pdd->dev->kgd, mem, peer_pdd->vm); in kfd_process_device_free_bos()
899 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL); in kfd_process_device_free_bos()
900 kfd_process_device_remove_obj_handle(pdd, id); in kfd_process_device_free_bos()
908 list_for_each_entry(pdd, &p->per_device_data, per_device_list) in kfd_process_free_outstanding_kfd_bos()
916 list_for_each_entry_safe(pdd, temp, &p->per_device_data, in kfd_process_destroy_pdds()
918 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", in kfd_process_destroy_pdds()
919 pdd->dev->id, p->pasid); in kfd_process_destroy_pdds()
921 if (pdd->drm_file) { in kfd_process_destroy_pdds()
923 pdd->dev->kgd, pdd->vm); in kfd_process_destroy_pdds()
924 fput(pdd->drm_file); in kfd_process_destroy_pdds()
926 else if (pdd->vm) in kfd_process_destroy_pdds()
928 pdd->dev->kgd, pdd->vm); in kfd_process_destroy_pdds()
930 list_del(&pdd->per_device_list); in kfd_process_destroy_pdds()
932 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) in kfd_process_destroy_pdds()
933 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, in kfd_process_destroy_pdds()
936 kfree(pdd->qpd.doorbell_bitmap); in kfd_process_destroy_pdds()
937 idr_destroy(&pdd->alloc_idr); in kfd_process_destroy_pdds()
939 kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); in kfd_process_destroy_pdds()
945 if (pdd->runtime_inuse) { in kfd_process_destroy_pdds()
946 pm_runtime_mark_last_busy(pdd->dev->ddev->dev); in kfd_process_destroy_pdds()
947 pm_runtime_put_autosuspend(pdd->dev->ddev->dev); in kfd_process_destroy_pdds()
948 pdd->runtime_inuse = false; in kfd_process_destroy_pdds()
967 if (p->kobj) { in kfd_process_wq_release()
968 sysfs_remove_file(p->kobj, &p->attr_pasid); in kfd_process_wq_release()
969 kobject_del(p->kobj_queues); in kfd_process_wq_release()
970 kobject_put(p->kobj_queues); in kfd_process_wq_release()
971 p->kobj_queues = NULL; in kfd_process_wq_release()
973 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_wq_release()
974 sysfs_remove_file(p->kobj, &pdd->attr_vram); in kfd_process_wq_release()
975 sysfs_remove_file(p->kobj, &pdd->attr_sdma); in kfd_process_wq_release()
976 sysfs_remove_file(p->kobj, &pdd->attr_evict); in kfd_process_wq_release()
977 if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) in kfd_process_wq_release()
978 sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy); in kfd_process_wq_release()
979 kobject_del(pdd->kobj_stats); in kfd_process_wq_release()
980 kobject_put(pdd->kobj_stats); in kfd_process_wq_release()
981 pdd->kobj_stats = NULL; in kfd_process_wq_release()
984 kobject_del(p->kobj); in kfd_process_wq_release()
985 kobject_put(p->kobj); in kfd_process_wq_release()
986 p->kobj = NULL; in kfd_process_wq_release()
994 dma_fence_put(p->ef); in kfd_process_wq_release()
998 kfd_pasid_free(p->pasid); in kfd_process_wq_release()
999 mutex_destroy(&p->mutex); in kfd_process_wq_release()
1001 put_task_struct(p->lead_thread); in kfd_process_wq_release()
1010 INIT_WORK(&p->release_work, kfd_process_wq_release); in kfd_process_ref_release()
1011 queue_work(kfd_process_wq, &p->release_work); in kfd_process_ref_release()
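kfd_process_ref_release() is the kref release callback; rather than tearing the process down in whatever context dropped the last reference, it defers the heavy cleanup to kfd_process_wq. A minimal sketch of that "release schedules a work item" idiom, using hypothetical names and the system workqueue instead of the driver's dedicated one:

/* Sketch of deferring kref release work to a workqueue, as done by
 * kfd_process_ref_release() above. Names here are hypothetical. */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_proc {
        struct kref ref;
        struct work_struct release_work;
};

static void example_wq_release(struct work_struct *work)
{
        struct example_proc *p =
                container_of(work, struct example_proc, release_work);

        /* Heavyweight teardown (sysfs removal, freeing BOs, ...) runs
         * here, in process context on the workqueue. */
        kfree(p);
}

static void example_ref_release(struct kref *ref)
{
        struct example_proc *p = container_of(ref, struct example_proc, ref);

        INIT_WORK(&p->release_work, example_wq_release);
        queue_work(system_wq, &p->release_work);
}

static void example_unref(struct example_proc *p)
{
        kref_put(&p->ref, example_ref_release);
}
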
1030 if (WARN_ON(p->mm != mm)) in kfd_process_notifier_release()
1034 hash_del_rcu(&p->kfd_processes); in kfd_process_notifier_release()
1038 cancel_delayed_work_sync(&p->eviction_work); in kfd_process_notifier_release()
1039 cancel_delayed_work_sync(&p->restore_work); in kfd_process_notifier_release()
1041 mutex_lock(&p->mutex); in kfd_process_notifier_release()
1047 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_notifier_release()
1048 struct kfd_dev *dev = pdd->dev; in kfd_process_notifier_release()
1051 if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { in kfd_process_notifier_release()
1052 if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { in kfd_process_notifier_release()
1053 kfd_dbgmgr_destroy(dev->dbgmgr); in kfd_process_notifier_release()
1054 dev->dbgmgr = NULL; in kfd_process_notifier_release()
1061 pqm_uninit(&p->pqm); in kfd_process_notifier_release()
1064 p->mm = NULL; in kfd_process_notifier_release()
1069 dma_fence_signal(p->ef); in kfd_process_notifier_release()
1071 mutex_unlock(&p->mutex); in kfd_process_notifier_release()
1073 mmu_notifier_put(&p->mmu_notifier); in kfd_process_notifier_release()
1086 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_init_cwsr_apu()
1087 struct kfd_dev *dev = pdd->dev; in kfd_process_init_cwsr_apu()
1088 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_init_cwsr_apu()
1090 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) in kfd_process_init_cwsr_apu()
1093 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); in kfd_process_init_cwsr_apu()
1094 qpd->tba_addr = (int64_t)vm_mmap(filep, 0, in kfd_process_init_cwsr_apu()
1098 if (IS_ERR_VALUE(qpd->tba_addr)) { in kfd_process_init_cwsr_apu()
1099 int err = qpd->tba_addr; in kfd_process_init_cwsr_apu()
1102 qpd->tba_addr = 0; in kfd_process_init_cwsr_apu()
1103 qpd->cwsr_kaddr = NULL; in kfd_process_init_cwsr_apu()
1107 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); in kfd_process_init_cwsr_apu()
1109 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; in kfd_process_init_cwsr_apu()
1111 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); in kfd_process_init_cwsr_apu()
1119 struct kfd_dev *dev = pdd->dev; in kfd_process_device_init_cwsr_dgpu()
1120 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_device_init_cwsr_dgpu()
1127 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) in kfd_process_device_init_cwsr_dgpu()
1131 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, in kfd_process_device_init_cwsr_dgpu()
1136 qpd->cwsr_kaddr = kaddr; in kfd_process_device_init_cwsr_dgpu()
1137 qpd->tba_addr = qpd->cwsr_base; in kfd_process_device_init_cwsr_dgpu()
1139 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); in kfd_process_device_init_cwsr_dgpu()
1141 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; in kfd_process_device_init_cwsr_dgpu()
1143 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); in kfd_process_device_init_cwsr_dgpu()
1155 int err = -ENOMEM; in create_process()
1161 kref_init(&process->ref); in create_process()
1162 mutex_init(&process->mutex); in create_process()
1163 process->mm = thread->mm; in create_process()
1164 process->lead_thread = thread->group_leader; in create_process()
1165 INIT_LIST_HEAD(&process->per_device_data); in create_process()
1166 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker); in create_process()
1167 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); in create_process()
1168 process->last_restore_timestamp = get_jiffies_64(); in create_process()
1170 process->is_32bit_user_mode = in_compat_syscall(); in create_process()
1172 process->pasid = kfd_pasid_alloc(); in create_process()
1173 if (process->pasid == 0) in create_process()
1176 err = pqm_init(&process->pqm, process); in create_process()
1186 process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops; in create_process()
1187 err = mmu_notifier_register(&process->mmu_notifier, process->mm); in create_process()
1191 get_task_struct(process->lead_thread); in create_process()
1192 hash_add_rcu(kfd_processes_table, &process->kfd_processes, in create_process()
1193 (uintptr_t)process->mm); in create_process()
1201 pqm_uninit(&process->pqm); in create_process()
1203 kfd_pasid_free(process->pasid); in create_process()
1205 mutex_destroy(&process->mutex); in create_process()
1215 int range_start = dev->shared_resources.non_cp_doorbells_start; in init_doorbell_bitmap()
1216 int range_end = dev->shared_resources.non_cp_doorbells_end; in init_doorbell_bitmap()
1218 if (!KFD_IS_SOC15(dev->device_info->asic_family)) in init_doorbell_bitmap()
1221 qpd->doorbell_bitmap = in init_doorbell_bitmap()
1224 if (!qpd->doorbell_bitmap) in init_doorbell_bitmap()
1225 return -ENOMEM; in init_doorbell_bitmap()
1228 pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); in init_doorbell_bitmap()
1229 pr_debug("reserved doorbell 0x%03x - 0x%03x\n", in init_doorbell_bitmap()
1235 set_bit(i, qpd->doorbell_bitmap); in init_doorbell_bitmap()
1237 qpd->doorbell_bitmap); in init_doorbell_bitmap()
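init_doorbell_bitmap() allocates a per-process bitmap and pre-sets the bits covering the device's reserved non-CP doorbell range (lines 1215-1237), so later doorbell allocation skips them. A small sketch of the same reserve-then-allocate bitmap pattern; the size constant and helper names are illustrative:

/* Sketch of reserving a doorbell range in a bitmap and allocating from
 * the remainder; EXAMPLE_NUM_DOORBELLS and the helpers are illustrative. */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/slab.h>

#define EXAMPLE_NUM_DOORBELLS 1024

static unsigned long *example_init_bitmap(unsigned int resv_start,
                                          unsigned int resv_end)
{
        unsigned long *bm = bitmap_zalloc(EXAMPLE_NUM_DOORBELLS, GFP_KERNEL);
        unsigned int i;

        if (!bm)
                return NULL;

        /* Mark the reserved range as already in use. */
        for (i = resv_start; i <= resv_end && i < EXAMPLE_NUM_DOORBELLS; i++)
                set_bit(i, bm);

        return bm;
}

static int example_alloc_doorbell(unsigned long *bm)
{
        unsigned int id = find_first_zero_bit(bm, EXAMPLE_NUM_DOORBELLS);

        if (id >= EXAMPLE_NUM_DOORBELLS)
                return -ENOSPC;
        set_bit(id, bm);
        return id;
}
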
1249 list_for_each_entry(pdd, &p->per_device_data, per_device_list) in kfd_get_process_device_data()
1250 if (pdd->dev == dev) in kfd_get_process_device_data()
1265 if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) { in kfd_create_process_device_data()
1270 if (init_doorbell_bitmap(&pdd->qpd, dev)) { in kfd_create_process_device_data()
1275 pdd->dev = dev; in kfd_create_process_device_data()
1276 INIT_LIST_HEAD(&pdd->qpd.queues_list); in kfd_create_process_device_data()
1277 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); in kfd_create_process_device_data()
1278 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()
1279 pdd->qpd.pqm = &p->pqm; in kfd_create_process_device_data()
1280 pdd->qpd.evicted = 0; in kfd_create_process_device_data()
1281 pdd->qpd.mapped_gws_queue = false; in kfd_create_process_device_data()
1282 pdd->process = p; in kfd_create_process_device_data()
1283 pdd->bound = PDD_UNBOUND; in kfd_create_process_device_data()
1284 pdd->already_dequeued = false; in kfd_create_process_device_data()
1285 pdd->runtime_inuse = false; in kfd_create_process_device_data()
1286 pdd->vram_usage = 0; in kfd_create_process_device_data()
1287 pdd->sdma_past_activity_counter = 0; in kfd_create_process_device_data()
1288 atomic64_set(&pdd->evict_duration_counter, 0); in kfd_create_process_device_data()
1289 list_add(&pdd->per_device_list, &p->per_device_data); in kfd_create_process_device_data()
1292 idr_init(&pdd->alloc_idr); in kfd_create_process_device_data()
1302 * kfd_process_device_init_vm - Initialize a VM for a process-device
1304 * @pdd: The process-device
1313 * Returns 0 on success, -errno on failure.
1322 if (pdd->vm) in kfd_process_device_init_vm()
1323 return drm_file ? -EBUSY : 0; in kfd_process_device_init_vm()
1325 p = pdd->process; in kfd_process_device_init_vm()
1326 dev = pdd->dev; in kfd_process_device_init_vm()
1330 dev->kgd, drm_file, p->pasid, in kfd_process_device_init_vm()
1331 &pdd->vm, &p->kgd_process_info, &p->ef); in kfd_process_device_init_vm()
1333 ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid, in kfd_process_device_init_vm()
1334 &pdd->vm, &p->kgd_process_info, &p->ef); in kfd_process_device_init_vm()
1340 amdgpu_vm_set_task_info(pdd->vm); in kfd_process_device_init_vm()
1349 pdd->drm_file = drm_file; in kfd_process_device_init_vm()
1357 amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); in kfd_process_device_init_vm()
1358 pdd->vm = NULL; in kfd_process_device_init_vm()
1364 * Direct the IOMMU to bind the process (specifically the pasid->mm)
1379 return ERR_PTR(-ENOMEM); in kfd_bind_process_to_device()
1383 * signal runtime-pm system to auto resume and prevent in kfd_bind_process_to_device()
1387 if (!pdd->runtime_inuse) { in kfd_bind_process_to_device()
1388 err = pm_runtime_get_sync(dev->ddev->dev); in kfd_bind_process_to_device()
1390 pm_runtime_put_autosuspend(dev->ddev->dev); in kfd_bind_process_to_device()
1407 pdd->runtime_inuse = true; in kfd_bind_process_to_device()
1413 if (!pdd->runtime_inuse) { in kfd_bind_process_to_device()
1414 pm_runtime_mark_last_busy(dev->ddev->dev); in kfd_bind_process_to_device()
1415 pm_runtime_put_autosuspend(dev->ddev->dev); in kfd_bind_process_to_device()
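kfd_bind_process_to_device() takes a runtime-PM reference the first time a process binds to a device and records it in pdd->runtime_inuse; kfd_process_destroy_pdds() (lines 945-948) drops exactly one matching reference. A minimal sketch of that balanced get/put pairing, with a hypothetical example_binding type:

/* Sketch of the balanced runtime-PM get/put pairing used above; the
 * 'runtime_inuse' flag guarantees one put per successful get. */
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct example_binding {
        struct device *dev;
        bool runtime_inuse;
};

static int example_bind(struct example_binding *b)
{
        int err;

        if (!b->runtime_inuse) {
                err = pm_runtime_get_sync(b->dev);
                if (err < 0) {
                        pm_runtime_put_autosuspend(b->dev);
                        return err;
                }
                b->runtime_inuse = true;
        }
        return 0;
}

static void example_unbind(struct example_binding *b)
{
        if (b->runtime_inuse) {
                pm_runtime_mark_last_busy(b->dev);
                pm_runtime_put_autosuspend(b->dev);
                b->runtime_inuse = false;
        }
}
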
1424 return list_first_entry(&p->per_device_data, in kfd_get_first_process_device_data()
1433 if (list_is_last(&pdd->per_device_list, &p->per_device_data)) in kfd_get_next_process_device_data()
1440 return !(list_empty(&p->per_device_data)); in kfd_has_process_device_data()
1449 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); in kfd_process_device_create_obj_handle()
1461 return idr_find(&pdd->alloc_idr, handle); in kfd_process_device_translate_handle()
1471 idr_remove(&pdd->alloc_idr, handle); in kfd_process_device_remove_obj_handle()
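Lines 1449-1471 wrap a per-device IDR: idr_alloc() hands out an integer handle for a buffer object, idr_find() translates a handle back to the pointer, idr_remove() drops it, and kfd_process_device_free_bos() (line 888) walks the remaining entries with idr_for_each_entry(). A self-contained sketch of that handle-table pattern:

/* Sketch of the IDR-backed handle table used for per-device BOs. */
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDR(example_handles);

static int example_create_handle(void *mem)
{
        /* start = 0, end = 0 means "any non-negative id". */
        return idr_alloc(&example_handles, mem, 0, 0, GFP_KERNEL);
}

static void *example_translate_handle(int handle)
{
        return handle < 0 ? NULL : idr_find(&example_handles, handle);
}

static void example_remove_handle(int handle)
{
        if (handle >= 0)
                idr_remove(&example_handles, handle);
}

static void example_free_all(void (*free_cb)(void *mem))
{
        void *mem;
        int id;

        idr_for_each_entry(&example_handles, mem, id) {
                free_cb(mem);
                idr_remove(&example_handles, id);
        }
        idr_destroy(&example_handles);
}
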
1474 /* This increments the process->ref counter. */
1483 if (p->pasid == pasid) { in kfd_lookup_process_by_pasid()
1484 kref_get(&p->ref); in kfd_lookup_process_by_pasid()
1495 /* This increments the process->ref counter. */
1504 kref_get(&p->ref); in kfd_lookup_process_by_mm()
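Both lookup helpers take an extra reference with kref_get() before returning, as the comments at lines 1474 and 1495 say, so every caller must balance the lookup with kfd_unref_process() (line 875). A short usage sketch of that contract, assuming the declarations from the driver's private header; do_something() is a placeholder:

/* Usage sketch: every successful lookup must be balanced by
 * kfd_unref_process(). Assumes the kfd_priv.h declarations. */
static void example_use_process_by_pasid(unsigned int pasid)
{
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

        if (!p)
                return;

        /* ... do_something(p) while holding the extra reference ... */

        kfd_unref_process(p);   /* drops the reference taken by the lookup */
}
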
1511 /* kfd_process_evict_queues - Evict all user queues of a process
1513 * Eviction is reference-counted per process-device. This means multiple
1522 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_evict_queues()
1523 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
1524 &pdd->qpd); in kfd_process_evict_queues()
1538 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_evict_queues()
1541 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
1542 &pdd->qpd)) in kfd_process_evict_queues()
1545 n_evicted--; in kfd_process_evict_queues()
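kfd_process_evict_queues() counts every device whose queues were successfully evicted; if a later device fails, the error path walks the list again and restores only the first n_evicted devices, decrementing the counter as it goes (line 1545). A compact sketch of that partial-rollback pattern with placeholder evict/restore ops:

/* Sketch of the "count successes, roll back that many on failure"
 * pattern used by kfd_process_evict_queues(). Ops are placeholders. */
#include <linux/list.h>
#include <linux/printk.h>

struct example_dev {
        struct list_head node;
        int (*evict)(struct example_dev *d);
        int (*restore)(struct example_dev *d);
};

static int example_evict_all(struct list_head *devices)
{
        struct example_dev *d;
        unsigned int n_evicted = 0;
        int r;

        list_for_each_entry(d, devices, node) {
                r = d->evict(d);
                if (r)
                        goto fail;
                n_evicted++;
        }
        return 0;

fail:
        /* Undo only the devices that were actually evicted. */
        list_for_each_entry(d, devices, node) {
                if (!n_evicted)
                        break;
                if (d->restore(d))
                        pr_err("example: failed to restore queues\n");
                n_evicted--;
        }
        return r;
}
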
1551 /* kfd_process_restore_queues - Restore all user queues of a process */
1557 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_restore_queues()
1558 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_restore_queues()
1559 &pdd->qpd); in kfd_process_restore_queues()
1582 WARN_ONCE(p->last_eviction_seqno != p->ef->seqno, in evict_process_worker()
1591 flush_delayed_work(&p->restore_work); in evict_process_worker()
1593 pr_debug("Started evicting pasid 0x%x\n", p->pasid); in evict_process_worker()
1596 dma_fence_signal(p->ef); in evict_process_worker()
1597 dma_fence_put(p->ef); in evict_process_worker()
1598 p->ef = NULL; in evict_process_worker()
1599 queue_delayed_work(kfd_restore_wq, &p->restore_work, in evict_process_worker()
1602 pr_debug("Finished evicting pasid 0x%x\n", p->pasid); in evict_process_worker()
1604 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); in evict_process_worker()
1619 pr_debug("Started restoring pasid 0x%x\n", p->pasid); in restore_process_worker()
1627 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two in restore_process_worker()
1631 p->last_restore_timestamp = get_jiffies_64(); in restore_process_worker()
1632 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, in restore_process_worker()
1633 &p->ef); in restore_process_worker()
1636 p->pasid, PROCESS_BACK_OFF_TIME_MS); in restore_process_worker()
1637 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, in restore_process_worker()
1645 pr_debug("Finished restoring pasid 0x%x\n", p->pasid); in restore_process_worker()
1647 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); in restore_process_worker()
1658 cancel_delayed_work_sync(&p->eviction_work); in kfd_suspend_all_processes()
1659 cancel_delayed_work_sync(&p->restore_work); in kfd_suspend_all_processes()
1662 pr_err("Failed to suspend process 0x%x\n", p->pasid); in kfd_suspend_all_processes()
1663 dma_fence_signal(p->ef); in kfd_suspend_all_processes()
1664 dma_fence_put(p->ef); in kfd_suspend_all_processes()
1665 p->ef = NULL; in kfd_suspend_all_processes()
1677 if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) { in kfd_resume_all_processes()
1679 p->pasid); in kfd_resume_all_processes()
1680 ret = -EFAULT; in kfd_resume_all_processes()
1693 if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) { in kfd_reserved_mem_mmap()
1695 return -EINVAL; in kfd_reserved_mem_mmap()
1700 return -EINVAL; in kfd_reserved_mem_mmap()
1701 qpd = &pdd->qpd; in kfd_reserved_mem_mmap()
1703 qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in kfd_reserved_mem_mmap()
1705 if (!qpd->cwsr_kaddr) { in kfd_reserved_mem_mmap()
1707 return -ENOMEM; in kfd_reserved_mem_mmap()
1710 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND in kfd_reserved_mem_mmap()
1713 return remap_pfn_range(vma, vma->vm_start, in kfd_reserved_mem_mmap()
1714 PFN_DOWN(__pa(qpd->cwsr_kaddr)), in kfd_reserved_mem_mmap()
1715 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot); in kfd_reserved_mem_mmap()
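kfd_reserved_mem_mmap() validates the VMA size, backs the CWSR trap-handler area with zeroed kernel pages, and maps them into the process with remap_pfn_range(). A minimal sketch of an mmap handler with the same shape; EXAMPLE_AREA_SIZE and the flag choice are illustrative, and (as in the original) the pages are freed later at teardown, not here:

/* Minimal sketch of an mmap handler that hands out zeroed kernel pages
 * via remap_pfn_range(), mirroring the shape of kfd_reserved_mem_mmap(). */
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pfn.h>

#define EXAMPLE_AREA_SIZE (2 * PAGE_SIZE)

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
        void *kaddr;

        if (vma->vm_end - vma->vm_start != EXAMPLE_AREA_SIZE)
                return -EINVAL;

        kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         get_order(EXAMPLE_AREA_SIZE));
        if (!kaddr)
                return -ENOMEM;

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;

        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(kaddr)),
                               EXAMPLE_AREA_SIZE, vma->vm_page_prot);
}
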
1720 struct kfd_dev *dev = pdd->dev; in kfd_flush_tlb()
1722 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in kfd_flush_tlb()
1726 if (pdd->qpd.vmid) in kfd_flush_tlb()
1727 amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd, in kfd_flush_tlb()
1728 pdd->qpd.vmid); in kfd_flush_tlb()
1730 amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, in kfd_flush_tlb()
1731 pdd->process->pasid); in kfd_flush_tlb()
1747 p->lead_thread->tgid, p->pasid); in kfd_debugfs_mqds_by_process()
1749 mutex_lock(&p->mutex); in kfd_debugfs_mqds_by_process()
1750 r = pqm_debugfs_mqds(m, &p->pqm); in kfd_debugfs_mqds_by_process()
1751 mutex_unlock(&p->mutex); in kfd_debugfs_mqds_by_process()