/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device.c
    42: * kfd_locked is used to lock the kfd driver during suspend or reset
    43: * once locked, kfd driver will stop any further GPU execution.
    61: static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
    63: static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
    65: static int kfd_resume(struct kfd_node *kfd);
    67: static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
    69: uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);  in kfd_device_info_set_sdma_info()
    81: kfd->device_info.num_sdma_queues_per_engine = 2;  in kfd_device_info_set_sdma_info()
    107: kfd->device_info.num_sdma_queues_per_engine = 8;  in kfd_device_info_set_sdma_info()
    113: kfd ...  in kfd_device_info_set_sdma_info()
    141: kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
    200: kfd_device_info_init(struct kfd_dev *kfd, bool vf, uint32_t gfx_target_version)
    267: struct kfd_dev *kfd = NULL;  in kgd2kfd_probe()
    502: kfd_cwsr_init(struct kfd_dev *kfd)
    565: struct kfd_dev *kfd = node->kfd;  in kfd_gws_init()
    653: kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
    713: kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources)
    945: kgd2kfd_device_exit(struct kfd_dev *kfd)
    960: kgd2kfd_pre_reset(struct kfd_dev *kfd, struct amdgpu_reset_context *reset_context)
    988: kgd2kfd_post_reset(struct kfd_dev *kfd)
    1016: kfd_is_locked(struct kfd_dev *kfd)
    1042: kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
    1059: kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
    1078: kgd2kfd_suspend_process(struct kfd_dev *kfd)
    1090: kgd2kfd_resume_process(struct kfd_dev *kfd)
    1120: kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
    1242: kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size)
    1268: kfd_gtt_sa_fini(struct kfd_dev *kfd)
    1292: struct kfd_dev *kfd = node->kfd;  in kfd_gtt_sa_allocate()
    1394: struct kfd_dev *kfd = node->kfd;  in kfd_gtt_sa_free()
    1415: kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
    1448: kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
    1481: kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
    1524: kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
    1531: kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
    1553: kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
    1570: kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
    [all ...]
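The comment at lines 42-43, together with kfd_is_locked(), kgd2kfd_check_and_lock_kfd() and kgd2kfd_unlock_kfd() above, points at a driver-wide lock taken around suspend and reset. Below is a minimal sketch of that pattern, assuming kfd_locked is a plain counter guarded by a mutex; the function names and locking granularity are illustrative, not the driver's exact code.

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/bug.h>

static DEFINE_MUTEX(kfd_lock_mutex);
static int kfd_locked;	/* > 0 while a suspend or reset holds the driver */

static void kfd_driver_lock(void)
{
	mutex_lock(&kfd_lock_mutex);
	kfd_locked++;			/* nested suspend/reset paths stack */
	mutex_unlock(&kfd_lock_mutex);
}

static void kfd_driver_unlock(void)
{
	mutex_lock(&kfd_lock_mutex);
	WARN_ON(--kfd_locked < 0);	/* unbalanced unlock is a bug */
	mutex_unlock(&kfd_lock_mutex);
}

static bool kfd_driver_is_locked(void)
{
	/* Checked before queuing further GPU work, per the comment above. */
	return READ_ONCE(kfd_locked) > 0;
}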
kfd_doorbell.c
    44: * the /dev/kfd with the particular device encoded in the mmap offset.
    45: * There will be other uses for mmap of /dev/kfd, so only a range of
    50: size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
    52: if (!kfd->shared_resources.enable_mes)  in kfd_doorbell_process_slice()
    53: return roundup(kfd->device_info.doorbell_size *  in kfd_doorbell_process_slice()
    58: (struct amdgpu_device *)kfd->adev);  in kfd_doorbell_process_slice()
    62: int kfd_doorbell_init(struct kfd_dev *kfd)
    75: kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL);  in kfd_doorbell_init()
    76: if (!kfd->doorbell_bitmap) {  in kfd_doorbell_init()
    82: r = amdgpu_bo_create_kernel(kfd ...  in kfd_doorbell_init()
    99: kfd_doorbell_fini(struct kfd_dev *kfd)
    150: kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off)
    178: kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
    253: kfd_alloc_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
    293: kfd_free_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
    [all ...]
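The matched lines from kfd_doorbell_process_slice() are enough to sketch its shape: each process gets one page-aligned slice of the doorbell aperture, and (per the comment at lines 44-45) the mmap offset on /dev/kfd encodes which device the slice belongs to. A hedged reconstruction follows; KFD_MAX_NUM_OF_QUEUES_PER_PROCESS and the MES helper are assumed from context rather than verified against the file.

size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
{
	/* Without MES, size the slice for the per-process queue maximum
	 * and round up to a whole page so it can be mmap'ed on its own. */
	if (!kfd->shared_resources.enable_mes)
		return roundup(kfd->device_info.doorbell_size *
			       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
			       PAGE_SIZE);

	/* With MES enabled, amdgpu owns the doorbell layout (line 58). */
	return amdgpu_mes_doorbell_process_slice(
			(struct amdgpu_device *)kfd->adev);
}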
kfd_interrupt.c
    58: KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,  in kfd_interrupt_init()
    65: if (!node->kfd->ih_wq) {  in kfd_interrupt_init()
    66: node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,  in kfd_interrupt_init()
    67: node->kfd->num_nodes);  in kfd_interrupt_init()
    68: if (unlikely(!node->kfd->ih_wq)) {  in kfd_interrupt_init()
    116: kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);  in enqueue_ih_ring_entry()
    131: node->kfd->device_info.ih_ring_entry_size);  in dequeue_ih_ring_entry()
    132: WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);  in dequeue_ih_ring_entry()
    133: return count == node->kfd->device_info.ih_ring_entry_size;  in dequeue_ih_ring_entry()
    143: dev->kfd ...  in interrupt_wq()
    [all ...]
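enqueue_ih_ring_entry() and dequeue_ih_ring_entry() above show a classic top-half/bottom-half split: the interrupt service routine copies one fixed-size IH ring entry into a kfifo, and a high-priority workqueue drains it later. A self-contained sketch of that pair; the struct is a stand-in that keeps only the fields the matches use.

#include <linux/kfifo.h>
#include <linux/bug.h>

struct ih_node {			/* stand-in for struct kfd_node */
	struct kfifo ih_fifo;
	unsigned int ih_ring_entry_size;
};

/* Called from the ISR: returns false when the fifo is full. */
static bool ih_enqueue(struct ih_node *node, const void *ih_ring_entry)
{
	return kfifo_in(&node->ih_fifo, ih_ring_entry,
			node->ih_ring_entry_size) == node->ih_ring_entry_size;
}

/* Called from the worker: kfifo_out() returns the bytes actually copied. */
static bool ih_dequeue(struct ih_node *node, void *buf)
{
	unsigned int count = kfifo_out(&node->ih_fifo, buf,
				       node->ih_ring_entry_size);

	WARN_ON(count && count != node->ih_ring_entry_size);
	return count == node->ih_ring_entry_size;
}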
kfd_device_queue_manager_v9.c
    70: if (dqm->dev->kfd->noretry)  in set_cache_memory_policy_v9()
    73: if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||  in set_cache_memory_policy_v9()
    74: KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))  in set_cache_memory_policy_v9()
    77: if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0)) {  in set_cache_memory_policy_v9()
    103: if (dqm->dev->kfd->noretry)  in update_qpd_v9()
    106: if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||  in update_qpd_v9()
    107: KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))  in update_qpd_v9()
kfd_packet_manager_v9.c
    37: struct kfd_node *kfd = pm->dqm->dev;  in pm_map_process_v9()
    40: struct amdgpu_device *adev = kfd->adev;  in pm_map_process_v9()
    46: if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE)  in pm_map_process_v9()
    58: if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&  in pm_map_process_v9()
    60: packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;  in pm_map_process_v9()
    94: struct kfd_dev *kfd = pm->dqm->dev->kfd;  in pm_map_process_aldebaran()
    99: struct amdgpu_device *adev = kfd->adev;  in pm_map_process_aldebaran()
    121: for (i = 0; i < kfd->device_info.num_of_watch_points; i++)  in pm_map_process_aldebaran()
    154: struct kfd_node *kfd ...  in pm_runlist_v9()
    [all ...]
kfd_device_queue_manager.c
    86: int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec  in is_pipe_enabled()
    87: + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;  in is_pipe_enabled()
    90: for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)  in is_pipe_enabled()
    92: dqm->dev->kfd->shared_resources.cp_queue_bitmap))  in is_pipe_enabled()
    99: return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,  in get_cp_queues_num()
    105: return dqm->dev->kfd->shared_resources.num_queue_per_pipe;  in get_queues_per_pipe()
    110: return dqm->dev->kfd->shared_resources.num_pipe_per_mec;  in get_pipes_per_mec()
    122: dqm->dev->kfd->device_info.num_sdma_queues_per_engine;  in get_num_sdma_queues()
    128: dqm->dev->kfd->device_info.num_sdma_queues_per_engine;  in get_num_xgmi_sdma_queues()
    141: dqm->dev->kfd ...  in init_sdma_bitmaps()
    [all ...]
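The pipe_offset arithmetic in is_pipe_enabled() flattens a (mec, pipe, queue) triple into a single bit index in cp_queue_bitmap. A stand-alone rendering of just that computation, in userspace C for illustration, with the shared-resources struct reduced to the three fields it touches:

#include <stdbool.h>

struct cp_resources {
	unsigned int num_pipe_per_mec;
	unsigned int num_queue_per_pipe;
	const unsigned long *cp_queue_bitmap;	/* one bit per hardware queue */
};

static bool cp_queue_enabled(const struct cp_resources *res,
			     unsigned int mec, unsigned int pipe,
			     unsigned int queue)
{
	/* Same flattening as lines 86-87: all queues of pipe 0 of MEC 0
	 * come first, then pipe 1, ..., then MEC 1, and so on. */
	unsigned int pipe_offset = (mec * res->num_pipe_per_mec + pipe) *
				   res->num_queue_per_pipe;
	unsigned int bit = pipe_offset + queue;
	unsigned int bits_per_long = 8 * sizeof(unsigned long);

	return res->cp_queue_bitmap[bit / bits_per_long] &
	       (1UL << (bit % bits_per_long));
}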
kfd_debug.h
    115: && dev->kfd->mec2_fw_version < 0x81b6) ||  in kfd_dbg_has_gws_support()
    118: && dev->kfd->mec2_fw_version < 0x1b6) ||  in kfd_dbg_has_gws_support()
    120: && dev->kfd->mec2_fw_version < 0x1b6) ||  in kfd_dbg_has_gws_support()
    122: && dev->kfd->mec2_fw_version < 0x30) ||  in kfd_dbg_has_gws_support()
kfd_debug.c
    438: if (!pdd->dev->kfd->shared_resources.enable_mes) {  in kfd_dbg_trap_clear_dev_address_watch()
    450: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_clear_dev_address_watch()
    472: if (!pdd->dev->kfd->shared_resources.enable_mes) {  in kfd_dbg_trap_set_dev_address_watch()
    492: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_dev_address_watch()
    544: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_flags()
    567: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_flags()
    629: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_deactivate()
    746: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_activate()
    880: if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_wave_launch_override()
    912: if (!pdd->dev->kfd ...  in kfd_dbg_trap_set_wave_launch_mode()
    [all ...]
kfd_mqd_manager_v11.c
    112: if (node->kfd->shared_resources.enable_mes)  in allocate_mqd()
    135: if (mm->dev->kfd->shared_resources.enable_mes)  in init_mqd()
    169: * DISPATCH_PTR. This is required for the kfd debugger  in init_mqd()
    185: if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
    273: if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
    406: if (mm->dev->kfd->shared_resources.enable_mes)  in init_mqd_sdma()
    557: if (dev->kfd->shared_resources.enable_mes) {  in mqd_manager_init_v11()
kfd_topology.c
    497: __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points);  in node_show()
    523: dev->gpu->kfd->mec_fw_version);  in node_show()
    531: dev->gpu->kfd->sdma_fw_version);  in node_show()
    1136: /* kfd_assign_gpu - Attach @gpu to the correct kfd topology device. If
    1228: if (!dev->gpu->kfd->pci_atomic_requested ||  in kfd_set_iolink_no_atomics()
    1282: adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&  in kfd_set_recommended_sdma_engines()
    1589: (dev->gpu->kfd->hive_id &&  in kfd_dev_create_p2p_links()
    1590: dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))  in kfd_dev_create_p2p_links()
    1944: firmware_supported = dev->gpu->kfd ...  in kfd_topology_set_dbg_firmware_support()
    [all ...]
kfd_flat_memory.c
    331: pdd->dev->kfd->shared_resources.gpuvm_size - 1;  in kfd_init_apertures_vi()
    350: pdd->dev->kfd->shared_resources.gpuvm_size - 1;  in kfd_init_apertures_v9()
kfd_process.c
    851: * take kfd processes mutex before starting of process creation  in kfd_create_process()
    863: /* A prior open of /dev/kfd could have already created the process.  in kfd_create_process()
    1075: kfd_free_process_doorbells(pdd->dev->kfd, pdd);  in kfd_process_destroy_pdds()
    1077: if (pdd->dev->kfd->shared_resources.enable_mes &&  in kfd_process_destroy_pdds()
    1209: /* This increments p->ref counter if kfd process p exists */  in kfd_process_alloc_notifier()
    1347: if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)  in kfd_process_init_cwsr_apu()
    1365: memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);  in kfd_process_init_cwsr_apu()
    1390: if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)  in kfd_process_device_init_cwsr_dgpu()
    1403: memcpy(qpd->cwsr_kaddr, dev->kfd ...  in kfd_process_device_init_cwsr_dgpu()
    [all ...]
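Lines 1347 and 1365 outline how the CWSR (compute wave save/restore) area is set up: bail out if the feature is off or the area already exists, then copy the trap-handler ISA to the head of the save area. A sketch of that guard-then-copy step, with the allocation and mapping of cwsr_kaddr elided and the type names taken on trust from the matches:

#include <linux/string.h>

static int init_cwsr_sketch(struct kfd_node *dev,
			    struct qcm_process_device *qpd)
{
	/* Feature disabled, or this process already has a save area. */
	if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
		return 0;

	/* ... allocate and map qpd->cwsr_kaddr here (elided) ... */

	/* The trap-handler ISA lives at the head of the save area. */
	memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
	return 0;
}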
kfd_mqd_manager_v10.c
    76: static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
    81: if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),  in allocate_mqd()
    122: * DISPATCH_PTR. This is required for the kfd debugger  in init_mqd()
    131: if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
    220: if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
kfd_crat.c
    1644: kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,  in kfd_get_gpu_cache_info()
    1716: kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,  in kfd_get_gpu_cache_info()
    2187: bool use_ta_info = kdev->kfd->num_nodes == 1;  in kfd_fill_gpu_xgmi_link_to_gpu()
    2213: bool is_single_hop = kdev->kfd == peer_kdev->kfd;  in kfd_fill_gpu_xgmi_link_to_gpu()
    2282: (cu_info->number / kdev->kfd->num_nodes);  in kfd_create_vcrat_image_gpu()
    2357: if (kdev->kfd->hive_id) {  in kfd_create_vcrat_image_gpu()
    2362: if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)  in kfd_create_vcrat_image_gpu()
kfd_mqd_manager.c
    75: dev->kfd->device_info.num_sdma_queues_per_engine +  in allocate_sdma_mqd()
    112: cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;  in mqd_symmetrically_map_cu_mask()
kfd_packet_manager_vi.c
    81: struct kfd_node *kfd = pm->dqm->dev;  in pm_runlist_vi()
    89: * of processes in the runlist and kfd module parameter  in pm_runlist_vi()
    96: kfd->max_proc_per_quantum);  in pm_runlist_vi()
kfd_mqd_manager_v9.c
    44: if (mm->dev->kfd->cwsr_enabled &&  in mqd_stride_v9()
    134: if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {  in allocate_mqd()
    199: * DISPATCH_PTR. This is required for the kfd debugger  in init_mqd()
    212: if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {  in init_mqd()
    304: if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)  in update_mqd()
    701: if (mm->dev->kfd->cwsr_enabled &&  in init_mqd_v9_4_3()
kfd_mqd_manager.h
    55: * @dev: The kfd device structure coupled with this module.
    65: * ASIC. Currently the kfd driver supports only Kaveri so there are instances
    71: struct kfd_mem_obj* (*allocate_mqd)(struct kfd_node *kfd,
kfd_int_process_v9.c
    308: dev->kfd->device_info.ih_ring_entry_size);  in event_interrupt_isr_v9()
    338: if (context_id == 0 && context_id_expected(dev->kfd))  in event_interrupt_isr_v9()
kfd_mqd_manager_cik.c
    76: static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
    81: if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd),  in allocate_mqd()
kfd_mqd_manager_v12.c
    136: * DISPATCH_PTR. This is required for the kfd debugger  in init_mqd()
    148: if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
    236: if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
/linux/samples/bpf/
task_fd_query_user.c
    234: int err = -1, res, kfd, efd;  in test_debug_fs_uprobe()
    240: kfd = open(buf, O_WRONLY | O_TRUNC, 0);  in test_debug_fs_uprobe()
    241: CHECK_PERROR_RET(kfd < 0);  in test_debug_fs_uprobe()
    250: CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);  in test_debug_fs_uprobe()
    252: close(kfd);  in test_debug_fs_uprobe()
    253: kfd = -1;  in test_debug_fs_uprobe()
    270: kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  in test_debug_fs_uprobe()
    271: link = bpf_program__attach_perf_event(progs[0], kfd);  in test_debug_fs_uprobe()
    275: close(kfd);  in test_debug_fs_uprobe()
    280: err = bpf_task_fd_query(getpid(), kfd,  in test_debug_fs_uprobe()
    [all ...]
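test_debug_fs_uprobe() walks the classic tracefs uprobe flow: write a probe definition, read back the event id, open it with perf_event_open(), then hand the fd to libbpf. A compressed sketch of those steps; the tracefs paths, probe name, and error handling are simplifications rather than the sample's exact code.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int open_uprobe_event(const char *binary, unsigned long offset)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_TRACEPOINT,
		.size = sizeof(struct perf_event_attr),
	};
	char buf[256];
	int kfd, id;
	FILE *f;

	/* 1. Register the probe: "p:" = entry probe, group/name, target. */
	kfd = open("/sys/kernel/debug/tracing/uprobe_events",
		   O_WRONLY | O_APPEND);
	if (kfd < 0)
		return -1;
	snprintf(buf, sizeof(buf), "p:uprobes/u %s:0x%lx\n", binary, offset);
	if (write(kfd, buf, strlen(buf)) < 0) {
		close(kfd);
		return -1;
	}
	close(kfd);

	/* 2. Read back the tracepoint id the kernel assigned. */
	f = fopen("/sys/kernel/debug/tracing/events/uprobes/u/id", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &id) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	/* 3. Open it as a perf event; the resulting fd is what the sample
	 * feeds to bpf_program__attach_perf_event()/bpf_task_fd_query(). */
	attr.config = id;
	return syscall(__NR_perf_event_open, &attr, -1 /* pid: all */,
		       0 /* cpu */, -1 /* group fd */, PERF_FLAG_FD_CLOEXEC);
}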
/linux/tools/perf/util/
probe-file.c
    152: int probe_file__open_both(int *kfd, int *ufd, int flag)
    154: if (!kfd || !ufd)  in probe_file__open_both()
    157: *kfd = open_kprobe_events(flag & PF_FL_RW);  in probe_file__open_both()
    159: if (*kfd < 0 && *ufd < 0) {  in probe_file__open_both()
    160: print_both_open_warning(*kfd, *ufd, flag & PF_FL_RW);  in probe_file__open_both()
    161: return *kfd;  in probe_file__open_both()
probe-file.h
    42: int probe_file__open_both(int *kfd, int *ufd, int flag);
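Together with the definition above, this header gives the whole contract: the call opens both kprobe_events and uprobe_events, and per line 159 it only reports failure when neither file could be opened. A hypothetical caller might look like this (the close() handling is ours, not perf's):

#include <unistd.h>
#include "probe-file.h"

static int with_probe_files(void)
{
	int kfd = -1, ufd = -1;
	int ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);

	if (ret < 0)
		return ret;	/* neither tracing file could be opened */

	/* ... read or append probe definitions through kfd / ufd ... */

	if (kfd >= 0)
		close(kfd);
	if (ufd >= 0)
		close(ufd);
	return 0;
}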
/linux/drivers/gpu/drm/amd/amdgpu/ |
amdgpu_amdkfd_gpuvm.c
    223: (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >  in amdgpu_amdkfd_reserve_mem_limit()
    235: adev->kfd.vram_used[xcp_id] += vram_needed;  in amdgpu_amdkfd_reserve_mem_limit()
    236: adev->kfd.vram_used_aligned[xcp_id] +=  in amdgpu_amdkfd_reserve_mem_limit()
    264: adev->kfd.vram_used[xcp_id] -= size;  in amdgpu_amdkfd_unreserve_mem_limit()
    266: adev->kfd.vram_used_aligned[xcp_id] -= size;  in amdgpu_amdkfd_unreserve_mem_limit()
    270: adev->kfd.vram_used_aligned[xcp_id] -=  in amdgpu_amdkfd_unreserve_mem_limit()
    282: WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,  in amdgpu_amdkfd_unreserve_mem_limit()
    808: dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,  in kfd_mem_export_dmabuf()
    1630: - adev->kfd.vram_used_aligned[xcp_id]  in amdgpu_amdkfd_get_available_memory()
    1774: ret = drm_gem_handle_create(adev->kfd ...  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    [all ...]
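The reserve/unreserve matches show per-partition (xcp_id) VRAM accounting: a reservation fails if the running total would cross the budget, and a release warns if the counter underflows. A hedged sketch of the pattern; the lock name and vram_limit parameter are placeholders, and the real function also folds in GTT and system-memory limits.

#include <linux/spinlock.h>
#include <linux/bug.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(mem_limit_lock);

static int vram_reserve(struct amdgpu_device *adev, int xcp_id,
			int64_t vram_needed, int64_t vram_limit)
{
	int ret = 0;

	spin_lock(&mem_limit_lock);
	if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_limit)
		ret = -ENOMEM;			/* would exceed the budget */
	else
		adev->kfd.vram_used[xcp_id] += vram_needed;
	spin_unlock(&mem_limit_lock);
	return ret;
}

static void vram_unreserve(struct amdgpu_device *adev, int xcp_id,
			   int64_t size)
{
	spin_lock(&mem_limit_lock);
	adev->kfd.vram_used[xcp_id] -= size;
	/* Line 282's sanity check: the counter must never go negative. */
	WARN_ONCE(adev->kfd.vram_used[xcp_id] < 0,
		  "KFD VRAM accounting unbalanced");
	spin_unlock(&mem_limit_lock);
}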