Lines Matching +full:disable +full:- +full:eop
All hits below are from the amdgpu MES v12.0 driver (mes_v12_0.c); each entry shows the file line number, the matching source line, and the enclosing function.
52 struct amdgpu_device *adev = ring->adev; in mes_v12_0_ring_set_wptr()
54 if (ring->use_doorbell) { in mes_v12_0_ring_set_wptr()
55 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, in mes_v12_0_ring_set_wptr()
56 ring->wptr); in mes_v12_0_ring_set_wptr()
57 WDOORBELL64(ring->doorbell_index, ring->wptr); in mes_v12_0_ring_set_wptr()
65 return *ring->rptr_cpu_addr; in mes_v12_0_ring_get_rptr()
72 if (ring->use_doorbell) in mes_v12_0_ring_get_wptr()
73 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); in mes_v12_0_ring_get_wptr()
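The ring callbacks above follow the usual amdgpu doorbell handshake: the new write pointer is first published in the CPU-visible write-back slot (wptr_cpu_addr), and only then is the 64-bit doorbell rung so the MES firmware picks it up. A minimal standalone sketch of that ordering, with a hypothetical mapped doorbell standing in for WDOORBELL64:

    #include <stdatomic.h>
    #include <stdint.h>

    struct toy_ring {
        _Atomic uint64_t *wptr_cpu_addr; /* write-back copy the firmware can re-read */
        volatile uint64_t *doorbell;     /* stands in for the mapped doorbell page   */
        uint64_t wptr;
    };

    static void toy_ring_set_wptr(struct toy_ring *ring)
    {
        /* 1) publish the new write pointer in memory first ...                */
        atomic_store(ring->wptr_cpu_addr, ring->wptr);
        /* 2) ... then ring the 64-bit doorbell to notify the firmware,
         *    mirroring the atomic64_set() + WDOORBELL64() pair in the driver. */
        *ring->doorbell = ring->wptr;
    }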
128 if (x_pkt->header.opcode < ARRAY_SIZE(mes_v12_0_opcodes)) in mes_v12_0_get_op_string()
129 op_str = mes_v12_0_opcodes[x_pkt->header.opcode]; in mes_v12_0_get_op_string()
138 if ((x_pkt->header.opcode == MES_SCH_API_MISC) && in mes_v12_0_get_misc_op_string()
139 (x_pkt->opcode < ARRAY_SIZE(mes_v12_0_misc_opcodes))) in mes_v12_0_get_misc_op_string()
140 op_str = mes_v12_0_misc_opcodes[x_pkt->opcode]; in mes_v12_0_get_misc_op_string()
151 struct amdgpu_device *adev = mes->adev; in mes_v12_0_submit_pkt_and_poll_completion()
152 struct amdgpu_ring *ring = &mes->ring[pipe]; in mes_v12_0_submit_pkt_and_poll_completion()
153 spinlock_t *ring_lock = &mes->ring_lock[pipe]; in mes_v12_0_submit_pkt_and_poll_completion()
164 if (x_pkt->header.opcode >= MES_SCH_API_MAX) in mes_v12_0_submit_pkt_and_poll_completion()
165 return -EINVAL; in mes_v12_0_submit_pkt_and_poll_completion()
178 status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4); in mes_v12_0_submit_pkt_and_poll_completion()
179 status_ptr = (u64 *)&adev->wb.wb[status_offset]; in mes_v12_0_submit_pkt_and_poll_completion()
187 seq = ++ring->fence_drv.sync_seq; in mes_v12_0_submit_pkt_and_poll_completion()
189 seq - ring->fence_drv.num_fences_mask, in mes_v12_0_submit_pkt_and_poll_completion()
195 api_status->api_completion_fence_addr = status_gpu_addr; in mes_v12_0_submit_pkt_and_poll_completion()
196 api_status->api_completion_fence_value = 1; in mes_v12_0_submit_pkt_and_poll_completion()
205 ring->fence_drv.gpu_addr; in mes_v12_0_submit_pkt_and_poll_completion()
218 dev_dbg(adev->dev, "MES(%d) msg=%s (%s) was emitted\n", in mes_v12_0_submit_pkt_and_poll_completion()
221 dev_dbg(adev->dev, "MES(%d) msg=%s was emitted\n", in mes_v12_0_submit_pkt_and_poll_completion()
224 dev_dbg(adev->dev, "MES(%d) msg=%d was emitted\n", in mes_v12_0_submit_pkt_and_poll_completion()
225 pipe, x_pkt->header.opcode); in mes_v12_0_submit_pkt_and_poll_completion()
231 dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", in mes_v12_0_submit_pkt_and_poll_completion()
234 dev_err(adev->dev, "MES(%d) failed to respond to msg=%s\n", in mes_v12_0_submit_pkt_and_poll_completion()
237 dev_err(adev->dev, "MES(%d) failed to respond to msg=%d\n", in mes_v12_0_submit_pkt_and_poll_completion()
238 pipe, x_pkt->header.opcode); in mes_v12_0_submit_pkt_and_poll_completion()
243 r = -ETIMEDOUT; in mes_v12_0_submit_pkt_and_poll_completion()
251 dev_err(adev->dev, "MES ring buffer is full.\n"); in mes_v12_0_submit_pkt_and_poll_completion()
274 return -1; in convert_to_mes_queue_type()
280 struct amdgpu_device *adev = mes->adev; in mes_v12_0_add_hw_queue()
282 struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; in mes_v12_0_add_hw_queue()
283 uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; in mes_v12_0_add_hw_queue()
291 mes_add_queue_pkt.process_id = input->process_id; in mes_v12_0_add_hw_queue()
292 mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr; in mes_v12_0_add_hw_queue()
293 mes_add_queue_pkt.process_va_start = input->process_va_start; in mes_v12_0_add_hw_queue()
294 mes_add_queue_pkt.process_va_end = input->process_va_end; in mes_v12_0_add_hw_queue()
295 mes_add_queue_pkt.process_quantum = input->process_quantum; in mes_v12_0_add_hw_queue()
296 mes_add_queue_pkt.process_context_addr = input->process_context_addr; in mes_v12_0_add_hw_queue()
297 mes_add_queue_pkt.gang_quantum = input->gang_quantum; in mes_v12_0_add_hw_queue()
298 mes_add_queue_pkt.gang_context_addr = input->gang_context_addr; in mes_v12_0_add_hw_queue()
300 input->inprocess_gang_priority; in mes_v12_0_add_hw_queue()
302 input->gang_global_priority_level; in mes_v12_0_add_hw_queue()
303 mes_add_queue_pkt.doorbell_offset = input->doorbell_offset; in mes_v12_0_add_hw_queue()
304 mes_add_queue_pkt.mqd_addr = input->mqd_addr; in mes_v12_0_add_hw_queue()
306 mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr; in mes_v12_0_add_hw_queue()
309 convert_to_mes_queue_type(input->queue_type); in mes_v12_0_add_hw_queue()
310 mes_add_queue_pkt.paging = input->paging; in mes_v12_0_add_hw_queue()
312 mes_add_queue_pkt.gws_base = input->gws_base; in mes_v12_0_add_hw_queue()
313 mes_add_queue_pkt.gws_size = input->gws_size; in mes_v12_0_add_hw_queue()
314 mes_add_queue_pkt.trap_handler_addr = input->tba_addr; in mes_v12_0_add_hw_queue()
315 mes_add_queue_pkt.tma_addr = input->tma_addr; in mes_v12_0_add_hw_queue()
316 mes_add_queue_pkt.trap_en = input->trap_en; in mes_v12_0_add_hw_queue()
317 mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear; in mes_v12_0_add_hw_queue()
318 mes_add_queue_pkt.is_kfd_process = input->is_kfd_process; in mes_v12_0_add_hw_queue()
320 /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ in mes_v12_0_add_hw_queue()
321 mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; in mes_v12_0_add_hw_queue()
322 mes_add_queue_pkt.gds_size = input->queue_size; in mes_v12_0_add_hw_queue()
324 /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ in mes_v12_0_add_hw_queue()
325 mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; in mes_v12_0_add_hw_queue()
326 mes_add_queue_pkt.gds_size = input->queue_size; in mes_v12_0_add_hw_queue()
345 mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; in mes_v12_0_remove_hw_queue()
346 mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; in mes_v12_0_remove_hw_queue()
359 for (i = 0; i < adev->usec_timeout; i++) { in gfx_v12_0_request_gfx_index_mutex()
380 if (i >= adev->usec_timeout) in gfx_v12_0_request_gfx_index_mutex()
381 return -EINVAL; in gfx_v12_0_request_gfx_index_mutex()
390 struct amdgpu_device *adev = mes->adev; in mes_v12_0_reset_queue_mmio()
397 dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n", in mes_v12_0_reset_queue_mmio()
400 mutex_lock(&adev->gfx.reset_sem_mutex); in mes_v12_0_reset_queue_mmio()
412 mutex_unlock(&adev->gfx.reset_sem_mutex); in mes_v12_0_reset_queue_mmio()
414 mutex_lock(&adev->srbm_mutex); in mes_v12_0_reset_queue_mmio()
417 for (i = 0; i < adev->usec_timeout; i++) { in mes_v12_0_reset_queue_mmio()
422 if (i >= adev->usec_timeout) { in mes_v12_0_reset_queue_mmio()
423 dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n"); in mes_v12_0_reset_queue_mmio()
424 r = -ETIMEDOUT; in mes_v12_0_reset_queue_mmio()
428 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_reset_queue_mmio()
430 dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n", in mes_v12_0_reset_queue_mmio()
432 mutex_lock(&adev->srbm_mutex); in mes_v12_0_reset_queue_mmio()
438 for (i = 0; i < adev->usec_timeout; i++) { in mes_v12_0_reset_queue_mmio()
443 if (i >= adev->usec_timeout) { in mes_v12_0_reset_queue_mmio()
444 dev_err(adev->dev, "failed to wait on hqd deactivate\n"); in mes_v12_0_reset_queue_mmio()
445 r = -ETIMEDOUT; in mes_v12_0_reset_queue_mmio()
448 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_reset_queue_mmio()
450 dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n", in mes_v12_0_reset_queue_mmio()
465 for (i = 0; i < adev->usec_timeout; i++) { in mes_v12_0_reset_queue_mmio()
470 if (i >= adev->usec_timeout) { in mes_v12_0_reset_queue_mmio()
471 dev_err(adev->dev, "failed to wait on sdma queue reset done\n"); in mes_v12_0_reset_queue_mmio()
472 r = -ETIMEDOUT; in mes_v12_0_reset_queue_mmio()
492 mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; in mes_v12_0_reset_hw_queue()
493 mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr; in mes_v12_0_reset_hw_queue()
496 if (mes->adev->enable_uni_mes) in mes_v12_0_reset_hw_queue()
518 mes_add_queue_pkt.pipe_id = input->pipe_id; in mes_v12_0_map_legacy_queue()
519 mes_add_queue_pkt.queue_id = input->queue_id; in mes_v12_0_map_legacy_queue()
520 mes_add_queue_pkt.doorbell_offset = input->doorbell_offset; in mes_v12_0_map_legacy_queue()
521 mes_add_queue_pkt.mqd_addr = input->mqd_addr; in mes_v12_0_map_legacy_queue()
522 mes_add_queue_pkt.wptr_addr = input->wptr_addr; in mes_v12_0_map_legacy_queue()
524 convert_to_mes_queue_type(input->queue_type); in mes_v12_0_map_legacy_queue()
527 if (mes->adev->enable_uni_mes) in mes_v12_0_map_legacy_queue()
549 mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; in mes_v12_0_unmap_legacy_queue()
552 mes_remove_queue_pkt.pipe_id = input->pipe_id; in mes_v12_0_unmap_legacy_queue()
553 mes_remove_queue_pkt.queue_id = input->queue_id; in mes_v12_0_unmap_legacy_queue()
555 if (input->action == PREEMPT_QUEUES_NO_UNMAP) { in mes_v12_0_unmap_legacy_queue()
557 mes_remove_queue_pkt.tf_addr = input->trail_fence_addr; in mes_v12_0_unmap_legacy_queue()
559 lower_32_bits(input->trail_fence_data); in mes_v12_0_unmap_legacy_queue()
563 convert_to_mes_queue_type(input->queue_type); in mes_v12_0_unmap_legacy_queue()
566 if (mes->adev->enable_uni_mes) in mes_v12_0_unmap_legacy_queue()
609 if (mes->adev->enable_uni_mes) in mes_v12_0_misc_op()
620 switch (input->op) { in mes_v12_0_misc_op()
623 misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset; in mes_v12_0_misc_op()
624 misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr; in mes_v12_0_misc_op()
628 misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset; in mes_v12_0_misc_op()
629 misc_pkt.write_reg.reg_value = input->write_reg.reg_value; in mes_v12_0_misc_op()
634 misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref; in mes_v12_0_misc_op()
635 misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask; in mes_v12_0_misc_op()
636 misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0; in mes_v12_0_misc_op()
642 misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref; in mes_v12_0_misc_op()
643 misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask; in mes_v12_0_misc_op()
644 misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0; in mes_v12_0_misc_op()
645 misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1; in mes_v12_0_misc_op()
651 input->set_shader_debugger.process_context_addr; in mes_v12_0_misc_op()
653 input->set_shader_debugger.flags.u32all; in mes_v12_0_misc_op()
655 input->set_shader_debugger.spi_gdbg_per_vmid_cntl; in mes_v12_0_misc_op()
657 input->set_shader_debugger.tcp_watch_cntl, in mes_v12_0_misc_op()
659 misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; in mes_v12_0_misc_op()
666 input->change_config.option.limit_single_process; in mes_v12_0_misc_op()
670 DRM_ERROR("unsupported misc op (%d) \n", input->op); in mes_v12_0_misc_op()
671 return -EINVAL; in mes_v12_0_misc_op()
690 mes->resource_1_gpu_addr[pipe]; in mes_v12_0_set_hw_resources_1()
700 struct amdgpu_device *adev = mes->adev; in mes_v12_0_set_hw_resources()
710 mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub; in mes_v12_0_set_hw_resources()
711 mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub; in mes_v12_0_set_hw_resources()
712 mes_set_hw_res_pkt.gds_size = adev->gds.gds_size; in mes_v12_0_set_hw_resources()
717 mes->compute_hqd_mask[i]; in mes_v12_0_set_hw_resources()
721 mes->gfx_hqd_mask[i]; in mes_v12_0_set_hw_resources()
725 mes->sdma_hqd_mask[i]; in mes_v12_0_set_hw_resources()
729 mes->aggregated_doorbells[i]; in mes_v12_0_set_hw_resources()
733 mes->sch_ctx_gpu_addr[pipe]; in mes_v12_0_set_hw_resources()
735 mes->query_status_fence_gpu_addr[pipe]; in mes_v12_0_set_hw_resources()
738 mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i]; in mes_v12_0_set_hw_resources()
740 adev->reg_offset[MMHUB_HWIP][0][i]; in mes_v12_0_set_hw_resources()
742 adev->reg_offset[OSSSYS_HWIP][0][i]; in mes_v12_0_set_hw_resources()
754 * handling mode - 0: disabled; 1: basic version; 2: basic+ version in mes_v12_0_set_hw_resources()
761 mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + in mes_v12_0_set_hw_resources()
775 struct amdgpu_device *adev = mes->adev; in mes_v12_0_init_aggregated_doorbell()
782 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] << in mes_v12_0_init_aggregated_doorbell()
791 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] << in mes_v12_0_init_aggregated_doorbell()
800 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] << in mes_v12_0_init_aggregated_doorbell()
809 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] << in mes_v12_0_init_aggregated_doorbell()
818 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] << in mes_v12_0_init_aggregated_doorbell()
831 struct amdgpu_device *adev = mes->adev; in mes_v12_0_enable_unmapped_doorbell_handling()
854 if (input->use_mmio) in mes_v12_0_reset_legacy_queue()
855 return mes_v12_0_reset_queue_mmio(mes, input->queue_type, in mes_v12_0_reset_legacy_queue()
856 input->me_id, input->pipe_id, in mes_v12_0_reset_legacy_queue()
857 input->queue_id, input->vmid); in mes_v12_0_reset_legacy_queue()
866 convert_to_mes_queue_type(input->queue_type); in mes_v12_0_reset_legacy_queue()
870 mes_reset_queue_pkt.pipe_id_lp = input->pipe_id; in mes_v12_0_reset_legacy_queue()
871 mes_reset_queue_pkt.queue_id_lp = input->queue_id; in mes_v12_0_reset_legacy_queue()
872 mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr; in mes_v12_0_reset_legacy_queue()
873 mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset; in mes_v12_0_reset_legacy_queue()
874 mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr; in mes_v12_0_reset_legacy_queue()
875 mes_reset_queue_pkt.vmid_id_lp = input->vmid; in mes_v12_0_reset_legacy_queue()
878 mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; in mes_v12_0_reset_legacy_queue()
881 if (mes->adev->enable_uni_mes) in mes_v12_0_reset_legacy_queue()
912 adev->mes.fw[pipe]->data; in mes_v12_0_allocate_ucode_buffer()
914 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + in mes_v12_0_allocate_ucode_buffer()
915 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); in mes_v12_0_allocate_ucode_buffer()
916 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); in mes_v12_0_allocate_ucode_buffer()
921 &adev->mes.ucode_fw_obj[pipe], in mes_v12_0_allocate_ucode_buffer()
922 &adev->mes.ucode_fw_gpu_addr[pipe], in mes_v12_0_allocate_ucode_buffer()
923 (void **)&adev->mes.ucode_fw_ptr[pipe]); in mes_v12_0_allocate_ucode_buffer()
925 dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r); in mes_v12_0_allocate_ucode_buffer()
929 memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size); in mes_v12_0_allocate_ucode_buffer()
931 amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]); in mes_v12_0_allocate_ucode_buffer()
932 amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]); in mes_v12_0_allocate_ucode_buffer()
946 adev->mes.fw[pipe]->data; in mes_v12_0_allocate_ucode_data_buffer()
948 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + in mes_v12_0_allocate_ucode_data_buffer()
949 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); in mes_v12_0_allocate_ucode_data_buffer()
950 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); in mes_v12_0_allocate_ucode_data_buffer()
955 &adev->mes.data_fw_obj[pipe], in mes_v12_0_allocate_ucode_data_buffer()
956 &adev->mes.data_fw_gpu_addr[pipe], in mes_v12_0_allocate_ucode_data_buffer()
957 (void **)&adev->mes.data_fw_ptr[pipe]); in mes_v12_0_allocate_ucode_data_buffer()
959 dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r); in mes_v12_0_allocate_ucode_data_buffer()
963 memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size); in mes_v12_0_allocate_ucode_data_buffer()
965 amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]); in mes_v12_0_allocate_ucode_data_buffer()
966 amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]); in mes_v12_0_allocate_ucode_data_buffer()
974 amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe], in mes_v12_0_free_ucode_buffers()
975 &adev->mes.data_fw_gpu_addr[pipe], in mes_v12_0_free_ucode_buffers()
976 (void **)&adev->mes.data_fw_ptr[pipe]); in mes_v12_0_free_ucode_buffers()
978 amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe], in mes_v12_0_free_ucode_buffers()
979 &adev->mes.ucode_fw_gpu_addr[pipe], in mes_v12_0_free_ucode_buffers()
980 (void **)&adev->mes.ucode_fw_ptr[pipe]); in mes_v12_0_free_ucode_buffers()
989 mutex_lock(&adev->srbm_mutex); in mes_v12_0_enable()
995 if (adev->mes.event_log_size >= (pipe + 1) * log_size) { in mes_v12_0_enable()
997 lower_32_bits(adev->mes.event_log_gpu_addr + in mes_v12_0_enable()
1000 upper_32_bits(adev->mes.event_log_gpu_addr + in mes_v12_0_enable()
1002 dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n", in mes_v12_0_enable()
1015 ucode_addr = adev->mes.uc_start_addr[pipe] >> 2; in mes_v12_0_enable()
1025 dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data); in mes_v12_0_enable()
1031 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_enable()
1035 else if (adev->enable_uni_mes) in mes_v12_0_enable()
1059 mutex_lock(&adev->srbm_mutex); in mes_v12_0_set_ucode_start_addr()
1065 ucode_addr = adev->mes.uc_start_addr[pipe] >> 2; in mes_v12_0_set_ucode_start_addr()
1073 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_set_ucode_start_addr()
1085 if (!adev->mes.fw[pipe]) in mes_v12_0_load_microcode()
1086 return -EINVAL; in mes_v12_0_load_microcode()
1098 mutex_lock(&adev->srbm_mutex); in mes_v12_0_load_microcode()
1106 lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe])); in mes_v12_0_load_microcode()
1108 upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe])); in mes_v12_0_load_microcode()
1110 /* set ucode instruction cache boundary to 2M-1 */ in mes_v12_0_load_microcode()
1115 lower_32_bits(adev->mes.data_fw_gpu_addr[pipe])); in mes_v12_0_load_microcode()
1117 upper_32_bits(adev->mes.data_fw_gpu_addr[pipe])); in mes_v12_0_load_microcode()
1136 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_load_microcode()
1145 u32 *eop; in mes_v12_0_allocate_eop_buf() local
1149 &adev->mes.eop_gpu_obj[pipe], in mes_v12_0_allocate_eop_buf()
1150 &adev->mes.eop_gpu_addr[pipe], in mes_v12_0_allocate_eop_buf()
1151 (void **)&eop); in mes_v12_0_allocate_eop_buf()
1153 dev_warn(adev->dev, "(%d) create EOP bo failed\n", r); in mes_v12_0_allocate_eop_buf()
1157 memset(eop, 0, in mes_v12_0_allocate_eop_buf()
1158 adev->mes.eop_gpu_obj[pipe]->tbo.base.size); in mes_v12_0_allocate_eop_buf()
1160 amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]); in mes_v12_0_allocate_eop_buf()
1161 amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]); in mes_v12_0_allocate_eop_buf()
1168 struct v12_compute_mqd *mqd = ring->mqd_ptr; in mes_v12_0_mqd_init()
1172 mqd->header = 0xC0310800; in mes_v12_0_mqd_init()
1173 mqd->compute_pipelinestat_enable = 0x00000001; in mes_v12_0_mqd_init()
1174 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; in mes_v12_0_mqd_init()
1175 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; in mes_v12_0_mqd_init()
1176 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; in mes_v12_0_mqd_init()
1177 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; in mes_v12_0_mqd_init()
1178 mqd->compute_misc_reserved = 0x00000007; in mes_v12_0_mqd_init()
1180 eop_base_addr = ring->eop_gpu_addr >> 8; in mes_v12_0_mqd_init()
1182 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ in mes_v12_0_mqd_init()
1185 (order_base_2(MES_EOP_SIZE / 4) - 1)); in mes_v12_0_mqd_init()
1187 mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr); in mes_v12_0_mqd_init()
1188 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); in mes_v12_0_mqd_init()
1189 mqd->cp_hqd_eop_control = tmp; in mes_v12_0_mqd_init()
1191 /* disable the queue if it's active */ in mes_v12_0_mqd_init()
1192 ring->wptr = 0; in mes_v12_0_mqd_init()
1193 mqd->cp_hqd_pq_rptr = 0; in mes_v12_0_mqd_init()
1194 mqd->cp_hqd_pq_wptr_lo = 0; in mes_v12_0_mqd_init()
1195 mqd->cp_hqd_pq_wptr_hi = 0; in mes_v12_0_mqd_init()
1198 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; in mes_v12_0_mqd_init()
1199 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); in mes_v12_0_mqd_init()
1204 mqd->cp_mqd_control = tmp; in mes_v12_0_mqd_init()
1207 hqd_gpu_addr = ring->gpu_addr >> 8; in mes_v12_0_mqd_init()
1208 mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr); in mes_v12_0_mqd_init()
1209 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); in mes_v12_0_mqd_init()
1212 wb_gpu_addr = ring->rptr_gpu_addr; in mes_v12_0_mqd_init()
1213 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; in mes_v12_0_mqd_init()
1214 mqd->cp_hqd_pq_rptr_report_addr_hi = in mes_v12_0_mqd_init()
1218 wb_gpu_addr = ring->wptr_gpu_addr; in mes_v12_0_mqd_init()
1219 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8; in mes_v12_0_mqd_init()
1220 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; in mes_v12_0_mqd_init()
1225 (order_base_2(ring->ring_size / 4) - 1)); in mes_v12_0_mqd_init()
1227 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); in mes_v12_0_mqd_init()
1233 mqd->cp_hqd_pq_control = tmp; in mes_v12_0_mqd_init()
1237 if (ring->use_doorbell) { in mes_v12_0_mqd_init()
1239 DOORBELL_OFFSET, ring->doorbell_index); in mes_v12_0_mqd_init()
1250 mqd->cp_hqd_pq_doorbell_control = tmp; in mes_v12_0_mqd_init()
1252 mqd->cp_hqd_vmid = 0; in mes_v12_0_mqd_init()
1254 mqd->cp_hqd_active = 1; in mes_v12_0_mqd_init()
1259 mqd->cp_hqd_persistent_state = tmp; in mes_v12_0_mqd_init()
1261 mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT; in mes_v12_0_mqd_init()
1262 mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT; in mes_v12_0_mqd_init()
1263 mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT; in mes_v12_0_mqd_init()
1270 mqd->reserved_184 = BIT(15); in mes_v12_0_mqd_init()
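The EOP and ring-size fields programmed in mes_v12_0_mqd_init() above use the CP's power-of-two size encoding: the register field holds order_base_2(size in dwords) - 1, and the hardware decodes it as 2^(field+1) dwords. A worked example, assuming an EOP buffer of 2048 bytes purely for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* order_base_2(n): smallest k with 2^k >= n (matches the kernel helper
     * for the power-of-two sizes used here). */
    static unsigned int order_base_2(uint32_t n)
    {
        unsigned int k = 0;
        while ((1u << k) < n)
            k++;
        return k;
    }

    int main(void)
    {
        uint32_t eop_bytes  = 2048;                         /* assumed EOP buffer size */
        uint32_t eop_dwords = eop_bytes / 4;                /* 512 dwords              */
        uint32_t field      = order_base_2(eop_dwords) - 1; /* 9 - 1 = 8               */

        /* hardware reads the field back as 2^(field+1) dwords */
        assert((1u << (field + 1)) == eop_dwords);

        /* CP_HQD_PQ_CONTROL.QUEUE_SIZE uses the same scheme:
         * order_base_2(ring_size / 4) - 1, as set a few lines above. */
        return 0;
    }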
1277 struct v12_compute_mqd *mqd = ring->mqd_ptr; in mes_v12_0_queue_init_register()
1278 struct amdgpu_device *adev = ring->adev; in mes_v12_0_queue_init_register()
1281 mutex_lock(&adev->srbm_mutex); in mes_v12_0_queue_init_register()
1282 soc21_grbm_select(adev, 3, ring->pipe, 0, 0); in mes_v12_0_queue_init_register()
1296 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); in mes_v12_0_queue_init_register()
1297 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); in mes_v12_0_queue_init_register()
1305 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); in mes_v12_0_queue_init_register()
1306 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); in mes_v12_0_queue_init_register()
1310 mqd->cp_hqd_pq_rptr_report_addr_lo); in mes_v12_0_queue_init_register()
1312 mqd->cp_hqd_pq_rptr_report_addr_hi); in mes_v12_0_queue_init_register()
1315 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); in mes_v12_0_queue_init_register()
1319 mqd->cp_hqd_pq_wptr_poll_addr_lo); in mes_v12_0_queue_init_register()
1321 mqd->cp_hqd_pq_wptr_poll_addr_hi); in mes_v12_0_queue_init_register()
1325 mqd->cp_hqd_pq_doorbell_control); in mes_v12_0_queue_init_register()
1328 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); in mes_v12_0_queue_init_register()
1331 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active); in mes_v12_0_queue_init_register()
1334 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_queue_init_register()
1339 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in mes_v12_0_kiq_enable_queue()
1340 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in mes_v12_0_kiq_enable_queue()
1343 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) in mes_v12_0_kiq_enable_queue()
1344 return -EINVAL; in mes_v12_0_kiq_enable_queue()
1346 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); in mes_v12_0_kiq_enable_queue()
1352 kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]); in mes_v12_0_kiq_enable_queue()
1357 kiq_ring->sched.ready = false; in mes_v12_0_kiq_enable_queue()
1368 if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) in mes_v12_0_queue_init()
1369 ring = &adev->gfx.kiq[0].ring; in mes_v12_0_queue_init()
1371 ring = &adev->mes.ring[pipe]; in mes_v12_0_queue_init()
1373 if ((adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) && in mes_v12_0_queue_init()
1374 (amdgpu_in_reset(adev) || adev->in_suspend)) { in mes_v12_0_queue_init()
1375 *(ring->wptr_cpu_addr) = 0; in mes_v12_0_queue_init()
1376 *(ring->rptr_cpu_addr) = 0; in mes_v12_0_queue_init()
1385 if (adev->enable_uni_mes) in mes_v12_0_queue_init()
1395 if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) || in mes_v12_0_queue_init()
1396 ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) { in mes_v12_0_queue_init()
1398 mutex_lock(&adev->srbm_mutex); in mes_v12_0_queue_init()
1402 adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); in mes_v12_0_queue_init()
1403 else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) in mes_v12_0_queue_init()
1404 adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); in mes_v12_0_queue_init()
1407 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_queue_init()
1417 ring = &adev->mes.ring[pipe]; in mes_v12_0_ring_init()
1419 ring->funcs = &mes_v12_0_ring_funcs; in mes_v12_0_ring_init()
1421 ring->me = 3; in mes_v12_0_ring_init()
1422 ring->pipe = pipe; in mes_v12_0_ring_init()
1423 ring->queue = 0; in mes_v12_0_ring_init()
1425 ring->ring_obj = NULL; in mes_v12_0_ring_init()
1426 ring->use_doorbell = true; in mes_v12_0_ring_init()
1427 ring->eop_gpu_addr = adev->mes.eop_gpu_addr[pipe]; in mes_v12_0_ring_init()
1428 ring->no_scheduler = true; in mes_v12_0_ring_init()
1429 sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue); in mes_v12_0_ring_init()
1432 ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1; in mes_v12_0_ring_init()
1434 ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1; in mes_v12_0_ring_init()
1444 spin_lock_init(&adev->gfx.kiq[0].ring_lock); in mes_v12_0_kiq_ring_init()
1446 ring = &adev->gfx.kiq[0].ring; in mes_v12_0_kiq_ring_init()
1448 ring->me = 3; in mes_v12_0_kiq_ring_init()
1449 ring->pipe = 1; in mes_v12_0_kiq_ring_init()
1450 ring->queue = 0; in mes_v12_0_kiq_ring_init()
1452 ring->adev = NULL; in mes_v12_0_kiq_ring_init()
1453 ring->ring_obj = NULL; in mes_v12_0_kiq_ring_init()
1454 ring->use_doorbell = true; in mes_v12_0_kiq_ring_init()
1455 ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1; in mes_v12_0_kiq_ring_init()
1456 ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE]; in mes_v12_0_kiq_ring_init()
1457 ring->no_scheduler = true; in mes_v12_0_kiq_ring_init()
1458 sprintf(ring->name, "mes_kiq_%d.%d.%d", in mes_v12_0_kiq_ring_init()
1459 ring->me, ring->pipe, ring->queue); in mes_v12_0_kiq_ring_init()
1471 if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) in mes_v12_0_mqd_sw_init()
1472 ring = &adev->gfx.kiq[0].ring; in mes_v12_0_mqd_sw_init()
1474 ring = &adev->mes.ring[pipe]; in mes_v12_0_mqd_sw_init()
1476 if (ring->mqd_obj) in mes_v12_0_mqd_sw_init()
1480 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, in mes_v12_0_mqd_sw_init()
1481 &ring->mqd_gpu_addr, &ring->mqd_ptr); in mes_v12_0_mqd_sw_init()
1483 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); in mes_v12_0_mqd_sw_init()
1487 memset(ring->mqd_ptr, 0, mqd_size); in mes_v12_0_mqd_sw_init()
1490 adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL); in mes_v12_0_mqd_sw_init()
1491 if (!adev->mes.mqd_backup[pipe]) in mes_v12_0_mqd_sw_init()
1492 dev_warn(adev->dev, in mes_v12_0_mqd_sw_init()
1494 ring->name); in mes_v12_0_mqd_sw_init()
1501 struct amdgpu_device *adev = ip_block->adev; in mes_v12_0_sw_init()
1504 adev->mes.funcs = &mes_v12_0_funcs; in mes_v12_0_sw_init()
1505 adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init; in mes_v12_0_sw_init()
1506 adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini; in mes_v12_0_sw_init()
1507 adev->mes.enable_legacy_queue_map = true; in mes_v12_0_sw_init()
1509 adev->mes.event_log_size = adev->enable_uni_mes ? in mes_v12_0_sw_init()
1525 if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) { in mes_v12_0_sw_init()
1534 &adev->mes.resource_1[pipe], in mes_v12_0_sw_init()
1535 &adev->mes.resource_1_gpu_addr[pipe], in mes_v12_0_sw_init()
1536 &adev->mes.resource_1_addr[pipe]); in mes_v12_0_sw_init()
1538 dev_err(adev->dev, "(%d) failed to create mes resource_1 bo pipe[%d]\n", r, pipe); in mes_v12_0_sw_init()
1549 struct amdgpu_device *adev = ip_block->adev; in mes_v12_0_sw_fini()
1553 amdgpu_bo_free_kernel(&adev->mes.resource_1[pipe], in mes_v12_0_sw_fini()
1554 &adev->mes.resource_1_gpu_addr[pipe], in mes_v12_0_sw_fini()
1555 &adev->mes.resource_1_addr[pipe]); in mes_v12_0_sw_fini()
1557 kfree(adev->mes.mqd_backup[pipe]); in mes_v12_0_sw_fini()
1559 amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe], in mes_v12_0_sw_fini()
1560 &adev->mes.eop_gpu_addr[pipe], in mes_v12_0_sw_fini()
1562 amdgpu_ucode_release(&adev->mes.fw[pipe]); in mes_v12_0_sw_fini()
1564 if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) { in mes_v12_0_sw_fini()
1565 amdgpu_bo_free_kernel(&adev->mes.ring[pipe].mqd_obj, in mes_v12_0_sw_fini()
1566 &adev->mes.ring[pipe].mqd_gpu_addr, in mes_v12_0_sw_fini()
1567 &adev->mes.ring[pipe].mqd_ptr); in mes_v12_0_sw_fini()
1568 amdgpu_ring_fini(&adev->mes.ring[pipe]); in mes_v12_0_sw_fini()
1572 if (!adev->enable_uni_mes) { in mes_v12_0_sw_fini()
1573 amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj, in mes_v12_0_sw_fini()
1574 &adev->gfx.kiq[0].ring.mqd_gpu_addr, in mes_v12_0_sw_fini()
1575 &adev->gfx.kiq[0].ring.mqd_ptr); in mes_v12_0_sw_fini()
1576 amdgpu_ring_fini(&adev->gfx.kiq[0].ring); in mes_v12_0_sw_fini()
1579 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { in mes_v12_0_sw_fini()
1593 mutex_lock(&adev->srbm_mutex); in mes_v12_0_kiq_dequeue_sched()
1596 /* disable the queue if it's active */ in mes_v12_0_kiq_dequeue_sched()
1599 for (i = 0; i < adev->usec_timeout; i++) { in mes_v12_0_kiq_dequeue_sched()
1619 mutex_unlock(&adev->srbm_mutex); in mes_v12_0_kiq_dequeue_sched()
1621 adev->mes.ring[0].sched.ready = false; in mes_v12_0_kiq_dequeue_sched()
1627 struct amdgpu_device *adev = ring->adev; in mes_v12_0_kiq_setting()
1632 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); in mes_v12_0_kiq_setting()
1641 if (adev->enable_uni_mes) in mes_v12_0_kiq_hw_init()
1642 mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]); in mes_v12_0_kiq_hw_init()
1644 mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring); in mes_v12_0_kiq_hw_init()
1646 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { in mes_v12_0_kiq_hw_init()
1662 } else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) in mes_v12_0_kiq_hw_init()
1669 dev_err(adev->dev, "Failed to get MES handle\n"); in mes_v12_0_kiq_hw_init()
1670 return -EINVAL; in mes_v12_0_kiq_hw_init()
1677 if (adev->enable_uni_mes) { in mes_v12_0_kiq_hw_init()
1678 r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_KIQ_PIPE); in mes_v12_0_kiq_hw_init()
1682 mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE); in mes_v12_0_kiq_hw_init()
1685 if (adev->mes.enable_legacy_queue_map) { in mes_v12_0_kiq_hw_init()
1700 if (adev->mes.ring[0].sched.ready) { in mes_v12_0_kiq_hw_fini()
1701 if (adev->enable_uni_mes) in mes_v12_0_kiq_hw_fini()
1703 &adev->mes.ring[AMDGPU_MES_SCHED_PIPE], in mes_v12_0_kiq_hw_fini()
1708 adev->mes.ring[0].sched.ready = false; in mes_v12_0_kiq_hw_fini()
1719 struct amdgpu_device *adev = ip_block->adev; in mes_v12_0_hw_init()
1721 if (adev->mes.ring[0].sched.ready) in mes_v12_0_hw_init()
1724 if (!adev->enable_mes_kiq) { in mes_v12_0_hw_init()
1725 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { in mes_v12_0_hw_init()
1735 } else if (adev->firmware.load_type == in mes_v12_0_hw_init()
1745 mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true); in mes_v12_0_hw_init()
1751 r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_SCHED_PIPE); in mes_v12_0_hw_init()
1755 mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE); in mes_v12_0_hw_init()
1757 mes_v12_0_init_aggregated_doorbell(&adev->mes); in mes_v12_0_hw_init()
1759 r = mes_v12_0_query_sched_status(&adev->mes, AMDGPU_MES_SCHED_PIPE); in mes_v12_0_hw_init()
1771 * Disable KIQ ring usage from the driver once MES is enabled. in mes_v12_0_hw_init()
1775 adev->gfx.kiq[0].ring.sched.ready = false; in mes_v12_0_hw_init()
1776 adev->mes.ring[0].sched.ready = true; in mes_v12_0_hw_init()
1802 struct amdgpu_device *adev = ip_block->adev; in mes_v12_0_early_init()
1816 struct amdgpu_device *adev = ip_block->adev; in mes_v12_0_late_init()
1819 if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend) in mes_v12_0_late_init()