Lines matching defs:gmu (drivers/gpu/drm/msm/adreno/a6xx_hfi.c)
29 static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
60 if (!gmu->legacy)
67 static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
91 if (!gmu->legacy) {
99 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
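A rough sketch of the write path above, not the verbatim driver code: the message dwords are copied into the shared ring, the write index is published, and the host-to-GMU doorbell is rung (line 99). The queue header field names (size, write_index) are assumptions; non-legacy firmware additionally pads the write (the !gmu->legacy branch at line 91, not shown here).

	static int hfi_queue_push_sketch(struct a6xx_gmu *gmu,
		struct a6xx_hfi_queue *queue, const u32 *data, u32 dwords)
	{
		struct a6xx_hfi_queue_header *hdr = queue->header;	/* assumed layout */
		u32 idx = hdr->write_index;
		u32 i;

		/* Copy the message into the dword ring, wrapping at the queue size */
		for (i = 0; i < dwords; i++) {
			queue->data[idx] = data[i];
			idx = (idx + 1) % hdr->size;
		}

		/* Publish the new write index, then ring the doorbell */
		hdr->write_index = idx;
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);

		return 0;
	}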
103 static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
109 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
113 DRM_DEV_ERROR(gmu->dev,
120 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
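The wait helper polls the GMU-to-host interrupt status until the message-queue bit fires, then acks it through the clear register. A minimal fragment-level sketch; the A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ mask name and the 100 us / 5 ms poll parameters are assumptions, not taken from this listing.

	u32 val;
	int ret;

	/* Spin (with timeout) until the GMU signals that a response is queued */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
	if (ret)
		return -ETIMEDOUT;

	/* Ack the interrupt so the next message can be detected */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);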
126 static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
129 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
132 ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
140 ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
148 ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
150 DRM_DEV_ERROR(gmu->dev,
161 DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
167 DRM_DEV_ERROR(gmu->dev,
174 DRM_DEV_ERROR(gmu->dev,
189 static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
192 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
202 ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
204 DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
209 return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
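a6xx_hfi_send_msg() is the single entry point used by every helper below: it stamps the first dword of the message with a header, pushes the message onto HFI_COMMAND_QUEUE, and blocks in a6xx_hfi_wait_for_ack() for the reply on HFI_RESPONSE_QUEUE. A fragment-level sketch of that flow, written as if inside a6xx_hfi_send_msg() with data/dwords being the caller's message; the header bit layout and the per-queue seqnum counter are assumptions.

	u32 seqnum;
	int ret;

	/* Assumed packing of dword 0: message id, size in dwords, sequence number */
	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
	data[0] = (seqnum << 20) | (dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message id %d\n", seqnum);
		return ret;
	}

	/* Block until the GMU acks this id/seqnum, copying back any payload */
	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);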
212 static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
216 msg.dbg_buffer_addr = (u32) gmu->debug.iova;
217 msg.dbg_buffer_size = (u32) gmu->debug.size;
220 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
224 static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
231 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
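a6xx_hfi_get_fw_version() is the one message in this file that reads data back: the caller's version pointer is handed to a6xx_hfi_send_msg() as the ack payload and filled from the GMU's reply. A hypothetical caller sketch (the version variable and the info print are illustrative only; a6xx_hfi_start_v1() below actually passes NULL because it ignores the reply):

	u32 version = 0;
	int ret;

	ret = a6xx_hfi_get_fw_version(gmu, &version);
	if (!ret)
		DRM_DEV_INFO(gmu->dev, "GMU HFI version 0x%08x\n", version);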
235 static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
240 msg.num_gpu_levels = gmu->nr_gpu_freqs;
241 msg.num_gmu_levels = gmu->nr_gmu_freqs;
243 for (i = 0; i < gmu->nr_gpu_freqs; i++) {
244 msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
245 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
248 for (i = 0; i < gmu->nr_gmu_freqs; i++) {
249 msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
250 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
253 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
257 static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
262 msg.num_gpu_levels = gmu->nr_gpu_freqs;
263 msg.num_gmu_levels = gmu->nr_gmu_freqs;
265 for (i = 0; i < gmu->nr_gpu_freqs; i++) {
266 msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
268 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
271 for (i = 0; i < gmu->nr_gmu_freqs; i++) {
272 msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
273 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
276 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
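Both perf-table variants send the same payload: the number of GPU and GMU power levels, then one (ARC vote, frequency) pair per level, with frequencies converted from Hz to kHz by the divide by 1000. A condensed sketch of the current variant; the struct name and array bounds are assumptions, the field names come from the listing.

	struct a6xx_hfi_msg_perf_table msg = { 0 };	/* name assumed from HFI_H2F_MSG_PERF_TABLE */
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];		/* RPMh ARC vote for this level */
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;	/* Hz -> kHz */
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);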
280 static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
292 for (i = 0; i < gmu->nr_gpu_bws; ++i)
294 msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
295 msg->bw_level_num = gmu->nr_gpu_bws;
722 static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
725 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
729 if (gmu->bw_table)
732 msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
736 if (info->bcms && gmu->nr_gpu_bws > 1)
737 a6xx_generate_bw_table(info, gmu, msg);
761 gmu->bw_table = msg;
764 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)),
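a6xx_hfi_send_bw_table() builds the DDR bandwidth vote table once, caches it in gmu->bw_table (devm_kzalloc()'d, so it lives as long as the device), and simply resends the cached copy on every later GMU boot; the generated path only runs when BCM data is present and there is more than one bandwidth level, otherwise a static per-GPU table is filled in (not visible in this listing). A hedged sketch of that caching pattern:

	if (gmu->bw_table)	/* already built during an earlier boot */
		goto send;

	msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (info->bcms && gmu->nr_gpu_bws > 1)
		a6xx_generate_bw_table(info, gmu, msg);	/* per-SoC table from the ICC votes */

	gmu->bw_table = msg;

send:
	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table,
		sizeof(*gmu->bw_table), NULL, 0);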
770 static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
772 struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
784 ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
786 DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
791 ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
793 DRM_DEV_ERROR(gmu->dev, "Unable to send ACD table (%d)\n", ret);
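Enabling ACD (adaptive clock distribution) is a two-message handshake: a HFI_H2F_FEATURE_CTRL message turns the feature on, then the precomputed gmu->acd_table is pushed with HFI_H2F_MSG_ACD. A sketch of that sequence; the feature-ctrl struct, its field names and HFI_FEATURE_ACD are assumptions, and the driver first bails out early if no power level actually uses ACD (not shown).

	struct a6xx_hfi_msg_feature_ctrl msg = {	/* struct/field names assumed */
		.feature = HFI_FEATURE_ACD,
		.enable = 1,
	};
	int ret;

	ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
		return ret;
	}

	ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to send ACD table (%d)\n", ret);

	return ret;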
800 static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
804 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
808 static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
812 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
816 static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
820 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
824 int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
832 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
836 int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
842 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
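a6xx_hfi_set_freq() is the runtime DVFS hook: it sends HFI_H2F_MSG_GX_BW_PERF_VOTE carrying an index into the perf table and an index into the bandwidth table sent at boot, and a6xx_hfi_send_prep_slumber() hands the GMU its final vote before slumber. A sketch of the vote message; the struct and field names (ack_type, freq, bw) are assumptions.

	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };	/* name assumed */

	msg.ack_type = 1;		/* request a blocking ack from the GMU (assumed meaning) */
	msg.freq = freq_index;		/* index into the perf table */
	msg.bw = bw_index;		/* index into the bandwidth table */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);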
846 static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
850 ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
854 ret = a6xx_hfi_get_fw_version(gmu, NULL);
864 ret = a6xx_hfi_send_perf_table_v1(gmu);
868 ret = a6xx_hfi_send_bw_table(gmu);
876 a6xx_hfi_send_test(gmu);
881 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
885 if (gmu->legacy)
886 return a6xx_hfi_start_v1(gmu, boot_state);
889 ret = a6xx_hfi_send_perf_table(gmu);
893 ret = a6xx_hfi_send_bw_table(gmu);
897 ret = a6xx_hfi_enable_acd(gmu);
901 ret = a6xx_hfi_send_core_fw_start(gmu);
909 ret = a6xx_hfi_send_start(gmu);
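Taken together, the non-legacy boot sequence in a6xx_hfi_start() is: perf table, bandwidth table, ACD enable, HFI_H2F_MSG_CORE_FW_START, then HFI_H2F_MSG_START; legacy firmware instead goes through a6xx_hfi_start_v1(), which uses the INIT/FW_VERSION handshake and finishes with a TEST message. A condensed sketch of that ordering (error handling trimmed to early returns):

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);	/* GPU/GMU frequencies + ARC votes */
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);	/* DDR bandwidth votes */
	if (ret)
		return ret;

	ret = a6xx_hfi_enable_acd(gmu);		/* adaptive clock distribution */
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	return a6xx_hfi_send_start(gmu);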
916 void a6xx_hfi_stop(struct a6xx_gmu *gmu)
920 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
921 struct a6xx_hfi_queue *queue = &gmu->queues[i];
927 DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
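a6xx_hfi_stop() walks every queue and warns if the GMU left unread data behind; the emptiness check is just a comparison of the shared read and write indices, which are then reset for the next boot. A sketch, with the queue header field names assumed:

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];
		struct a6xx_hfi_queue_header *hdr = queue->header;	/* assumed fields */

		if (hdr->read_index != hdr->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		/* Reset both indices so the next GMU boot starts clean */
		hdr->read_index = 0;
		hdr->write_index = 0;
	}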
964 void a6xx_hfi_init(struct a6xx_gmu *gmu)
966 struct a6xx_gmu_bo *hfi = &gmu->hfi;
977 table_size += (ARRAY_SIZE(gmu->queues) *
985 table->num_queues = ARRAY_SIZE(gmu->queues);
986 table->active_queues = ARRAY_SIZE(gmu->queues);
990 a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
995 a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
996 hfi->iova + offset, gmu->legacy ? 4 : 1);
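a6xx_hfi_init() lays the shared gmu->hfi buffer out as a queue-table header, an array of per-queue headers, and then the queue data regions; queue 0 is the host-to-GMU command queue and queue 1 the GMU-to-host response queue, whose id is 4 on legacy firmware and 1 otherwise (line 996). A rough sketch of that layout; the table struct name and the 4K offsets are assumptions.

	struct a6xx_hfi_queue_table_header *table = hfi->virt;	/* name assumed */
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset = SZ_4K;	/* queues assumed to start one page into the buffer */

	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Queue 0: host-to-GMU command queue */
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* Queue 1: GMU-to-host response queue (id 4 on legacy firmware) */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);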