Lines matching whole-word "gmu" in a6xx_hfi.c, the HFI (host-to-GMU firmware interface) code of the drm/msm Adreno a6xx driver. Each entry shows the source line number, the matching line, and the enclosing function.

26 static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, in a6xx_hfi_queue_read()
40 * If we are to assume that the GMU firmware is in fact a rational actor in a6xx_hfi_queue_read()
55 if (!gmu->legacy) in a6xx_hfi_queue_read()
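These first fragments are the queue read path: the host and the GMU share small ring buffers of dwords, the message length comes from a header dword that (per the comment at line 40) is trusted only up to the caller's buffer size, and line 55 shows non-legacy firmware keeping the read index 4-dword aligned. A minimal sketch of that logic; the queue-header field names (read_index, write_index, size) and the HFI_HEADER_SIZE() accessor do not appear in this listing and are assumptions:

static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	/* Nothing to read if the indices have met */
	if (header->read_index == header->write_index)
		return 0;

	hdr = queue->data[index];

	/*
	 * If we are to assume that the GMU firmware is a rational actor,
	 * a size larger than the caller's buffer indicates corruption.
	 */
	if (HFI_HEADER_SIZE(hdr) > dwords) {
		DRM_DEV_ERROR(gmu->dev, "HFI message too big: %d\n",
			HFI_HEADER_SIZE(hdr));
		return -EINVAL;
	}

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	/* Non-legacy firmware keeps the read index 4-dword aligned */
	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}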
62 static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, in a6xx_hfi_queue_write()
84 if (!gmu->legacy) { in a6xx_hfi_queue_write()
92 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); in a6xx_hfi_queue_write()
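The write side mirrors the read side and ends, at line 92, by ringing the host-to-GMU doorbell so the firmware notices the new message. A sketch under the same assumed field names; the 0xfafafafa padding value used to mark unused dwords for non-legacy firmware is also an assumption:

static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, index = header->write_index;

	spin_lock(&queue->lock);

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Pad out to a 4-dword boundary for non-legacy firmware */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	/* Ring the doorbell so the GMU looks at the queue */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}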
96 static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, in a6xx_hfi_wait_for_ack()
99 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; in a6xx_hfi_wait_for_ack()
104 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_hfi_wait_for_ack()
108 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
115 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, in a6xx_hfi_wait_for_ack()
122 ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp, in a6xx_hfi_wait_for_ack()
127 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
137 DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n", in a6xx_hfi_wait_for_ack()
143 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
150 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
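Lines 104 through 150 are the ack path: poll the GMU-to-host interrupt status, clear it, then drain the response queue until an entry matches the sequence number of the command just sent, logging firmware errors (line 137) and stale responses along the way. A sketch; the interrupt bit name, the response struct layout and the HFI_HEADER_* macros are assumed:

static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Poll the GMU-to-host interrupt status for the "message queued" bit */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message id %d seqnum %d timed out waiting for response\n",
			id, seqnum);
		return -ETIMEDOUT;
	}

	/* Acknowledge the interrupt before draining the queue */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);
		if (!ret) {
			/* Queue drained without finding our ack */
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");
			return -ENOENT;
		}

		/* Asynchronous firmware error reports share this queue */
		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				((struct a6xx_hfi_msg_error *) &resp)->code);
			continue;
		}

		/* Skip stale acks for earlier messages */
		if (HFI_HEADER_SEQNUM(resp.ret_header) != seqnum)
			continue;

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message id %d returned error %d\n",
				id, resp.error);
			return -EINVAL;
		}

		/* Success: hand back as much payload as the caller asked for */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
			       min_t(u32, payload_size, sizeof(resp.payload)));
		return 0;
	}
}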
165 static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, in a6xx_hfi_send_msg()
168 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; in a6xx_hfi_send_msg()
178 ret = a6xx_hfi_queue_write(gmu, queue, data, dwords); in a6xx_hfi_send_msg()
180 DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n", in a6xx_hfi_send_msg()
185 return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size); in a6xx_hfi_send_msg()
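a6xx_hfi_send_msg() ties the two halves together: stamp a sequence number into the message's first dword, write it to the command queue, then block for the matching ack. A sketch; the exact header bit layout (seqnum, command type, dword count, message id) is an assumption:

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	/* 12-bit sequence number, used to match the ack to this message */
	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword is the header: seqnum, type, size in dwords, msg id */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message id %d\n", id);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}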
188 static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) in a6xx_hfi_send_gmu_init()
192 msg.dbg_buffer_addr = (u32) gmu->debug.iova; in a6xx_hfi_send_gmu_init()
193 msg.dbg_buffer_size = (u32) gmu->debug.size; in a6xx_hfi_send_gmu_init()
196 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg), in a6xx_hfi_send_gmu_init()
200 static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version) in a6xx_hfi_get_fw_version()
207 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg), in a6xx_hfi_get_fw_version()
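The legacy INIT message mostly just points the firmware at the host-allocated debug buffer (lines 192-193) and passes the requested boot state. A sketch; the message struct name and its boot_state field are assumptions:

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	/* Point the firmware at the host-allocated debug buffer */
	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

a6xx_hfi_get_fw_version() (line 200) has the same shape but passes a payload pointer, so the firmware's version is copied back out of the ack.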
211 static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu) in a6xx_hfi_send_perf_table_v1()
216 msg.num_gpu_levels = gmu->nr_gpu_freqs; in a6xx_hfi_send_perf_table_v1()
217 msg.num_gmu_levels = gmu->nr_gmu_freqs; in a6xx_hfi_send_perf_table_v1()
219 for (i = 0; i < gmu->nr_gpu_freqs; i++) { in a6xx_hfi_send_perf_table_v1()
220 msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; in a6xx_hfi_send_perf_table_v1()
221 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; in a6xx_hfi_send_perf_table_v1()
224 for (i = 0; i < gmu->nr_gmu_freqs; i++) { in a6xx_hfi_send_perf_table_v1()
225 msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; in a6xx_hfi_send_perf_table_v1()
226 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; in a6xx_hfi_send_perf_table_v1()
229 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), in a6xx_hfi_send_perf_table_v1()
233 static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) in a6xx_hfi_send_perf_table()
238 msg.num_gpu_levels = gmu->nr_gpu_freqs; in a6xx_hfi_send_perf_table()
239 msg.num_gmu_levels = gmu->nr_gmu_freqs; in a6xx_hfi_send_perf_table()
241 for (i = 0; i < gmu->nr_gpu_freqs; i++) { in a6xx_hfi_send_perf_table()
242 msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; in a6xx_hfi_send_perf_table()
244 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; in a6xx_hfi_send_perf_table()
247 for (i = 0; i < gmu->nr_gmu_freqs; i++) { in a6xx_hfi_send_perf_table()
248 msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; in a6xx_hfi_send_perf_table()
249 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; in a6xx_hfi_send_perf_table()
252 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), in a6xx_hfi_send_perf_table()
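Both perf-table builders load one (RPMh ARC vote, frequency in kHz) pair per power level, for the GX rail the GPU runs on and the CX rail the GMU runs on; the v1 variant differs only in the message struct (the gap between listing lines 242 and 244 is a non-matching per-level field in the newer struct). A sketch of the non-legacy version; struct names beyond the constants shown are assumptions:

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	/* One (RPMh ARC vote, frequency-in-kHz) pair per GX power level */
	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	/* Same for the CX rail the GMU itself runs on */
	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}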
258 /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ in a618_build_bw_table()
273 * These are the CX (CNOC) votes - these are used by the GMU but the in a618_build_bw_table()
304 * These are the CX (CNOC) votes - these are used by the GMU but the in a640_build_bw_table()
343 * These are the CX (CNOC) votes - these are used by the GMU but the in a650_build_bw_table()
356 /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ in a6xx_build_bw_table()
372 * sdm845 GMU are known and fixed so we can hard code them. in a6xx_build_bw_table()
392 static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) in a6xx_hfi_send_bw_table()
395 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_hfi_send_bw_table()
407 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), in a6xx_hfi_send_bw_table()
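a6xx_hfi_send_bw_table() recovers the GPU object from the GMU pointer via container_of() (line 395) and fills the message with one of the hard-coded per-SoC bus tables built above. A sketch; the adreno_is_*() helpers and the message struct name are assumed:

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a640(adreno_gpu))
		a640_build_bw_table(&msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(&msg);
	else
		a6xx_build_bw_table(&msg);	/* sdm845-style defaults */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}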
411 static int a6xx_hfi_send_test(struct a6xx_gmu *gmu) in a6xx_hfi_send_test()
415 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg), in a6xx_hfi_send_test()
419 static int a6xx_hfi_send_start(struct a6xx_gmu *gmu) in a6xx_hfi_send_start()
423 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg), in a6xx_hfi_send_start()
427 static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu) in a6xx_hfi_send_core_fw_start()
431 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg, in a6xx_hfi_send_core_fw_start()
435 int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index) in a6xx_hfi_set_freq()
443 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg, in a6xx_hfi_set_freq()
447 int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu) in a6xx_hfi_send_prep_slumber()
453 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg, in a6xx_hfi_send_prep_slumber()
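The two runtime messages are small: a blocking GX performance vote that indexes into the perf table sent at boot, and a slumber notification before the GMU powers down. A sketch; the command struct names and field meanings are assumed:

int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1;	/* blocking */
	msg.freq = index;	/* index into the perf table sent at boot */
	msg.bw = 0;		/* no bus vote through this path */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}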
457 static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state) in a6xx_hfi_start_v1()
461 ret = a6xx_hfi_send_gmu_init(gmu, boot_state); in a6xx_hfi_start_v1()
465 ret = a6xx_hfi_get_fw_version(gmu, NULL); in a6xx_hfi_start_v1()
472 * the GMU firmware in a6xx_hfi_start_v1()
475 ret = a6xx_hfi_send_perf_table_v1(gmu); in a6xx_hfi_start_v1()
479 ret = a6xx_hfi_send_bw_table(gmu); in a6xx_hfi_start_v1()
484 * Let the GMU know that there won't be any more HFI messages until next in a6xx_hfi_start_v1()
487 a6xx_hfi_send_test(gmu); in a6xx_hfi_start_v1()
492 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) in a6xx_hfi_start()
496 if (gmu->legacy) in a6xx_hfi_start()
497 return a6xx_hfi_start_v1(gmu, boot_state); in a6xx_hfi_start()
500 ret = a6xx_hfi_send_perf_table(gmu); in a6xx_hfi_start()
504 ret = a6xx_hfi_send_bw_table(gmu); in a6xx_hfi_start()
508 ret = a6xx_hfi_send_core_fw_start(gmu); in a6xx_hfi_start()
516 ret = a6xx_hfi_send_start(gmu); in a6xx_hfi_start()
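Putting the boot flow together: legacy firmware gets the v1 handshake (INIT, version query, v1 perf table, bandwidth table, then a TEST message to say no more HFI traffic is coming until the next boot, per lines 484-487), while newer firmware takes perf table, bandwidth table, CORE_FW_START and START. A sketch of that dispatch, with error handling condensed:

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until
	 * the next boot
	 */
	a6xx_hfi_send_test(gmu);
	return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	return a6xx_hfi_send_start(gmu);
}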
523 void a6xx_hfi_stop(struct a6xx_gmu *gmu) in a6xx_hfi_stop()
527 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { in a6xx_hfi_stop()
528 struct a6xx_hfi_queue *queue = &gmu->queues[i]; in a6xx_hfi_stop()
534 DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i); in a6xx_hfi_stop()
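Shutdown only sanity-checks and rewinds the rings; an unread entry here means a message was lost. A sketch under the same assumed queue-header fields:

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		/* A non-empty queue at shutdown means a message was lost */
		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n",
				i);

		/* Reset both indices so the next boot starts clean */
		queue->header->read_index = 0;
		queue->header->write_index = 0;
	}
}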
565 void a6xx_hfi_init(struct a6xx_gmu *gmu) in a6xx_hfi_init()
567 struct a6xx_gmu_bo *hfi = &gmu->hfi; in a6xx_hfi_init()
578 table_size += (ARRAY_SIZE(gmu->queues) * in a6xx_hfi_init()
586 table->num_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
587 table->active_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
591 a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, in a6xx_hfi_init()
594 /* GMU response queue */ in a6xx_hfi_init()
596 a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, in a6xx_hfi_init()
597 hfi->iova + offset, gmu->legacy ? 4 : 1); in a6xx_hfi_init()
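Finally, init carves the HFI buffer object into a queue table header, per-queue headers, and per-queue data regions, and (line 597) registers the response queue with id 4 on legacy firmware, presumably the slot older firmware expects, and 1 otherwise. A sketch; the table-header field names and the 4 KiB spacing between queue data regions are assumptions:

void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/* Table header plus one queue header per queue */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header follows the table header; offsets in dwords */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue; legacy firmware expects queue id 4 here */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}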