Lines Matching defs:gmu

21 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
23 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
28 gmu->hung = true;
39 struct a6xx_gmu *gmu = data;
42 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
43 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
46 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
48 a6xx_gmu_fault(gmu);
52 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
55 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
56 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
63 struct a6xx_gmu *gmu = data;
66 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
67 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
70 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
72 a6xx_gmu_fault(gmu);
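
Both interrupt handlers above (the one reading AO_HOST_INTERRUPT_STATUS at line 42 and the one reading GMU2HOST_INTR_INFO at line 66) follow the same shape: read the latched status, ack everything that was seen, then act on each bit. A minimal standalone sketch of that pattern, with hypothetical reg_read()/reg_write() stubs and invented register offsets standing in for the real gmu_read()/gmu_write() calls:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MMIO accessors standing in for gmu_read()/gmu_write(). */
static uint32_t reg_read(uint32_t reg)  { (void)reg; return 0x3; }
static void     reg_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

#define INTR_STATUS 0x100          /* invented offsets, for illustration only */
#define INTR_CLEAR  0x104
#define IRQ_WDOG    (1u << 0)
#define IRQ_AHB_ERR (1u << 1)

static void demo_irq_handler(void)
{
        uint32_t status = reg_read(INTR_STATUS);

        /* Ack exactly what we read so later interrupts are not lost. */
        reg_write(INTR_CLEAR, status);

        if (status & IRQ_WDOG)
                puts("watchdog expired");
        if (status & IRQ_AHB_ERR)
                puts("AHB bus error");
}

int main(void)
{
        demo_irq_handler();
        return 0;
}
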
78 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
83 if (!gmu->initialized)
86 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
94 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
99 if (!gmu->initialized)
102 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
115 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
123 if (gpu_freq == gmu->freq)
126 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
127 if (gpu_freq == gmu->gpu_freqs[perf_index])
131 if (info->bcms && gmu->nr_gpu_bws > 1) {
134 for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
135 if (bw == gmu->gpu_bw_table[bw_index])
154 do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]);
161 gmu->current_perf_index = perf_index;
162 gmu->freq = gmu->gpu_freqs[perf_index];
164 trace_msm_gmu_freq_change(gmu->freq, perf_index);
175 if (!gmu->legacy) {
176 a6xx_hfi_set_freq(gmu, perf_index, bw_index);
183 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
185 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
192 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
195 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
196 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
198 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
200 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
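
The perf-index search at lines 126-127 only iterates up to nr_gpu_freqs - 1, so a frequency that is not in the table silently clamps to the last (highest) entry instead of failing. A self-contained illustration of that lookup behaviour (the table values are invented):

#include <stdio.h>

/* Find the index of freq, clamping to the last table entry if it is absent. */
static int find_perf_index(const unsigned long *table, int nr, unsigned long freq)
{
        int i;

        for (i = 0; i < nr - 1; i++)
                if (freq == table[i])
                        break;
        return i;
}

int main(void)
{
        const unsigned long freqs[] = { 300000000, 500000000, 800000000 };

        printf("%d\n", find_perf_index(freqs, 3, 500000000)); /* 1 */
        printf("%d\n", find_perf_index(freqs, 3, 123));       /* 2: clamps to the last entry */
        return 0;
}
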
209 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
211 return gmu->freq;
214 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
217 int local = gmu->idle_level;
220 if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
223 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
226 if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
227 !a6xx_gmu_gx_is_on(gmu))
235 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
237 return spin_until(a6xx_gmu_check_idle_level(gmu));
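
a6xx_gmu_wait_for_idle() at line 237 simply spins on a6xx_gmu_check_idle_level() via spin_until(); the timeout machinery itself is not part of this listing. A rough user-space approximation of poll-until-condition-or-timeout, with a hypothetical check_idle() predicate:

#include <stdbool.h>
#include <time.h>

/* Hypothetical predicate standing in for a6xx_gmu_check_idle_level(). */
static bool check_idle(void) { return true; }

/* Poll a condition until it holds or a timeout in milliseconds expires. */
static int spin_until_ms(bool (*cond)(void), long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (cond())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        return -1; /* would be -ETIMEDOUT in the kernel */
        }
}

int main(void)
{
        return spin_until_ms(check_idle, 100);
}
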
240 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
242 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
247 val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
256 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
262 gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
264 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
267 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
269 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
273 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
278 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
283 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
285 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
288 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
338 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
344 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
349 if (gmu->legacy) {
356 DRM_DEV_ERROR(gmu->dev,
364 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
367 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
371 DRM_DEV_ERROR(gmu->dev,
374 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
377 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
383 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
387 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
392 if (gmu->legacy)
397 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
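
The OOB handshake across lines 338-397 reduces to: set a request bit in HOST2GMU_INTR_SET, poll GMU2HOST_INTR_INFO for the matching ack bit, then clear the ack. A condensed sketch of that request/ack shape, with stubbed register helpers and invented offsets:

#include <stdbool.h>
#include <stdint.h>

/* Stubbed register helpers; the driver uses gmu_write()/gmu_poll_timeout(). */
static void mmio_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }
static bool mmio_poll_bit(uint32_t reg, uint32_t bit) { (void)reg; (void)bit; return true; }

#define HOST2GMU_SET  0x00        /* invented offsets */
#define GMU2HOST_INFO 0x04
#define GMU2HOST_CLR  0x08

/* Request an OOB state, wait for the GMU to acknowledge, then clear the ack. */
static int oob_set(unsigned int request_bit, unsigned int ack_bit)
{
        mmio_write(HOST2GMU_SET, 1u << request_bit);

        if (!mmio_poll_bit(GMU2HOST_INFO, 1u << ack_bit))
                return -1; /* timed out waiting for the ack */

        mmio_write(GMU2HOST_CLR, 1u << ack_bit);
        return 0;
}

int main(void)
{
        return oob_set(0, 0);
}
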
401 int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
406 if (!gmu->legacy)
409 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
411 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
415 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
416 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
423 void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
428 if (!gmu->legacy)
432 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
434 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
436 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
440 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
441 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
445 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
450 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
453 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
455 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
456 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
459 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
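
Lines 453-456 split a single ARC vote into two byte-wide index registers: the low byte goes to GX_VOTE_IDX and the next byte to MX_VOTE_IDX. A tiny worked example of that unpacking (the vote value is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vote = 0x0305;           /* invented: MX index 3, GX index 5 */
        uint32_t gx = vote & 0xff;        /* -> 0x05, written to GX_VOTE_IDX  */
        uint32_t mx = (vote >> 8) & 0xff; /* -> 0x03, written to MX_VOTE_IDX  */

        printf("gx=%u mx=%u\n", (unsigned int)gx, (unsigned int)mx);
        return 0;
}
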
462 static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
464 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
473 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
477 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
482 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
485 if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
486 a6xx_sptprac_disable(gmu);
488 if (!gmu->legacy) {
489 ret = a6xx_hfi_send_prep_slumber(gmu);
494 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
496 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
497 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
501 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
503 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
509 a6xx_gemnoc_workaround(gmu);
512 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
516 static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
521 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
523 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
526 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
530 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
534 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
538 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
543 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
548 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
550 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
553 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
555 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
566 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
568 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
570 struct platform_device *pdev = to_platform_device(gmu->dev);
597 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
600 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
601 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
602 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
603 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
604 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
605 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4,
607 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
608 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
609 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
610 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
611 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
620 gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0);
621 gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab);
622 gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581);
623 gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2);
624 gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad);
626 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
627 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
628 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
629 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
630 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
684 a6xx_rpmh_stop(gmu);
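
The RSC sequence-memory programming at lines 620-630 is a run of writes at consecutive offsets from a base register. Written data-driven it could look like the sketch below; the opcode values are copied from lines 620-624, while the base offset and write_rscc() helper here are stand-ins:

#include <stddef.h>
#include <stdint.h>

static void write_rscc(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

#define SEQ_MEM_BASE 0x0          /* stand-in for the real seqmem base offset */

/* The same per-offset writes as lines 620-624, expressed as a table walk. */
static void program_seqmem(void)
{
        static const uint32_t seq[] = {
                0xeaaae5a0, 0xe1a1ebab, 0xa2e0a581, 0xecac82e2, 0x0020edad,
        };
        size_t i;

        for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
                write_rscc(SEQ_MEM_BASE + i, seq[i]);
}

int main(void)
{
        program_seqmem();
        return 0;
}
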
702 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
704 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
708 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
709 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
710 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
716 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
718 switch (gmu->idle_level) {
720 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
722 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
727 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
729 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
735 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
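
gmu_rmw(), used at lines 722-735 with a zero clear-mask, is by its name a read-modify-write helper: clear the given mask, OR in the new bits, write the result back. A minimal generic version over a plain value, just to show the semantics:

#include <stdint.h>
#include <stdio.h>

/* Generic read-modify-write on a value: clear `mask`, then OR in `bits`. */
static uint32_t rmw(uint32_t val, uint32_t mask, uint32_t bits)
{
        return (val & ~mask) | bits;
}

int main(void)
{
        uint32_t reg = 0x000000f0;

        reg = rmw(reg, 0, 1u << 8);            /* mask 0: pure OR, as in the calls above */
        printf("0x%08x\n", (unsigned int)reg); /* 0x000001f0 */
        return 0;
}
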
761 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
763 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
776 if (gmu->legacy) {
779 DRM_DEV_ERROR(gmu->dev,
784 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
798 gmu_write_bulk(gmu,
803 gmu_write_bulk(gmu,
806 } else if (!fw_block_mem(&gmu->icache, blk) &&
807 !fw_block_mem(&gmu->dcache, blk) &&
808 !fw_block_mem(&gmu->dummy, blk)) {
809 DRM_DEV_ERROR(gmu->dev,
815 ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
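
The fw_block_mem() chain at lines 806-809 apparently tries each pre-allocated region (icache, dcache, dummy) in turn and fails the firmware load if a block fits in none of them. A standalone sketch of such a copy-if-in-range check, with an invented block/region layout:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct region {
        uint64_t iova;          /* GMU-visible start address of the region */
        size_t   size;
        void    *virt;          /* CPU mapping of the same memory */
};

/* Copy the block in and return true only if [addr, addr+len) lies inside the region. */
static bool block_fits(struct region *r, uint64_t addr, const void *data, size_t len)
{
        if (addr < r->iova || addr + len > r->iova + r->size)
                return false;

        memcpy((char *)r->virt + (addr - r->iova), data, len);
        return true;
}

int main(void)
{
        char backing[64];
        struct region r = { .iova = 0x1000, .size = sizeof(backing), .virt = backing };
        const char data[4] = "fw";

        return block_fits(&r, 0x1010, data, sizeof(data)) ? 0 : 1;
}
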
824 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
826 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
835 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
836 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
843 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
846 ret = a6xx_rpmh_start(gmu);
854 ret = a6xx_rpmh_start(gmu);
858 ret = a6xx_gmu_fw_load(gmu);
864 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
865 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
868 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
869 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
879 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
888 gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
906 gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
907 gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
908 (gmu->log.iova & GENMASK(31, 12)) |
909 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
911 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
913 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
914 gmu->log.iova | (gmu->log.size / SZ_4K - 1));
918 a6xx_gmu_power_config(gmu);
920 ret = a6xx_gmu_start(gmu);
924 if (gmu->legacy) {
925 ret = a6xx_gmu_gfx_rail_on(gmu);
931 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
932 ret = a6xx_sptprac_enable(gmu);
937 ret = a6xx_gmu_hfi_start(gmu);
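
Lines 907-914 pack the GMU log buffer description into a single register value: the 4K-aligned IOVA in the upper bits and (size in 4K pages - 1) in the low bits. A tiny worked example of that encoding, with invented values:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 4096u

int main(void)
{
        uint32_t iova = 0x40010000;              /* invented, must be 4K aligned */
        uint32_t size = 4 * SZ_4K;               /* a 16K log buffer */
        uint32_t reg  = (iova & 0xfffff000u) | ((size / SZ_4K - 1) & 0xffu);

        printf("0x%08x\n", (unsigned int)reg);   /* 0x40010003 */
        return 0;
}
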
955 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
957 disable_irq(gmu->gmu_irq);
958 disable_irq(gmu->hfi_irq);
960 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
961 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
964 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
966 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
975 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off,
977 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off,
979 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off,
981 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
986 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
988 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
996 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
999 a6xx_hfi_stop(gmu);
1002 a6xx_gmu_irq_disable(gmu);
1005 a6xx_sptprac_disable(gmu);
1007 a6xx_gemnoc_workaround(gmu);
1010 a6xx_gmu_rpmh_off(gmu);
1013 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
1014 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
1019 /* Halt the gmu cm3 core */
1020 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
1028 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
1031 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
1037 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
1042 static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
1045 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
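
Line 1037 clears the cached frequency before the first programming after resume so that the "already at this frequency" early return at line 123 cannot short-circuit it. The same trick in isolation:

/* Cached-value early-out, deliberately defeated by zeroing the cache first. */
static unsigned long cached_freq;

static void set_freq(unsigned long freq)
{
        if (freq == cached_freq)
                return;                 /* nothing to do */
        /* ... program the hardware ... */
        cached_freq = freq;
}

static void set_initial_freq(unsigned long freq)
{
        cached_freq = 0;                /* force set_freq() to really program it */
        set_freq(freq);
}

int main(void)
{
        set_initial_freq(300000000UL);
        return cached_freq == 300000000UL ? 0 : 1;
}
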
1059 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1062 if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
1065 gmu->hung = false;
1068 pm_runtime_get_sync(gmu->dev);
1075 if (!IS_ERR_OR_NULL(gmu->gxpd))
1076 pm_runtime_get_sync(gmu->gxpd);
1079 clk_set_rate(gmu->core_clk, 200000000);
1080 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ?
1082 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
1084 pm_runtime_put(gmu->gxpd);
1085 pm_runtime_put(gmu->dev);
1090 a6xx_gmu_set_initial_bw(gpu, gmu);
1093 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
1094 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
1095 enable_irq(gmu->gmu_irq);
1101 } else if (gmu->legacy) {
1102 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
1112 ret = a6xx_gmu_fw_start(gmu, status);
1116 ret = a6xx_hfi_start(gmu, status);
1124 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
1125 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
1126 enable_irq(gmu->hfi_irq);
1129 a6xx_gmu_set_initial_freq(gpu, gmu);
1134 disable_irq(gmu->gmu_irq);
1135 a6xx_rpmh_stop(gmu);
1136 pm_runtime_put(gmu->gxpd);
1137 pm_runtime_put(gmu->dev);
1143 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
1147 if (!gmu->initialized)
1150 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
1159 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
1161 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1172 if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
1175 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
1178 ret = a6xx_gmu_wait_for_idle(gmu);
1187 ret = a6xx_gmu_notify_slumber(gmu);
1191 ret = gmu_poll_timeout(gmu,
1202 DRM_DEV_ERROR(gmu->dev,
1204 gmu_read(gmu,
1206 gmu_read(gmu,
1210 a6xx_hfi_stop(gmu);
1213 a6xx_gmu_irq_disable(gmu);
1216 a6xx_rpmh_stop(gmu);
1221 a6xx_gmu_force_off(gmu);
1227 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1230 if (!pm_runtime_active(gmu->dev))
1237 if (gmu->hung)
1238 a6xx_gmu_force_off(gmu);
1240 a6xx_gmu_shutdown(gmu);
1250 if (!IS_ERR_OR_NULL(gmu->gxpd))
1251 pm_runtime_put_sync(gmu->gxpd);
1253 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
1255 pm_runtime_put_sync(gmu->dev);
1260 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
1262 struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
1264 msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
1265 msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
1266 msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
1267 msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
1268 msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
1269 msm_gem_kernel_put(gmu->log.obj, gmu->vm);
1272 drm_gpuvm_put(gmu->vm);
1275 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
1278 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1301 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
1316 static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
1320 mmu = msm_iommu_new(gmu->dev, 0);
1326 gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
1327 if (IS_ERR(gmu->vm))
1328 return PTR_ERR(gmu->vm);
1349 struct a6xx_gmu *gmu)
1368 dev_err(gmu->dev, "invalid BCM '%s' aux data size\n",
1377 for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) {
1378 u32 *data = gmu->gpu_ib_votes[bw_index];
1379 u32 bw = gmu->gpu_bw_table[bw_index];
1538 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1540 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1547 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1548 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1551 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1552 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1555 if (info->bcms && gmu->nr_gpu_bws > 1)
1556 ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu);
1625 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1627 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1638 ret = devm_pm_opp_of_add_table(gmu->dev);
1640 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1644 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1645 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1651 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1652 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1654 gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
1661 gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev,
1662 gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table));
1665 return a6xx_gmu_rpmh_votes_init(gmu);
1668 static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
1670 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1671 struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
1679 DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
1688 for (i = 1; i < gmu->nr_gpu_freqs; i++) {
1694 freq = gmu->gpu_freqs[i];
1704 DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
1715 if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
1716 DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
1721 if (IS_ERR_OR_NULL(gmu->qmp))
1728 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
1730 DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
1737 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1739 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1744 gmu->nr_clocks = ret;
1746 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1747 gmu->nr_clocks, "gmu");
1749 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
1750 gmu->nr_clocks, "hub");
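
msm_clk_bulk_get_clock() at lines 1746-1750 presumably picks one named entry ("gmu", "hub") out of the bulk clock array obtained at line 1739. A generic sketch of that kind of name lookup over an array of {id, handle} pairs:

#include <stddef.h>
#include <string.h>

struct clk_entry {
        const char *id;
        void       *clk;        /* stand-in for struct clk * */
};

/* Return the handle whose id matches `name`, or NULL if it is not in the array. */
static void *get_named_clock(const struct clk_entry *clks, int count, const char *name)
{
        int i;

        for (i = 0; i < count; i++)
                if (clks[i].id && !strcmp(clks[i].id, name))
                        return clks[i].clk;
        return NULL;
}

int main(void)
{
        int gmu_clk, hub_clk;
        const struct clk_entry clks[] = {
                { "gmu", &gmu_clk }, { "hub", &hub_clk },
        };

        return get_named_clock(clks, 2, "hub") == &hub_clk ? 0 : 1;
}
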
1776 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1783 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu);
1796 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1797 struct platform_device *pdev = to_platform_device(gmu->dev);
1799 mutex_lock(&gmu->lock);
1800 if (!gmu->initialized) {
1801 mutex_unlock(&gmu->lock);
1805 gmu->initialized = false;
1807 mutex_unlock(&gmu->lock);
1809 pm_runtime_force_suspend(gmu->dev);
1812 * Since cxpd is a virt device, the devlink with gmu-dev will be removed
1815 dev_pm_domain_detach(gmu->cxpd, false);
1817 if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1818 pm_runtime_disable(gmu->gxpd);
1819 dev_pm_domain_detach(gmu->gxpd, false);
1822 if (!IS_ERR_OR_NULL(gmu->qmp))
1823 qmp_put(gmu->qmp);
1825 iounmap(gmu->mmio);
1827 iounmap(gmu->rscc);
1828 gmu->mmio = NULL;
1829 gmu->rscc = NULL;
1832 a6xx_gmu_memory_free(gmu);
1834 free_irq(gmu->gmu_irq, gmu);
1835 free_irq(gmu->hfi_irq, gmu);
1839 put_device(gmu->dev);
1845 struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
1848 complete_all(&gmu->pd_gate);
1856 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1862 gmu->dev = &pdev->dev;
1864 ret = of_dma_configure(gmu->dev, node, true);
1868 pm_runtime_enable(gmu->dev);
1871 gmu->legacy = true;
1874 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1875 if (IS_ERR(gmu->mmio)) {
1876 ret = PTR_ERR(gmu->mmio);
1880 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
1881 if (IS_ERR(gmu->cxpd)) {
1882 ret = PTR_ERR(gmu->cxpd);
1886 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
1891 init_completion(&gmu->pd_gate);
1892 complete_all(&gmu->pd_gate);
1893 gmu->pd_nb.notifier_call = cxpd_notifier_cb;
1896 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1897 if (IS_ERR(gmu->gxpd)) {
1898 ret = PTR_ERR(gmu->gxpd);
1902 gmu->initialized = true;
1907 dev_pm_domain_detach(gmu->cxpd, false);
1910 iounmap(gmu->mmio);
1913 put_device(gmu->dev);
1921 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1929 gmu->dev = &pdev->dev;
1931 ret = of_dma_configure(gmu->dev, node, true);
1936 gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1938 pm_runtime_enable(gmu->dev);
1941 ret = a6xx_gmu_clocks_probe(gmu);
1945 ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
1956 gmu->dummy.size = SZ_4K;
1959 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
1964 gmu->dummy.size = SZ_8K;
1968 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
1976 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1987 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1992 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
1998 gmu->legacy = true;
2001 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
2007 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
2012 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
2017 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
2018 if (IS_ERR(gmu->mmio)) {
2019 ret = PTR_ERR(gmu->mmio);
2025 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
2026 if (IS_ERR(gmu->rscc)) {
2031 gmu->rscc = gmu->mmio + 0x23000;
2035 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
2036 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
2038 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
2043 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
2044 if (IS_ERR(gmu->cxpd)) {
2045 ret = PTR_ERR(gmu->cxpd);
2049 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME);
2056 gmu->qmp = qmp_get(gmu->dev);
2057 if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
2062 init_completion(&gmu->pd_gate);
2063 complete_all(&gmu->pd_gate);
2064 gmu->pd_nb.notifier_call = cxpd_notifier_cb;
2070 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
2073 a6xx_gmu_pwrlevels_probe(gmu);
2075 ret = a6xx_gmu_acd_probe(gmu);
2080 a6xx_hfi_init(gmu);
2083 a6xx_gmu_rpmh_init(gmu);
2085 gmu->initialized = true;
2090 if (!IS_ERR_OR_NULL(gmu->gxpd))
2091 dev_pm_domain_detach(gmu->gxpd, false);
2093 if (!IS_ERR_OR_NULL(gmu->qmp))
2094 qmp_put(gmu->qmp);
2099 dev_pm_domain_detach(gmu->cxpd, false);
2102 iounmap(gmu->mmio);
2104 iounmap(gmu->rscc);
2105 free_irq(gmu->gmu_irq, gmu);
2106 free_irq(gmu->hfi_irq, gmu);
2109 a6xx_gmu_memory_free(gmu);
2112 put_device(gmu->dev);
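
The tail of a6xx_gmu_init() at lines 2090-2112 is the usual kernel-style error unwind: each later failure path releases what was acquired earlier, roughly in reverse order of acquisition. The idea in miniature, with plain malloc/free standing in for the real resources:

#include <stdlib.h>

/* Acquire two resources; on a later failure, release the earlier one and bail. */
static int demo_init(void **out_a, void **out_b)
{
        void *a = malloc(16);
        void *b;

        if (!a)
                return -1;

        b = malloc(32);
        if (!b)
                goto err_free_a;

        *out_a = a;
        *out_b = b;
        return 0;

err_free_a:
        free(a);
        return -1;
}

int main(void)
{
        void *a = NULL, *b = NULL;
        int ret = demo_init(&a, &b);

        free(b);
        free(a);
        return ret;
}
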