Lines matching full:gmu in drivers/gpu/drm/msm/adreno/a6xx_gmu.c (only lines containing "gmu" are shown; the numbers are the file's own line numbers)
20 static void a6xx_gmu_fault(struct a6xx_gmu *gmu) in a6xx_gmu_fault() argument
22 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fault()
27 gmu->hung = true; in a6xx_gmu_fault()
38 struct a6xx_gmu *gmu = data; in a6xx_gmu_irq() local
41 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); in a6xx_gmu_irq()
42 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); in a6xx_gmu_irq()
45 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
47 a6xx_gmu_fault(gmu); in a6xx_gmu_irq()
51 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
54 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
55 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); in a6xx_gmu_irq()
62 struct a6xx_gmu *gmu = data; in a6xx_hfi_irq() local
65 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); in a6xx_hfi_irq()
66 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); in a6xx_hfi_irq()
69 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
71 a6xx_gmu_fault(gmu); in a6xx_hfi_irq()
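
Both handlers above share an ack-then-act shape: latch the interrupt status, write it straight back to the matching CLR register, then act on the bits that were set. A minimal sketch of that shape reconstructed from the matched lines (the CM3 fault bit name is an assumption, and the driver's exact control flow may differ):

	#include <linux/interrupt.h>
	/* plus the driver's a6xx_gmu.h for struct a6xx_gmu, gmu_read()/gmu_write()
	 * and the REG_A6XX_* register indices */

	static irqreturn_t hfi_irq_sketch(int irq, void *data)
	{
		struct a6xx_gmu *gmu = data;
		u32 status;

		/* Latch and acknowledge whatever the GMU raised */
		status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
		gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

		/* A firmware fault is fatal for this session: flag the GMU as hung */
		if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
			a6xx_gmu_fault(gmu);

		return IRQ_HANDLED;
	}
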
77 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_sptprac_is_on() argument
81 /* This can be called from gpu state code so make sure GMU is valid */ in a6xx_gmu_sptprac_is_on()
82 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
85 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_sptprac_is_on()
93 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_gx_is_on() argument
97 /* This can be called from gpu state code so make sure GMU is valid */ in a6xx_gmu_gx_is_on()
98 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
101 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_gx_is_on()
113 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq() local
120 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
123 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
124 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
127 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
128 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
130 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
141 if (!gmu->legacy) { in a6xx_gmu_set_freq()
142 a6xx_hfi_set_freq(gmu, perf_index); in a6xx_gmu_set_freq()
147 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); in a6xx_gmu_set_freq()
149 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, in a6xx_gmu_set_freq()
156 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); in a6xx_gmu_set_freq()
158 /* Set and clear the OOB for DCVS to trigger the GMU */ in a6xx_gmu_set_freq()
159 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
160 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
162 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); in a6xx_gmu_set_freq()
164 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
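
The perf-index lookup at lines 123-124 walks the GPU frequency table and falls through to the highest index when nothing matches (the break on a match sits on a line that does not contain "gmu", so it is not shown here). A small, self-contained illustration of the same lookup with a hypothetical three-entry table:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	/* Hypothetical rates; the driver fills gpu_freqs[] from the OPP table. */
	static const unsigned long gpu_freqs[] = { 257000000, 430000000, 710000000 };

	static unsigned int find_perf_index(unsigned long gpu_freq)
	{
		unsigned int perf_index;

		for (perf_index = 0; perf_index < ARRAY_SIZE(gpu_freqs) - 1; perf_index++)
			if (gpu_freq == gpu_freqs[perf_index])
				break;

		/* Unmatched rates resolve to the last (highest) level */
		return perf_index;
	}
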
173 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq() local
175 return gmu->freq; in a6xx_gmu_get_freq()
178 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) in a6xx_gmu_check_idle_level() argument
181 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
184 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
187 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_check_idle_level()
190 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
191 !a6xx_gmu_gx_is_on(gmu)) in a6xx_gmu_check_idle_level()
198 /* Wait for the GMU to get to its most idle state */
199 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) in a6xx_gmu_wait_for_idle() argument
201 return spin_until(a6xx_gmu_check_idle_level(gmu)); in a6xx_gmu_wait_for_idle()
204 static int a6xx_gmu_start(struct a6xx_gmu *gmu) in a6xx_gmu_start() argument
206 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_start()
211 val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); in a6xx_gmu_start()
220 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_start()
226 gmu_write(gmu, REG_A6XX_GMU_GENERAL_9, 0); in a6xx_gmu_start()
228 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); in a6xx_gmu_start()
231 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); in a6xx_gmu_start()
233 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, in a6xx_gmu_start()
237 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
242 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) in a6xx_gmu_hfi_start() argument
247 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); in a6xx_gmu_hfi_start()
249 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, in a6xx_gmu_hfi_start()
252 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
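
gmu_poll_timeout(), used here and throughout the file, spins on a GMU register until a condition holds for the value read back or a timeout expires. A sketch of the idea, assuming it wraps readl_poll_timeout() with the usual dword-index-to-byte-offset shift; the real macro lives in a6xx_gmu.h and may differ in detail:

	#include <linux/iopoll.h>

	/* Illustrative only: poll the 32-bit GMU register at dword index "offset"
	 * into "val" until "cond" is true, or until "timeout_us" elapses. */
	#define gmu_poll_timeout_sketch(gmu, offset, val, cond, sleep_us, timeout_us) \
		readl_poll_timeout((gmu)->mmio + ((offset) << 2), val, cond, \
				   sleep_us, timeout_us)
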
301 /* Trigger an OOB (out of band) request to the GMU */
302 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_set_oob() argument
308 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
313 if (gmu->legacy) { in a6xx_gmu_set_oob()
320 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
321 "Invalid non-legacy GMU request %s\n", in a6xx_gmu_set_oob()
328 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); in a6xx_gmu_set_oob()
331 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_gmu_set_oob()
335 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
336 "Timeout waiting for GMU OOB set %s: 0x%x\n", in a6xx_gmu_set_oob()
338 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); in a6xx_gmu_set_oob()
341 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); in a6xx_gmu_set_oob()
346 /* Clear a pending OOB state in the GMU */
347 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_clear_oob() argument
351 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
356 if (gmu->legacy) in a6xx_gmu_clear_oob()
361 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); in a6xx_gmu_clear_oob()
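
Both OOB helpers assert that gmu->lock is held, so a typical caller takes the lock, requests the out-of-band grant, does its GX-side work, and then clears the grant again. A sketch of that bracketing (GMU_OOB_GPU_SET is an OOB slot used elsewhere in the driver and is assumed representative here; error reporting is trimmed):

	#include <linux/mutex.h>
	/* plus the driver's a6xx_gmu.h for the a6xx_gmu_*_oob() helpers */

	static int oob_usage_sketch(struct a6xx_gmu *gmu)
	{
		int ret;

		mutex_lock(&gmu->lock);

		ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
		if (!ret) {
			/* ... safely touch GX-side registers here ... */
			a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
		}

		mutex_unlock(&gmu->lock);

		return ret;
	}
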
365 int a6xx_sptprac_enable(struct a6xx_gmu *gmu) in a6xx_sptprac_enable() argument
370 if (!gmu->legacy) in a6xx_sptprac_enable()
373 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); in a6xx_sptprac_enable()
375 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_enable()
379 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
380 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_enable()
387 void a6xx_sptprac_disable(struct a6xx_gmu *gmu) in a6xx_sptprac_disable() argument
392 if (!gmu->legacy) in a6xx_sptprac_disable()
396 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); in a6xx_sptprac_disable()
398 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); in a6xx_sptprac_disable()
400 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_disable()
404 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
405 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_disable()
408 /* Let the GMU know we are starting a boot sequence */
409 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) in a6xx_gmu_gfx_rail_on() argument
413 /* Let the GMU know we are getting ready for boot */ in a6xx_gmu_gfx_rail_on()
414 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); in a6xx_gmu_gfx_rail_on()
417 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
419 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); in a6xx_gmu_gfx_rail_on()
420 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); in a6xx_gmu_gfx_rail_on()
422 /* Let the GMU know the boot sequence has started */ in a6xx_gmu_gfx_rail_on()
423 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_gfx_rail_on()
426 /* Let the GMU know that we are about to go into slumber */
427 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) in a6xx_gmu_notify_slumber() argument
431 /* Disable the power counter so the GMU isn't busy */ in a6xx_gmu_notify_slumber()
432 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); in a6xx_gmu_notify_slumber()
435 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
436 a6xx_sptprac_disable(gmu); in a6xx_gmu_notify_slumber()
438 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
439 ret = a6xx_hfi_send_prep_slumber(gmu); in a6xx_gmu_notify_slumber()
443 /* Tell the GMU to get ready to slumber */ in a6xx_gmu_notify_slumber()
444 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); in a6xx_gmu_notify_slumber()
446 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
447 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
450 /* Check to see if the GMU really did slumber */ in a6xx_gmu_notify_slumber()
451 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) in a6xx_gmu_notify_slumber()
453 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
460 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_notify_slumber()
464 static int a6xx_rpmh_start(struct a6xx_gmu *gmu) in a6xx_rpmh_start() argument
469 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); in a6xx_rpmh_start()
473 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, in a6xx_rpmh_start()
476 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
480 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, in a6xx_rpmh_start()
484 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
488 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_start()
493 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) in a6xx_rpmh_stop() argument
498 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); in a6xx_rpmh_stop()
500 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, in a6xx_rpmh_stop()
503 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
505 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_stop()
516 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_init() argument
518 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_init()
520 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
548 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); in a6xx_gmu_rpmh_init()
551 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); in a6xx_gmu_rpmh_init()
552 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); in a6xx_gmu_rpmh_init()
553 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); in a6xx_gmu_rpmh_init()
554 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); in a6xx_gmu_rpmh_init()
555 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); in a6xx_gmu_rpmh_init()
556 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, in a6xx_gmu_rpmh_init()
558 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); in a6xx_gmu_rpmh_init()
559 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); in a6xx_gmu_rpmh_init()
560 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); in a6xx_gmu_rpmh_init()
561 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); in a6xx_gmu_rpmh_init()
562 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); in a6xx_gmu_rpmh_init()
571 gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0); in a6xx_gmu_rpmh_init()
572 gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab); in a6xx_gmu_rpmh_init()
573 gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581); in a6xx_gmu_rpmh_init()
574 gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2); in a6xx_gmu_rpmh_init()
575 gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad); in a6xx_gmu_rpmh_init()
577 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); in a6xx_gmu_rpmh_init()
578 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); in a6xx_gmu_rpmh_init()
579 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); in a6xx_gmu_rpmh_init()
580 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); in a6xx_gmu_rpmh_init()
581 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); in a6xx_gmu_rpmh_init()
635 a6xx_rpmh_stop(gmu); in a6xx_gmu_rpmh_init()
652 /* Set up the idle state for the GMU */
653 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) in a6xx_gmu_power_config() argument
655 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_power_config()
658 /* Disable GMU WB/RB buffer */ in a6xx_gmu_power_config()
659 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); in a6xx_gmu_power_config()
660 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
661 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
667 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); in a6xx_gmu_power_config()
669 switch (gmu->idle_level) { in a6xx_gmu_power_config()
671 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, in a6xx_gmu_power_config()
673 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
678 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, in a6xx_gmu_power_config()
680 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
686 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, in a6xx_gmu_power_config()
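
gmu_rmw(), used above to OR enable bits into the inter-frame and RPMh control registers, is the driver's read-modify-write helper. Conceptually it behaves like the sketch below (a paraphrase, not the verbatim inline from a6xx_gmu.h):

	/* Read reg, clear the bits in "mask", set the bits in "or", write back. */
	static inline void gmu_rmw_sketch(struct a6xx_gmu *gmu, u32 reg,
					  u32 mask, u32 or)
	{
		u32 val = gmu_read(gmu, reg);

		val &= ~mask;
		gmu_write(gmu, reg, val | or);
	}
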
712 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) in a6xx_gmu_fw_load() argument
714 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_load()
726 if (gmu->legacy) { in a6xx_gmu_fw_load()
729 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
730 "GMU firmware is bigger than the available region\n"); in a6xx_gmu_fw_load()
734 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, in a6xx_gmu_fw_load()
748 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
753 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
756 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
757 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
758 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
759 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
768 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) in a6xx_gmu_fw_start() argument
770 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_start()
778 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); in a6xx_gmu_fw_start()
779 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); in a6xx_gmu_fw_start()
786 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); in a6xx_gmu_fw_start()
789 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
794 "GMU firmware is not loaded\n")) in a6xx_gmu_fw_start()
797 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
801 ret = a6xx_gmu_fw_load(gmu); in a6xx_gmu_fw_start()
807 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); in a6xx_gmu_fw_start()
808 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); in a6xx_gmu_fw_start()
811 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
812 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); in a6xx_gmu_fw_start()
822 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, in a6xx_gmu_fw_start()
831 gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052); in a6xx_gmu_fw_start()
833 /* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */ in a6xx_gmu_fw_start()
840 * The chipid that the GMU expects corresponds to the "GENX_Y_Z" naming, in a6xx_gmu_fw_start()
854 * Note that the GMU has a slightly different layout for in a6xx_gmu_fw_start()
866 gmu_write(gmu, REG_A6XX_GMU_GENERAL_10, chipid); in a6xx_gmu_fw_start()
867 gmu_write(gmu, REG_A6XX_GMU_GENERAL_8, in a6xx_gmu_fw_start()
868 (gmu->log.iova & GENMASK(31, 12)) | in a6xx_gmu_fw_start()
869 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); in a6xx_gmu_fw_start()
871 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); in a6xx_gmu_fw_start()
873 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, in a6xx_gmu_fw_start()
874 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
877 /* Set up the lowest idle level on the GMU */ in a6xx_gmu_fw_start()
878 a6xx_gmu_power_config(gmu); in a6xx_gmu_fw_start()
880 ret = a6xx_gmu_start(gmu); in a6xx_gmu_fw_start()
884 if (gmu->legacy) { in a6xx_gmu_fw_start()
885 ret = a6xx_gmu_gfx_rail_on(gmu); in a6xx_gmu_fw_start()
891 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
892 ret = a6xx_sptprac_enable(gmu); in a6xx_gmu_fw_start()
897 ret = a6xx_gmu_hfi_start(gmu); in a6xx_gmu_fw_start()
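
The REG_A6XX_GMU_GENERAL_8 write at lines 867-869 packs the GMU log buffer's location and size into a single word: the IOVA is 4 KiB aligned, so its low 12 bits are free, and the low byte instead carries the size in 4 KiB pages minus one. A worked example of that arithmetic (the address and size below are made up for illustration):

	#include <linux/bits.h>		/* GENMASK() */
	#include <linux/sizes.h>	/* SZ_4K, SZ_16K */

	static u32 pack_gmu_log_sketch(u64 iova, size_t size)
	{
		/* Upper bits: page-aligned address.  Low byte: pages - 1. */
		return (iova & GENMASK(31, 12)) |
		       ((size / SZ_4K - 1) & GENMASK(7, 0));
	}

	/* e.g. pack_gmu_log_sketch(0x60010000, SZ_16K) == 0x60010003 */
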
915 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) in a6xx_gmu_irq_disable() argument
917 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
918 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
920 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); in a6xx_gmu_irq_disable()
921 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); in a6xx_gmu_irq_disable()
924 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_off() argument
926 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_off()
935 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
937 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
939 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
941 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
945 /* Force the GMU off in case it isn't responsive */
946 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) in a6xx_gmu_force_off() argument
948 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_force_off()
956 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
959 a6xx_hfi_stop(gmu); in a6xx_gmu_force_off()
962 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_force_off()
964 /* Force off SPTP in case the GMU is managing it */ in a6xx_gmu_force_off()
965 a6xx_sptprac_disable(gmu); in a6xx_gmu_force_off()
968 a6xx_gmu_rpmh_off(gmu); in a6xx_gmu_force_off()
971 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7); in a6xx_gmu_force_off()
972 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_force_off()
977 /* Halt the GMU CM3 core */ in a6xx_gmu_force_off()
978 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_force_off()
986 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_freq() argument
989 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
995 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
1000 static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_bw() argument
1003 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
1017 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume() local
1020 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
1023 gmu->hung = false; in a6xx_gmu_resume()
1026 if (!IS_ERR(gmu->qmp)) { in a6xx_gmu_resume()
1027 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", in a6xx_gmu_resume()
1030 dev_err(gmu->dev, "failed to send GPU ACD state\n"); in a6xx_gmu_resume()
1034 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
1039 * bring down the GX after a GMU failure in a6xx_gmu_resume()
1041 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
1042 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
1044 /* Use a known rate to bring up the GMU */ in a6xx_gmu_resume()
1045 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
1046 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? in a6xx_gmu_resume()
1048 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
1050 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1051 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1056 a6xx_gmu_set_initial_bw(gpu, gmu); in a6xx_gmu_resume()
1058 /* Enable the GMU interrupt */ in a6xx_gmu_resume()
1059 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); in a6xx_gmu_resume()
1060 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); in a6xx_gmu_resume()
1061 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1067 } else if (gmu->legacy) { in a6xx_gmu_resume()
1068 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? in a6xx_gmu_resume()
1078 ret = a6xx_gmu_fw_start(gmu, status); in a6xx_gmu_resume()
1082 ret = a6xx_hfi_start(gmu, status); in a6xx_gmu_resume()
1087 * Turn on the GMU firmware fault interrupt after we know the boot in a6xx_gmu_resume()
1090 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); in a6xx_gmu_resume()
1091 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); in a6xx_gmu_resume()
1092 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1095 a6xx_gmu_set_initial_freq(gpu, gmu); in a6xx_gmu_resume()
1098 /* On failure, shut down the GMU to leave it in a good state */ in a6xx_gmu_resume()
1100 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1101 a6xx_rpmh_stop(gmu); in a6xx_gmu_resume()
1102 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1103 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1109 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) in a6xx_gmu_isidle() argument
1113 if (!gmu->initialized) in a6xx_gmu_isidle()
1116 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); in a6xx_gmu_isidle()
1124 /* Gracefully try to shut down the GMU and by extension the GPU */
1125 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) in a6xx_gmu_shutdown() argument
1127 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_shutdown()
1132 * The GMU may still be in slumber unless the GPU started so check and in a6xx_gmu_shutdown()
1135 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_shutdown()
1138 int ret = a6xx_gmu_wait_for_idle(gmu); in a6xx_gmu_shutdown()
1140 /* If the GMU isn't responding assume it is hung */ in a6xx_gmu_shutdown()
1142 a6xx_gmu_force_off(gmu); in a6xx_gmu_shutdown()
1148 /* tell the GMU we want to slumber */ in a6xx_gmu_shutdown()
1149 ret = a6xx_gmu_notify_slumber(gmu); in a6xx_gmu_shutdown()
1151 a6xx_gmu_force_off(gmu); in a6xx_gmu_shutdown()
1155 ret = gmu_poll_timeout(gmu, in a6xx_gmu_shutdown()
1166 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1167 "Unable to slumber GMU: status = 0%x/0%x\n", in a6xx_gmu_shutdown()
1168 gmu_read(gmu, in a6xx_gmu_shutdown()
1170 gmu_read(gmu, in a6xx_gmu_shutdown()
1175 a6xx_hfi_stop(gmu); in a6xx_gmu_shutdown()
1178 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_shutdown()
1181 a6xx_rpmh_stop(gmu); in a6xx_gmu_shutdown()
1187 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop() local
1190 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1194 * Force the GMU off if we detected a hang, otherwise try to shut it in a6xx_gmu_stop()
1197 if (gmu->hung) in a6xx_gmu_stop()
1198 a6xx_gmu_force_off(gmu); in a6xx_gmu_stop()
1200 a6xx_gmu_shutdown(gmu); in a6xx_gmu_stop()
1206 * Make sure the GX domain is off before turning off the GMU (CX) in a6xx_gmu_stop()
1207 * domain. Usually the GMU does this but only if the shutdown sequence in a6xx_gmu_stop()
1210 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1211 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1213 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1215 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1220 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) in a6xx_gmu_memory_free() argument
1222 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); in a6xx_gmu_memory_free()
1223 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); in a6xx_gmu_memory_free()
1224 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1225 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1226 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); in a6xx_gmu_memory_free()
1227 msm_gem_kernel_put(gmu->log.obj, gmu->aspace); in a6xx_gmu_memory_free()
1229 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1230 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1233 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, in a6xx_gmu_memory_alloc() argument
1236 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_memory_alloc()
1244 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1259 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1274 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) in a6xx_gmu_memory_probe() argument
1278 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1284 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1285 if (IS_ERR(gmu->aspace)) in a6xx_gmu_memory_probe()
1286 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1385 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1390 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_votes_init() argument
1392 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_votes_init()
1398 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1399 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1402 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1403 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1422 "The GMU frequency table is being truncated\n")) in a6xx_gmu_build_freq_table()
1440 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) in a6xx_gmu_pwrlevels_probe() argument
1442 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_pwrlevels_probe()
1449 * The GMU handles its own frequency switching so build a list of in a6xx_gmu_pwrlevels_probe()
1452 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1454 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1458 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1459 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1462 * The GMU also handles GPU frequency switching so build a list in a6xx_gmu_pwrlevels_probe()
1465 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1466 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1468 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1470 /* Build the list of RPMh votes that we'll send to the GMU */ in a6xx_gmu_pwrlevels_probe()
1471 return a6xx_gmu_rpmh_votes_init(gmu); in a6xx_gmu_pwrlevels_probe()
1474 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) in a6xx_gmu_clocks_probe() argument
1476 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1481 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1483 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1484 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1486 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1487 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1513 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, in a6xx_gmu_get_irq() argument
1520 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); in a6xx_gmu_get_irq()
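
a6xx_gmu_get_irq() requests both interrupts up front, while the resume path only enables them later (lines 1061 and 1092), which implies a request-and-park pattern. The full function body is not visible among the matched lines, so the sketch below is an approximation with error reporting trimmed:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static int gmu_get_irq_sketch(struct a6xx_gmu *gmu,
				      struct platform_device *pdev,
				      const char *name, irq_handler_t handler)
	{
		int irq = platform_get_irq_byname(pdev, name);

		if (irq < 0)
			return irq;

		if (request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu))
			return -EINVAL;

		/* Parked until a6xx_gmu_resume() calls enable_irq() */
		disable_irq(irq);

		return irq;
	}
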
1535 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove() local
1536 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1538 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1539 if (!gmu->initialized) { in a6xx_gmu_remove()
1540 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1544 gmu->initialized = false; in a6xx_gmu_remove()
1546 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1548 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1551 * Since cxpd is a virt device, the devlink with gmu-dev will be removed in a6xx_gmu_remove()
1554 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1556 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1557 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1558 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1561 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_remove()
1562 qmp_put(gmu->qmp); in a6xx_gmu_remove()
1564 iounmap(gmu->mmio); in a6xx_gmu_remove()
1566 iounmap(gmu->rscc); in a6xx_gmu_remove()
1567 gmu->mmio = NULL; in a6xx_gmu_remove()
1568 gmu->rscc = NULL; in a6xx_gmu_remove()
1571 a6xx_gmu_memory_free(gmu); in a6xx_gmu_remove()
1573 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1574 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1578 put_device(gmu->dev); in a6xx_gmu_remove()
1584 struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb); in cxpd_notifier_cb() local
1587 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
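
cxpd_notifier_cb() completes pd_gate, presumably when the CX power domain reports GENPD_NOTIFY_OFF; the init_completion()/complete_all() pairs at lines 1628-1630 and 1796-1798 prime the same gate. A sketch of how a waiter could use this pair through the standard genpd notifier API (the driver's actual call site is not among the matched lines, and the timeout here is arbitrary):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/pm_domain.h>

	static void wait_for_cx_collapse_sketch(struct a6xx_gmu *gmu)
	{
		reinit_completion(&gmu->pd_gate);
		dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);

		/* ... drop the last reference on the CX domain here ... */

		if (!wait_for_completion_timeout(&gmu->pd_gate,
						 msecs_to_jiffies(50)))
			dev_err(gmu->dev, "CX power domain did not collapse\n");

		dev_pm_genpd_remove_notifier(gmu->cxpd);
	}
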
1595 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init() local
1601 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1603 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1605 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1608 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1610 /* Map the GMU registers */ in a6xx_gmu_wrapper_init()
1611 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1612 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1613 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1617 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1618 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1619 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1623 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1628 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1629 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1630 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1633 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1634 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1635 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1639 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1644 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1647 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1650 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1658 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init() local
1666 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1668 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1671 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1673 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1676 ret = a6xx_gmu_clocks_probe(gmu); in a6xx_gmu_init()
1680 ret = a6xx_gmu_memory_probe(gmu); in a6xx_gmu_init()
1685 /* A660 now requires handling "prealloc requests" in GMU firmware in a6xx_gmu_init()
1691 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
1694 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
1699 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
1702 /* Allocate memory for the GMU dummy page */ in a6xx_gmu_init()
1703 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
1711 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1722 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1727 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1733 gmu->legacy = true; in a6xx_gmu_init()
1735 /* Allocate memory for the GMU debug region */ in a6xx_gmu_init()
1736 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
1741 /* Allocate memory for the GMU log region */ in a6xx_gmu_init()
1742 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
1747 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
1751 /* Map the GMU registers */ in a6xx_gmu_init()
1752 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1753 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1754 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1760 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1761 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
1766 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1769 /* Get the HFI and GMU interrupts */ in a6xx_gmu_init()
1770 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1771 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1773 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
1778 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
1779 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
1780 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
1784 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); in a6xx_gmu_init()
1790 gmu->qmp = qmp_get(gmu->dev); in a6xx_gmu_init()
1791 if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) { in a6xx_gmu_init()
1792 ret = PTR_ERR(gmu->qmp); in a6xx_gmu_init()
1796 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
1797 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
1798 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
1801 * Get a link to the GX power domain to reset the GPU in case of GMU in a6xx_gmu_init()
1804 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
1806 /* Get the power levels for the GMU and GPU */ in a6xx_gmu_init()
1807 a6xx_gmu_pwrlevels_probe(gmu); in a6xx_gmu_init()
1810 a6xx_hfi_init(gmu); in a6xx_gmu_init()
1813 a6xx_gmu_rpmh_init(gmu); in a6xx_gmu_init()
1815 gmu->initialized = true; in a6xx_gmu_init()
1823 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
1826 iounmap(gmu->mmio); in a6xx_gmu_init()
1828 iounmap(gmu->rscc); in a6xx_gmu_init()
1829 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
1830 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
1833 a6xx_gmu_memory_free(gmu); in a6xx_gmu_init()
1836 put_device(gmu->dev); in a6xx_gmu_init()