Lines Matching +full:aoss +full:- +full:qmp

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
11 #include <soc/qcom/cmd-db.h>
23 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fault()
24 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_fault()
27 gmu->hung = true; in a6xx_gmu_fault()
30 del_timer(&gpu->hangcheck_timer); in a6xx_gmu_fault()
33 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_gmu_fault()
45 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
51 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
54 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
69 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
82 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
98 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
113 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq()
120 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
123 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
124 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
127 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
128 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
130 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
141 if (!gmu->legacy) { in a6xx_gmu_set_freq()
143 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
164 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
166 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
173 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq()
175 return gmu->freq; in a6xx_gmu_get_freq()
181 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
184 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
190 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
207 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_start()
237 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
252 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
308 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
311 return -EINVAL; in a6xx_gmu_set_oob()
313 if (gmu->legacy) { in a6xx_gmu_set_oob()
320 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
321 "Invalid non-legacy GMU request %s\n", in a6xx_gmu_set_oob()
323 return -EINVAL; in a6xx_gmu_set_oob()
335 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
351 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
356 if (gmu->legacy) in a6xx_gmu_clear_oob()
370 if (!gmu->legacy) in a6xx_sptprac_enable()
379 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
392 if (!gmu->legacy) in a6xx_sptprac_disable()
404 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
417 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
435 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
438 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
453 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
454 ret = -ETIMEDOUT; in a6xx_gmu_notify_slumber()
476 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
484 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
503 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
519 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_init()
520 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
647 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
656 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_power_config()
669 switch (gmu->idle_level) { in a6xx_gmu_power_config()
705 if (!in_range(blk->addr, bo->iova, bo->size)) in fw_block_mem()
708 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); in fw_block_mem()
715 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_load()
716 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; in a6xx_gmu_fw_load()
726 if (gmu->legacy) { in a6xx_gmu_fw_load()
728 if (fw_image->size > 0x8000) { in a6xx_gmu_fw_load()
729 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
731 return -EINVAL; in a6xx_gmu_fw_load()
735 (u32*) fw_image->data, fw_image->size); in a6xx_gmu_fw_load()
740 for (blk = (const struct block_header *) fw_image->data; in a6xx_gmu_fw_load()
741 (const u8*) blk < fw_image->data + fw_image->size; in a6xx_gmu_fw_load()
742 blk = (const struct block_header *) &blk->data[blk->size >> 2]) { in a6xx_gmu_fw_load()
743 if (blk->size == 0) in a6xx_gmu_fw_load()
746 if (in_range(blk->addr, itcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
747 reg_offset = (blk->addr - itcm_base) >> 2; in a6xx_gmu_fw_load()
750 blk->data, blk->size); in a6xx_gmu_fw_load()
751 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
752 reg_offset = (blk->addr - dtcm_base) >> 2; in a6xx_gmu_fw_load()
755 blk->data, blk->size); in a6xx_gmu_fw_load()
756 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
757 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
758 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
759 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
761 blk->addr, blk->size, blk->data[0]); in a6xx_gmu_fw_load()
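The copy loop in a6xx_gmu_fw_load() above walks the GMU firmware image as a chain of self-describing blocks. A minimal sketch of the layout the blk->addr / blk->size / blk->data accesses imply (field order inferred from these fragments, not taken from a verified header):

struct block_header {
	u32 addr;	/* GMU address the payload is destined for */
	u32 size;	/* payload length in bytes */
	u32 data[];	/* payload, copied out as 32-bit words */
};

The for-step at line 742, &blk->data[blk->size >> 2], advances past exactly size bytes of payload counted in 32-bit words, and the loop condition at line 741 stops the walk once it runs off the end of fw_image.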
771 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_start()
793 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], in a6xx_gmu_fw_start()
795 return -ENOENT; in a6xx_gmu_fw_start()
811 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
833 /* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */ in a6xx_gmu_fw_start()
839 * The min part has a 1-1 mapping for each GPU SKU. in a6xx_gmu_fw_start()
846 return -EINVAL; in a6xx_gmu_fw_start()
860 chipid = adreno_gpu->chip_id & 0xffff0000; in a6xx_gmu_fw_start()
861 chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */ in a6xx_gmu_fw_start()
862 chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */ in a6xx_gmu_fw_start()
868 (gmu->log.iova & GENMASK(31, 12)) | in a6xx_gmu_fw_start()
869 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); in a6xx_gmu_fw_start()
874 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
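Two encodings in a6xx_gmu_fw_start() are compact enough to misread, so the arithmetic is worth spelling out; the concrete values below are illustrative only, not taken from the file:

/*
 * chipid packing (lines 860-862): core/major stay in the top 16 bits,
 * minor and patchid are squeezed down to a nibble each.  For a
 * hypothetical chip_id of 0x06030001 (core 6, major 3, minor 0, patch 1):
 *
 *   0x06030001 & 0xffff0000      = 0x06030000
 *   (0x06030001 << 4) & 0xf000   = 0x00000000   minor
 *   (0x06030001 << 8) & 0x0f00   = 0x00000100   patchid
 *   chipid                       = 0x06030100
 *
 * log buffer descriptor (lines 868-869 and 874): the 4 KiB-aligned IOVA
 * fills the upper bits, the length in 4 KiB pages minus one fills the
 * low byte.  A hypothetical 16 KiB buffer at IOVA 0x60010000 encodes as
 * 0x60010000 | (4 - 1) = 0x60010003.
 */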
884 if (gmu->legacy) { in a6xx_gmu_fw_start()
891 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
917 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
918 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
927 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_off()
949 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_force_off()
950 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_force_off()
956 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
989 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
991 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_freq()
995 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
1003 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
1005 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_bw()
1009 dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); in a6xx_gmu_set_initial_bw()
1015 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_resume()
1016 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_resume()
1017 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume()
1020 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
1021 return -EINVAL; in a6xx_gmu_resume()
1023 gmu->hung = false; in a6xx_gmu_resume()
1025 /* Notify AOSS about the ACD state (unimplemented for now => disable it) */ in a6xx_gmu_resume()
1026 if (!IS_ERR(gmu->qmp)) { in a6xx_gmu_resume()
1027 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", in a6xx_gmu_resume()
1030 dev_err(gmu->dev, "failed to send GPU ACD state\n"); in a6xx_gmu_resume()
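These are the hits the aoss-qmp query is really after: the GMU holds a QMP mailbox handle to the Always-On Subsystem and sends it a key/value message to switch ACD on or off. A condensed sketch of that handle's lifecycle, pieced together from the qmp_get()/qmp_send()/qmp_put() hits in this listing (the include path is an assumption, it is not visible in these fragments):

#include <linux/soc/qcom/qcom_aoss.h>	/* assumed home of the qmp_* helpers */

/* Acquired once at init (qmp_get() hit near line 1790 further down): */
gmu->qmp = qmp_get(gmu->dev);		/* may be an ERR_PTR on failure */

/* At resume: tell AOSS whether ACD should be active (0 = disabled here): */
if (!IS_ERR(gmu->qmp))
	ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", 0);

/* Released at teardown (qmp_put() hit near line 1562 further down): */
qmp_put(gmu->qmp);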
1034 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
1041 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
1042 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
1045 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
1046 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? in a6xx_gmu_resume()
1048 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
1050 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1051 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1061 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1067 } else if (gmu->legacy) { in a6xx_gmu_resume()
1092 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1100 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1102 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1103 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1113 if (!gmu->initialized) in a6xx_gmu_isidle()
1128 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_shutdown()
1146 a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); in a6xx_gmu_shutdown()
1166 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1187 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop()
1188 struct msm_gpu *gpu = &a6xx_gpu->base.base; in a6xx_gmu_stop()
1190 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1197 if (gmu->hung) in a6xx_gmu_stop()
1203 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_gmu_stop()
1210 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1211 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1213 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1215 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1222 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); in a6xx_gmu_memory_free()
1223 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); in a6xx_gmu_memory_free()
1224 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1225 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1226 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); in a6xx_gmu_memory_free()
1227 msm_gem_kernel_put(gmu->log.obj, gmu->aspace); in a6xx_gmu_memory_free()
1229 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1230 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1237 struct drm_device *dev = a6xx_gpu->base.base.dev; in a6xx_gmu_memory_alloc()
1244 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1255 bo->obj = msm_gem_new(dev, size, flags); in a6xx_gmu_memory_alloc()
1256 if (IS_ERR(bo->obj)) in a6xx_gmu_memory_alloc()
1257 return PTR_ERR(bo->obj); in a6xx_gmu_memory_alloc()
1259 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1262 drm_gem_object_put(bo->obj); in a6xx_gmu_memory_alloc()
1266 bo->virt = msm_gem_get_vaddr(bo->obj); in a6xx_gmu_memory_alloc()
1267 bo->size = size; in a6xx_gmu_memory_alloc()
1269 msm_gem_object_set_name(bo->obj, name); in a6xx_gmu_memory_alloc()
1278 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1280 return -ENODEV; in a6xx_gmu_memory_probe()
1284 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1285 if (IS_ERR(gmu->aspace)) in a6xx_gmu_memory_probe()
1286 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1291 /* Return the 'arc-level' for the given frequency */
1328 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1336 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1359 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1393 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_votes_init()
1394 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_rpmh_votes_init()
1398 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1399 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1402 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1403 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1423 count = size - 1; in a6xx_gmu_build_freq_table()
1443 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_pwrlevels_probe()
1444 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_pwrlevels_probe()
1452 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1454 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1458 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1459 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1465 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1466 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1468 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1476 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1481 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1483 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1484 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1486 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1487 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1500 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); in a6xx_gmu_get_mmio()
1501 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1504 ret = ioremap(res->start, resource_size(res)); in a6xx_gmu_get_mmio()
1506 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); in a6xx_gmu_get_mmio()
1507 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1522 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", in a6xx_gmu_get_irq()
1534 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_remove()
1535 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove()
1536 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1538 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1539 if (!gmu->initialized) { in a6xx_gmu_remove()
1540 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1544 gmu->initialized = false; in a6xx_gmu_remove()
1546 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1548 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1551 * Since cxpd is a virt device, the devlink with gmu-dev will be removed in a6xx_gmu_remove()
1554 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1556 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1557 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1558 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1561 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_remove()
1562 qmp_put(gmu->qmp); in a6xx_gmu_remove()
1564 iounmap(gmu->mmio); in a6xx_gmu_remove()
1566 iounmap(gmu->rscc); in a6xx_gmu_remove()
1567 gmu->mmio = NULL; in a6xx_gmu_remove()
1568 gmu->rscc = NULL; in a6xx_gmu_remove()
1573 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1574 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1578 put_device(gmu->dev); in a6xx_gmu_remove()
1587 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
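cxpd_notifier_cb() and the pd_gate completion around it form a small gate that lets the driver wait for the CX power domain to really switch off. Only the complete_all() call shows up in these hits; a sketch of the kind of notifier body it implies (the GENPD_NOTIFY_OFF check is an assumption based on the genpd notifier API, it is not visible here):

static int cxpd_notifier_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);

	/* Assumed: only the domain-off transition opens the gate. */
	if (action == GENPD_NOTIFY_OFF)
		complete_all(&gmu->pd_gate);

	return 0;
}

The init paths (lines 1628-1629 and 1796-1797) pre-complete the gate, presumably so nothing blocks until a suspend path re-arms and waits on it.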
1595 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init()
1599 return -ENODEV; in a6xx_gmu_wrapper_init()
1601 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1603 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1605 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1608 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1611 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1612 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1613 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1617 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1618 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1619 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1623 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1624 ret = -ENODEV; in a6xx_gmu_wrapper_init()
1628 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1629 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1630 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1633 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1634 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1635 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1639 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1644 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1647 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1650 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1657 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_init()
1658 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init()
1664 return -ENODEV; in a6xx_gmu_init()
1666 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1668 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1671 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1673 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1691 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
1694 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
1699 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
1703 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
1711 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1712 SZ_16M - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
1716 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition in a6xx_gmu_init()
1722 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1723 SZ_256K - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
1727 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1728 SZ_256K - SZ_16K, 0x44000, "dcache"); in a6xx_gmu_init()
1733 gmu->legacy = true; in a6xx_gmu_init()
1736 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
1742 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
1747 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
1752 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1753 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1754 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1760 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1761 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
1762 ret = -ENODEV; in a6xx_gmu_init()
1766 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1770 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1771 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1773 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
1774 ret = -ENODEV; in a6xx_gmu_init()
1778 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
1779 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
1780 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
1784 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); in a6xx_gmu_init()
1786 ret = -ENODEV; in a6xx_gmu_init()
1790 gmu->qmp = qmp_get(gmu->dev); in a6xx_gmu_init()
1791 if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) { in a6xx_gmu_init()
1792 ret = PTR_ERR(gmu->qmp); in a6xx_gmu_init()
1796 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
1797 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
1798 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
1804 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
1815 gmu->initialized = true; in a6xx_gmu_init()
1823 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
1826 iounmap(gmu->mmio); in a6xx_gmu_init()
1828 iounmap(gmu->rscc); in a6xx_gmu_init()
1829 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
1830 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
1836 put_device(gmu->dev); in a6xx_gmu_init()