Lines Matching +full:adreno-gmu-wrapper

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
14 #include <linux/soc/qcom/llcc-qcom.h>
23 /* Check that the GMU is idle */ in _a6xx_check_idle()
24 if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu)) in _a6xx_check_idle()
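The idle check above is skipped entirely on GMU-wrapper parts, i.e. GPUs that have no real GMU microcontroller behind the wrapper block. A minimal sketch of the gating helper, assuming it simply reads the gmu_is_wrapper flag set from the "qcom,adreno-gmu-wrapper" compatible at line 2876 below:

/* Sketch: wrapper detection reduces to the flag set in a6xx_gpu_init(). */
static inline bool adreno_has_gmu_wrapper(struct adreno_gpu *gpu)
{
	return gpu->gmu_is_wrapper;
}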
44 gpu->name, __builtin_return_address(0), in a6xx_idle()
61 if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { in update_shadow_rptr()
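update_shadow_rptr() only kicks in when the SQE firmware advertises CP_WHERE_AM_I and the hardware lacks APRIV; the CP is then asked to dump its read pointer into a per-ring shadow buffer so the CPU never needs a register read to learn rptr. A hedged sketch of the packet emission, reusing the shadowptr() helper that appears at line 1880:

/* Sketch: have the CP write its RPTR into the per-ring shadow slot. */
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));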
75 spin_lock_irqsave(&ring->preempt_lock, flags); in a6xx_flush()
78 ring->cur = ring->next; in a6xx_flush()
83 spin_unlock_irqrestore(&ring->preempt_lock, flags); in a6xx_flush()
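a6xx_flush() publishes the new write pointer under ring->preempt_lock: ring->next becomes ring->cur, the dword offset is computed from it, and once the lock is dropped the value is written to REG_A6XX_CP_RB_WPTR. A minimal sketch of the offset helper (the exact helper name is an assumption):

/* Sketch: ring write pointer, as a dword offset into the ring buffer. */
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}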
105 bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; in a6xx_set_pagetable()
106 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_set_pagetable()
111 if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno) in a6xx_set_pagetable()
114 if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) in a6xx_set_pagetable()
152 if (adreno_is_a7xx(&a6xx_gpu->base)) { in a6xx_set_pagetable()
180 /* Re-enable protected mode: */ in a6xx_set_pagetable()
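The context switch is driven from the ring itself: the TTBR0/ASID pair pulled out of the context's MMU at line 114 is handed to the CP, which reprograms the GPU's SMMU context bank in-band before protected mode is restored. A hedged sketch of that packet, assuming the CP_SMMU_TABLE_UPDATE field helpers from the generated adreno_pm4 headers:

/* Sketch: in-band SMMU TTBR0/ASID switch emitted into the ringbuffer. */
OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
	       CP_SMMU_TABLE_UPDATE_1_ASID(asid));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));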
189 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a6xx_submit()
192 struct msm_ringbuffer *ring = submit->ring; in a6xx_submit()
195 a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); in a6xx_submit()
201 * For PM4 the GMU register offsets are calculated from the base of the in a6xx_submit()
216 for (i = 0; i < submit->nr_cmds; i++) { in a6xx_submit()
217 switch (submit->cmd[i].type) { in a6xx_submit()
221 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a6xx_submit()
226 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a6xx_submit()
227 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a6xx_submit()
228 OUT_RING(ring, submit->cmd[i].size); in a6xx_submit()
234 * Periodically update shadow-wptr if needed, so that we in a6xx_submit()
251 OUT_RING(ring, submit->seqno); in a6xx_submit()
262 OUT_RING(ring, submit->seqno); in a6xx_submit()
272 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a7xx_submit()
275 struct msm_ringbuffer *ring = submit->ring; in a7xx_submit()
285 a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); in a7xx_submit()
302 for (i = 0; i < submit->nr_cmds; i++) { in a7xx_submit()
303 switch (submit->cmd[i].type) { in a7xx_submit()
307 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a7xx_submit()
312 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a7xx_submit()
313 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a7xx_submit()
314 OUT_RING(ring, submit->cmd[i].size); in a7xx_submit()
320 * Periodically update shadow-wptr if needed, so that we in a7xx_submit()
340 OUT_RING(ring, submit->seqno); in a7xx_submit()
362 OUT_RING(ring, submit->seqno); in a7xx_submit()
376 OUT_RING(ring, submit->seqno); in a7xx_submit()
383 OUT_RING(ring, submit->seqno); in a7xx_submit()
959 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_set_hwcg() local
964 if (!adreno_gpu->info->hwcg) in a6xx_set_hwcg()
977 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, in a6xx_set_hwcg()
979 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, in a6xx_set_hwcg()
981 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, in a6xx_set_hwcg()
987 /* Don't re-program the registers if they are already correct */ in a6xx_set_hwcg()
993 gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); in a6xx_set_hwcg()
995 for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++) in a6xx_set_hwcg()
996 gpu_write(gpu, reg->offset, state ? reg->value : 0); in a6xx_set_hwcg()
1000 gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1); in a6xx_set_hwcg()
1178 /* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
1264 for (i = 0; i < count - 1; i++) { in a6xx_set_cp_protect()
1270 gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); in a6xx_set_cp_protect()
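Each entry in the protect table packs a register base and a range length into a single 32-bit value, with the top bit selecting no-access rather than read-only; the loop at line 1264 programs all but the last slot, and line 1270 deliberately writes the final table entry into the last available protect register. A hedged sketch of the usual encoding macros (names and bit layout assumed from this driver's conventions):

/* Sketch: CP_PROTECT range encoding -- bit 31 additionally denies reads. */
#define A6XX_PROTECT_RDONLY(_reg, _len) \
	((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
#define A6XX_PROTECT_NORDWR(_reg, _len) \
	(BIT(31) | (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))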
1276 gpu->ubwc_config.rgb565_predicator = 0; in a6xx_calc_ubwc_config()
1278 gpu->ubwc_config.uavflagprd_inv = 0; in a6xx_calc_ubwc_config()
1280 gpu->ubwc_config.min_acc_len = 0; in a6xx_calc_ubwc_config()
1281 /* Entirely magic, per-GPU-gen value */ in a6xx_calc_ubwc_config()
1282 gpu->ubwc_config.ubwc_mode = 0; in a6xx_calc_ubwc_config()
1287 gpu->ubwc_config.highest_bank_bit = 15; in a6xx_calc_ubwc_config()
1290 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
1291 gpu->ubwc_config.min_acc_len = 1; in a6xx_calc_ubwc_config()
1292 gpu->ubwc_config.ubwc_mode = 1; in a6xx_calc_ubwc_config()
1300 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
1303 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
1311 gpu->ubwc_config.highest_bank_bit = 16; in a6xx_calc_ubwc_config()
1312 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
1313 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
1314 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
1318 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
1319 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
1320 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
1321 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
1333 BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); in a6xx_set_ubwc_config()
1334 u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; in a6xx_set_ubwc_config()
1339 adreno_gpu->ubwc_config.rgb565_predicator << 11 | in a6xx_set_ubwc_config()
1340 hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 | in a6xx_set_ubwc_config()
1341 adreno_gpu->ubwc_config.min_acc_len << 3 | in a6xx_set_ubwc_config()
1342 hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); in a6xx_set_ubwc_config()
1345 adreno_gpu->ubwc_config.min_acc_len << 3 | in a6xx_set_ubwc_config()
1346 hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); in a6xx_set_ubwc_config()
1349 adreno_gpu->ubwc_config.uavflagprd_inv << 4 | in a6xx_set_ubwc_config()
1350 adreno_gpu->ubwc_config.min_acc_len << 3 | in a6xx_set_ubwc_config()
1351 hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); in a6xx_set_ubwc_config()
1358 adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21); in a6xx_set_ubwc_config()
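Before the writes at lines 1339-1358, the per-GPU highest_bank_bit chosen in a6xx_calc_ubwc_config() is rebased to 13 and split into the high and low fields the mode registers expect; e.g. the a7xx value of 16 from line 1311 yields hbb = 3, hbb_hi = 0, hbb_lo = 3. A minimal sketch of the split that hbb_hi/hbb_lo above come from, starting from the hbb computed at line 1334:

/* Sketch: split the rebased highest-bank-bit for register packing.
 * highest_bank_bit = 16 -> hbb = 3 -> hbb_hi = 0, hbb_lo = 3 */
u32 hbb_hi = hbb >> 2;	/* lands at bit 10 of RB_NC_MODE_CNTL */
u32 hbb_lo = hbb & 3;	/* lands at bits 2:1 */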
1363 struct msm_ringbuffer *ring = gpu->rb[0]; in a6xx_cp_init()
1387 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a6xx_cp_init()
1392 struct msm_ringbuffer *ring = gpu->rb[0]; in a7xx_cp_init()
1436 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a7xx_cp_init()
1446 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_ucode_check_version()
1447 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_ucode_check_version()
1448 const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE]; in a6xx_ucode_check_version()
1482 a6xx_gpu->has_whereami = true; in a6xx_ucode_check_version()
1487 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
1496 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
1502 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
1515 if (!a6xx_gpu->sqe_bo) { in a6xx_ucode_load()
1516 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu, in a6xx_ucode_load()
1517 adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova); in a6xx_ucode_load()
1519 if (IS_ERR(a6xx_gpu->sqe_bo)) { in a6xx_ucode_load()
1520 int ret = PTR_ERR(a6xx_gpu->sqe_bo); in a6xx_ucode_load()
1522 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_load()
1523 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_load()
1529 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); in a6xx_ucode_load()
1530 if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { in a6xx_ucode_load()
1531 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_ucode_load()
1532 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_ucode_load()
1534 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_load()
1535 return -EPERM; in a6xx_ucode_load()
1543 if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) && in a6xx_ucode_load()
1544 !a6xx_gpu->shadow_bo) { in a6xx_ucode_load()
1545 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in a6xx_ucode_load()
1546 sizeof(u32) * gpu->nr_rings, in a6xx_ucode_load()
1548 gpu->aspace, &a6xx_gpu->shadow_bo, in a6xx_ucode_load()
1549 &a6xx_gpu->shadow_iova); in a6xx_ucode_load()
1551 if (IS_ERR(a6xx_gpu->shadow)) in a6xx_ucode_load()
1552 return PTR_ERR(a6xx_gpu->shadow); in a6xx_ucode_load()
1554 msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); in a6xx_ucode_load()
1613 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in hw_init() local
1618 /* Make sure the GMU keeps the GPU on while we set it up */ in hw_init()
1619 ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1644 a6xx_sptprac_enable(gmu); in hw_init()
1647 * Disable the trusted memory range - we don't actually support secure in hw_init()

1711 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ in hw_init()
1715 gmem_range_min + adreno_gpu->info->gmem - 1); in hw_init()
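The GMEM aperture is mapped into the UCHE's virtual space starting at the base held in gmem_range_min (1 MiB per the comment at line 1711) and sized by the catalog's gmem field. A hedged sketch of the pair of writes, with the register names assumed from the generated a6xx headers:

/* Sketch: program the UCHE GMEM aperture [base, base + gmem - 1]. */
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
	    gmem_range_min + adreno_gpu->info->gmem - 1);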
1772 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, in hw_init()
1809 /* Set up the CX GMU counter 0 to count busy ticks */ in hw_init()
1810 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); in hw_init()
1813 gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5)); in hw_init()
1814 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); in hw_init()
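The counter programmed here is what a6xx_gpu_busy() (line 2615) later reads back as a 64-bit busy-tick count for devfreq. A hedged sketch of the readback, with the XOCLK register names assumed:

/* Sketch: read the busy-tick counter set up above. */
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
			 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
			 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);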
1841 if (gpu->hw_apriv) { in hw_init()
1862 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); in hw_init()
1865 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); in hw_init()
1871 if (adreno_gpu->base.hw_apriv) in hw_init()
1878 if (a6xx_gpu->shadow_bo) { in hw_init()
1880 shadowptr(a6xx_gpu, gpu->rb[0])); in hw_init()
1886 rbmemptr(gpu->rb[0], bv_fence)); in hw_init()
1890 a6xx_gpu->cur_ring = gpu->rb[0]; in hw_init()
1892 gpu->cur_ctx_seqno = 0; in hw_init()
1910 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in hw_init()
1911 OUT_RING(gpu->rb[0], 0x00000000); in hw_init()
1913 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1914 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1915 return -EINVAL; in hw_init()
1916 } else if (ret == -ENODEV) { in hw_init()
1923 dev_warn_once(gpu->dev->dev, in hw_init()
1924 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); in hw_init()
1935 * Tell the GMU that we are done touching the GPU and it can start power in hw_init()
1938 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1940 if (a6xx_gpu->gmu.legacy) { in hw_init()
1941 /* Take the GMU out of its special boot mode */ in hw_init()
1942 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); in hw_init()
1954 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1956 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1963 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", in a6xx_dump()
1972 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_recover() local
1978 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, in a6xx_recover()
1988 a6xx_gpu->hung = true; in a6xx_recover()
1993 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1996 mutex_lock(&gpu->active_lock); in a6xx_recover()
1997 active_submits = gpu->active_submits; in a6xx_recover()
2003 gpu->active_submits = 0; in a6xx_recover()
2014 reinit_completion(&gmu->pd_gate); in a6xx_recover()
2015 dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb); in a6xx_recover()
2016 dev_pm_genpd_synced_poweroff(gmu->cxpd); in a6xx_recover()
2020 pm_runtime_put(&gpu->pdev->dev); in a6xx_recover()
2023 pm_runtime_put_sync(&gpu->pdev->dev); in a6xx_recover()
2025 if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000))) in a6xx_recover()
2026 DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); in a6xx_recover()
2028 dev_pm_genpd_remove_notifier(gmu->cxpd); in a6xx_recover()
2030 pm_runtime_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
2033 pm_runtime_get(&gpu->pdev->dev); in a6xx_recover()
2035 pm_runtime_get_sync(&gpu->pdev->dev); in a6xx_recover()
2037 gpu->active_submits = active_submits; in a6xx_recover()
2038 mutex_unlock(&gpu->active_lock); in a6xx_recover()
2041 a6xx_gpu->hung = false; in a6xx_recover()
2098 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); in a6xx_fault_handler()
2112 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
2118 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
2122 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", in a6xx_cp_hw_err_irq()
2128 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
2135 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); in a6xx_cp_hw_err_irq()
2138 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); in a6xx_cp_hw_err_irq()
2141 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); in a6xx_cp_hw_err_irq()
2149 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
2165 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); in a6xx_fault_detect_irq()
2167 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_fault_detect_irq()
2169 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, in a6xx_fault_detect_irq()
2179 del_timer(&gpu->hangcheck_timer); in a6xx_fault_detect_irq()
2181 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
2186 struct msm_drm_private *priv = gpu->dev->dev_private; in a6xx_irq()
2191 if (priv->disable_err_irq) in a6xx_irq()
2198 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); in a6xx_irq()
2204 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); in a6xx_irq()
2207 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); in a6xx_irq()
2210 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); in a6xx_irq()
2220 llcc_slice_deactivate(a6xx_gpu->llc_slice); in a6xx_llc_deactivate()
2221 llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); in a6xx_llc_deactivate()
2226 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_llc_activate()
2227 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_llc_activate()
2230 if (IS_ERR(a6xx_gpu->llc_mmio)) in a6xx_llc_activate()
2233 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { in a6xx_llc_activate()
2234 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); in a6xx_llc_activate()
2252 if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { in a6xx_llc_activate()
2253 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
2254 u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); in a6xx_llc_activate()
2268 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
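When the GPU LLC slice activates, its slice ID is replicated across several 5-bit SCID fields of one cache-control value before being written out (where that value lands depends on whether the SoC has an MMU-500, hence the checks above). A hedged sketch of the replication, treating the field layout as an assumption:

/* Sketch: replicate the 5-bit GPU SCID across the per-requester fields. */
u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice) & 0x1f;
u32 cntl1_regval = gpu_scid | (gpu_scid << 5) | (gpu_scid << 10) |
		   (gpu_scid << 15) | (gpu_scid << 20);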
2286 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a7xx_llc_activate()
2287 struct msm_gpu *gpu = &adreno_gpu->base; in a7xx_llc_activate()
2289 if (IS_ERR(a6xx_gpu->llc_mmio)) in a7xx_llc_activate()
2292 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { in a7xx_llc_activate()
2293 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); in a7xx_llc_activate()
2310 llcc_slice_activate(a6xx_gpu->htw_llc_slice); in a7xx_llc_activate()
2315 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ in a6xx_llc_slices_destroy()
2316 if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) in a6xx_llc_slices_destroy()
2319 llcc_slice_putd(a6xx_gpu->llc_slice); in a6xx_llc_slices_destroy()
2320 llcc_slice_putd(a6xx_gpu->htw_llc_slice); in a6xx_llc_slices_destroy()
2328 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ in a6xx_llc_slices_init()
2329 if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) in a6xx_llc_slices_init()
2336 phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); in a6xx_llc_slices_init()
2337 a6xx_gpu->have_mmu500 = (phandle && in a6xx_llc_slices_init()
2338 of_device_is_compatible(phandle, "arm,mmu-500")); in a6xx_llc_slices_init()
2341 if (is_a7xx || !a6xx_gpu->have_mmu500) in a6xx_llc_slices_init()
2342 a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem"); in a6xx_llc_slices_init()
2344 a6xx_gpu->llc_mmio = NULL; in a6xx_llc_slices_init()
2346 a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); in a6xx_llc_slices_init()
2347 a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); in a6xx_llc_slices_init()
2349 if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) in a6xx_llc_slices_init()
2350 a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); in a6xx_llc_slices_init()
2361 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_bus_clear_pending_transactions()
2418 gpu->needs_hw_init = true; in a6xx_gmu_pm_resume()
2422 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_resume()
2424 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_resume()
2439 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_pm_resume() local
2440 unsigned long freq = gpu->fast_rate; in a6xx_pm_resume()
2444 gpu->needs_hw_init = true; in a6xx_pm_resume()
2448 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
2450 opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq); in a6xx_pm_resume()
2458 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_pm_resume()
2460 pm_runtime_resume_and_get(gmu->dev); in a6xx_pm_resume()
2461 pm_runtime_resume_and_get(gmu->gxpd); in a6xx_pm_resume()
2463 ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_resume()
2468 a6xx_sptprac_enable(gmu); in a6xx_pm_resume()
2473 pm_runtime_put(gmu->gxpd); in a6xx_pm_resume()
2474 pm_runtime_put(gmu->dev); in a6xx_pm_resume()
2475 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_resume()
2478 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
2498 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_suspend()
2500 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_suspend()
2504 if (a6xx_gpu->shadow_bo) in a6xx_gmu_pm_suspend()
2505 for (i = 0; i < gpu->nr_rings; i++) in a6xx_gmu_pm_suspend()
2506 a6xx_gpu->shadow[i] = 0; in a6xx_gmu_pm_suspend()
2508 gpu->suspend_count++; in a6xx_gmu_pm_suspend()
2517 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_pm_suspend() local
2524 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
2530 a6xx_sptprac_disable(gmu); in a6xx_pm_suspend()
2532 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_suspend()
2534 pm_runtime_put_sync(gmu->gxpd); in a6xx_pm_suspend()
2535 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_suspend()
2536 pm_runtime_put_sync(gmu->dev); in a6xx_pm_suspend()
2538 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
2540 if (a6xx_gpu->shadow_bo) in a6xx_pm_suspend()
2541 for (i = 0; i < gpu->nr_rings; i++) in a6xx_pm_suspend()
2542 a6xx_gpu->shadow[i] = 0; in a6xx_pm_suspend()
2544 gpu->suspend_count++; in a6xx_pm_suspend()
2554 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_get_timestamp()
2557 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_gmu_get_timestamp()
2561 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_gmu_get_timestamp()
2563 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_get_timestamp()
2579 return a6xx_gpu->cur_ring; in a6xx_active_ring()
2587 if (a6xx_gpu->sqe_bo) { in a6xx_destroy()
2588 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_destroy()
2589 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_destroy()
2592 if (a6xx_gpu->shadow_bo) { in a6xx_destroy()
2593 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); in a6xx_destroy()
2594 drm_gem_object_put(a6xx_gpu->shadow_bo); in a6xx_destroy()
2615 busy_cycles = gmu_read64(&a6xx_gpu->gmu, in a6xx_gpu_busy()
2628 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
2630 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
2644 if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) && in a6xx_create_address_space()
2645 !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) in a6xx_create_address_space()
2656 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); in a6xx_create_private_address_space()
2671 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) in a6xx_get_rptr()
2672 return a6xx_gpu->shadow[ring->id]; in a6xx_get_rptr()
2674 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); in a6xx_get_rptr()
2703 progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)); in a6xx_progress()
2705 ring->last_cp_state = cp_state; in a6xx_progress()
2712 if (!info->speedbins) in fuse_to_supp_hw()
2715 for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++) in fuse_to_supp_hw()
2716 if (info->speedbins[i].fuse == fuse) in fuse_to_supp_hw()
2717 return BIT(info->speedbins[i].speedbin); in fuse_to_supp_hw()
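The per-SoC table walked here is terminated by a SHRT_MAX fuse value, and the return value is a one-hot mask identifying the matching speed bin. A hypothetical table illustrating the expected layout (the struct name and entries are illustrative only; the real tables live in the GPU catalog):

/* Sketch: hypothetical speedbin table with the SHRT_MAX terminator. */
static const struct adreno_speedbin example_speedbins[] = {
	{ .fuse = 0,        .speedbin = 0 },
	{ .fuse = 137,      .speedbin = 1 },
	{ .fuse = SHRT_MAX, .speedbin = 0 },	/* terminator */
};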
2730 * -ENOENT means that the platform doesn't support speedbin which is in a6xx_set_supported_hw()
2733 if (ret == -ENOENT) { in a6xx_set_supported_hw()
2737 "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); in a6xx_set_supported_hw()
2745 "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n", in a6xx_set_supported_hw()
2850 struct msm_drm_private *priv = dev->dev_private; in a6xx_gpu_init()
2851 struct platform_device *pdev = priv->gpu_pdev; in a6xx_gpu_init()
2852 struct adreno_platform_config *config = pdev->dev.platform_data; in a6xx_gpu_init()
2862 return ERR_PTR(-ENOMEM); in a6xx_gpu_init()
2864 adreno_gpu = &a6xx_gpu->base; in a6xx_gpu_init()
2865 gpu = &adreno_gpu->base; in a6xx_gpu_init()
2867 mutex_init(&a6xx_gpu->gmu.lock); in a6xx_gpu_init()
2869 adreno_gpu->registers = NULL; in a6xx_gpu_init()
2871 /* Check if there is a GMU phandle and set it up */ in a6xx_gpu_init()
2872 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); in a6xx_gpu_init()
2876 adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); in a6xx_gpu_init()
2878 adreno_gpu->base.hw_apriv = in a6xx_gpu_init()
2879 !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); in a6xx_gpu_init()
2881 /* gpu->info only gets assigned in adreno_gpu_init() */ in a6xx_gpu_init()
2882 is_a7xx = config->info->family == ADRENO_7XX_GEN1 || in a6xx_gpu_init()
2883 config->info->family == ADRENO_7XX_GEN2; in a6xx_gpu_init()
2887 ret = a6xx_set_supported_hw(&pdev->dev, config->info); in a6xx_gpu_init()
2889 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2900 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2909 priv->gpu_clamp_to_idle = true; in a6xx_gpu_init()
2917 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2921 if (gpu->aspace) in a6xx_gpu_init()
2922 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, in a6xx_gpu_init()