Lines Matching +full:gpu +full:- +full:id
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
15 #include <linux/soc/qcom/llcc-qcom.h>
19 static inline bool _a6xx_check_idle(struct msm_gpu *gpu) in _a6xx_check_idle() argument
21 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in _a6xx_check_idle()
25 if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu)) in _a6xx_check_idle()
29 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) & in _a6xx_check_idle()
33 return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) & in _a6xx_check_idle()
37 static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_idle() argument
40 if (!adreno_idle(gpu, ring)) in a6xx_idle()
43 if (spin_until(_a6xx_check_idle(gpu))) { in a6xx_idle()
44 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", in a6xx_idle()
45 gpu->name, __builtin_return_address(0), in a6xx_idle()
46 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_idle()
47 gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS), in a6xx_idle()
48 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_idle()
49 gpu_read(gpu, REG_A6XX_CP_RB_WPTR)); in a6xx_idle()
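/*
 * For illustration only: spin_until() above is a driver-internal bounded
 * busy-wait on the idle condition.  A minimal stand-alone analogue, assuming
 * a hypothetical cond() callback and a simple iteration budget rather than
 * the driver's real timeout handling:
 */
#include <stdbool.h>

/* Return 0 once cond() is true, or -1 if the budget runs out first. */
static int spin_until_sketch(bool (*cond)(void *data), void *data,
			     unsigned long budget)
{
	while (budget--) {
		if (cond(data))
			return 0;
		/* a real implementation would relax/delay between polls */
	}
	return -1;
}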
56 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_shadow_rptr() argument
58 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in update_shadow_rptr()
62 if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { in update_shadow_rptr()
69 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_flush() argument
71 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_flush()
76 update_shadow_rptr(gpu, ring); in a6xx_flush()
78 spin_lock_irqsave(&ring->preempt_lock, flags); in a6xx_flush()
81 ring->cur = ring->next; in a6xx_flush()
88 if (a6xx_gpu->cur_ring == ring) in a6xx_flush()
89 gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); in a6xx_flush()
91 ring->restore_wptr = true; in a6xx_flush()
93 ring->restore_wptr = true; in a6xx_flush()
96 spin_unlock_irqrestore(&ring->preempt_lock, flags); in a6xx_flush()
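/*
 * Note on the locked section above: the new commands become visible by
 * advancing ring->cur to ring->next, but the WPTR register is only kicked
 * when this ring is the one currently active on the GPU (a6xx_gpu->cur_ring).
 * Otherwise restore_wptr is set so the write pointer can be applied when the
 * GPU later switches to this ring.
 */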
113 bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; in a6xx_set_pagetable()
114 struct msm_file_private *ctx = submit->queue->ctx; in a6xx_set_pagetable()
115 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_set_pagetable()
120 if (ctx->seqno == ring->cur_ctx_seqno) in a6xx_set_pagetable()
123 if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) in a6xx_set_pagetable()
126 if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) { in a6xx_set_pagetable()
132 OUT_RING(ring, submit->seqno - 1); in a6xx_set_pagetable()
165 OUT_RING(ring, ctx->seqno); in a6xx_set_pagetable()
172 if (adreno_is_a7xx(&a6xx_gpu->base)) { in a6xx_set_pagetable()
179 * lingering in that part of the GPU in a6xx_set_pagetable()
200 /* Re-enable protected mode: */ in a6xx_set_pagetable()
207 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a6xx_submit() argument
209 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a6xx_submit()
210 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_submit()
212 struct msm_ringbuffer *ring = submit->ring; in a6xx_submit()
222 * GPU registers so we need to add 0x1a800 to the register value on A630 in a6xx_submit()
236 for (i = 0; i < submit->nr_cmds; i++) { in a6xx_submit()
237 switch (submit->cmd[i].type) { in a6xx_submit()
241 if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) in a6xx_submit()
246 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a6xx_submit()
247 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a6xx_submit()
248 OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); in a6xx_submit()
254 * Periodically update shadow-wptr if needed, so that we in a6xx_submit()
261 update_shadow_rptr(gpu, ring); in a6xx_submit()
271 OUT_RING(ring, submit->seqno); in a6xx_submit()
282 OUT_RING(ring, submit->seqno); in a6xx_submit()
285 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); in a6xx_submit()
287 a6xx_flush(gpu, ring); in a6xx_submit()
305 a6xx_gpu->preempt_iova[ring->id])); in a6xx_emit_set_pseudo_reg()
307 a6xx_gpu->preempt_iova[ring->id])); in a6xx_emit_set_pseudo_reg()
320 preempt_postamble = a6xx_gpu->preempt_postamble_iova; in a6xx_emit_set_pseudo_reg()
326 a6xx_gpu->preempt_postamble_len) | in a6xx_emit_set_pseudo_reg()
330 static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a7xx_submit() argument
332 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a7xx_submit()
333 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_submit()
335 struct msm_ringbuffer *ring = submit->ring; in a7xx_submit()
351 if (gpu->nr_rings > 1) in a7xx_submit()
352 a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue); in a7xx_submit()
365 if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { in a7xx_submit()
371 for (i = 0; i < submit->nr_cmds; i++) { in a7xx_submit()
372 switch (submit->cmd[i].type) { in a7xx_submit()
376 if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) in a7xx_submit()
381 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a7xx_submit()
382 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a7xx_submit()
383 OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); in a7xx_submit()
389 * Periodically update shadow-wptr if needed, so that we in a7xx_submit()
396 update_shadow_rptr(gpu, ring); in a7xx_submit()
399 if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { in a7xx_submit()
411 OUT_RING(ring, submit->seqno); in a7xx_submit()
433 OUT_RING(ring, submit->seqno); in a7xx_submit()
447 OUT_RING(ring, submit->seqno); in a7xx_submit()
449 a6xx_gpu->last_seqno[ring->id] = submit->seqno; in a7xx_submit()
456 OUT_RING(ring, submit->seqno); in a7xx_submit()
465 if (gpu->nr_rings > 1) { in a7xx_submit()
476 /* Data value - not used if the address above is 0 */ in a7xx_submit()
484 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); in a7xx_submit()
486 a6xx_flush(gpu, ring); in a7xx_submit()
489 a6xx_preempt_trigger(gpu); in a7xx_submit()
492 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) in a6xx_set_hwcg() argument
494 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_hwcg()
496 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_set_hwcg()
502 if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu))) in a6xx_set_hwcg()
517 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, in a6xx_set_hwcg()
518 state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0); in a6xx_set_hwcg()
519 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, in a6xx_set_hwcg()
521 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, in a6xx_set_hwcg()
524 if (!adreno_gpu->info->a6xx->hwcg) { in a6xx_set_hwcg()
525 gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1); in a6xx_set_hwcg()
526 gpu_write(gpu, REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD, state ? 1 : 0); in a6xx_set_hwcg()
529 gpu_write(gpu, REG_A7XX_RBBM_CGC_P2S_TRIG_CMD, 1); in a6xx_set_hwcg()
531 if (gpu_poll_timeout(gpu, REG_A7XX_RBBM_CGC_P2S_STATUS, val, in a6xx_set_hwcg()
533 dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n"); in a6xx_set_hwcg()
537 gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 0); in a6xx_set_hwcg()
543 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL); in a6xx_set_hwcg()
545 /* Don't re-program the registers if they are already correct */ in a6xx_set_hwcg()
553 for (i = 0; (reg = &adreno_gpu->info->a6xx->hwcg[i], reg->offset); i++) in a6xx_set_hwcg()
554 gpu_write(gpu, reg->offset, state ? reg->value : 0); in a6xx_set_hwcg()
560 gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0); in a6xx_set_hwcg()
563 static void a6xx_set_cp_protect(struct msm_gpu *gpu) in a6xx_set_cp_protect() argument
565 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_cp_protect()
566 const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect; in a6xx_set_cp_protect()
574 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, in a6xx_set_cp_protect()
579 for (i = 0; i < protect->count - 1; i++) { in a6xx_set_cp_protect()
581 if (protect->regs[i]) in a6xx_set_cp_protect()
582 gpu_write(gpu, REG_A6XX_CP_PROTECT(i), protect->regs[i]); in a6xx_set_cp_protect()
585 gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]); in a6xx_set_cp_protect()
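/*
 * Note: the loop above programs all but the last table entry into
 * consecutive CP_PROTECT[n] slots (skipping zero entries), and the final
 * entry is then written into the last slot the hardware provides
 * (count_max - 1) rather than the next free one.
 */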
588 static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) in a6xx_calc_ubwc_config() argument
590 gpu->ubwc_config.rgb565_predicator = 0; in a6xx_calc_ubwc_config()
591 gpu->ubwc_config.uavflagprd_inv = 0; in a6xx_calc_ubwc_config()
592 gpu->ubwc_config.min_acc_len = 0; in a6xx_calc_ubwc_config()
593 gpu->ubwc_config.ubwc_swizzle = 0x6; in a6xx_calc_ubwc_config()
594 gpu->ubwc_config.macrotile_mode = 0; in a6xx_calc_ubwc_config()
595 gpu->ubwc_config.highest_bank_bit = 15; in a6xx_calc_ubwc_config()
597 if (adreno_is_a610(gpu)) { in a6xx_calc_ubwc_config()
598 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
599 gpu->ubwc_config.min_acc_len = 1; in a6xx_calc_ubwc_config()
600 gpu->ubwc_config.ubwc_swizzle = 0x7; in a6xx_calc_ubwc_config()
603 if (adreno_is_a618(gpu)) in a6xx_calc_ubwc_config()
604 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
606 if (adreno_is_a619(gpu)) in a6xx_calc_ubwc_config()
608 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
610 if (adreno_is_a619_holi(gpu)) in a6xx_calc_ubwc_config()
611 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
613 if (adreno_is_a621(gpu)) { in a6xx_calc_ubwc_config()
614 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
615 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
616 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
619 if (adreno_is_a623(gpu)) { in a6xx_calc_ubwc_config()
620 gpu->ubwc_config.highest_bank_bit = 16; in a6xx_calc_ubwc_config()
621 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
622 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
623 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
624 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
627 if (adreno_is_a640_family(gpu)) in a6xx_calc_ubwc_config()
628 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
630 if (adreno_is_a680(gpu)) in a6xx_calc_ubwc_config()
631 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
633 if (adreno_is_a650(gpu) || in a6xx_calc_ubwc_config()
634 adreno_is_a660(gpu) || in a6xx_calc_ubwc_config()
635 adreno_is_a690(gpu) || in a6xx_calc_ubwc_config()
636 adreno_is_a730(gpu) || in a6xx_calc_ubwc_config()
637 adreno_is_a740_family(gpu)) { in a6xx_calc_ubwc_config()
639 gpu->ubwc_config.highest_bank_bit = 16; in a6xx_calc_ubwc_config()
640 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
641 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
642 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
643 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
646 if (adreno_is_a663(gpu)) { in a6xx_calc_ubwc_config()
647 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
648 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
649 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
650 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
651 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
652 gpu->ubwc_config.ubwc_swizzle = 0x4; in a6xx_calc_ubwc_config()
655 if (adreno_is_7c3(gpu)) { in a6xx_calc_ubwc_config()
656 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
657 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
658 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
659 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
660 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
663 if (adreno_is_a702(gpu)) { in a6xx_calc_ubwc_config()
664 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
665 gpu->ubwc_config.min_acc_len = 1; in a6xx_calc_ubwc_config()
669 static void a6xx_set_ubwc_config(struct msm_gpu *gpu) in a6xx_set_ubwc_config() argument
671 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_ubwc_config()
677 BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); in a6xx_set_ubwc_config()
678 u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; in a6xx_set_ubwc_config()
681 u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1; in a6xx_set_ubwc_config()
682 u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2); in a6xx_set_ubwc_config()
684 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, in a6xx_set_ubwc_config()
686 adreno_gpu->ubwc_config.rgb565_predicator << 11 | in a6xx_set_ubwc_config()
687 hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 | in a6xx_set_ubwc_config()
688 adreno_gpu->ubwc_config.min_acc_len << 3 | in a6xx_set_ubwc_config()
691 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, in a6xx_set_ubwc_config()
693 adreno_gpu->ubwc_config.min_acc_len << 3 | in a6xx_set_ubwc_config()
696 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, in a6xx_set_ubwc_config()
698 adreno_gpu->ubwc_config.uavflagprd_inv << 4 | in a6xx_set_ubwc_config()
699 adreno_gpu->ubwc_config.min_acc_len << 3 | in a6xx_set_ubwc_config()
703 gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL, in a6xx_set_ubwc_config()
706 gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, in a6xx_set_ubwc_config()
707 adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21); in a6xx_set_ubwc_config()
709 gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL, in a6xx_set_ubwc_config()
710 adreno_gpu->ubwc_config.macrotile_mode); in a6xx_set_ubwc_config()
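/*
 * For illustration only: the highest_bank_bit setting is programmed as an
 * offset from bank bit 13 (hbb above), split into a high part that lands in
 * bit 10 of RB_NC_MODE_CNTL and a low part that lands in UCHE_MODE_CNTL
 * bits 22:21.  A stand-alone sketch of that split, assuming the usual
 * hbb_hi = hbb >> 2 / hbb_lo = hbb & 3 encoding (the exact lines are not
 * shown above):
 */
#include <assert.h>
#include <stdint.h>

static void encode_hbb(unsigned int highest_bank_bit,
		       uint32_t *hbb_hi, uint32_t *hbb_lo)
{
	uint32_t hbb;

	assert(highest_bank_bit >= 13);		/* mirrors the BUG_ON() above */
	hbb = highest_bank_bit - 13;		/* e.g. HBB 14 -> hbb 1 */
	*hbb_hi = hbb >> 2;			/* 0 for HBB 13..16 */
	*hbb_lo = hbb & 3;			/* e.g. HBB 14 -> 1, HBB 16 -> 3 */
}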
713 static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) in a7xx_patch_pwrup_reglist() argument
715 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_patch_pwrup_reglist()
718 void *ptr = a6xx_gpu->pwrup_reglist_ptr; in a7xx_patch_pwrup_reglist()
720 u32 *dest = (u32 *)&lock->regs[0]; in a7xx_patch_pwrup_reglist()
723 reglist = adreno_gpu->info->a6xx->pwrup_reglist; in a7xx_patch_pwrup_reglist()
725 lock->gpu_req = lock->cpu_req = lock->turn = 0; in a7xx_patch_pwrup_reglist()
726 lock->ifpc_list_len = 0; in a7xx_patch_pwrup_reglist()
727 lock->preemption_list_len = reglist->count; in a7xx_patch_pwrup_reglist()
731 * register value into the GPU buffer in a7xx_patch_pwrup_reglist()
733 for (i = 0; i < reglist->count; i++) { in a7xx_patch_pwrup_reglist()
734 *dest++ = reglist->regs[i]; in a7xx_patch_pwrup_reglist()
735 *dest++ = gpu_read(gpu, reglist->regs[i]); in a7xx_patch_pwrup_reglist()
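/*
 * Note: the buffer being filled here is a flat array of u32 pairs, one
 * { register offset, current register value } pair per pwrup_reglist entry,
 * written directly after the lock header fields initialised above.
 */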
740 * 1. Static IFPC-only registers in a7xx_patch_pwrup_reglist()
747 * registers being virtualized, CP needs to know the pipe id to program in a7xx_patch_pwrup_reglist()
753 lock->dynamic_list_len = 0; in a7xx_patch_pwrup_reglist()
756 static int a7xx_preempt_start(struct msm_gpu *gpu) in a7xx_preempt_start() argument
758 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_preempt_start()
760 struct msm_ringbuffer *ring = gpu->rb[0]; in a7xx_preempt_start()
762 if (gpu->nr_rings <= 1) in a7xx_preempt_start()
779 a6xx_flush(gpu, ring); in a7xx_preempt_start()
781 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a7xx_preempt_start()
784 static int a6xx_cp_init(struct msm_gpu *gpu) in a6xx_cp_init() argument
786 struct msm_ringbuffer *ring = gpu->rb[0]; in a6xx_cp_init()
809 a6xx_flush(gpu, ring); in a6xx_cp_init()
810 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a6xx_cp_init()
813 static int a7xx_cp_init(struct msm_gpu *gpu) in a7xx_cp_init() argument
815 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_cp_init()
817 struct msm_ringbuffer *ring = gpu->rb[0]; in a7xx_cp_init()
854 OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova)); in a7xx_cp_init()
856 OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova)); in a7xx_cp_init()
860 a6xx_flush(gpu, ring); in a7xx_cp_init()
861 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a7xx_cp_init()
871 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_ucode_check_version()
872 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_ucode_check_version() local
873 const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE]; in a6xx_ucode_check_version()
907 a6xx_gpu->has_whereami = true; in a6xx_ucode_check_version()
912 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
921 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
927 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
928 "unknown GPU, add it to a6xx_ucode_check_version()!!\n"); in a6xx_ucode_check_version()
935 static int a6xx_ucode_load(struct msm_gpu *gpu) in a6xx_ucode_load() argument
937 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_ucode_load()
940 if (!a6xx_gpu->sqe_bo) { in a6xx_ucode_load()
941 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu, in a6xx_ucode_load()
942 adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova); in a6xx_ucode_load()
944 if (IS_ERR(a6xx_gpu->sqe_bo)) { in a6xx_ucode_load()
945 int ret = PTR_ERR(a6xx_gpu->sqe_bo); in a6xx_ucode_load()
947 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_load()
948 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_load()
954 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); in a6xx_ucode_load()
955 if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { in a6xx_ucode_load()
956 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_ucode_load()
957 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_ucode_load()
959 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_load()
960 return -EPERM; in a6xx_ucode_load()
968 if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) && in a6xx_ucode_load()
969 !a6xx_gpu->shadow_bo) { in a6xx_ucode_load()
970 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in a6xx_ucode_load()
971 sizeof(u32) * gpu->nr_rings, in a6xx_ucode_load()
973 gpu->aspace, &a6xx_gpu->shadow_bo, in a6xx_ucode_load()
974 &a6xx_gpu->shadow_iova); in a6xx_ucode_load()
976 if (IS_ERR(a6xx_gpu->shadow)) in a6xx_ucode_load()
977 return PTR_ERR(a6xx_gpu->shadow); in a6xx_ucode_load()
979 msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); in a6xx_ucode_load()
982 a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, in a6xx_ucode_load()
984 gpu->aspace, &a6xx_gpu->pwrup_reglist_bo, in a6xx_ucode_load()
985 &a6xx_gpu->pwrup_reglist_iova); in a6xx_ucode_load()
987 if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr)) in a6xx_ucode_load()
988 return PTR_ERR(a6xx_gpu->pwrup_reglist_ptr); in a6xx_ucode_load()
990 msm_gem_object_set_name(a6xx_gpu->pwrup_reglist_bo, "pwrup_reglist"); in a6xx_ucode_load()
995 static int a6xx_zap_shader_init(struct msm_gpu *gpu) in a6xx_zap_shader_init() argument
1003 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); in a6xx_zap_shader_init()
1045 static int hw_init(struct msm_gpu *gpu) in hw_init() argument
1047 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in hw_init()
1049 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in hw_init()
1055 /* Make sure the GMU keeps the GPU on while we set it up */ in hw_init()
1056 ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1063 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); in hw_init()
1064 gpu_read(gpu, REG_A6XX_GBIF_HALT); in hw_init()
1066 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0); in hw_init()
1067 gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL); in hw_init()
1069 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); in hw_init()
1070 gpu_read(gpu, REG_A6XX_GBIF_HALT); in hw_init()
1072 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); in hw_init()
1073 gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT); in hw_init()
1076 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); in hw_init()
1082 	 * Disable the trusted memory range - we don't actually support secure in hw_init()
1086 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000); in hw_init()
1087 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); in hw_init()
1091 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); in hw_init()
1092 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); in hw_init()
1093 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); in hw_init()
1094 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); in hw_init()
1095 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); in hw_init()
1096 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); in hw_init()
1097 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); in hw_init()
1098 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); in hw_init()
1099 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); in hw_init()
1100 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); in hw_init()
1101 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); in hw_init()
1102 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); in hw_init()
1106 a6xx_set_hwcg(gpu, true); in hw_init()
1113 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); in hw_init()
1114 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); in hw_init()
1115 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); in hw_init()
1116 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); in hw_init()
1117 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, in hw_init()
1120 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); in hw_init()
1124 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); in hw_init()
1127 gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0); in hw_init()
1129 /* Make all blocks contribute to the GPU BUSY perf counter */ in hw_init()
1130 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); in hw_init()
1134 gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base); in hw_init()
1135 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base); in hw_init()
1137 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0); in hw_init()
1138 gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base); in hw_init()
1139 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base); in hw_init()
1147 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ in hw_init()
1148 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min); in hw_init()
1150 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX, in hw_init()
1151 gmem_range_min + adreno_gpu->info->gmem - 1); in hw_init()
1155 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23)); in hw_init()
1157 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); in hw_init()
1158 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); in hw_init()
1162 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); in hw_init()
1163 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1165 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060); in hw_init()
1166 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16); in hw_init()
1168 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); in hw_init()
1169 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1173 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); in hw_init()
1177 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48); in hw_init()
1178 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47); in hw_init()
1180 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 64); in hw_init()
1181 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 63); in hw_init()
1183 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); in hw_init()
1187 if (adreno_gpu->info->a6xx->prim_fifo_threshold) in hw_init()
1188 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, in hw_init()
1189 adreno_gpu->info->a6xx->prim_fifo_threshold); in hw_init()
1192 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); in hw_init()
1195 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1); in hw_init()
1199 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, in hw_init()
1204 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT); in hw_init()
1206 a6xx_set_ubwc_config(gpu); in hw_init()
1211 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff); in hw_init()
1213 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff); in hw_init()
1215 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff); in hw_init()
1217 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff); in hw_init()
1219 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff); in hw_init()
1221 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1); in hw_init()
1225 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); in hw_init()
1226 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, in hw_init()
1228 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, in hw_init()
1230 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, in hw_init()
1232 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, in hw_init()
1244 a6xx_set_cp_protect(gpu); in hw_init()
1248 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801); in hw_init()
1250 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); in hw_init()
1251 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); in hw_init()
1254 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(24)); in hw_init()
1258 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); in hw_init()
1259 /* Set dualQ + disable afull for A660 GPU */ in hw_init()
1261 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); in hw_init()
1263 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, in hw_init()
1271 if (gpu->hw_apriv) { in hw_init()
1273 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, in hw_init()
1275 gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL, in hw_init()
1277 gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL, in hw_init()
1280 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, in hw_init()
1286 gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(19), BIT(19)); in hw_init()
1289 gpu_write(gpu, REG_A6XX_TPL1_DBG_ECO_CNTL1, 0xc0700); in hw_init()
1291 /* Disable non-ubwc read reqs from passing write reqs */ in hw_init()
1292 gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(11), BIT(11)); in hw_init()
1296 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, in hw_init()
1299 ret = adreno_hw_init(gpu); in hw_init()
1303 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); in hw_init()
1306 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); in hw_init()
1312 if (adreno_gpu->base.hw_apriv) in hw_init()
1313 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); in hw_init()
1315 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, in hw_init()
1319 if (a6xx_gpu->shadow_bo) { in hw_init()
1320 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, in hw_init()
1321 shadowptr(a6xx_gpu, gpu->rb[0])); in hw_init()
1322 for (unsigned int i = 0; i < gpu->nr_rings; i++) in hw_init()
1323 a6xx_gpu->shadow[i] = 0; in hw_init()
1328 gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR, in hw_init()
1329 rbmemptr(gpu->rb[0], bv_rptr)); in hw_init()
1332 a6xx_preempt_hw_init(gpu); in hw_init()
1335 a6xx_gpu->cur_ring = gpu->rb[0]; in hw_init()
1337 for (i = 0; i < gpu->nr_rings; i++) in hw_init()
1338 gpu->rb[i]->cur_ctx_seqno = 0; in hw_init()
1341 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); in hw_init()
1343 if (adreno_is_a7xx(adreno_gpu) && !a6xx_gpu->pwrup_reglist_emitted) { in hw_init()
1344 a7xx_patch_pwrup_reglist(gpu); in hw_init()
1345 a6xx_gpu->pwrup_reglist_emitted = true; in hw_init()
1348 ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu); in hw_init()
1359 ret = a6xx_zap_shader_init(gpu); in hw_init()
1361 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in hw_init()
1362 OUT_RING(gpu->rb[0], 0x00000000); in hw_init()
1364 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1365 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1366 return -EINVAL; in hw_init()
1367 } else if (ret == -ENODEV) { in hw_init()
1374 dev_warn_once(gpu->dev->dev, in hw_init()
1375 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); in hw_init()
1376 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); in hw_init()
1386 /* Last step - yield the ringbuffer */ in hw_init()
1387 a7xx_preempt_start(gpu); in hw_init()
1390 * Tell the GMU that we are done touching the GPU and it can start power in hw_init()
1393 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1395 if (a6xx_gpu->gmu.legacy) { in hw_init()
1397 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); in hw_init()
1403 static int a6xx_hw_init(struct msm_gpu *gpu) in a6xx_hw_init() argument
1405 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_hw_init()
1409 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1410 ret = hw_init(gpu); in a6xx_hw_init()
1411 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1416 static void a6xx_dump(struct msm_gpu *gpu) in a6xx_dump() argument
1418 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", in a6xx_dump()
1419 gpu_read(gpu, REG_A6XX_RBBM_STATUS)); in a6xx_dump()
1420 adreno_dump(gpu); in a6xx_dump()
1423 static void a6xx_recover(struct msm_gpu *gpu) in a6xx_recover() argument
1425 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_recover()
1427 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_recover()
1430 adreno_dump_info(gpu); in a6xx_recover()
1433 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, in a6xx_recover()
1434 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); in a6xx_recover()
1437 a6xx_dump(gpu); in a6xx_recover()
1443 a6xx_gpu->hung = true; in a6xx_recover()
1446 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); in a6xx_recover()
1448 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1451 mutex_lock(&gpu->active_lock); in a6xx_recover()
1452 active_submits = gpu->active_submits; in a6xx_recover()
1458 gpu->active_submits = 0; in a6xx_recover()
1464 /* Reset the GPU to a clean state */ in a6xx_recover()
1465 a6xx_gpu_sw_reset(gpu, true); in a6xx_recover()
1466 a6xx_gpu_sw_reset(gpu, false); in a6xx_recover()
1469 reinit_completion(&gmu->pd_gate); in a6xx_recover()
1470 dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb); in a6xx_recover()
1471 dev_pm_genpd_synced_poweroff(gmu->cxpd); in a6xx_recover()
1475 pm_runtime_put(&gpu->pdev->dev); in a6xx_recover()
1478 pm_runtime_put_sync(&gpu->pdev->dev); in a6xx_recover()
1480 if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000))) in a6xx_recover()
1481 DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); in a6xx_recover()
1483 dev_pm_genpd_remove_notifier(gmu->cxpd); in a6xx_recover()
1485 pm_runtime_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1488 pm_runtime_get(&gpu->pdev->dev); in a6xx_recover()
1490 pm_runtime_get_sync(&gpu->pdev->dev); in a6xx_recover()
1492 gpu->active_submits = active_submits; in a6xx_recover()
1493 mutex_unlock(&gpu->active_lock); in a6xx_recover()
1495 msm_gpu_hw_init(gpu); in a6xx_recover()
1496 a6xx_gpu->hung = false; in a6xx_recover()
1499 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid) in a6xx_uche_fault_block() argument
1501 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_uche_fault_block()
1516 	 * The source of the data depends on the mid ID read from FSYNR1 in a6xx_uche_fault_block()
1517 * and the client ID read from the UCHE block in a6xx_uche_fault_block()
1519 val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF); in a6xx_uche_fault_block()
1531 * compute-only some of them do not exist and there are holes in a6xx_uche_fault_block()
1535 "-", "LPAC_SP", "-", "-", in a6xx_uche_fault_block()
1536 "LPAC_HLSQ", "-", "-", "LPAC_TP", in a6xx_uche_fault_block()
1587 /* For mid=2 the source is TP or VFD except when the client id is 0 */ in a6xx_uche_fault_block()
1596 static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id) in a6xx_fault_block() argument
1598 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_fault_block()
1600 if (id == 0) in a6xx_fault_block()
1602 else if (id == 4) in a6xx_fault_block()
1604 else if (id == 6) in a6xx_fault_block()
1606 else if (id == 7) in a6xx_fault_block()
1608 else if (id == 5 && adreno_is_a7xx(adreno_gpu)) in a6xx_fault_block()
1611 return a6xx_uche_fault_block(gpu, id); in a6xx_fault_block()
1616 struct msm_gpu *gpu = arg; in a6xx_fault_handler() local
1621 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), in a6xx_fault_handler()
1622 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), in a6xx_fault_handler()
1623 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), in a6xx_fault_handler()
1624 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)), in a6xx_fault_handler()
1628 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); in a6xx_fault_handler()
1630 return adreno_fault_handler(gpu, iova, flags, info, block, scratch); in a6xx_fault_handler()
1633 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) in a6xx_cp_hw_err_irq() argument
1635 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS); in a6xx_cp_hw_err_irq()
1640 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1); in a6xx_cp_hw_err_irq()
1641 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA); in a6xx_cp_hw_err_irq()
1642 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1648 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1652 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", in a6xx_cp_hw_err_irq()
1653 gpu_read(gpu, REG_A6XX_CP_HW_FAULT)); in a6xx_cp_hw_err_irq()
1656 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS); in a6xx_cp_hw_err_irq()
1658 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1664 if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu))) in a6xx_cp_hw_err_irq()
1665 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); in a6xx_cp_hw_err_irq()
1668 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); in a6xx_cp_hw_err_irq()
1671 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); in a6xx_cp_hw_err_irq()
1675 static void a6xx_fault_detect_irq(struct msm_gpu *gpu) in a6xx_fault_detect_irq() argument
1677 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_fault_detect_irq()
1679 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
1682 * If stalled on SMMU fault, we could trip the GPU's hang detection, in a6xx_fault_detect_irq()
1687 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) in a6xx_fault_detect_irq()
1691 * Force the GPU to stay on until after we finish in a6xx_fault_detect_irq()
1695 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); in a6xx_fault_detect_irq()
1697 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_fault_detect_irq()
1698 …"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", in a6xx_fault_detect_irq()
1699 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, in a6xx_fault_detect_irq()
1700 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_fault_detect_irq()
1701 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_fault_detect_irq()
1702 gpu_read(gpu, REG_A6XX_CP_RB_WPTR), in a6xx_fault_detect_irq()
1703 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), in a6xx_fault_detect_irq()
1704 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_fault_detect_irq()
1705 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), in a6xx_fault_detect_irq()
1706 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); in a6xx_fault_detect_irq()
1709 timer_delete(&gpu->hangcheck_timer); in a6xx_fault_detect_irq()
1711 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
1714 static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu) in a7xx_sw_fuse_violation_irq() argument
1718 status = gpu_read(gpu, REG_A7XX_RBBM_SW_FUSE_INT_STATUS); in a7xx_sw_fuse_violation_irq()
1719 gpu_write(gpu, REG_A7XX_RBBM_SW_FUSE_INT_MASK, 0); in a7xx_sw_fuse_violation_irq()
1721 dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status); in a7xx_sw_fuse_violation_irq()
1729 timer_delete(&gpu->hangcheck_timer); in a7xx_sw_fuse_violation_irq()
1731 kthread_queue_work(gpu->worker, &gpu->recover_work); in a7xx_sw_fuse_violation_irq()
1735 static irqreturn_t a6xx_irq(struct msm_gpu *gpu) in a6xx_irq() argument
1737 struct msm_drm_private *priv = gpu->dev->dev_private; in a6xx_irq()
1738 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); in a6xx_irq()
1740 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); in a6xx_irq()
1742 if (priv->disable_err_irq) in a6xx_irq()
1746 a6xx_fault_detect_irq(gpu); in a6xx_irq()
1749 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); in a6xx_irq()
1752 a6xx_cp_hw_err_irq(gpu); in a6xx_irq()
1755 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); in a6xx_irq()
1758 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); in a6xx_irq()
1761 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); in a6xx_irq()
1764 a7xx_sw_fuse_violation_irq(gpu); in a6xx_irq()
1767 msm_gpu_retire(gpu); in a6xx_irq()
1768 a6xx_preempt_trigger(gpu); in a6xx_irq()
1772 a6xx_preempt_irq(gpu); in a6xx_irq()
1779 llcc_slice_deactivate(a6xx_gpu->llc_slice); in a6xx_llc_deactivate()
1780 llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); in a6xx_llc_deactivate()
1785 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_llc_activate()
1786 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_llc_activate() local
1789 if (IS_ERR(a6xx_gpu->llc_mmio)) in a6xx_llc_activate()
1792 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { in a6xx_llc_activate()
1793 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); in a6xx_llc_activate()
1803 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | in a6xx_llc_activate()
1811 if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { in a6xx_llc_activate()
1812 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
1813 u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); in a6xx_llc_activate()
1824 * Program the slice IDs for the various GPU blocks and GPU MMU in a6xx_llc_activate()
1827 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
1840 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); in a6xx_llc_activate()
1845 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a7xx_llc_activate()
1846 struct msm_gpu *gpu = &adreno_gpu->base; in a7xx_llc_activate() local
1848 if (IS_ERR(a6xx_gpu->llc_mmio)) in a7xx_llc_activate()
1851 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { in a7xx_llc_activate()
1852 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); in a7xx_llc_activate()
1856 gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, in a7xx_llc_activate()
1864 gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, in a7xx_llc_activate()
1869 llcc_slice_activate(a6xx_gpu->htw_llc_slice); in a7xx_llc_activate()
1874 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ in a6xx_llc_slices_destroy()
1875 if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) in a6xx_llc_slices_destroy()
1878 llcc_slice_putd(a6xx_gpu->llc_slice); in a6xx_llc_slices_destroy()
1879 llcc_slice_putd(a6xx_gpu->htw_llc_slice); in a6xx_llc_slices_destroy()
1887 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ in a6xx_llc_slices_init()
1888 if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) in a6xx_llc_slices_init()
1895 phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); in a6xx_llc_slices_init()
1896 a6xx_gpu->have_mmu500 = (phandle && in a6xx_llc_slices_init()
1897 of_device_is_compatible(phandle, "arm,mmu-500")); in a6xx_llc_slices_init()
1900 if (is_a7xx || !a6xx_gpu->have_mmu500) in a6xx_llc_slices_init()
1901 a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem"); in a6xx_llc_slices_init()
1903 a6xx_gpu->llc_mmio = NULL; in a6xx_llc_slices_init()
1905 a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); in a6xx_llc_slices_init()
1906 a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); in a6xx_llc_slices_init()
1908 if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) in a6xx_llc_slices_init()
1909 a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); in a6xx_llc_slices_init()
1914 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a7xx_cx_mem_init()
1915 struct msm_gpu *gpu = &adreno_gpu->base; in a7xx_cx_mem_init() local
1929 dev_warn_once(gpu->dev->dev, in a7xx_cx_mem_init()
1935 adreno_gpu->has_ray_tracing = true; in a7xx_cx_mem_init()
1951 adreno_gpu->has_ray_tracing = in a7xx_cx_mem_init()
1955 adreno_gpu->has_ray_tracing = true; in a7xx_cx_mem_init()
1970 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_bus_clear_pending_transactions() local
1973 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST); in a6xx_bus_clear_pending_transactions()
1974 spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) & in a6xx_bus_clear_pending_transactions()
1977 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK); in a6xx_bus_clear_pending_transactions()
1978 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & in a6xx_bus_clear_pending_transactions()
1980 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); in a6xx_bus_clear_pending_transactions()
1987 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); in a6xx_bus_clear_pending_transactions()
1988 spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); in a6xx_bus_clear_pending_transactions()
1992 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); in a6xx_bus_clear_pending_transactions()
1993 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & in a6xx_bus_clear_pending_transactions()
1997 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); in a6xx_bus_clear_pending_transactions()
1998 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & in a6xx_bus_clear_pending_transactions()
2002 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); in a6xx_bus_clear_pending_transactions()
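/*
 * Note: both the VBIF and GBIF paths above follow the same drain pattern:
 * assert a halt request, spin until the corresponding ACK bits are set
 * (clients first, then the arbiter on GBIF), and finally de-assert the
 * halt request.
 */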
2005 void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert) in a6xx_gpu_sw_reset() argument
2008 if (adreno_is_a610(to_adreno_gpu(gpu))) in a6xx_gpu_sw_reset()
2011 gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert); in a6xx_gpu_sw_reset()
2013 gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD); in a6xx_gpu_sw_reset()
2021 static int a6xx_gmu_pm_resume(struct msm_gpu *gpu) in a6xx_gmu_pm_resume() argument
2023 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_pm_resume()
2027 gpu->needs_hw_init = true; in a6xx_gmu_pm_resume()
2031 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_resume()
2033 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_resume()
2037 msm_devfreq_resume(gpu); in a6xx_gmu_pm_resume()
2044 static int a6xx_pm_resume(struct msm_gpu *gpu) in a6xx_pm_resume() argument
2046 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_resume()
2048 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_pm_resume()
2049 unsigned long freq = gpu->fast_rate; in a6xx_pm_resume()
2053 gpu->needs_hw_init = true; in a6xx_pm_resume()
2057 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
2059 opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq); in a6xx_pm_resume()
2067 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_pm_resume()
2069 pm_runtime_resume_and_get(gmu->dev); in a6xx_pm_resume()
2070 pm_runtime_resume_and_get(gmu->gxpd); in a6xx_pm_resume()
2072 ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_resume()
2079 /* If anything goes south, tear the GPU down piece by piece.. */ in a6xx_pm_resume()
2082 pm_runtime_put(gmu->gxpd); in a6xx_pm_resume()
2083 pm_runtime_put(gmu->dev); in a6xx_pm_resume()
2084 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_resume()
2087 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
2090 msm_devfreq_resume(gpu); in a6xx_pm_resume()
2095 static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu) in a6xx_gmu_pm_suspend() argument
2097 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_pm_suspend()
2105 msm_devfreq_suspend(gpu); in a6xx_gmu_pm_suspend()
2107 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_suspend()
2109 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_suspend()
2113 if (a6xx_gpu->shadow_bo) in a6xx_gmu_pm_suspend()
2114 for (i = 0; i < gpu->nr_rings; i++) in a6xx_gmu_pm_suspend()
2115 a6xx_gpu->shadow[i] = 0; in a6xx_gmu_pm_suspend()
2117 gpu->suspend_count++; in a6xx_gmu_pm_suspend()
2122 static int a6xx_pm_suspend(struct msm_gpu *gpu) in a6xx_pm_suspend() argument
2124 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_suspend()
2126 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_pm_suspend()
2131 msm_devfreq_suspend(gpu); in a6xx_pm_suspend()
2133 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
2141 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_suspend()
2143 pm_runtime_put_sync(gmu->gxpd); in a6xx_pm_suspend()
2144 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_suspend()
2145 pm_runtime_put_sync(gmu->dev); in a6xx_pm_suspend()
2147 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
2149 if (a6xx_gpu->shadow_bo) in a6xx_pm_suspend()
2150 for (i = 0; i < gpu->nr_rings; i++) in a6xx_pm_suspend()
2151 a6xx_gpu->shadow[i] = 0; in a6xx_pm_suspend()
2153 gpu->suspend_count++; in a6xx_pm_suspend()
2158 static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_gmu_get_timestamp() argument
2160 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_get_timestamp()
2163 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_get_timestamp()
2165 /* Force the GPU power on so we can read this register */ in a6xx_gmu_get_timestamp()
2166 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_gmu_get_timestamp()
2168 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); in a6xx_gmu_get_timestamp()
2170 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_gmu_get_timestamp()
2172 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_get_timestamp()
2177 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_get_timestamp() argument
2179 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); in a6xx_get_timestamp()
2183 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) in a6xx_active_ring() argument
2185 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_active_ring()
2188 return a6xx_gpu->cur_ring; in a6xx_active_ring()
2191 static void a6xx_destroy(struct msm_gpu *gpu) in a6xx_destroy() argument
2193 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_destroy()
2196 if (a6xx_gpu->sqe_bo) { in a6xx_destroy()
2197 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_destroy()
2198 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_destroy()
2201 if (a6xx_gpu->shadow_bo) { in a6xx_destroy()
2202 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); in a6xx_destroy()
2203 drm_gem_object_put(a6xx_gpu->shadow_bo); in a6xx_destroy()
2215 static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) in a6xx_gpu_busy() argument
2217 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_busy()
2224 busy_cycles = gmu_read64(&a6xx_gpu->gmu, in a6xx_gpu_busy()
2231 static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, in a6xx_gpu_set_freq() argument
2234 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_set_freq()
2237 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
2238 a6xx_gmu_set_freq(gpu, opp, suspended); in a6xx_gpu_set_freq()
2239 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
2243 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) in a6xx_create_address_space() argument
2245 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_create_address_space()
2250 	 * This allows the GPU to set the bus attributes required to use system in a6xx_create_address_space()
2253 if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) && in a6xx_create_address_space()
2254 !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) in a6xx_create_address_space()
2257 return adreno_iommu_create_address_space(gpu, pdev, quirks); in a6xx_create_address_space()
2261 a6xx_create_private_address_space(struct msm_gpu *gpu) in a6xx_create_private_address_space() argument
2265 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); in a6xx_create_private_address_space()
2271 "gpu", 0x100000000ULL, in a6xx_create_private_address_space()
2272 adreno_private_address_space_size(gpu)); in a6xx_create_private_address_space()
2275 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_get_rptr() argument
2277 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_get_rptr()
2280 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) in a6xx_get_rptr()
2281 return a6xx_gpu->shadow[ring->id]; in a6xx_get_rptr()
2283 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); in a6xx_get_rptr()
2286 static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_progress() argument
2289 .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), in a6xx_progress()
2290 .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), in a6xx_progress()
2291 .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_progress()
2292 .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE), in a6xx_progress()
2309 cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16; in a6xx_progress()
2310 cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16; in a6xx_progress()
2312 progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)); in a6xx_progress()
2314 ring->last_cp_state = cp_state; in a6xx_progress()
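/*
 * Note: "progress" is inferred purely from change here: the sampled CP
 * state (IB1/IB2 base and remaining size, with the in-flight ROQ dwords
 * folded in above) is memcmp()'d against the snapshot from the previous
 * hangcheck, and any difference counts as forward progress.
 */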
2321 if (!info->speedbins) in fuse_to_supp_hw()
2324 for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++) in fuse_to_supp_hw()
2325 if (info->speedbins[i].fuse == fuse) in fuse_to_supp_hw()
2326 return BIT(info->speedbins[i].speedbin); in fuse_to_supp_hw()
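/*
 * For illustration only: a stand-alone sketch of the fuse -> supported-hw
 * lookup above, using a made-up table (the real tables come from the
 * per-GPU adreno_info) and SHRT_MAX as the end-of-table sentinel:
 */
#include <limits.h>
#include <stdint.h>

struct speedbin_sketch {
	uint16_t fuse;
	uint16_t speedbin;
};

static uint32_t fuse_to_mask_sketch(const struct speedbin_sketch *tbl,
				    uint32_t fuse)
{
	for (int i = 0; tbl[i].fuse != SHRT_MAX; i++)
		if (tbl[i].fuse == fuse)
			return 1u << tbl[i].speedbin;

	/* no match: the caller warns about the missing speed-bin (see below) */
	return UINT32_MAX;
}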
2339 * -ENOENT means that the platform doesn't support speedbin which is in a6xx_set_supported_hw()
2342 if (ret == -ENOENT) { in a6xx_set_supported_hw()
2346 "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); in a6xx_set_supported_hw()
2354 "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n", in a6xx_set_supported_hw()
2459 struct msm_drm_private *priv = dev->dev_private; in a6xx_gpu_init()
2460 struct platform_device *pdev = priv->gpu_pdev; in a6xx_gpu_init()
2461 struct adreno_platform_config *config = pdev->dev.platform_data; in a6xx_gpu_init()
2465 struct msm_gpu *gpu; in a6xx_gpu_init() local
2472 return ERR_PTR(-ENOMEM); in a6xx_gpu_init()
2474 adreno_gpu = &a6xx_gpu->base; in a6xx_gpu_init()
2475 gpu = &adreno_gpu->base; in a6xx_gpu_init()
2477 mutex_init(&a6xx_gpu->gmu.lock); in a6xx_gpu_init()
2479 adreno_gpu->registers = NULL; in a6xx_gpu_init()
2482 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); in a6xx_gpu_init()
2486 adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); in a6xx_gpu_init()
2488 adreno_gpu->base.hw_apriv = in a6xx_gpu_init()
2489 !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); in a6xx_gpu_init()
2491 /* gpu->info only gets assigned in adreno_gpu_init() */ in a6xx_gpu_init()
2492 is_a7xx = config->info->family == ADRENO_7XX_GEN1 || in a6xx_gpu_init()
2493 config->info->family == ADRENO_7XX_GEN2 || in a6xx_gpu_init()
2494 config->info->family == ADRENO_7XX_GEN3; in a6xx_gpu_init()
2498 ret = a6xx_set_supported_hw(&pdev->dev, config->info); in a6xx_gpu_init()
2505 if ((enable_preemption == 1) || (enable_preemption == -1 && in a6xx_gpu_init()
2506 (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) in a6xx_gpu_init()
2515 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2524 priv->gpu_clamp_to_idle = true; in a6xx_gpu_init()
2532 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2539 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2544 adreno_gpu->uche_trap_base = 0x1fffffffff000ull; in a6xx_gpu_init()
2546 if (gpu->aspace) in a6xx_gpu_init()
2547 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, in a6xx_gpu_init()
2552 a6xx_preempt_init(gpu); in a6xx_gpu_init()
2554 return gpu; in a6xx_gpu_init()