/linux-6.8/drivers/gpu/drm/msm/adreno/ |
D | a6xx_gmu.c |
    in a6xx_gmu_fault() [argument]:
      20  static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
      22      struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
      27      gmu->hung = true;
    in a6xx_gmu_irq() [local]:
      38      struct a6xx_gmu *gmu = data;
      41      status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
      42      gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
      45      dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
      47      a6xx_gmu_fault(gmu);
      51      dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
      54      dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
  [all …]
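The a6xx_gmu_irq() lines show a latch/ack/dispatch interrupt pattern: read the status register once, write that same value back to the clear register, then act on each asserted bit. Below is a minimal userspace model of that pattern; the register indices, status bits, and the write-1-to-clear register file are invented stand-ins, not the real A6xx definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for the two AO interrupt registers (word indices). */
    #define HOST_INTERRUPT_STATUS 0
    #define HOST_INTERRUPT_CLR    1

    /* Invented status bits, one per error the excerpt logs. */
    #define IRQ_WDOG_BITE (1u << 0)
    #define IRQ_AHB_ERROR (1u << 1)
    #define IRQ_FENCE_ERR (1u << 2)

    static uint32_t regs[2] = { IRQ_WDOG_BITE | IRQ_FENCE_ERR, 0 };

    static uint32_t gmu_read(unsigned int reg) { return regs[reg]; }

    /* Model write-1-to-clear: acking drops those bits from STATUS. */
    static void gmu_write(unsigned int reg, uint32_t val)
    {
        if (reg == HOST_INTERRUPT_CLR)
            regs[HOST_INTERRUPT_STATUS] &= ~val;
    }

    static void gmu_irq(void)
    {
        /* Latch once, ack everything we saw, then dispatch per bit. */
        uint32_t status = gmu_read(HOST_INTERRUPT_STATUS);

        gmu_write(HOST_INTERRUPT_CLR, status);

        if (status & IRQ_WDOG_BITE)
            printf("GMU watchdog expired\n");
        if (status & IRQ_AHB_ERROR)
            printf("GMU AHB bus error\n");
        if (status & IRQ_FENCE_ERR)
            printf("GMU fence error\n");
    }

    int main(void)
    {
        gmu_irq();
        return 0;
    }

Latching before acking means a source that re-asserts during the handler raises a fresh interrupt rather than being silently lost.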
|
D | a6xx_gmu.h |
      23  * These define the different GMU wake up options - these define how both the
      24  * CPU and the GMU bring up the hardware
      27  /* The GMU has already been booted and the retention registers are active */
      30  /* the GMU is coming up for the first time or back from a power collapse */
      34  * These define the level of control that the GMU has - the higher the number
      35  * the more things that the GMU hardware controls on its own.
      38  /* The GMU does not do any idle state management */
      41  /* The GMU manages SPTP power collapse */
      44  /* The GMU does automatic IFPC (intra-frame power collapse) */
      50  /* For serializing communication with the GMU: */
  [all …]
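The two comment blocks above describe, first, the boot paths (warm boot with retention state held vs. cold boot) and, second, an ordered set of idle-management levels. A small sketch restating them; the identifier names below are illustrative, not the constants a6xx_gmu.h actually defines.

    #include <stdio.h>

    /* How the CPU and GMU bring the hardware up (names are illustrative). */
    enum gmu_boot_mode {
        GMU_BOOT_WARM,  /* already booted; retention registers still hold state */
        GMU_BOOT_COLD,  /* first boot, or returning from a power collapse */
    };

    /* Idle-management levels; a higher value hands more control to the GMU. */
    enum gmu_idle_level {
        GMU_IDLE_NONE,  /* the GMU does no idle state management */
        GMU_IDLE_SPTP,  /* the GMU manages SPTP power collapse */
        GMU_IDLE_IFPC,  /* the GMU also does intra-frame power collapse */
    };

    int main(void)
    {
        enum gmu_idle_level level = GMU_IDLE_IFPC;

        printf("GMU manages %s\n",
               level >= GMU_IDLE_SPTP ? "SPTP collapse (at least)" : "nothing");
        return 0;
    }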
|
D | a6xx_hfi.c |
    in a6xx_hfi_queue_read() [argument]:
      28  static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
      44  * If we are to assume that the GMU firmware is in fact a rational actor
      59      if (!gmu->legacy)
    in a6xx_hfi_queue_write() [argument]:
      66  static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
      90      if (!gmu->legacy) {
      98      gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
    in a6xx_hfi_wait_for_ack() [argument]:
     102  static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
     105      struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
     110      ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
     114      DRM_DEV_ERROR(gmu->dev,
  [all …]
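a6xx_hfi_queue_write() places a message into a shared ring and then rings the HOST2GMU doorbell (line 98) so the firmware notices it; a6xx_hfi_wait_for_ack() then polls the GMU2HOST interrupt register for the response. A toy, single-threaded model of the write-then-doorbell half follows; the queue size, the one-slot-reserved space check, and all names are assumptions for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_DWORDS 64

    /* Toy HFI-style ring shared between host and firmware. */
    struct hfi_queue {
        uint32_t data[QUEUE_DWORDS];
        uint32_t read_index;   /* advanced by the firmware (not modeled) */
        uint32_t write_index;  /* advanced by the host */
    };

    static uint32_t doorbell; /* stands in for REG_A6XX_GMU_HOST2GMU_INTR_SET */

    static int hfi_queue_write(struct hfi_queue *q, const uint32_t *msg,
                               uint32_t dwords)
    {
        /* Unsigned wraparound gives free space, with one slot reserved. */
        uint32_t space = (q->read_index - q->write_index - 1) % QUEUE_DWORDS;

        if (dwords > space)
            return -1; /* queue full; the caller must retry later */

        for (uint32_t i = 0; i < dwords; i++) {
            q->data[q->write_index] = msg[i];
            q->write_index = (q->write_index + 1) % QUEUE_DWORDS;
        }

        doorbell = 1; /* tell the firmware there is work */
        return 0;
    }

    int main(void)
    {
        struct hfi_queue q = { .read_index = 0, .write_index = 0 };
        uint32_t msg[3] = { 0x10, 0x20, 0x30 };

        if (hfi_queue_write(&q, msg, 3) == 0)
            printf("queued 3 dwords, doorbell=%u\n", doorbell);
        return 0;
    }

Keeping one slot permanently unused is what lets the two sides distinguish a full ring from an empty one when only the read and write indices are shared.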
|
D | a6xx_gpu.c |
    in _a6xx_check_idle():
      23      /* Check that the GMU is idle */
      24      if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
    in a6xx_submit():
     201  * For PM4 the GMU register offsets are calculated from the base of the
    in a6xx_set_hwcg() [local]:
     959      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
     977      gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
     979      gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
     981      gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
     993      gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
    1000      gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
    in hw_init() [local]:
    1613      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
  [all …]
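The two a6xx_set_hwcg() calls at lines 993 and 1000 use gmu_rmw(gmu, reg, mask, or) to flip one bit in opposite directions. The helper is a standard read-modify-write: clear the bits in mask, then OR in or. A self-contained sketch of that semantic, operating on a plain variable rather than a device register:

    #include <stdint.h>
    #include <stdio.h>

    /* Read-modify-write mirroring the gmu_rmw(reg, mask, or) semantic:
     * clear the bits in `mask`, then set the bits in `or`. */
    static void rmw(uint32_t *reg, uint32_t mask, uint32_t or)
    {
        uint32_t val = *reg;

        val &= ~mask;
        *reg = val | or;
    }

    int main(void)
    {
        uint32_t clock_cntl = 0;

        rmw(&clock_cntl, 0, 1); /* set bit 0, as at line 1000 */
        printf("after set:   0x%x\n", clock_cntl);
        rmw(&clock_cntl, 1, 0); /* clear bit 0, as at line 993 */
        printf("after clear: 0x%x\n", clock_cntl);
        return 0;
    }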
|
D | a6xx_gpu.h |
      23  struct a6xx_gmu gmu;   [member]
      86  int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
      88  bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
      90  int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
      91  void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
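These declarations are the CPU-side API for talking to the GMU. The set_oob()/clear_oob() pair is an out-of-band handshake: the CPU requests, and later releases, a guarantee that the GPU stays powered while it touches GX-side registers. A stubbed model of the usage pattern; the state name, struct layout, and stub bodies are assumptions for illustration.

    #include <stdio.h>

    enum oob_state { OOB_GPU_SET }; /* illustrative; not the kernel's enum */

    struct gmu { int oob_votes; };

    static int gmu_set_oob(struct gmu *gmu, enum oob_state state)
    {
        /* The real call writes a request bit and polls for the GMU's ack. */
        (void)state;
        gmu->oob_votes++;
        return 0;
    }

    static void gmu_clear_oob(struct gmu *gmu, enum oob_state state)
    {
        (void)state;
        gmu->oob_votes--;
    }

    int main(void)
    {
        struct gmu gmu = { 0 };

        /* Bracket GX register access with an OOB vote so the GMU does not
         * power-collapse the GPU mid-access. */
        if (gmu_set_oob(&gmu, OOB_GPU_SET))
            return 1;
        printf("safe to touch GX registers (votes=%d)\n", gmu.oob_votes);
        gmu_clear_oob(&gmu, OOB_GPU_SET);
        return 0;
    }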
|
D | a6xx_gpu_state.c |
    in a6xx_crashdumper_run():
     144      if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
     775  /* Read a block of GMU registers */
    in _a6xx_get_gmu_registers() [local]:
     784      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
     804          val = gmu_read_rscc(gmu, offset);
     806          val = gmu_read(gmu, offset);
    in a6xx_get_gmu_registers():
     827      /* Get the CX GMU registers from AHB */
     833      if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
    in a6xx_snapshot_gmu_hfi_history() [local]:
     871      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
     874      BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
     876      for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
  [all …]
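Lines 804 and 806 show that some GMU registers are reached through a separate RSCC accessor while the rest are plain AHB reads. A toy model of dumping register ranges with a per-range accessor choice; the flag, the fake readers, and the range table are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake readers standing in for gmu_read() (AHB) and gmu_read_rscc();
     * real hardware access is out of scope here. */
    static uint32_t read_ahb(uint32_t offset)  { return 0xa000 + offset; }
    static uint32_t read_rscc(uint32_t offset) { return 0xb000 + offset; }

    struct reg_range {
        uint32_t start; /* first register offset */
        uint32_t count; /* number of registers */
        int rscc;       /* invented flag: use the RSCC accessor */
    };

    /* Snapshot every register in a range into `out`, one value per offset. */
    static void snapshot_range(const struct reg_range *r, uint32_t *out)
    {
        for (uint32_t i = 0; i < r->count; i++)
            out[i] = r->rscc ? read_rscc(r->start + i)
                             : read_ahb(r->start + i);
    }

    int main(void)
    {
        struct reg_range ranges[] = { { 0x10, 2, 0 }, { 0x80, 2, 1 } };
        uint32_t buf[2];

        for (unsigned int n = 0; n < 2; n++) {
            snapshot_range(&ranges[n], buf);
            printf("range %u: 0x%x 0x%x\n", n, buf[0], buf[1]);
        }
        return 0;
    }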
|
D | a6xx_hfi.h |
      49  /* This is the outgoing queue to the GMU */
      52  /* This is the incoming response queue from the GMU */
|
D | a6xx_gpu_state.h |
     362  /* GMU GX */
     371  /* GMU CX */
     381  /* GMU AO */
|
D | adreno_gpu.c |
    in adreno_load_fw():
     527      /* Skip loading GMU firmware with GMU Wrapper */
    in adreno_gpu_init():
    1083      /* Only handle the core clock when GMU is not in use (or is absent). */
|
/linux-6.8/Documentation/devicetree/bindings/display/msm/ |
D | gmu.yaml |
       6  $id: http://devicetree.org/schemas/display/msm/gmu.yaml#
       9  title: GMU attached to certain Adreno GPUs
      15    These bindings describe the Graphics Management Unit (GMU) that is attached
      16    to members of the Adreno A6xx GPU family. The GMU provides on-device power
      24        - pattern: '^qcom,adreno-gmu-[67][0-9][0-9]\.[0-9]$'
      25        - const: qcom,adreno-gmu
      26        - const: qcom,adreno-gmu-wrapper
      46      - description: GMU HFI interrupt
      47      - description: GMU interrupt
      52      - const: gmu
  [all …]
|
D | gpu.yaml |
     111    qcom,gmu:
     114      For GMU attached devices a phandle to the GMU device that will
     187      - const: gmu
     188        description: CX GMU clock
     208    then: # Starting with A6xx, the clocks are usually defined in the GMU node
     272    // Example a6xx (with GMU):
     309      qcom,gmu = <&gmu>;
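The qcom,gmu property (line 111) is a phandle from the GPU node to its GMU, as the example at line 309 shows. On the driver side this kind of phandle is resolved with the standard OF helper of_parse_phandle(); a sketch of the lookup, with a hypothetical probe function and the error path trimmed to the essentials:

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* Hypothetical probe fragment: resolve the GPU node's qcom,gmu phandle
     * to the GMU's device_node before setting it up. */
    static int example_gpu_probe(struct platform_device *pdev)
    {
        struct device_node *gmu_node;

        gmu_node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
        if (!gmu_node)
            return -ENODEV; /* DT describes neither a GMU nor a wrapper */

        /* ... map registers, request IRQs, boot the GMU from gmu_node ... */

        of_node_put(gmu_node); /* drop the reference taken by the lookup */
        return 0;
    }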
|
/linux-6.8/arch/arm64/boot/dts/qcom/ |
D | msm8992.dtsi |
      31      gmu-sram@0 {
|
D | sm6350.dtsi |
    1325      qcom,gmu = <&gmu>;
    1408      gmu: gmu@3d6a000 {   [label]
    1409          compatible = "qcom,adreno-gmu-619.0", "qcom,adreno-gmu";
    1413          reg-names = "gmu",
    1420              "gmu";
    1428              "gmu",
|
D | sm8350.dtsi |
    1849      qcom,gmu = <&gmu>;
    1913      gmu: gmu@3d6a000 {   [label]
    1914          compatible = "qcom,adreno-gmu-660.1", "qcom,adreno-gmu";
    1919          reg-names = "gmu", "rscc", "gmu_pdc";
    1923          interrupt-names = "hfi", "gmu";
    1932          clock-names = "gmu",
|
D | sc8180x.dtsi |
    2175      qcom,gmu = <&gmu>;
    2218      gmu: gmu@2c6a000 {   [label]
    2219          compatible = "qcom,adreno-gmu-680.1", "qcom,adreno-gmu";
    2224          reg-names = "gmu",
    2230          interrupt-names = "hfi", "gmu";
    2237          clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
|
D | sm6115.dtsi |
    1629      /* There's no (real) GMU, so we have to handle quite a bunch of clocks! */
    1640          "gmu",
    1648      qcom,gmu = <&gmu_wrapper>;
    1713      gmu_wrapper: gmu@596a000 {
    1714          compatible = "qcom,adreno-gmu-wrapper";
    1716          reg-names = "gmu";
|
D | sc7180.dtsi |
    2102      qcom,gmu = <&gmu>;
    2196      gmu: gmu@506a000 {   [label]
    2197          compatible = "qcom,adreno-gmu-618.0", "qcom,adreno-gmu";
    2200          reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
    2203          interrupt-names = "hfi", "gmu";
    2208          clock-names = "gmu", "cxo", "axi", "memnoc";
|
D | sm8450.dtsi |
    2040      qcom,gmu = <&gmu>;
    2113      gmu: gmu@3d6a000 {   [label]
    2114          compatible = "qcom,adreno-gmu-730.1", "qcom,adreno-gmu";
    2118          reg-names = "gmu", "rscc", "gmu_pdc";
    2122          interrupt-names = "hfi", "gmu";
    2132              "gmu",
    2214          clock-names = "gmu",
|
D | sm8150-mtp.dts |
     353  &gmu {
|
D | sm8150.dtsi |
    2197      qcom,gmu = <&gmu>;
    2249      gmu: gmu@2c6a000 {   [label]
    2250          compatible = "qcom,adreno-gmu-640.1", "qcom,adreno-gmu";
    2255          reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
    2259          interrupt-names = "hfi", "gmu";
    2266          clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
|
D | sdm845.dtsi |
    4753      * controlled entirely by the GMU
    4762      qcom,gmu = <&gmu>;
    4838      gmu: gmu@506a000 {   [label]
    4839          compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
    4844          reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
    4848          interrupt-names = "hfi", "gmu";
    4854          clock-names = "gmu", "cxo", "axi", "memnoc";
|
D | sm8250-hdk.dts |
     368  &gmu {
|
/linux-6.8/Documentation/devicetree/bindings/sram/ |
D | qcom,ocmem.yaml |
     120      gmu-sram@0 {
|
/linux-6.8/drivers/clk/qcom/ |
D | gdsc.c |
     540  * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
     545  * the GMU crashes it could leave the GX on. In order to successfully bring back
     554  * driver. During power up, nothing will happen from the CPU (and the GMU will
|
/linux-6.8/Documentation/devicetree/bindings/iommu/ |
D | arm,smmu.yaml |
     492      - const: gmu
     501      - description: GMU clock
|