Lines Matching +full:no +full:- +full:read +full:- +full:rollover

1 // SPDX-License-Identifier: GPL-2.0-only
33 struct msm_ringbuffer *ring = submit->ring; in a3xx_submit()
36 for (i = 0; i < submit->nr_cmds; i++) { in a3xx_submit()
37 switch (submit->cmd[i].type) { in a3xx_submit()
39 /* ignore IB-targets */ in a3xx_submit()
43 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a3xx_submit()
48 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a3xx_submit()
49 OUT_RING(ring, submit->cmd[i].size); in a3xx_submit()
56 OUT_RING(ring, submit->seqno); in a3xx_submit()
73 OUT_RING(ring, submit->seqno); in a3xx_submit()
76 /* Dummy set-constant to trigger context rollover */ in a3xx_submit()
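The a3xx_submit() hits above show the command-stream walk: IB-target buffers are skipped, the context-restore buffer is also skipped when gpu->cur_ctx_seqno already matches the submitting queue's context, every other buffer is emitted as an indirect-buffer packet (lower 32 bits of the iova plus its size), and the submit's seqno is written at the end as a fence. Below is a stand-alone C sketch of that control flow; the types, mock_out_ring() and the seqno values are invented for illustration and are not the driver's.

#include <stdint.h>
#include <stdio.h>

enum cmd_type { CMD_IB_TARGET, CMD_CTX_RESTORE, CMD_BUF };

struct cmd { enum cmd_type type; uint64_t iova; uint32_t size; };

static void mock_out_ring(uint32_t dword)
{
	printf("  ring <- 0x%08x\n", (unsigned)dword);
}

static void mock_submit(const struct cmd *cmds, int nr,
                        uint32_t cur_ctx_seqno, uint32_t submit_ctx_seqno,
                        uint32_t fence_seqno)
{
	for (int i = 0; i < nr; i++) {
		switch (cmds[i].type) {
		case CMD_IB_TARGET:
			/* ignore IB-targets: they are reached through other buffers */
			break;
		case CMD_CTX_RESTORE:
			/* same context is still current, nothing to restore */
			if (cur_ctx_seqno == submit_ctx_seqno)
				break;
			/* fall through */
		case CMD_BUF:
			mock_out_ring((uint32_t)cmds[i].iova);  /* lower 32 bits of the iova */
			mock_out_ring(cmds[i].size);
			break;
		}
	}
	mock_out_ring(fence_seqno);  /* fence/seqno write, last thing on the ring */
}

int main(void)
{
	struct cmd cmds[] = { { CMD_CTX_RESTORE, 0x1000, 4 }, { CMD_BUF, 0x2000, 8 } };

	mock_submit(cmds, 2, 7, 7, 42);  /* matching ctx seqno: restore buffer is skipped */
	return 0;
}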
87 struct msm_ringbuffer *ring = gpu->rb[0]; in a3xx_me_init()
119 DBG("%s", gpu->name); in a3xx_hw_init()
122 /* Set up 16 deep read/write request queues: */ in a3xx_hw_init()
130 /* Enable WR-REQ: */ in a3xx_hw_init()
142 /* Set up 16 deep read/write request queues: */ in a3xx_hw_init()
150 /* Enable WR-REQ: */ in a3xx_hw_init()
170 /* Enable WR-REQ: */ in a3xx_hw_init()
177 /* Set up 16 deep read/write request queues: */ in a3xx_hw_init()
185 /* Enable WR-REQ: */ in a3xx_hw_init()
224 /* Turn on hang detection - this spews a lot of useful information in a3xx_hw_init()
229 /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */ in a3xx_hw_init()
248 if (a3xx_gpu->ocmem.hdl) { in a3xx_hw_init()
250 (unsigned int)(a3xx_gpu->ocmem.base >> 14)); in a3xx_hw_init()
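The OCMEM write above stores the base address shifted right by 14, i.e. the register field holds the base in 16 KiB units. A minimal, self-contained illustration of that kind of field packing follows; the register name, helper and base value are made up for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void fake_gpu_write(const char *reg, uint32_t val)
{
	printf("%s <- 0x%08x\n", reg, (unsigned)val);
}

int main(void)
{
	uint32_t ocmem_base = 0x00180000;            /* invented base, 16 KiB aligned */

	/* the field holds the base in 16 KiB (1 << 14) units, so the low bits must be clear */
	assert((ocmem_base & ((1u << 14) - 1)) == 0);
	fake_gpu_write("FAKE_OCMEM_BASE_REG", ocmem_base >> 14);
	return 0;
}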
257 for (i = 0; i < gpu->num_perfcntrs; i++) { in a3xx_hw_init()
258 const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i]; in a3xx_hw_init()
259 gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val); in a3xx_hw_init()
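The loop above is table-driven: gpu->perfcntrs[] pairs each select register with the event value to route into it, and hw_init simply replays the table. A self-contained sketch of the same pattern follows; the register offsets and event ids in the table are placeholders, not A3xx values.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct perfcntr { uint32_t select_reg; uint32_t select_val; };

static const struct perfcntr perfcntrs[] = {
	{ .select_reg = 0x0444, .select_val = 6 },   /* placeholder register/event pairs */
	{ .select_reg = 0x0445, .select_val = 7 },
};

static void fake_gpu_write(uint32_t reg, uint32_t val)
{
	printf("write reg 0x%04x = %u\n", (unsigned)reg, (unsigned)val);
}

int main(void)
{
	for (size_t i = 0; i < sizeof(perfcntrs) / sizeof(perfcntrs[0]); i++)
		fake_gpu_write(perfcntrs[i].select_reg, perfcntrs[i].select_val);
	return 0;
}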
276 gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); in a3xx_hw_init()
302 /* NOTE: PM4/micro-engine firmware registers look to be the same in a3xx_hw_init()
309 ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); in a3xx_hw_init()
310 len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; in a3xx_hw_init()
321 ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); in a3xx_hw_init()
322 len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; in a3xx_hw_init()
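Both microcode uploads above follow the same shape: treat the firmware blob as an array of 32-bit words, compute the length as size / 4, and write the words one at a time into a data register. A stand-alone model of that loop follows; the blob contents and the fake register write are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void fake_gpu_write(const char *reg, uint32_t val)
{
	printf("%s <- 0x%08x\n", reg, (unsigned)val);
}

static void upload_ucode(const void *data, size_t size_bytes)
{
	const uint32_t *ptr = data;
	size_t len = size_bytes / 4;                 /* firmware length in dwords */

	for (size_t i = 0; i < len; i++)
		fake_gpu_write("FAKE_UCODE_DATA", ptr[i]);
}

int main(void)
{
	static const uint32_t blob[] = { 0xdeadbeef, 0x00000001, 0x12345678 };

	upload_ucode(blob, sizeof(blob));
	return 0;
}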
329 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ in a3xx_hw_init()
348 return a3xx_me_init(gpu) ? 0 : -EINVAL; in a3xx_hw_init()
377 DBG("%s", gpu->name); in a3xx_destroy()
381 adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem); in a3xx_destroy()
389 if (!adreno_idle(gpu, gpu->rb[0])) in a3xx_idle()
395 DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name); in a3xx_idle()
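a3xx_idle() defers to the shared adreno_idle() helper for ring 0 and only logs the timeout. As a rough user-space sketch of that poll-until-idle-or-give-up shape (not the helper's actual implementation), with is_idle() standing in for a status-register read:

#include <stdbool.h>
#include <stdio.h>

static bool is_idle(void)
{
	static int reads;
	return ++reads > 3;              /* pretend the GPU drains after a few polls */
}

static bool wait_for_idle(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (is_idle())
			return true;
		/* the real helper sleeps between reads until a timeout expires */
	}
	return false;
}

int main(void)
{
	if (!wait_for_idle(100))
		fprintf(stderr, "timeout waiting for GPU to idle!\n");
	return 0;
}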
409 DBG("%s: %08x", gpu->name, status); in a3xx_irq()
471 return ERR_PTR(-ENOMEM); in a3xx_gpu_state_get()
475 state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS); in a3xx_gpu_state_get()
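The state-capture hit above allocates a snapshot object (failing with ERR_PTR(-ENOMEM)) and then records RBBM_STATUS into it. Below is a user-space approximation of the same flow; since ERR_PTR() is kernel-specific, this sketch returns NULL and sets errno instead, and fake_read_status() stands in for the register read.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct gpu_state { uint32_t rbbm_status; };

static uint32_t fake_read_status(void) { return 0x00000001; }   /* placeholder value */

static struct gpu_state *gpu_state_get(void)
{
	struct gpu_state *state = calloc(1, sizeof(*state));

	if (!state) {
		errno = ENOMEM;          /* the kernel code encodes this via ERR_PTR() */
		return NULL;
	}
	state->rbbm_status = fake_read_status();
	return state;
}

int main(void)
{
	struct gpu_state *state = gpu_state_get();

	if (state)
		printf("RBBM_STATUS: 0x%08x\n", (unsigned)state->rbbm_status);
	free(state);
	return 0;
}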
485 *out_sample_rate = clk_get_rate(gpu->core_clk); in a3xx_gpu_busy()
492 ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); in a3xx_get_rptr()
493 return ring->memptrs->rptr; in a3xx_get_rptr()
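The read-pointer hook above reads CP_RB_RPTR from the hardware, caches it in the ring's shared memptrs structure, and returns that cached value. A minimal model of the same idea, with invented types and fake_read_rptr() in place of the register read:

#include <stdint.h>
#include <stdio.h>

struct memptrs { uint32_t rptr; };
struct ring { struct memptrs *memptrs; };

static uint32_t fake_read_rptr(void) { return 128; }   /* pretend CP_RB_RPTR value */

static uint32_t get_rptr(struct ring *ring)
{
	ring->memptrs->rptr = fake_read_rptr();  /* cache the hardware read pointer */
	return ring->memptrs->rptr;
}

int main(void)
{
	struct memptrs mp = { 0 };
	struct ring ring = { .memptrs = &mp };

	printf("rptr = %u\n", (unsigned)get_rptr(&ring));
	return 0;
}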
531 struct msm_drm_private *priv = dev->dev_private; in a3xx_gpu_init()
532 struct platform_device *pdev = priv->gpu_pdev; in a3xx_gpu_init()
538 DRM_DEV_ERROR(dev->dev, "no a3xx device\n"); in a3xx_gpu_init()
539 ret = -ENXIO; in a3xx_gpu_init()
545 ret = -ENOMEM; in a3xx_gpu_init()
549 adreno_gpu = &a3xx_gpu->base; in a3xx_gpu_init()
550 gpu = &adreno_gpu->base; in a3xx_gpu_init()
552 gpu->perfcntrs = perfcntrs; in a3xx_gpu_init()
553 gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); in a3xx_gpu_init()
555 adreno_gpu->registers = a3xx_registers; in a3xx_gpu_init()
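The lines above show the layering used throughout the init path: the A3xx object embeds the Adreno-generic object, which embeds the core GPU object, and the code keeps pointers to each layer (&a3xx_gpu->base, &adreno_gpu->base) so shared code can operate on the inner structs. A self-contained illustration of that embedding, with invented field names:

#include <stdio.h>

struct base_gpu   { const char *name; };
struct adreno_gpu { struct base_gpu base; int revision; };
struct a3xx_gpu   { struct adreno_gpu base; int ocmem_handle; };

int main(void)
{
	struct a3xx_gpu chip = {
		.base = { .base = { .name = "a3xx" }, .revision = 330 },
	};
	struct adreno_gpu *adreno = &chip.base;   /* middle, family-generic layer */
	struct base_gpu *gpu = &adreno->base;     /* innermost, core layer */

	printf("%s rev %d\n", gpu->name, adreno->revision);
	return 0;
}

Because the embedded base lives inside the containing struct, upcasting is just taking the address of the member, and the shared code never needs to know about the chip-specific wrapper.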
563 ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev, in a3xx_gpu_init()
564 adreno_gpu, &a3xx_gpu->ocmem); in a3xx_gpu_init()
569 if (!gpu->aspace) { in a3xx_gpu_init()
577 DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); in a3xx_gpu_init()
579 ret = -ENXIO; in a3xx_gpu_init()
584 icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); in a3xx_gpu_init()
590 ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); in a3xx_gpu_init()
593 /* allow -ENODATA, ocmem icc is optional */ in a3xx_gpu_init()
594 if (ret != -ENODATA) in a3xx_gpu_init()
605 icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); in a3xx_gpu_init()
606 icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); in a3xx_gpu_init()
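The interconnect hits above do two things: the "ocmem" path lookup is allowed to fail with -ENODATA because that path is optional, and once the paths exist the driver votes bandwidth derived from gpu->fast_rate (Bps_to_icc() converts bytes per second into the interconnect framework's kBps units). A stand-alone sketch of both steps follows; lookup_icc_path(), set_icc_bw() and the clock rate are stand-ins, not the interconnect API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* stand-ins for the path lookup and bandwidth vote used in the listing */
static void *lookup_icc_path(const char *name, int *err)
{
	(void)name;
	*err = -ENODATA;                 /* pretend the optional DT property is absent */
	return NULL;
}

static void set_icc_bw(void *path, unsigned long long avg_kbps, unsigned long long peak_kbps)
{
	if (path)
		printf("vote avg=%llu peak=%llu kBps\n", avg_kbps, peak_kbps);
}

int main(void)
{
	unsigned long long fast_rate = 450000000ULL;   /* invented clock rate, in Hz */
	int err;
	void *ocmem_path = lookup_icc_path("ocmem", &err);

	if (!ocmem_path && err != -ENODATA)
		return 1;                  /* a real lookup failure aborts init */
	/* -ENODATA: the optional path simply is not described, carry on without it */

	/* peak vote scaled from the clock, converted from bytes/s to kBps */
	set_icc_bw(ocmem_path, 0, fast_rate * 8 / 1000);
	return 0;
}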
612 a3xx_destroy(&a3xx_gpu->base.base); in a3xx_gpu_init()
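The final hit is the error path of a3xx_gpu_init(): once the A3xx object has been allocated, any later failure funnels into a single cleanup that calls a3xx_destroy() on the partially initialized object and propagates the saved error. A generic, self-contained sketch of that goto-unwind pattern; the structure and the injected failure below are invented.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_gpu { int dummy; };

static void fake_destroy(struct fake_gpu *gpu)
{
	free(gpu);                        /* mirrors the destroy call in the listing */
}

static struct fake_gpu *fake_gpu_init(int simulate_failure, int *errp)
{
	struct fake_gpu *gpu = calloc(1, sizeof(*gpu));
	int ret;

	if (!gpu) {
		ret = -ENOMEM;
		goto fail_noalloc;
	}

	if (simulate_failure) {           /* e.g. a missing clock or interconnect path */
		ret = -ENXIO;
		goto fail;
	}

	*errp = 0;
	return gpu;

fail:
	fake_destroy(gpu);
fail_noalloc:
	*errp = ret;
	return NULL;
}

int main(void)
{
	int err;

	if (!fake_gpu_init(1, &err))
		fprintf(stderr, "init failed: %d\n", err);
	return 0;
}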