Excerpts from drivers/gpu/drm/msm/adreno/a3xx_gpu.c (each line carries its line number in the source file; gaps in the numbering mark elided code):

   1  // SPDX-License-Identifier: GPL-2.0-only
In a3xx_submit():
  33  struct msm_drm_private *priv = gpu->dev->dev_private;
  34  struct msm_ringbuffer *ring = submit->ring;
  37  for (i = 0; i < submit->nr_cmds; i++) {
  38  switch (submit->cmd[i].type) {
  40  /* ignore IB-targets */
  44  if (priv->lastctx == submit->queue->ctx)
  49  OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
  50  OUT_RING(ring, submit->cmd[i].size);
  57  OUT_RING(ring, submit->seqno);
  74  OUT_RING(ring, submit->seqno);
  77  /* Dummy set-constant to trigger context rollover */
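The submit path above feeds the CP ringbuffer one 32-bit word at a time with OUT_RING(): the command buffer's GPU address and size, and later the fence seqno. As a rough, self-contained illustration of that emission pattern (an array-backed ring with a wrapping write pointer), here is a toy user-space sketch; the names (struct toy_ring, ring_emit) are invented for the example and are not the kernel's API.

#include <stdint.h>
#include <stdio.h>

/* Toy model of a command ring: a fixed array of 32-bit words plus a
 * write pointer.  Illustrative only; not the kernel's data structure. */
struct toy_ring {
	uint32_t words[64];
	unsigned int wptr;
};

/* Append one 32-bit word, wrapping at the end of the buffer --
 * loosely what OUT_RING() does for the CP ringbuffer. */
static void ring_emit(struct toy_ring *ring, uint32_t word)
{
	ring->words[ring->wptr] = word;
	ring->wptr = (ring->wptr + 1) % 64;
}

int main(void)
{
	struct toy_ring ring = { .wptr = 0 };
	uint64_t iova = 0x100000;  /* pretend GPU address of a command buffer */
	uint32_t size = 128;       /* pretend size in dwords */
	uint32_t seqno = 42;       /* pretend fence sequence number */

	/* Same shape as the excerpt: address, size, then the seqno. */
	ring_emit(&ring, (uint32_t)(iova & 0xffffffff));
	ring_emit(&ring, size);
	ring_emit(&ring, seqno);

	printf("emitted %u words\n", ring.wptr);
	return 0;
}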
In a3xx_me_init():
  88  struct msm_ringbuffer *ring = gpu->rb[0];
In a3xx_hw_init():
 120  DBG("%s", gpu->name);
 123  /* Set up 16 deep read/write request queues: */
 131  /* Enable WR-REQ: */
 143  /* Set up 16 deep read/write request queues: */
 151  /* Enable WR-REQ: */
 171  /* Enable WR-REQ: */
 178  /* Set up 16 deep read/write request queues: */
 186  /* Enable WR-REQ: */
 225  /* Turn on hang detection - this spews a lot of useful information
 230  /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
 249  if (a3xx_gpu->ocmem.hdl) {
 251  (unsigned int)(a3xx_gpu->ocmem.base >> 14));
 258  for (i = 0; i < gpu->num_perfcntrs; i++) {
 259  const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
 260  gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
 277  gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
 303  /* NOTE: PM4/micro-engine firmware registers look to be the same
 310  ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
 311  len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
 322  ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
 323  len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
 330  /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
 349  return a3xx_me_init(gpu) ? 0 : -EINVAL;
In a3xx_destroy():
 378  DBG("%s", gpu->name);
 382  adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
In a3xx_idle():
 390  if (!adreno_idle(gpu, gpu->rb[0]))
 396  DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
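a3xx_idle() above drains the ring and logs an error if the GPU never reports idle in time. The underlying shape is a bounded poll; a minimal sketch of that pattern with hypothetical names (not the driver's helpers):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for "does the hardware report idle yet?". */
static bool toy_is_idle(int busy_polls_remaining)
{
	return busy_polls_remaining <= 0;
}

/* Bounded poll: retry until the condition holds or the budget runs out,
 * roughly the shape of waiting for the ring to drain in a3xx_idle(). */
static bool toy_wait_idle(int busy_polls, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (toy_is_idle(busy_polls - i))
			return true;
	}
	return false;  /* caller logs "timeout waiting for GPU to idle!" */
}

int main(void)
{
	if (!toy_wait_idle(10, 100))
		fprintf(stderr, "timeout waiting for idle\n");
	else
		printf("idle\n");
	return 0;
}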
In a3xx_irq():
 410  DBG("%s: %08x", gpu->name, status);
In a3xx_gpu_state_get():
 472  return ERR_PTR(-ENOMEM);
 476  state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
In a3xx_get_rptr():
 483  ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
 484  return ring->memptrs->rptr;
In a3xx_gpu_init():
 520  struct msm_drm_private *priv = dev->dev_private;
 521  struct platform_device *pdev = priv->gpu_pdev;
 525  DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
 526  ret = -ENXIO;
 532  ret = -ENOMEM;
 536  adreno_gpu = &a3xx_gpu->base;
 537  gpu = &adreno_gpu->base;
 539  gpu->perfcntrs = perfcntrs;
 540  gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 542  adreno_gpu->registers = a3xx_registers;
 550  ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
 551  adreno_gpu, &a3xx_gpu->ocmem);
 556  if (!gpu->aspace) {
 564  DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 565  ret = -ENXIO;
 574  icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
 575  icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
 581  a3xx_destroy(&a3xx_gpu->base.base);
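The a3xx_gpu_init() excerpts end with the usual kernel error-unwinding shape: pick an error code (-ENXIO when there is "no a3xx device", -ENOMEM on allocation failure), jump to a common failure path, and tear everything down with the matching destroy helper (a3xx_destroy(), visible at the end of the excerpt). A small stand-alone sketch of that shape, with hypothetical names rather than the driver's types:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical device standing in for the a3xx_gpu structure. */
struct toy_gpu {
	int *scratch;
};

static void toy_destroy(struct toy_gpu *gpu)
{
	free(gpu->scratch);
	free(gpu);
}

/* Same shape as a3xx_gpu_init(): allocate, initialise step by step, and on
 * any failure set ret and fall through to a single cleanup path. */
static int toy_init(struct toy_gpu **out, int have_hw)
{
	struct toy_gpu *gpu;
	int ret;

	gpu = calloc(1, sizeof(*gpu));
	if (!gpu)
		return -ENOMEM;

	if (!have_hw) {  /* analogous to the "no a3xx device" case */
		ret = -ENXIO;
		goto fail;
	}

	gpu->scratch = calloc(16, sizeof(*gpu->scratch));
	if (!gpu->scratch) {
		ret = -ENOMEM;
		goto fail;
	}

	*out = gpu;
	return 0;

fail:
	toy_destroy(gpu);
	return ret;
}

int main(void)
{
	struct toy_gpu *gpu = NULL;

	if (toy_init(&gpu, 1) == 0) {
		printf("init ok\n");
		toy_destroy(gpu);
	}
	return 0;
}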