Lines Matching full:gpu
24 static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, in zap_shader_load_mdt() argument
27 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
79 ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); in zap_shader_load_mdt()
84 fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); in zap_shader_load_mdt()
134 if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { in zap_shader_load_mdt()
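
The zap_shader_load_mdt() matches above trace the firmware-selection fallback: the driver first asks for the SoC-specific signed image via request_firmware_direct(), falls back to adreno_request_fw() otherwise, and then treats signed or legacy-location images specially when handing them to the secure loader. A minimal userspace sketch of that try-then-fall-back shape (all names below are toy stand-ins, not the kernel API):

#include <stdio.h>

/* Toy stand-ins for the two load paths seen in zap_shader_load_mdt(). */
static int load_signed(const char *name)   { (void)name; return -2; /* pretend "not found" */ }
static int load_unsigned(const char *name) { (void)name; return 0;  /* pretend success */ }

static int load_zap_fw(const char *fwname)
{
	int signed_fwname = 1;
	int ret = load_signed(fwname);		/* request_firmware_direct() path */

	if (ret) {
		signed_fwname = 0;
		ret = load_unsigned(fwname);	/* adreno_request_fw() fallback */
	}
	if (ret)
		return ret;

	/* Signed or legacy-location images take a different load path in the
	 * real driver; here we only report which name won. */
	printf("loaded %s (%s)\n", fwname, signed_fwname ? "signed" : "unsigned");
	return 0;
}

int main(void) { return load_zap_fw("a630_zap.mdt"); }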
170 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) in adreno_zap_shader_load() argument
172 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_zap_shader_load()
173 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
185 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
189 adreno_iommu_create_address_space(struct msm_gpu *gpu, in adreno_iommu_create_address_space() argument
211 aspace = msm_gem_address_space_create(mmu, "gpu", in adreno_iommu_create_address_space()
220 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) in adreno_get_param() argument
222 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_get_param()
247 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
248 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
249 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
255 *value = gpu->nr_rings; in adreno_get_param()
261 *value = gpu->global_faults; in adreno_get_param()
264 DBG("%s: invalid param: %u", gpu->name, param); in adreno_get_param()
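
adreno_get_param() is a plain switch over the uapi parameter id; the hits show the timestamp query bracketed by pm_runtime_get_sync()/pm_runtime_put_autosuspend() (the counter is only readable while the GPU is powered), and simple fields such as nr_rings and global_faults copied straight out. A compact standalone model of that dispatch, using toy types rather than the real msm structures:

#include <stdint.h>
#include <stdio.h>

enum toy_param { TOY_PARAM_TIMESTAMP, TOY_PARAM_NR_RINGS, TOY_PARAM_FAULTS };

struct toy_gpu {
	const char *name;
	int nr_rings;
	uint64_t global_faults;
};

/* Stand-in for the per-generation funcs->get_timestamp() hook. */
static int toy_get_timestamp(struct toy_gpu *gpu, uint64_t *value)
{
	(void)gpu;
	*value = 0x1234;
	return 0;
}

static int toy_get_param(struct toy_gpu *gpu, uint32_t param, uint64_t *value)
{
	switch (param) {
	case TOY_PARAM_TIMESTAMP:
		/* In the driver this read is wrapped in a runtime-PM get/put
		 * pair so the hardware counter is actually powered. */
		return toy_get_timestamp(gpu, value);
	case TOY_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	case TOY_PARAM_FAULTS:
		*value = gpu->global_faults;
		return 0;
	default:
		fprintf(stderr, "%s: invalid param: %u\n", gpu->name, param);
		return -22;	/* -EINVAL */
	}
}

int main(void)
{
	struct toy_gpu gpu = { "toy", 1, 0 };
	uint64_t v = 0;

	return toy_get_param(&gpu, TOY_PARAM_NR_RINGS, &v);
}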
374 struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, in adreno_fw_create_bo() argument
380 ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
381 MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
393 int adreno_hw_init(struct msm_gpu *gpu) in adreno_hw_init() argument
395 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_hw_init()
398 DBG("%s", gpu->name); in adreno_hw_init()
404 for (i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
405 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
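
adreno_hw_init() walks every ring and rewinds its software state before (re)starting the hardware, so nothing stale gets kicked after a reset. A sketch of that loop over assumed toy ring/gpu types:

#include <stdint.h>

struct toy_ring {
	uint32_t *start, *cur, *next;	/* write-side pointers into the ring */
	uint32_t rptr;			/* mirrored hardware read pointer */
};

struct toy_gpu {
	int nr_rings;
	struct toy_ring *rb[4];
};

/* Rewind each ring so a freshly initialized (or recovered) GPU starts clean. */
static void toy_reset_rings(struct toy_gpu *gpu)
{
	for (int i = 0; i < gpu->nr_rings; i++) {
		struct toy_ring *ring = gpu->rb[i];

		if (!ring)
			continue;

		ring->cur = ring->start;	/* nothing pending to submit */
		ring->next = ring->start;
		ring->rptr = 0;			/* hardware restarts reading at 0 */
	}
}

int main(void)
{
	uint32_t buf[8] = { 0 };
	struct toy_ring r = { buf, buf + 4, buf + 4, 7 };
	struct toy_gpu gpu = { 1, { &r } };

	toy_reset_rings(&gpu);
	return r.rptr;	/* 0 after reset */
}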
425 struct msm_gpu *gpu = &adreno_gpu->base; in get_rptr() local
427 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
430 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) in adreno_active_ring() argument
432 return gpu->rb[0]; in adreno_active_ring()
435 void adreno_recover(struct msm_gpu *gpu) in adreno_recover() argument
437 struct drm_device *dev = gpu->dev; in adreno_recover()
443 gpu->funcs->pm_suspend(gpu); in adreno_recover()
444 gpu->funcs->pm_resume(gpu); in adreno_recover()
446 ret = msm_gpu_hw_init(gpu); in adreno_recover()
448 DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); in adreno_recover()
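
adreno_recover() is essentially a power cycle plus a fresh init: suspend, resume, then run the normal msm_gpu_hw_init() path again and complain if that fails. The shape, with toy stand-ins for the three steps:

#include <stdio.h>

static void toy_pm_suspend(void) { /* clocks/power off */ }
static void toy_pm_resume(void)  { /* clocks/power on */ }
static int  toy_hw_init(void)    { return 0; /* reprogram the hardware */ }

static void toy_recover(void)
{
	toy_pm_suspend();
	toy_pm_resume();

	if (toy_hw_init())
		fprintf(stderr, "gpu hw init failed\n");
}

int main(void) { toy_recover(); return 0; }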
453 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg) in adreno_flush() argument
470 gpu_write(gpu, reg, wptr); in adreno_flush()
473 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in adreno_idle() argument
475 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_idle()
482 /* TODO maybe we need to reset GPU here to recover from hang? */ in adreno_idle()
484 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
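
adreno_idle() considers the GPU idle once the CP's read pointer has caught up with the last kicked write pointer; if that never happens within the time budget it logs the rptr/wptr pair rather than spinning forever. A self-contained model of that poll loop (the fake "hardware" here drains one word per poll):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ring { uint32_t rptr, wptr; int id; };

/* Pretend hardware: the CP consumes one word each time we poll it. */
static uint32_t toy_get_rptr(struct toy_ring *ring)
{
	if (ring->rptr != ring->wptr)
		ring->rptr++;
	return ring->rptr;
}

/* Idle means rptr has caught up with the last kicked wptr; report a hang
 * instead of looping forever when it never does. */
static bool toy_idle(struct toy_ring *ring)
{
	for (int tries = 0; tries < 1000; tries++) {
		if (toy_get_rptr(ring) == ring->wptr)
			return true;
	}

	fprintf(stderr, "timeout draining ring %d, rptr/wptr = %X/%X\n",
		ring->id, ring->rptr, ring->wptr);
	return false;
}

int main(void)
{
	struct toy_ring ring = { 0, 16, 0 };

	return toy_idle(&ring) ? 0 : 1;
}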
489 int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) in adreno_gpu_state_get() argument
491 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_gpu_state_get()
498 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
501 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
502 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
503 state->ring[i].seqno = gpu->rb[i]->seqno; in adreno_gpu_state_get()
504 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
505 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
512 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
518 memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); in adreno_gpu_state_get()
544 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
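
The adreno_gpu_state_get() hits show the crash-state snapshot: per-ring fence/seqno/rptr/wptr plus a copy of the ring contents, where the scan for the last non-zero dword keeps the dump from carrying trailing zero padding. A toy version of the per-ring capture, with invented types and a fixed ring size:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define TOY_RING_DWORDS 64

struct toy_ring {
	uint32_t fence, seqno, rptr, wptr;
	uint32_t start[TOY_RING_DWORDS];
};

struct toy_ring_state {
	uint32_t fence, seqno, rptr, wptr;
	uint32_t *data;
	int data_size;		/* bytes actually copied */
};

static int toy_snapshot_ring(const struct toy_ring *ring, struct toy_ring_state *st)
{
	int size = 0;

	st->fence = ring->fence;
	st->seqno = ring->seqno;
	st->rptr  = ring->rptr;
	st->wptr  = ring->wptr;

	/* Only copy up to the last non-zero dword; the tail is just padding. */
	for (int j = 0; j < TOY_RING_DWORDS; j++)
		if (ring->start[j])
			size = j + 1;

	st->data = malloc(size * sizeof(uint32_t));
	if (!st->data)
		return -12;	/* -ENOMEM */
	memcpy(st->data, ring->start, size * sizeof(uint32_t));
	st->data_size = size * sizeof(uint32_t);
	return 0;
}

int main(void)
{
	struct toy_ring ring = { .fence = 1, .start = { 0xdead, 0xbeef } };
	struct toy_ring_state st;

	return toy_snapshot_ring(&ring, &st);
}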
661 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, in adreno_show() argument
664 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_show()
679 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
717 /* Dump common gpu status and scratch registers on any hang, to make
719 * safe to read when GPU has hung (unlike some other regs, depending
720 * on how the GPU hung), and they are useful to match up to cmdstream
723 void adreno_dump_info(struct msm_gpu *gpu) in adreno_dump_info() argument
725 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump_info()
733 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
734 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
746 void adreno_dump(struct msm_gpu *gpu) in adreno_dump() argument
748 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump()
755 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
762 uint32_t val = gpu_read(gpu, addr); in adreno_dump()
770 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
781 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
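
ring_freewords() and adreno_wait_ring() implement the usual ring-buffer flow control: free space is the distance from the software write pointer back around to the hardware read pointer, modulo the ring size, and submission waits until enough words are free. A standalone sketch of that arithmetic (the size and names here are illustrative, not the driver's constants):

#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 512u	/* illustrative ring size in 32-bit words */

/* Classic circular-buffer free-space formula: one slot is kept unused so
 * that rptr == wptr unambiguously means "empty" rather than "full". */
static uint32_t toy_freewords(uint32_t rptr, uint32_t wptr)
{
	return (rptr + (RING_DWORDS - 1) - wptr) % RING_DWORDS;
}

int main(void)
{
	/* Empty ring: everything except the reserved slot is free. */
	printf("%u\n", toy_freewords(0, 0));	/* 511 */
	/* Writer 100 words ahead of the reader. */
	printf("%u\n", toy_freewords(0, 100));	/* 411 */
	return 0;
}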
786 /* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
792 node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); in adreno_get_legacy_pwrlevels()
794 DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n"); in adreno_get_legacy_pwrlevels()
801 ret = of_property_read_u32(child, "qcom,gpu-freq", &val); in adreno_get_legacy_pwrlevels()
819 struct msm_gpu *gpu) in adreno_get_pwrlevels() argument
825 gpu->fast_rate = 0; in adreno_get_pwrlevels()
840 gpu->fast_rate = freq; in adreno_get_pwrlevels()
845 if (!gpu->fast_rate) { in adreno_get_pwrlevels()
849 gpu->fast_rate = 200000000; in adreno_get_pwrlevels()
852 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
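
adreno_get_pwrlevels() ends up with gpu->fast_rate set to the highest rate found in the OPP table (or the legacy qcom,gpu-pwrlevels node), and falls back to a conservative 200 MHz default when nothing usable is found. A small standalone sketch of that selection, with an invented frequency list in place of the OPP/DT lookups:

#include <stdint.h>
#include <stdio.h>

/* Pick the highest advertised rate; if the device tree gave us nothing
 * usable, fall back to a rate that is safe on any supported target. */
static uint32_t toy_pick_fast_rate(const uint32_t *freqs, int n)
{
	uint32_t fast = 0;

	for (int i = 0; i < n; i++)
		if (freqs[i] > fast)
			fast = freqs[i];

	if (!fast) {
		fprintf(stderr, "could not find a clock rate, using default\n");
		fast = 200000000;
	}
	return fast;
}

int main(void)
{
	const uint32_t freqs[] = { 180000000, 432000000, 306000000 };

	printf("fast_rate=%u, slow_rate=27000000\n", toy_pick_fast_rate(freqs, 3));
	return 0;
}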
901 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init() local
914 adreno_get_pwrlevels(dev, gpu); in adreno_gpu_init()
931 gpu->icc_path = of_icc_get(dev, NULL); in adreno_gpu_init()
933 gpu->icc_path = of_icc_get(dev, "gfx-mem"); in adreno_gpu_init()
934 gpu->ocmem_icc_path = of_icc_get(dev, "ocmem"); in adreno_gpu_init()
937 if (IS_ERR(gpu->icc_path)) { in adreno_gpu_init()
938 ret = PTR_ERR(gpu->icc_path); in adreno_gpu_init()
939 gpu->icc_path = NULL; in adreno_gpu_init()
943 if (IS_ERR(gpu->ocmem_icc_path)) { in adreno_gpu_init()
944 ret = PTR_ERR(gpu->ocmem_icc_path); in adreno_gpu_init()
945 gpu->ocmem_icc_path = NULL; in adreno_gpu_init()
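
The interconnect handling in adreno_gpu_init() uses the kernel's error-pointer convention: of_icc_get() either returns a usable path or an encoded errno, and on error the driver records the errno and NULLs the handle so that icc_put() in adreno_gpu_cleanup() can run unconditionally. A userspace imitation of that idiom (the ERR_PTR/IS_ERR helpers below are simplified stand-ins, not the kernel definitions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

struct toy_gpu { void *icc_path; };

static void *toy_of_icc_get(const char *name)
{
	(void)name;
	return ERR_PTR(-ENODEV);	/* pretend the DT has no such path */
}

static int toy_get_icc(struct toy_gpu *gpu)
{
	gpu->icc_path = toy_of_icc_get("gfx-mem");
	if (IS_ERR(gpu->icc_path)) {
		int ret = PTR_ERR(gpu->icc_path);

		/* NULL the handle so cleanup can put it unconditionally. */
		gpu->icc_path = NULL;
		return ret;
	}
	return 0;
}

int main(void)
{
	struct toy_gpu gpu;

	printf("%d\n", toy_get_icc(&gpu));	/* prints the negative errno */
	return 0;
}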
956 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup() local
957 struct msm_drm_private *priv = gpu->dev->dev_private; in adreno_gpu_cleanup()
967 icc_put(gpu->icc_path); in adreno_gpu_cleanup()
968 icc_put(gpu->ocmem_icc_path); in adreno_gpu_cleanup()