Lines matching "zap-shader" in drivers/gpu/drm/msm/adreno/adreno_gpu.c

1 // SPDX-License-Identifier: GPL-2.0-only
17 #include <linux/nvmem-consumer.h>
33 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
45 return -EINVAL; in zap_shader_load_mdt()
48 np = of_get_child_by_name(dev->of_node, "zap-shader"); in zap_shader_load_mdt()
51 return -ENODEV; in zap_shader_load_mdt()
54 mem_np = of_parse_phandle(np, "memory-region", 0); in zap_shader_load_mdt()
58 return -EINVAL; in zap_shader_load_mdt()
69 * Check for a firmware-name property. This is the new scheme in zap_shader_load_mdt()
71 * keys, allowing us to have a different zap fw path for different in zap_shader_load_mdt()
74 * If the firmware-name property is found, we bypass the in zap_shader_load_mdt()
78 * If the firmware-name property is not found, for backwards in zap_shader_load_mdt()
82 of_property_read_string_index(np, "firmware-name", 0, &signed_fwname); in zap_shader_load_mdt()
85 ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); in zap_shader_load_mdt()
93 * For new targets, we require the firmware-name property, in zap_shader_load_mdt()
94 * if a zap-shader is required, rather than falling back in zap_shader_load_mdt()
102 return -ENODEV; in zap_shader_load_mdt()
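
The comment block above describes the two schemes. A minimal sketch of the lookup-with-fallback it implies, reassembled here for illustration around the zapfw default that adreno_zap_shader_load() passes in:

	const char *signed_fwname = NULL;
	const char *fwname;

	/* New scheme: DT supplies the full path of a signed, per-device image. */
	if (!of_property_read_string_index(np, "firmware-name", 0, &signed_fwname))
		fwname = signed_fwname;
	else
		fwname = adreno_gpu->info->zapfw;	/* legacy per-target default */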
120 ret = -E2BIG; in zap_shader_load_mdt()
127 ret = -ENOMEM; in zap_shader_load_mdt()
135 * with upstream linux-firmware it would be in a qcom/ subdir.. in zap_shader_load_mdt()
140 if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { in zap_shader_load_mdt()
159 * If the scm call returns -EOPNOTSUPP we assume that this target in zap_shader_load_mdt()
160 * doesn't need/support the zap shader so quietly fail in zap_shader_load_mdt()
162 if (ret == -EOPNOTSUPP) in zap_shader_load_mdt()
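
The -EOPNOTSUPP tested above comes back from the TrustZone PAS (peripheral authentication service) interface. A hedged reconstruction of the elided load/authenticate sequence that produces it, using the qcom_mdt_load() API from linux/soc/qcom/mdt_loader.h (variable names assumed):

	/* Stage the image into the reserved region, then ask TrustZone to
	 * authenticate it and bring the zap core out of reset. */
	ret = qcom_mdt_load(dev, fw, fwname, pasid,
			    mem_region, mem_phys, mem_size, NULL);
	if (!ret)
		ret = qcom_scm_pas_auth_and_reset(pasid);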
179 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
181 /* Short cut if we determine the zap shader isn't available/needed */ in adreno_zap_shader_load()
183 return -ENODEV; in adreno_zap_shader_load()
187 DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); in adreno_zap_shader_load()
188 return -EPROBE_DEFER; in adreno_zap_shader_load()
191 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
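
Per-generation code calls this from its hw_init path with the GPU's PAS id; a sketch of the a5xx/a6xx usage, where GPU_PAS_ID is defined as 13:

	#define GPU_PAS_ID 13	/* TrustZone peripheral id of the GPU core */

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
	/* Callers treat -ENODEV as "no zap shader on this target" and, where
	 * possible, fall back to switching the GPU out of secure mode directly. */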
211 mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); in adreno_iommu_create_address_space()
224 start = max_t(u64, SZ_16M, geometry->aperture_start); in adreno_iommu_create_address_space()
225 size = geometry->aperture_end - start + 1; in adreno_iommu_create_address_space()
231 mmu->funcs->destroy(mmu); in adreno_iommu_create_address_space()
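
The size computation treats aperture_end as inclusive; a worked example for the common 32-bit SMMU geometry (values assumed for illustration):

	/* aperture_start = 0x0, aperture_end = 0xffffffff:
	 *   start = max(SZ_16M, 0x0)        = 0x01000000  (skip the low 16 MiB)
	 *   size  = 0xffffffff - start + 1  = 0xff000000  (inclusive range)
	 */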
243 if (adreno_gpu->info->address_space_size) in adreno_private_address_space_size()
244 return adreno_gpu->info->address_space_size; in adreno_private_address_space_size()
258 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in adreno_fault_handler()
265 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in adreno_fault_handler()
270 * adreno-smmu-priv in adreno_fault_handler()
280 if (info->fsr & ARM_SMMU_FSR_TF) in adreno_fault_handler()
282 else if (info->fsr & ARM_SMMU_FSR_PF) in adreno_fault_handler()
284 else if (info->fsr & ARM_SMMU_FSR_EF) in adreno_fault_handler()
288 info->ttbr0, iova, in adreno_fault_handler()
295 del_timer(&gpu->hangcheck_timer); in adreno_fault_handler()
297 gpu->fault_info.ttbr0 = info->ttbr0; in adreno_fault_handler()
298 gpu->fault_info.iova = iova; in adreno_fault_handler()
299 gpu->fault_info.flags = flags; in adreno_fault_handler()
300 gpu->fault_info.type = type; in adreno_fault_handler()
301 gpu->fault_info.block = block; in adreno_fault_handler()
303 kthread_queue_work(gpu->worker, &gpu->fault_work); in adreno_fault_handler()
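
The fsr tests above select a printable fault type; a reconstruction of the elided assignments, with strings assumed to match the fault-info block that adreno_show() prints below:

	const char *type = "UNKNOWN";

	if (info->fsr & ARM_SMMU_FSR_TF)
		type = "TRANSLATION";	/* no mapping for the IOVA */
	else if (info->fsr & ARM_SMMU_FSR_PF)
		type = "PERMISSION";	/* mapped, but access not permitted */
	else if (info->fsr & ARM_SMMU_FSR_EF)
		type = "EXTERNAL";	/* external abort on the walk */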
316 return -EINVAL; in adreno_get_param()
320 *value = adreno_gpu->info->revn; in adreno_get_param()
323 *value = adreno_gpu->info->gmem; in adreno_get_param()
333 *value = adreno_gpu->chip_id; in adreno_get_param()
334 if (!adreno_gpu->info->revn) in adreno_get_param()
335 *value |= ((uint64_t) adreno_gpu->speedbin) << 32; in adreno_get_param()
338 *value = adreno_gpu->base.fast_rate; in adreno_get_param()
341 if (adreno_gpu->funcs->get_timestamp) { in adreno_get_param()
344 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
345 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
346 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
350 return -EINVAL; in adreno_get_param()
352 *value = gpu->nr_rings * NR_SCHED_PRIORITIES; in adreno_get_param()
358 if (ctx->aspace) in adreno_get_param()
359 *value = gpu->global_faults + ctx->aspace->faults; in adreno_get_param()
361 *value = gpu->global_faults; in adreno_get_param()
364 *value = gpu->suspend_count; in adreno_get_param()
367 if (ctx->aspace == gpu->aspace) in adreno_get_param()
368 return -EINVAL; in adreno_get_param()
369 *value = ctx->aspace->va_start; in adreno_get_param()
372 if (ctx->aspace == gpu->aspace) in adreno_get_param()
373 return -EINVAL; in adreno_get_param()
374 *value = ctx->aspace->va_size; in adreno_get_param()
377 *value = adreno_gpu->ubwc_config.highest_bank_bit; in adreno_get_param()
380 DBG("%s: invalid param: %u", gpu->name, param); in adreno_get_param()
381 return -EINVAL; in adreno_get_param()
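
This switch serves the MSM_GET_PARAM ioctl; a userspace sketch querying GMEM size (UAPI names from include/uapi/drm/msm_drm.h; fd is an open render node, and the header path depends on your libdrm install):

	#include <sys/ioctl.h>
	#include <msm_drm.h>	/* include path via `pkg-config --cflags libdrm` */

	static __u64 get_gmem_size(int fd)
	{
		struct drm_msm_param req = {
			.pipe  = MSM_PIPE_3D0,		/* the 3D pipe */
			.param = MSM_PARAM_GMEM_SIZE,
		};

		if (ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req))
			return 0;	/* kernel returned -EINVAL etc. */
		return req.value;
	}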
395 return -EINVAL; in adreno_set_param()
399 return -EINVAL; in adreno_set_param()
411 mutex_lock(&gpu->lock); in adreno_set_param()
414 paramp = &ctx->comm; in adreno_set_param()
416 paramp = &ctx->cmdline; in adreno_set_param()
422 mutex_unlock(&gpu->lock); in adreno_set_param()
428 return -EPERM; in adreno_set_param()
431 DBG("%s: invalid param: %u", gpu->name, param); in adreno_set_param()
432 return -EINVAL; in adreno_set_param()
439 struct drm_device *drm = adreno_gpu->base.dev; in adreno_request_fw()
446 return ERR_PTR(-ENOMEM); in adreno_request_fw()
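
The -ENOMEM above guards the allocation that builds the prefixed name tried first; a reconstruction (the qcom/ prefix matches the upstream linux-firmware layout noted further down in the file):

	char *newname;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return ERR_PTR(-ENOMEM);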
452 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
453 (adreno_gpu->fwloc == FW_LOCATION_NEW)) { in adreno_request_fw()
455 ret = request_firmware_direct(&fw, newname, drm->dev); in adreno_request_fw()
457 DRM_DEV_INFO(drm->dev, "loaded %s from new location\n", in adreno_request_fw()
459 adreno_gpu->fwloc = FW_LOCATION_NEW; in adreno_request_fw()
461 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
462 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
472 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
473 (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) { in adreno_request_fw()
475 ret = request_firmware_direct(&fw, fwname, drm->dev); in adreno_request_fw()
477 DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", in adreno_request_fw()
479 adreno_gpu->fwloc = FW_LOCATION_LEGACY; in adreno_request_fw()
481 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
482 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
493 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
494 (adreno_gpu->fwloc == FW_LOCATION_HELPER)) { in adreno_request_fw()
496 ret = request_firmware(&fw, newname, drm->dev); in adreno_request_fw()
498 DRM_DEV_INFO(drm->dev, "loaded %s with helper\n", in adreno_request_fw()
500 adreno_gpu->fwloc = FW_LOCATION_HELPER; in adreno_request_fw()
502 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
503 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
510 DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname); in adreno_request_fw()
511 fw = ERR_PTR(-ENOENT); in adreno_request_fw()
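
Taken together, the function probes three locations in order and latches fwloc on the first success, so every later request goes straight to the location that worked; in summary:

	/* 1. "qcom/<name>", request_firmware_direct()  -> FW_LOCATION_NEW
	 * 2. "<name>",      request_firmware_direct()  -> FW_LOCATION_LEGACY
	 * 3. "qcom/<name>", request_firmware() helper  -> FW_LOCATION_HELPER
	 * none of the above: ERR_PTR(-ENOENT)
	 */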
521 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) { in adreno_load_fw()
524 if (!adreno_gpu->info->fw[i]) in adreno_load_fw()
532 if (adreno_gpu->fw[i]) in adreno_load_fw()
535 fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]); in adreno_load_fw()
539 adreno_gpu->fw[i] = fw; in adreno_load_fw()
551 ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
552 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
557 memcpy(ptr, &fw->data[4], fw->size - 4); in adreno_fw_create_bo()
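
The copy skips the image's first dword (a header word) and hands back a GPU-readonly BO; a hedged usage sketch modeled on the a5xx microcode path (field names from a5xx_gpu, assumed here):

	a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
	if (IS_ERR(a5xx_gpu->pm4_bo)) {
		ret = PTR_ERR(a5xx_gpu->pm4_bo);
		a5xx_gpu->pm4_bo = NULL;
		return ret;
	}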
566 VERB("%s", gpu->name); in adreno_hw_init()
568 for (int i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
569 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
574 ring->cur = ring->start; in adreno_hw_init()
575 ring->next = ring->start; in adreno_hw_init()
576 ring->memptrs->rptr = 0; in adreno_hw_init()
577 ring->memptrs->bv_fence = ring->fctx->completed_fence; in adreno_hw_init()
583 if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) { in adreno_hw_init()
584 ring->memptrs->fence = ring->fctx->last_fence; in adreno_hw_init()
595 struct msm_gpu *gpu = &adreno_gpu->base; in get_rptr()
597 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
602 return gpu->rb[0]; in adreno_active_ring()
607 struct drm_device *dev = gpu->dev; in adreno_recover()
610 // XXX pm-runtime?? we *need* the device to be off after this in adreno_recover()
611 // so maybe continuing to call ->pm_suspend/resume() is better? in adreno_recover()
613 gpu->funcs->pm_suspend(gpu); in adreno_recover()
614 gpu->funcs->pm_resume(gpu); in adreno_recover()
618 DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); in adreno_recover()
628 ring->cur = ring->next; in adreno_flush()
633 * the ringbuffer and rb->next hasn't wrapped to zero yet in adreno_flush()
654 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
664 WARN_ON(!mutex_is_locked(&gpu->lock)); in adreno_gpu_state_get()
666 kref_init(&state->ref); in adreno_gpu_state_get()
668 ktime_get_real_ts64(&state->time); in adreno_gpu_state_get()
670 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
673 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
674 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
675 state->ring[i].seqno = gpu->rb[i]->fctx->last_fence; in adreno_gpu_state_get()
676 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
677 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
680 size = state->ring[i].wptr; in adreno_gpu_state_get()
683 for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++) in adreno_gpu_state_get()
684 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
688 state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL); in adreno_gpu_state_get()
689 if (state->ring[i].data) { in adreno_gpu_state_get()
690 memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); in adreno_gpu_state_get()
691 state->ring[i].data_size = size << 2; in adreno_gpu_state_get()
697 if (!adreno_gpu->registers) in adreno_gpu_state_get()
701 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) in adreno_gpu_state_get()
702 count += adreno_gpu->registers[i + 1] - in adreno_gpu_state_get()
703 adreno_gpu->registers[i] + 1; in adreno_gpu_state_get()
705 state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL); in adreno_gpu_state_get()
706 if (state->registers) { in adreno_gpu_state_get()
709 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { in adreno_gpu_state_get()
710 u32 start = adreno_gpu->registers[i]; in adreno_gpu_state_get()
711 u32 end = adreno_gpu->registers[i + 1]; in adreno_gpu_state_get()
715 state->registers[pos++] = addr; in adreno_gpu_state_get()
716 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
720 state->nr_registers = count; in adreno_gpu_state_get()
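
The walk above assumes registers is a flat table of inclusive {first, last} dword-offset pairs terminated by ~0; a sketch of the per-GPU shape (offsets illustrative):

	static const unsigned int aNxx_registers[] = {
		/* first, last: inclusive ranges of register dword offsets */
		0x0000, 0x0021,
		0x0023, 0x002c,
		0x0041, 0x0045,
		~0,	/* sentinel terminating the table */
	};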
730 for (i = 0; i < ARRAY_SIZE(state->ring); i++) in adreno_gpu_state_destroy()
731 kvfree(state->ring[i].data); in adreno_gpu_state_destroy()
733 for (i = 0; state->bos && i < state->nr_bos; i++) in adreno_gpu_state_destroy()
734 kvfree(state->bos[i].data); in adreno_gpu_state_destroy()
736 kfree(state->bos); in adreno_gpu_state_destroy()
737 kfree(state->comm); in adreno_gpu_state_destroy()
738 kfree(state->cmd); in adreno_gpu_state_destroy()
739 kfree(state->registers); in adreno_gpu_state_destroy()
756 return kref_put(&state->ref, adreno_gpu_state_kref_destroy); in adreno_gpu_state_put()
785 buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", in adreno_gpu_ascii85_encode()
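
Each 32-bit word is run through the kernel's ascii85 helper; a hedged reconstruction of the surrounding loop (ascii85_encode() and ASCII85_BUFSZ come from linux/ascii85.h; the bound l and source buffer src are assumed names):

	char out[ASCII85_BUFSZ];
	long i;

	for (i = 0; i < l; i++)
		buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
				ascii85_encode(src[i], out));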
808 * Only dump the non-zero part of the buffer - rarely will in adreno_show_object()
848 adreno_gpu->info->revn, in adreno_show()
849 ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); in adreno_show()
855 if (state->fault_info.ttbr0) { in adreno_show()
856 const struct msm_gpu_fault_info *info = &state->fault_info; in adreno_show()
858 drm_puts(p, "fault-info:\n"); in adreno_show()
859 drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0); in adreno_show()
860 drm_printf(p, " - iova=%.16lx\n", info->iova); in adreno_show()
861 drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ"); in adreno_show()
862 drm_printf(p, " - type=%s\n", info->type); in adreno_show()
863 drm_printf(p, " - source=%s\n", info->block); in adreno_show()
866 drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); in adreno_show()
870 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
871 drm_printf(p, " - id: %d\n", i); in adreno_show()
872 drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova); in adreno_show()
873 drm_printf(p, " last-fence: %u\n", state->ring[i].seqno); in adreno_show()
874 drm_printf(p, " retired-fence: %u\n", state->ring[i].fence); in adreno_show()
875 drm_printf(p, " rptr: %u\n", state->ring[i].rptr); in adreno_show()
876 drm_printf(p, " wptr: %u\n", state->ring[i].wptr); in adreno_show()
879 adreno_show_object(p, &state->ring[i].data, in adreno_show()
880 state->ring[i].data_size, &state->ring[i].encoded); in adreno_show()
883 if (state->bos) { in adreno_show()
886 for (i = 0; i < state->nr_bos; i++) { in adreno_show()
887 drm_printf(p, " - iova: 0x%016llx\n", in adreno_show()
888 state->bos[i].iova); in adreno_show()
889 drm_printf(p, " size: %zd\n", state->bos[i].size); in adreno_show()
890 drm_printf(p, " name: %-32s\n", state->bos[i].name); in adreno_show()
892 adreno_show_object(p, &state->bos[i].data, in adreno_show()
893 state->bos[i].size, &state->bos[i].encoded); in adreno_show()
897 if (state->nr_registers) { in adreno_show()
900 for (i = 0; i < state->nr_registers; i++) { in adreno_show()
901 drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n", in adreno_show()
902 state->registers[i * 2] << 2, in adreno_show()
903 state->registers[(i * 2) + 1]); in adreno_show()
921 adreno_gpu->info->revn, in adreno_dump_info()
922 ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); in adreno_dump_info()
924 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
925 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
928 ring->memptrs->fence, in adreno_dump_info()
929 ring->fctx->last_fence); in adreno_dump_info()
942 if (!adreno_gpu->registers) in adreno_dump()
946 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
947 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { in adreno_dump()
948 uint32_t start = adreno_gpu->registers[i]; in adreno_dump()
949 uint32_t end = adreno_gpu->registers[i+1]; in adreno_dump()
961 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
963 /* Use ring->next to calculate free size */ in ring_freewords()
964 uint32_t wptr = ring->next - ring->start; in ring_freewords()
966 return (rptr + (size - 1) - wptr) % size; in ring_freewords()
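
The modular expression keeps one word in reserve so a completely full ring stays distinguishable from an empty one; a quick check with size = 1024 words:

	/* empty:        wptr == rptr      ->  (r + 1023 - r) % 1024 = 1023 free
	 * almost full:  wptr == rptr - 1  ->  1024 % 1024           = 0 free
	 */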
972 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
974 ring->id); in adreno_wait_ring()
985 gpu->fast_rate = 0; in adreno_get_pwrlevels()
989 if (ret == -ENODEV) { in adreno_get_pwrlevels()
999 return -ENODEV; in adreno_get_pwrlevels()
1011 gpu->fast_rate = freq; in adreno_get_pwrlevels()
1014 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
1027 if (PTR_ERR(ocmem) == -ENODEV) { in adreno_gpu_ocmem_init()
1039 ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem); in adreno_gpu_ocmem_init()
1043 adreno_ocmem->ocmem = ocmem; in adreno_gpu_ocmem_init()
1044 adreno_ocmem->base = ocmem_hdl->addr; in adreno_gpu_ocmem_init()
1045 adreno_ocmem->hdl = ocmem_hdl; in adreno_gpu_ocmem_init()
1047 if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem)) in adreno_gpu_ocmem_init()
1048 return -ENOMEM; in adreno_gpu_ocmem_init()
1055 if (adreno_ocmem && adreno_ocmem->base) in adreno_gpu_ocmem_cleanup()
1056 ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, in adreno_gpu_ocmem_cleanup()
1057 adreno_ocmem->hdl); in adreno_gpu_ocmem_cleanup()
1069 struct device *dev = &pdev->dev; in adreno_gpu_init()
1070 struct adreno_platform_config *config = dev->platform_data; in adreno_gpu_init()
1072 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init()
1077 adreno_gpu->funcs = funcs; in adreno_gpu_init()
1078 adreno_gpu->info = config->info; in adreno_gpu_init()
1079 adreno_gpu->chip_id = config->chip_id; in adreno_gpu_init()
1081 gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; in adreno_gpu_init()
1085 adreno_gpu->info->family < ADRENO_6XX_GEN1) { in adreno_gpu_init()
1103 adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); in adreno_gpu_init()
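
The speedbin masked above is read from an efuse through the nvmem consumer API included at the top of the file; a hedged sketch (the cell name "speed_bin" follows qcom devicetree convention):

	u32 speedbin = 0;

	/* A missing cell is fine: speedbin simply stays 0. */
	nvmem_cell_read_u32(dev, "speed_bin", &speedbin);
	adreno_gpu->speedbin = (uint16_t)(0xffff & speedbin);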
1106 ADRENO_CHIPID_ARGS(config->chip_id)); in adreno_gpu_init()
1108 return -ENOMEM; in adreno_gpu_init()
1119 adreno_gpu->info->inactive_period); in adreno_gpu_init()
1122 return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, in adreno_gpu_init()
1128 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup()
1129 struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; in adreno_gpu_cleanup()
1132 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) in adreno_gpu_cleanup()
1133 release_firmware(adreno_gpu->fw[i]); in adreno_gpu_cleanup()
1135 if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev)) in adreno_gpu_cleanup()
1136 pm_runtime_disable(&priv->gpu_pdev->dev); in adreno_gpu_cleanup()
1138 msm_gpu_cleanup(&adreno_gpu->base); in adreno_gpu_cleanup()