Lines Matching +full:re +full:- +full:attached (all matches are in drivers/gpu/drm/imagination/pvr_queue.c)

1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
40 sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0])); in get_xfer_ctx_state_size()
70 sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0])); in get_frag_ctx_state_size()
87 return -EINVAL; in get_ctx_state_size()
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release_work()
117 dma_fence_free(&fence->base); in pvr_queue_fence_release_work()
123 struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; in pvr_queue_fence_release()
125 queue_work(pvr_dev->sched_wq, &fence->release_work); in pvr_queue_fence_release()
133 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
156 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
158 return "geometry-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
161 return "fragment-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
164 return "compute-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
167 return "transfer-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
181 * to_pvr_queue_job_fence() - Return a pvr_queue_fence object if the fence is
186 * * A non-NULL pvr_queue_fence object if the dma_fence is backed by a UFO, or
195 f = sched_fence->parent; in to_pvr_queue_job_fence()
197 if (f && f->ops == &pvr_queue_job_fence_ops) in to_pvr_queue_job_fence()
210 * pvr_queue_fence_put() - Put wrapper for pvr_queue_fence objects.
223 if (WARN_ON(f->ops && in pvr_queue_fence_put()
224 f->ops != &pvr_queue_cccb_fence_ops && in pvr_queue_fence_put()
225 f->ops != &pvr_queue_job_fence_ops)) in pvr_queue_fence_put()
229 if (f->ops) in pvr_queue_fence_put()
236 * pvr_queue_fence_alloc() - Allocate a pvr_queue_fence fence object
256 return &fence->base; in pvr_queue_fence_alloc()
260 * pvr_queue_fence_init() - Initializes a pvr_queue_fence object.
277 pvr_context_get(queue->ctx); in pvr_queue_fence_init()
278 fence->queue = queue; in pvr_queue_fence_init()
279 INIT_WORK(&fence->release_work, pvr_queue_fence_release_work); in pvr_queue_fence_init()
280 dma_fence_init(&fence->base, fence_ops, in pvr_queue_fence_init()
281 &fence_ctx->lock, fence_ctx->id, in pvr_queue_fence_init()
282 atomic_inc_return(&fence_ctx->seqno)); in pvr_queue_fence_init()
286 * pvr_queue_cccb_fence_init() - Initializes a CCCB fence object.
299 &queue->cccb_fence_ctx.base); in pvr_queue_cccb_fence_init()
303 * pvr_queue_job_fence_init() - Initializes a job done fence object.
316 if (!fence->ops) in pvr_queue_job_fence_init()
318 &queue->job_fence_ctx); in pvr_queue_job_fence_init()
322 * pvr_queue_fence_ctx_init() - Queue fence context initialization.
328 spin_lock_init(&fence_ctx->lock); in pvr_queue_fence_ctx_init()
329 fence_ctx->id = dma_fence_context_alloc(1); in pvr_queue_fence_ctx_init()
330 atomic_set(&fence_ctx->seqno, 0); in pvr_queue_fence_ctx_init()
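
The fragments above (pvr_queue_fence_init() and pvr_queue_fence_ctx_init()) show the per-queue fence bootstrap: each queue owns a fence context made of a spinlock, a context id from dma_fence_context_alloc(), and an atomic seqno, and every new fence is initialized with the next seqno on that context. A minimal sketch of the pattern, with hypothetical names (my_fence_ctx, my_fence_ctx_init, my_fence_init) standing in for the driver's types:

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the per-queue fence context seen above. */
struct my_fence_ctx {
        spinlock_t lock;        /* protects fence signalling */
        u64 id;                 /* dma_fence context id */
        atomic_t seqno;         /* last issued sequence number */
};

static void my_fence_ctx_init(struct my_fence_ctx *fctx)
{
        spin_lock_init(&fctx->lock);
        fctx->id = dma_fence_context_alloc(1);  /* one timeline per queue */
        atomic_set(&fctx->seqno, 0);
}

static void my_fence_init(struct dma_fence *f, const struct dma_fence_ops *ops,
                          struct my_fence_ctx *fctx)
{
        /* Every new fence takes the next seqno on this queue's timeline. */
        dma_fence_init(f, ops, &fctx->lock, fctx->id,
                       atomic_inc_return(&fctx->seqno));
}

In the driver, the CCCB and job-done fences use two separate contexts on the same queue, and pvr_queue_fence_init() also takes a pvr_context reference and wires a release work item before calling dma_fence_init(), as the matched lines show.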
335 /* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS per UFO-related command. */ in ufo_cmds_size()
356 pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len); in job_cmds_size()
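
The comment matched in ufo_cmds_size() pins the sizing rule: a single UFO command can carry at most ROGUE_FWIF_CCB_CMD_MAX_UFOS entries, so the CCCB space a job needs is its own command (pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len), matched in job_cmds_size()) plus UFO fence/update commands issued in chunks of that maximum. A hedged sketch of the chunked accounting, assuming the helper and struct names visible in the listing behave as their names suggest:

/* Sketch: CCCB space needed for @ufo_count UFOs, issued in max-sized chunks.
 * ROGUE_FWIF_CCB_CMD_MAX_UFOS, struct rogue_fwif_ufo and
 * pvr_cccb_get_size_of_cmd_with_hdr() are the names visible above; their
 * exact definitions are assumed here.
 */
static u32 my_ufo_cmds_size(u32 ufo_count)
{
        u32 full = ufo_count / ROGUE_FWIF_CCB_CMD_MAX_UFOS;
        u32 rem = ufo_count % ROGUE_FWIF_CCB_CMD_MAX_UFOS;
        u32 size;

        size = full * pvr_cccb_get_size_of_cmd_with_hdr(ROGUE_FWIF_CCB_CMD_MAX_UFOS *
                                                        sizeof(struct rogue_fwif_ufo));
        if (rem)
                size += pvr_cccb_get_size_of_cmd_with_hdr(rem *
                                                          sizeof(struct rogue_fwif_ufo));
        return size;
}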
360 * job_count_remaining_native_deps() - Count the number of non-signaled native dependencies.
363 * Returns: Number of non-signaled native deps remaining.
371 xa_for_each(&job->base.dependencies, index, fence) { in job_count_remaining_native_deps()
378 if (!dma_fence_is_signaled(&jfence->base)) in job_count_remaining_native_deps()
386 * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
406 if (!job->cccb_fence) in pvr_queue_get_job_cccb_fence()
409 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
413 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
414 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
415 job->cccb_fence = NULL; in pvr_queue_get_job_cccb_fence()
419 /* There should be no job attached to the CCCB fence context: in pvr_queue_get_job_cccb_fence()
422 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
423 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
425 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
428 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_get_job_cccb_fence()
429 if (!WARN_ON(cccb_fence->queue)) in pvr_queue_get_job_cccb_fence()
430 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
433 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
435 return dma_fence_get(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
439 * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job.
453 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_get_job_kccb_fence()
459 if (!job->kccb_fence) in pvr_queue_get_job_kccb_fence()
462 if (!WARN_ON(job->kccb_fence->ops)) { in pvr_queue_get_job_kccb_fence()
463 kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence); in pvr_queue_get_job_kccb_fence()
464 job->kccb_fence = NULL; in pvr_queue_get_job_kccb_fence()
473 struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? in pvr_queue_get_paired_frag_job_dep()
474 job->paired_job : NULL; in pvr_queue_get_paired_frag_job_dep()
481 xa_for_each(&frag_job->base.dependencies, index, f) { in pvr_queue_get_paired_frag_job_dep()
487 if (f == &job->base.s_fence->scheduled) in pvr_queue_get_paired_frag_job_dep()
493 return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity); in pvr_queue_get_paired_frag_job_dep()
497 * pvr_queue_prepare_job() - Return the next internal dependencies expressed as a dma_fence.
517 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_prepare_job()
524 if (job->paired_job->has_pm_ref) in pvr_queue_prepare_job()
532 pvr_queue_job_fence_init(job->done_fence, in pvr_queue_prepare_job()
533 job->ctx->queues.fragment); in pvr_queue_prepare_job()
535 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
564 * pvr_queue_update_active_state_locked() - Update the queue active state.
572 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state_locked()
574 lockdep_assert_held(&pvr_dev->queues.lock); in pvr_queue_update_active_state_locked()
578 * to re-insert it behind our back. in pvr_queue_update_active_state_locked()
580 if (list_empty(&queue->node)) in pvr_queue_update_active_state_locked()
583 if (!atomic_read(&queue->in_flight_job_count)) in pvr_queue_update_active_state_locked()
584 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
586 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
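
Stitching the pvr_queue_update_active_state_locked() matches together: with the device-wide queues.lock held, a queue that still has in-flight jobs is kept on the active list, otherwise it is parked on the idle list, and a queue whose node was already unlinked (a reset or timeout is in flight) is left alone so nobody re-inserts it behind the handler's back. Reconstructed from the matched lines, with no new behavior assumed:

/* Reconstructed from the matched lines: park or activate a queue based on
 * its in-flight job count, with pvr_dev->queues.lock held by the caller.
 */
static void my_update_active_state_locked(struct pvr_queue *queue)
{
        struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

        lockdep_assert_held(&pvr_dev->queues.lock);

        /* An unlinked queue (timeout/reset in progress) must stay unlinked. */
        if (list_empty(&queue->node))
                return;

        if (!atomic_read(&queue->in_flight_job_count))
                list_move_tail(&queue->node, &pvr_dev->queues.idle);
        else
                list_move_tail(&queue->node, &pvr_dev->queues.active);
}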
590 * pvr_queue_update_active_state() - Update the queue active state.
604 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state()
606 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
608 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
613 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb()
615 struct pvr_cccb *cccb = &queue->cccb; in pvr_queue_submit_job_to_cccb()
625 atomic_inc(&queue->in_flight_job_count); in pvr_queue_submit_job_to_cccb()
628 xa_for_each(&job->base.dependencies, index, fence) { in pvr_queue_submit_job_to_cccb()
634 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job && in pvr_queue_submit_job_to_cccb()
635 &job->paired_job->base.s_fence->scheduled == fence) in pvr_queue_submit_job_to_cccb()
638 if (dma_fence_is_signaled(&jfence->base)) in pvr_queue_submit_job_to_cccb()
641 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
643 ufos[ufo_count++].value = jfence->base.seqno; in pvr_queue_submit_job_to_cccb()
653 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_submit_job_to_cccb()
654 jfence = to_pvr_queue_job_fence(job->paired_job->done_fence); in pvr_queue_submit_job_to_cccb()
656 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
658 ufos[ufo_count++].value = job->paired_job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
667 if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) { in pvr_queue_submit_job_to_cccb()
668 struct rogue_fwif_cmd_geom *cmd = job->cmd; in pvr_queue_submit_job_to_cccb()
673 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
674 &cmd->partial_render_geom_frag_fence.addr); in pvr_queue_submit_job_to_cccb()
675 cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1; in pvr_queue_submit_job_to_cccb()
679 pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd, in pvr_queue_submit_job_to_cccb()
680 job->id, job->id); in pvr_queue_submit_job_to_cccb()
683 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr); in pvr_queue_submit_job_to_cccb()
684 ufos[0].value = job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
690 * pvr_queue_run_job() - Submit a job to the FW.
693 * This function is called when all non-native dependencies have been met and
699 struct pvr_device *pvr_dev = job->pvr_dev; in pvr_queue_run_job()
706 if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT && in pvr_queue_run_job()
707 job->done_fence->ops) { in pvr_queue_run_job()
708 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
717 if (WARN_ON(job->paired_job && in pvr_queue_run_job()
718 (job->type != DRM_PVR_JOB_TYPE_GEOMETRY || in pvr_queue_run_job()
719 job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT || in pvr_queue_run_job()
720 job->hwrt != job->paired_job->hwrt || in pvr_queue_run_job()
721 job->ctx != job->paired_job->ctx))) in pvr_queue_run_job()
722 return ERR_PTR(-EINVAL); in pvr_queue_run_job()
728 if (job->paired_job) { in pvr_queue_run_job()
729 err = pvr_job_get_pm_ref(job->paired_job); in pvr_queue_run_job()
737 if (job->paired_job) { in pvr_queue_run_job()
739 struct pvr_job *frag_job = job->paired_job; in pvr_queue_run_job()
740 struct pvr_queue *geom_queue = job->ctx->queues.geometry; in pvr_queue_run_job()
741 struct pvr_queue *frag_queue = job->ctx->queues.fragment; in pvr_queue_run_job()
746 &geom_queue->cccb, &frag_queue->cccb, in pvr_queue_run_job()
747 pvr_context_get_fw_addr(geom_job->ctx) + in pvr_queue_run_job()
748 geom_queue->ctx_offset, in pvr_queue_run_job()
749 pvr_context_get_fw_addr(frag_job->ctx) + in pvr_queue_run_job()
750 frag_queue->ctx_offset, in pvr_queue_run_job()
751 job->hwrt, in pvr_queue_run_job()
752 frag_job->fw_ccb_cmd_type == in pvr_queue_run_job()
755 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job()
758 pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, in pvr_queue_run_job()
759 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
760 job->hwrt); in pvr_queue_run_job()
763 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
768 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); in pvr_queue_stop()
775 /* Make sure we CPU-signal the UFO object, so other queues don't get in pvr_queue_start()
778 *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno); in pvr_queue_start()
780 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
781 if (dma_fence_is_signaled(job->done_fence)) { in pvr_queue_start()
783 * In that case, re-assign the parent field to the done_fence. in pvr_queue_start()
785 WARN_ON(job->base.s_fence->parent); in pvr_queue_start()
786 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_start()
791 atomic_set(&queue->ctx->faulty, 1); in pvr_queue_start()
795 drm_sched_start(&queue->scheduler, 0); in pvr_queue_start()
799 * pvr_queue_timedout_job() - Handle a job timeout event.
803 * the scheduler, and re-assign parent fences in the middle.
811 struct drm_gpu_scheduler *sched = s_job->sched; in pvr_queue_timedout_job()
813 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_timedout_job()
817 dev_err(sched->dev, "Job timeout\n"); in pvr_queue_timedout_job()
821 * until the scheduler is really stopped doesn't end up re-inserting the in pvr_queue_timedout_job()
829 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
830 list_del_init(&queue->node); in pvr_queue_timedout_job()
831 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
835 /* Re-assign job parent fences. */ in pvr_queue_timedout_job()
836 list_for_each_entry(job, &sched->pending_list, base.list) { in pvr_queue_timedout_job()
837 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_timedout_job()
840 WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count); in pvr_queue_timedout_job()
842 /* Re-insert the queue in the proper list, and kick a queue processing in pvr_queue_timedout_job()
845 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
847 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_timedout_job()
849 atomic_set(&queue->in_flight_job_count, job_count); in pvr_queue_timedout_job()
850 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_timedout_job()
853 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
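
Put together, the pvr_queue_timedout_job() matches describe the recovery dance: unlink the queue so concurrent active-state updates cannot re-insert it, stop the scheduler, make each pending job's done_fence stand in as the scheduler parent fence, then relink the queue on the idle or active list and restart. A simplified reconstruction from those lines (the drm_sched_stop()/drm_sched_start() calls mirror the pvr_queue_stop()/pvr_queue_start() matches earlier in the listing; details not visible here are assumed):

/* Simplified reconstruction of the timeout handler from the matched lines. */
static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *s_job)
{
        struct drm_gpu_scheduler *sched = s_job->sched;
        struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler);
        struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
        struct pvr_job *job;
        u32 job_count = 0;

        dev_err(sched->dev, "Job timeout\n");

        /* Unlink the queue so nothing re-inserts it while we stop the scheduler. */
        mutex_lock(&pvr_dev->queues.lock);
        list_del_init(&queue->node);
        mutex_unlock(&pvr_dev->queues.lock);

        drm_sched_stop(sched, s_job);

        /* Make each pending job's done_fence stand in as the parent fence. */
        list_for_each_entry(job, &sched->pending_list, base.list) {
                job->base.s_fence->parent = dma_fence_get(job->done_fence);
                job_count++;
        }
        WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count);

        /* Re-insert the queue and kick queue processing if jobs are pending. */
        mutex_lock(&pvr_dev->queues.lock);
        if (!job_count) {
                list_move_tail(&queue->node, &pvr_dev->queues.idle);
        } else {
                atomic_set(&queue->in_flight_job_count, job_count);
                list_move_tail(&queue->node, &pvr_dev->queues.active);
                pvr_queue_process(queue);
        }
        mutex_unlock(&pvr_dev->queues.lock);

        drm_sched_start(sched, 0);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}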
861 * pvr_queue_free_job() - Release the reference the scheduler had on a job object.
870 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) in pvr_queue_free_job()
871 pvr_job_put(job->paired_job); in pvr_queue_free_job()
873 job->paired_job = NULL; in pvr_queue_free_job()
885 * pvr_queue_fence_is_ufo_backed() - Check if a dma_fence is backed by a UFO object
888 * A UFO-backed fence is a fence that can be signaled or waited upon FW-side.
889 * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue
891 * world, so we also need to check if the fence we're being passed is a
899 sched_fence->sched->ops == &pvr_queue_sched_ops) in pvr_queue_fence_is_ufo_backed()
902 if (f && f->ops == &pvr_queue_job_fence_ops) in pvr_queue_fence_is_ufo_backed()
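
The matched lines spell out the two cases a UFO-backed fence can take: a drm_sched fence whose scheduler is one of the driver's queues (its hardware fence will resolve to a UFO once the job runs), or a fence whose ops are pvr_queue_job_fence_ops (directly backed by the queue's timeline UFO). A sketch of that check, using the ops symbols named in the listing and the stock to_drm_sched_fence() helper:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/* Sketch: is @f something the firmware can wait on (UFO-backed)? */
static bool my_fence_is_ufo_backed(struct dma_fence *f)
{
        struct drm_sched_fence *sched_fence = f ? to_drm_sched_fence(f) : NULL;

        /* Scheduler fences from one of our queues resolve to a UFO. */
        if (sched_fence && sched_fence->sched->ops == &pvr_queue_sched_ops)
                return true;

        /* Job-done fences are backed by the queue's timeline UFO directly. */
        if (f && f->ops == &pvr_queue_job_fence_ops)
                return true;

        return false;
}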
909 * pvr_queue_signal_done_fences() - Signal done fences.
913 * the UFO object attached to the queue.
921 spin_lock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
922 cur_seqno = *queue->timeline_ufo.value; in pvr_queue_signal_done_fences()
923 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
924 if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0) in pvr_queue_signal_done_fences()
927 if (!dma_fence_is_signaled(job->done_fence)) { in pvr_queue_signal_done_fences()
928 dma_fence_signal(job->done_fence); in pvr_queue_signal_done_fences()
930 atomic_dec(&queue->in_flight_job_count); in pvr_queue_signal_done_fences()
933 spin_unlock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
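
The scan in pvr_queue_signal_done_fences() compares the 32-bit timeline UFO value against the low 32 bits of each done-fence seqno with a signed subtraction, so the test stays correct when the counter wraps; fences at or below the current UFO value are signaled and subtracted from in_flight_job_count. A small, self-contained illustration of the wrap-safe comparison (hypothetical values, plain userspace C):

#include <assert.h>
#include <stdint.h>

/* Wrap-safe "has the timeline UFO reached this seqno?" check. */
static int seqno_reached(uint32_t cur_seqno, uint32_t fence_seqno)
{
        return (int32_t)(cur_seqno - fence_seqno) >= 0;
}

int main(void)
{
        assert(seqno_reached(10, 10));           /* equal: signal it */
        assert(seqno_reached(10, 9));            /* older fence: signal it */
        assert(!seqno_reached(10, 11));          /* newer fence: not yet */
        assert(seqno_reached(5, 0xfffffff0u));   /* still correct across wrap */
        return 0;
}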
937 * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB space
952 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
953 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
957 /* If we have a job attached to the CCCB fence context, its CCCB fence in pvr_queue_check_job_waiting_for_cccb_space()
960 if (WARN_ON(!job->cccb_fence)) { in pvr_queue_check_job_waiting_for_cccb_space()
966 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_check_job_waiting_for_cccb_space()
967 if (WARN_ON(!cccb_fence->queue)) { in pvr_queue_check_job_waiting_for_cccb_space()
977 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
982 dma_fence_signal(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
983 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
984 job->cccb_fence = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
985 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
988 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
994 * pvr_queue_process() - Process events that happened on a queue.
1001 lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_process()
1010 switch (queue->type) { in get_dm_type()
1024 * init_fw_context() - Initializes the queue part of a FW context.
1028 * FW contexts are containing various states, one of them being a per-queue state
1034 struct pvr_context *ctx = queue->ctx; in init_fw_context()
1035 struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx); in init_fw_context()
1037 struct pvr_cccb *cccb = &queue->cccb; in init_fw_context()
1039 cctx_fw = fw_ctx_map + queue->ctx_offset; in init_fw_context()
1040 cctx_fw->ccbctl_fw_addr = cccb->ctrl_fw_addr; in init_fw_context()
1041 cctx_fw->ccb_fw_addr = cccb->cccb_fw_addr; in init_fw_context()
1043 cctx_fw->dm = get_dm_type(queue); in init_fw_context()
1044 cctx_fw->priority = ctx->priority; in init_fw_context()
1045 cctx_fw->priority_seq_num = 0; in init_fw_context()
1046 cctx_fw->max_deadline_ms = MAX_DEADLINE_MS; in init_fw_context()
1047 cctx_fw->pid = task_tgid_nr(current); in init_fw_context()
1048 cctx_fw->server_common_context_id = ctx->ctx_id; in init_fw_context()
1050 pvr_fw_object_get_fw_addr(fw_mem_ctx_obj, &cctx_fw->fw_mem_context_fw_addr); in init_fw_context()
1052 pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr); in init_fw_context()
1056 * pvr_queue_cleanup_fw_context() - Wait for the FW context to be idle and clean it up.
1065 if (!queue->ctx->fw_obj) in pvr_queue_cleanup_fw_context()
1068 return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, in pvr_queue_cleanup_fw_context()
1070 queue->ctx->fw_obj, queue->ctx_offset); in pvr_queue_cleanup_fw_context()
1074 * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
1088 u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0; in pvr_queue_job_init()
1092 if (atomic_read(&job->ctx->faulty)) in pvr_queue_job_init()
1093 return -EIO; in pvr_queue_job_init()
1095 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1097 return -EINVAL; in pvr_queue_job_init()
1099 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1100 return -E2BIG; in pvr_queue_job_init()
1102 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
1106 job->cccb_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1107 job->kccb_fence = pvr_kccb_fence_alloc(); in pvr_queue_job_init()
1108 job->done_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1109 if (!job->cccb_fence || !job->kccb_fence || !job->done_fence) in pvr_queue_job_init()
1110 return -ENOMEM; in pvr_queue_job_init()
1116 * pvr_queue_job_arm() - Arm a job object.
1123 * we do multi-job submission, and something failed when creating/initializing
1132 drm_sched_job_arm(&job->base); in pvr_queue_job_arm()
1134 return &job->base.s_fence->finished; in pvr_queue_job_arm()
1138 * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
1145 pvr_queue_fence_put(job->done_fence); in pvr_queue_job_cleanup()
1146 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_job_cleanup()
1147 pvr_kccb_fence_put(job->kccb_fence); in pvr_queue_job_cleanup()
1149 if (job->base.s_fence) in pvr_queue_job_cleanup()
1150 drm_sched_job_cleanup(&job->base); in pvr_queue_job_cleanup()
1154 * pvr_queue_job_push() - Push a job to its queue.
1159 * the drm_sched_entity attached to the queue. We grab a reference on
1165 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push()
1168 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_job_push()
1169 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
1172 drm_sched_entity_push_job(&job->base); in pvr_queue_job_push()
1179 if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) { in reg_state_init()
1182 geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init = in reg_state_init()
1183 queue->callstack_addr; in reg_state_init()
1188 * pvr_queue_create() - Create a queue object.
1189 * @ctx: The context this queue will be attached to.
1226 struct pvr_device *pvr_dev = ctx->pvr_dev; in pvr_queue_create()
1229 .submit_wq = pvr_dev->sched_wq, in pvr_queue_create()
1234 .timeout_wq = pvr_dev->sched_wq, in pvr_queue_create()
1235 .name = "pvr-queue", in pvr_queue_create()
1236 .dev = pvr_dev->base.dev, in pvr_queue_create()
1244 return ERR_PTR(-EINVAL); in pvr_queue_create()
1246 switch (ctx->type) { in pvr_queue_create()
1250 return ERR_PTR(-EINVAL); in pvr_queue_create()
1254 return ERR_PTR(-EINVAL); in pvr_queue_create()
1258 return ERR_PTR(-EINVAL); in pvr_queue_create()
1261 return ERR_PTR(-EINVAL); in pvr_queue_create()
1270 return ERR_PTR(-ENOMEM); in pvr_queue_create()
1272 queue->type = type; in pvr_queue_create()
1273 queue->ctx_offset = get_ctx_offset(type); in pvr_queue_create()
1274 queue->ctx = ctx; in pvr_queue_create()
1275 queue->callstack_addr = args->callstack_addr; in pvr_queue_create()
1276 sched = &queue->scheduler; in pvr_queue_create()
1277 INIT_LIST_HEAD(&queue->node); in pvr_queue_create()
1278 mutex_init(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1279 pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base); in pvr_queue_create()
1280 pvr_queue_fence_ctx_init(&queue->job_fence_ctx); in pvr_queue_create()
1282 err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); in pvr_queue_create()
1288 reg_state_init, queue, &queue->reg_state_obj); in pvr_queue_create()
1295 args->callstack_addr) { in pvr_queue_create()
1296 err = -EINVAL; in pvr_queue_create()
1300 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create()
1302 NULL, NULL, &queue->timeline_ufo.fw_obj); in pvr_queue_create()
1308 queue->timeline_ufo.value = cpu_map; in pvr_queue_create()
1310 err = drm_sched_init(&queue->scheduler, &sched_args); in pvr_queue_create()
1314 err = drm_sched_entity_init(&queue->entity, in pvr_queue_create()
1316 &sched, 1, &ctx->faulty); in pvr_queue_create()
1320 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_create()
1321 list_add_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_create()
1322 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_create()
1327 drm_sched_fini(&queue->scheduler); in pvr_queue_create()
1330 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_create()
1333 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_create()
1336 pvr_cccb_fini(&queue->cccb); in pvr_queue_create()
1339 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1349 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_device_pre_reset()
1350 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_pre_reset()
1352 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_pre_reset()
1354 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_device_pre_reset()
1361 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_device_post_reset()
1362 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_post_reset()
1364 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_post_reset()
1366 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_device_post_reset()
1370 * pvr_queue_kill() - Kill a queue.
1380 drm_sched_entity_destroy(&queue->entity); in pvr_queue_kill()
1381 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_kill()
1382 queue->last_queued_job_scheduled_fence = NULL; in pvr_queue_kill()
1386 * pvr_queue_destroy() - Destroy a queue.
1389 * Cleanup the queue and free the resources attached to it. Should be
1397 mutex_lock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1398 list_del_init(&queue->node); in pvr_queue_destroy()
1399 mutex_unlock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1401 drm_sched_fini(&queue->scheduler); in pvr_queue_destroy()
1402 drm_sched_entity_fini(&queue->entity); in pvr_queue_destroy()
1404 if (WARN_ON(queue->last_queued_job_scheduled_fence)) in pvr_queue_destroy()
1405 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_destroy()
1409 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_destroy()
1410 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_destroy()
1411 pvr_cccb_fini(&queue->cccb); in pvr_queue_destroy()
1412 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_destroy()
1417 * pvr_queue_device_init() - Device-level initialization of queue related fields.
1430 INIT_LIST_HEAD(&pvr_dev->queues.active); in pvr_queue_device_init()
1431 INIT_LIST_HEAD(&pvr_dev->queues.idle); in pvr_queue_device_init()
1432 err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock); in pvr_queue_device_init()
1436 pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0); in pvr_queue_device_init()
1437 if (!pvr_dev->sched_wq) in pvr_queue_device_init()
1438 return -ENOMEM; in pvr_queue_device_init()
1444 * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
1447 * Cleanup/free all queue-related resources attached to a pvr_device object.
1451 destroy_workqueue(pvr_dev->sched_wq); in pvr_queue_device_fini()