Lines Matching full:job

230  * Call this function to allocate job CCCB and done fences. This only
294 * pvr_queue_job_fence_init() - Initializes a job done fence object.
299 * a job.
340 static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count) in job_cmds_size() argument
343 * and a command for the job itself. in job_cmds_size()
346 pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len); in job_cmds_size()
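The two fragments above give the tail of job_cmds_size(); its callers pass the number of outstanding native waits (see pvr_queue_get_job_cccb_fence() further down). A hedged sketch of a plausible implementation, assuming each native wait and the done-fence signal cost one header-prefixed UFO command:

```c
/* Sketch only: reconstructed around the fragments above, not the verbatim
 * driver code.
 */
static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
{
	/* One UFO command to signal the done fence, one UFO command per
	 * remaining native wait, and a command for the job itself.
	 */
	return pvr_cccb_get_size_of_cmd_with_hdr(sizeof(struct rogue_fwif_ufo)) *
	       (1 + ufo_wait_count) +
	       pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
}
```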
351 * @job: Job to operate on.
355 static unsigned long job_count_remaining_native_deps(struct pvr_job *job) in job_count_remaining_native_deps() argument
361 xa_for_each(&job->base.dependencies, index, fence) { in job_count_remaining_native_deps()
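job_count_remaining_native_deps() walks the drm_sched dependency xarray shown above. A sketch of how the counting might look, assuming to_pvr_queue_job_fence() (used later in this listing) returns NULL for fences that don't belong to a pvr queue:

```c
/* Sketch: native dependencies are the unsignaled pvr queue fences; the FW
 * waits on them through UFO commands rather than on the CPU.
 */
static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
{
	unsigned long remaining_dep_count = 0;
	struct dma_fence *fence;
	unsigned long index;

	xa_for_each(&job->base.dependencies, index, fence) {
		struct pvr_queue_fence *jfence = to_pvr_queue_job_fence(fence);

		if (jfence && !dma_fence_is_signaled(&jfence->base))
			remaining_dep_count++;
	}

	return remaining_dep_count;
}
```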
376 * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
377 * @queue: The queue this job will be submitted to.
378 * @job: The job to get the CCCB fence on.
380 * The CCCB fence is a synchronization primitive allowing us to delay job
381 * submission until there's enough space in the CCCB to submit the job.
384 * * NULL if there's enough space in the CCCB to submit this job, or
388 pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_cccb_fence() argument
394 * enough space in the cccb for our job. in pvr_queue_get_job_cccb_fence()
396 if (!job->cccb_fence) in pvr_queue_get_job_cccb_fence()
401 /* Count remaining native dependencies and check if the job fits in the CCCB. */ in pvr_queue_get_job_cccb_fence()
402 native_deps_remaining = job_count_remaining_native_deps(job); in pvr_queue_get_job_cccb_fence()
403 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
404 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
405 job->cccb_fence = NULL; in pvr_queue_get_job_cccb_fence()
409 /* There should be no job attached to the CCCB fence context: in pvr_queue_get_job_cccb_fence()
412 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
413 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
415 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
418 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_get_job_cccb_fence()
420 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
425 return dma_fence_get(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
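Putting the fragments of pvr_queue_get_job_cccb_fence() back together gives roughly the following shape. The locking around queue->cccb_fence_ctx and the sanity check performed on the downcast fence are not visible in the listing and are only hinted at in comments:

```c
/* Sketch assembled from the fragments above; locking and the fence sanity
 * check are assumptions.
 */
static struct dma_fence *
pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
{
	u32 native_deps_remaining;

	/* If the fence is NULL, we already checked that there was enough space
	 * in the CCCB for our job.
	 */
	if (!job->cccb_fence)
		return NULL;

	/* assumed: a lock protecting queue->cccb_fence_ctx is taken here */

	/* Count remaining native dependencies and check if the job fits in the CCCB. */
	native_deps_remaining = job_count_remaining_native_deps(job);
	if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
		pvr_queue_fence_put(job->cccb_fence);
		job->cccb_fence = NULL;
		return NULL;
	}

	/* There should be no job attached to the CCCB fence context yet. */
	if (WARN_ON(queue->cccb_fence_ctx.job))
		pvr_job_put(queue->cccb_fence_ctx.job);

	queue->cccb_fence_ctx.job = pvr_job_get(job);

	/* The original downcasts job->cccb_fence to struct pvr_queue_fence and
	 * sanity-checks it before initializing it against this queue.
	 */
	pvr_queue_cccb_fence_init(job->cccb_fence, queue);

	return dma_fence_get(job->cccb_fence);
}
```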
429 * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job.
430 * @queue: The queue this job will be submitted to.
431 * @job: The job to get the KCCB fence on.
433 * The KCCB fence is a synchronization primitive allowing us to delay job
434 * submission until there's enough space in the KCCB to submit the job.
437 * * NULL if there's enough space in the KCCB to submit this job, or
441 pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_kccb_fence() argument
447 * enough space in the KCCB for our job. in pvr_queue_get_job_kccb_fence()
449 if (!job->kccb_fence) in pvr_queue_get_job_kccb_fence()
452 if (!WARN_ON(job->kccb_fence->ops)) { in pvr_queue_get_job_kccb_fence()
453 kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence); in pvr_queue_get_job_kccb_fence()
454 job->kccb_fence = NULL; in pvr_queue_get_job_kccb_fence()
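The KCCB counterpart is smaller; the fragments above show its core. A sketch, where the way pvr_dev is obtained is an assumption (the listing only shows it being passed to pvr_kccb_reserve_slot()):

```c
static struct dma_fence *
pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job)
{
	struct pvr_device *pvr_dev = queue->ctx->pvr_dev; /* assumed source of pvr_dev */
	struct dma_fence *kccb_fence = NULL;

	/* If the fence is NULL, we already checked that there was enough space
	 * in the KCCB for our job.
	 */
	if (!job->kccb_fence)
		return NULL;

	/* The fence must not have been initialized yet (ops == NULL). Per the
	 * kerneldoc above, a NULL result means a KCCB slot could be reserved
	 * immediately; otherwise the returned fence delays submission.
	 */
	if (!WARN_ON(job->kccb_fence->ops)) {
		kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence);
		job->kccb_fence = NULL;
	}

	return kccb_fence;
}
```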
461 pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_paired_frag_job_dep() argument
463 struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? in pvr_queue_get_paired_frag_job_dep()
464 job->paired_job : NULL; in pvr_queue_get_paired_frag_job_dep()
477 if (f == &job->base.s_fence->scheduled) in pvr_queue_get_paired_frag_job_dep()
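pvr_queue_get_paired_frag_job_dep() only matters for a geometry job with a paired fragment job: the combined kick must not happen before the fragment half's own dependencies are met. A simplified sketch; how the real function hands the remaining dependencies back to the scheduler isn't fully visible in the listing, so this version just returns the first outstanding one:

```c
static struct dma_fence *
pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job)
{
	struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
				   job->paired_job : NULL;
	struct dma_fence *f;
	unsigned long index;

	if (!frag_job)
		return NULL;

	xa_for_each(&frag_job->base.dependencies, index, f) {
		/* Skip fences that are already signaled. */
		if (dma_fence_is_signaled(f))
			continue;

		/* Skip the geometry job's own scheduled fence. */
		if (f == &job->base.s_fence->scheduled)
			continue;

		return dma_fence_get(f);
	}

	return NULL;
}
```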
488 * @sched_job: The job to query the next internal dependency on
489 * @s_entity: The entity this job is queued on.
498 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); in pvr_queue_prepare_job() local
504 * here because otherwise by the time of run_job() the job will end up in pvr_queue_prepare_job()
507 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_prepare_job()
509 * This will be called on a paired fragment job after being in pvr_queue_prepare_job()
512 * geometry job, which would issue a pm ref. in pvr_queue_prepare_job()
514 if (job->paired_job->has_pm_ref) in pvr_queue_prepare_job()
518 * In this case we need to use the job's own ctx to initialise in pvr_queue_prepare_job()
520 * paired geometry job. in pvr_queue_prepare_job()
522 pvr_queue_job_fence_init(job->done_fence, in pvr_queue_prepare_job()
523 job->ctx->queues.fragment); in pvr_queue_prepare_job()
525 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
531 internal_dep = pvr_queue_get_job_cccb_fence(queue, job); in pvr_queue_prepare_job()
537 internal_dep = pvr_queue_get_job_kccb_fence(queue, job); in pvr_queue_prepare_job()
543 * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job); in pvr_queue_prepare_job()
546 /* The paired job fence should come last, when everything else is ready. */ in pvr_queue_prepare_job()
548 internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job); in pvr_queue_prepare_job()
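pvr_queue_prepare_job() returns one internal dependency at a time, in the order the fragments above show: CCCB space first, then a KCCB slot, then the paired fragment job's outstanding dependencies. A condensed sketch; the paired-fragment done-fence special case and error handling are trimmed, and the staged fallthrough structure is simplified:

```c
static struct dma_fence *
pvr_queue_prepare_job(struct drm_sched_job *sched_job,
		      struct drm_sched_entity *s_entity)
{
	struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
	struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity);
	struct dma_fence *internal_dep;

	/* Initialize the done fence against the queue this job runs on. */
	pvr_queue_job_fence_init(job->done_fence, queue);

	/* Stage 1: wait until the CCCB has room for the command sequence. */
	internal_dep = pvr_queue_get_job_cccb_fence(queue, job);

	/* Stage 2: wait until a KCCB slot can be reserved. */
	if (!internal_dep)
		internal_dep = pvr_queue_get_job_kccb_fence(queue, job);

	/* Stage 3: the paired fragment job's dependencies come last, when
	 * everything else is ready.
	 */
	if (!internal_dep)
		internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job);

	return internal_dep;
}
```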
589  * This function should be called any time a job is submitted or its done
601 static void pvr_queue_submit_job_to_cccb(struct pvr_job *job) in pvr_queue_submit_job_to_cccb() argument
603 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb()
618 xa_for_each(&job->base.dependencies, index, fence) { in pvr_queue_submit_job_to_cccb()
624 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job && in pvr_queue_submit_job_to_cccb()
625 &job->paired_job->base.s_fence->scheduled == fence) in pvr_queue_submit_job_to_cccb()
643 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_submit_job_to_cccb()
644 jfence = to_pvr_queue_job_fence(job->paired_job->done_fence); in pvr_queue_submit_job_to_cccb()
648 ufos[ufo_count++].value = job->paired_job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
657 if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) { in pvr_queue_submit_job_to_cccb()
658 struct rogue_fwif_cmd_geom *cmd = job->cmd; in pvr_queue_submit_job_to_cccb()
665 cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1; in pvr_queue_submit_job_to_cccb()
668 /* Submit job to FW */ in pvr_queue_submit_job_to_cccb()
669 pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd, in pvr_queue_submit_job_to_cccb()
670 job->id, job->id); in pvr_queue_submit_job_to_cccb()
672 /* Signal the job fence. */ in pvr_queue_submit_job_to_cccb()
674 ufos[0].value = job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
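A condensed sketch of pvr_queue_submit_job_to_cccb(), assembled from the fragments above. Details the listing doesn't show (the UFO array bound, the FW addresses written into each UFO, and the command types carrying the UFO wait/update payloads) are assumptions and are only described in comments:

```c
static void pvr_queue_submit_job_to_cccb(struct pvr_job *job)
{
	struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
	struct rogue_fwif_ufo ufos[8]; /* bound picked for the sketch only */
	struct pvr_cccb *cccb = &queue->cccb;
	struct dma_fence *fence;
	unsigned long index;
	u32 ufo_count = 0;

	/* 1. Turn every native dependency into a UFO wait. The real code also
	 *    fills in the FW address of each fence's timeline UFO.
	 */
	xa_for_each(&job->base.dependencies, index, fence) {
		struct pvr_queue_fence *jfence = to_pvr_queue_job_fence(fence);

		if (!jfence)
			continue;

		/* A paired fragment job is kicked together with its geometry
		 * job, so the geometry scheduled fence isn't waited on here.
		 */
		if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job &&
		    &job->paired_job->base.s_fence->scheduled == fence)
			continue;

		ufos[ufo_count++].value = fence->seqno;
	}

	/* 2. A paired fragment job additionally waits on its geometry job's
	 *    done fence.
	 */
	if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) {
		struct pvr_queue_fence *jfence =
			to_pvr_queue_job_fence(job->paired_job->done_fence);

		if (jfence)
			ufos[ufo_count++].value = job->paired_job->done_fence->seqno;
	}

	/* (the UFO wait command carrying `ufos` is written to the CCCB here) */

	/* 3. A geometry job with a paired fragment primes the partial-render
	 *    check one step before its own done seqno.
	 */
	if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) {
		struct rogue_fwif_cmd_geom *cmd = job->cmd;

		cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1;
	}

	/* 4. Submit the job command to the FW. */
	pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len,
					   job->cmd, job->id, job->id);

	/* 5. Queue a UFO update that signals the job's done fence. */
	ufos[0].value = job->done_fence->seqno;
	/* (the UFO update command carrying ufos[0] is written to the CCCB here) */
}
```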
680 * pvr_queue_run_job() - Submit a job to the FW.
681 * @sched_job: The job to submit.
684 * when the commands resulting from this job are guaranteed to fit in the CCCB.
688 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); in pvr_queue_run_job() local
689 struct pvr_device *pvr_dev = job->pvr_dev; in pvr_queue_run_job()
692 /* The fragment job is issued along with the geometry job when we use combined in pvr_queue_run_job()
696 if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT && in pvr_queue_run_job()
697 job->done_fence->ops) { in pvr_queue_run_job()
698 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
702 * we bail out early if we see a fragment job that's paired with a geometry in pvr_queue_run_job()
703 * job. in pvr_queue_run_job()
707 if (WARN_ON(job->paired_job && in pvr_queue_run_job()
708 (job->type != DRM_PVR_JOB_TYPE_GEOMETRY || in pvr_queue_run_job()
709 job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT || in pvr_queue_run_job()
710 job->hwrt != job->paired_job->hwrt || in pvr_queue_run_job()
711 job->ctx != job->paired_job->ctx))) in pvr_queue_run_job()
714 err = pvr_job_get_pm_ref(job); in pvr_queue_run_job()
718 if (job->paired_job) { in pvr_queue_run_job()
719 err = pvr_job_get_pm_ref(job->paired_job); in pvr_queue_run_job()
724 /* Submit our job to the CCCB */ in pvr_queue_run_job()
725 pvr_queue_submit_job_to_cccb(job); in pvr_queue_run_job()
727 if (job->paired_job) { in pvr_queue_run_job()
728 struct pvr_job *geom_job = job; in pvr_queue_run_job()
729 struct pvr_job *frag_job = job->paired_job; in pvr_queue_run_job()
730 struct pvr_queue *geom_queue = job->ctx->queues.geometry; in pvr_queue_run_job()
731 struct pvr_queue *frag_queue = job->ctx->queues.fragment; in pvr_queue_run_job()
733 /* Submit the fragment job along with the geometry job and send a combined kick. */ in pvr_queue_run_job()
741 job->hwrt, in pvr_queue_run_job()
745 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job()
749 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
750 job->hwrt); in pvr_queue_run_job()
753 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
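pvr_queue_run_job() ties the pieces together. A condensed sketch based on the fragments above; the FW kick helpers and the combined geometry/fragment kick arguments (source lines 733..750) are elided because their names aren't fully visible in the listing, and the error codes are assumptions:

```c
static struct dma_fence *pvr_queue_run_job(struct drm_sched_job *sched_job)
{
	struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
	int err;

	/* A fragment job paired with a geometry job was already kicked as part
	 * of the geometry submission: its done fence is initialized, so just
	 * return it.
	 */
	if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT &&
	    job->done_fence->ops)
		return dma_fence_get(job->done_fence);

	/* Only geometry jobs may carry a paired fragment job, and the pair must
	 * share the same HWRT data and context.
	 */
	if (WARN_ON(job->paired_job &&
		    (job->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
		     job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
		     job->hwrt != job->paired_job->hwrt ||
		     job->ctx != job->paired_job->ctx)))
		return ERR_PTR(-EINVAL); /* error code assumed */

	err = pvr_job_get_pm_ref(job);
	if (WARN_ON(err))
		return ERR_PTR(err);

	if (job->paired_job) {
		err = pvr_job_get_pm_ref(job->paired_job);
		if (WARN_ON(err))
			return ERR_PTR(err);
	}

	/* Write the command sequence(s) into the client CCB(s)... */
	pvr_queue_submit_job_to_cccb(job);
	if (job->paired_job)
		pvr_queue_submit_job_to_cccb(job->paired_job);

	/* ...then kick the FW: a combined geometry+fragment kick when there is
	 * a paired job, a single-queue kick otherwise (helpers elided).
	 */

	return dma_fence_get(job->done_fence);
}
```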
763 struct pvr_job *job; in pvr_queue_start() local
770 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
771 if (dma_fence_is_signaled(job->done_fence)) { in pvr_queue_start()
775 WARN_ON(job->base.s_fence->parent); in pvr_queue_start()
776 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_start()
779 * new job can be submitted. in pvr_queue_start()
789 * pvr_queue_timedout_job() - Handle a job timeout event.
790 * @s_job: The job this timeout occurred on.
804 struct pvr_job *job; in pvr_queue_timedout_job() local
807 dev_err(sched->dev, "Job timeout\n"); in pvr_queue_timedout_job()
825 /* Re-assign job parent fences. */ in pvr_queue_timedout_job()
826 list_for_each_entry(job, &sched->pending_list, base.list) { in pvr_queue_timedout_job()
827 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_timedout_job()
851 * pvr_queue_free_job() - Release the reference the scheduler had on a job object.
852 * @sched_job: Job object to free.
856 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); in pvr_queue_free_job() local
859 job->paired_job = NULL; in pvr_queue_free_job()
860 pvr_job_put(job); in pvr_queue_free_job()
904 struct pvr_job *job, *tmp_job; in pvr_queue_signal_done_fences() local
909 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
910 if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0) in pvr_queue_signal_done_fences()
913 if (!dma_fence_is_signaled(job->done_fence)) { in pvr_queue_signal_done_fences()
914 dma_fence_signal(job->done_fence); in pvr_queue_signal_done_fences()
915 pvr_job_release_pm_ref(job); in pvr_queue_signal_done_fences()
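The comparison at source line 910 is a wraparound-safe seqno test: the done-fence seqno is truncated to 32 bits with lower_32_bits() and compared against the current UFO value via a signed difference, which keeps the test correct across counter wraparound. The same idea, isolated:

```c
/* Wrap-safe "has the timeline reached this seqno?" test, as used above. Valid
 * as long as the two values are less than 2^31 apart.
 */
static bool seqno_reached(u32 cur_seqno, u32 job_seqno)
{
	return (int)(cur_seqno - job_seqno) >= 0;
}
```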
923 * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB space
928  * If we have a job waiting for CCCB space, and this job now fits in the CCCB, we signal
936 struct pvr_job *job; in pvr_queue_check_job_waiting_for_cccb_space() local
939 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
940 if (!job) in pvr_queue_check_job_waiting_for_cccb_space()
943 /* If we have a job attached to the CCCB fence context, its CCCB fence in pvr_queue_check_job_waiting_for_cccb_space()
946 if (WARN_ON(!job->cccb_fence)) { in pvr_queue_check_job_waiting_for_cccb_space()
947 job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
952 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_check_job_waiting_for_cccb_space()
954 job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
959 * If the job fits, signal the CCCB fence; this should unblock in pvr_queue_check_job_waiting_for_cccb_space()
962 native_deps_remaining = job_count_remaining_native_deps(job); in pvr_queue_check_job_waiting_for_cccb_space()
963 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
964 job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
968 dma_fence_signal(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
969 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
970 job->cccb_fence = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
971 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
976 pvr_job_put(job); in pvr_queue_check_job_waiting_for_cccb_space()
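Reassembled, pvr_queue_check_job_waiting_for_cccb_space() looks roughly like this. The lock protecting queue->cccb_fence_ctx and the exact sanity check on the downcast fence aren't visible in the listing; both are marked as assumptions below:

```c
static void pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue)
{
	struct pvr_queue_fence *cccb_fence;
	u32 native_deps_remaining;
	struct pvr_job *job;

	/* assumed: a lock protecting queue->cccb_fence_ctx is taken here */
	job = queue->cccb_fence_ctx.job;
	if (!job)
		goto out;

	/* If we have a job attached to the CCCB fence context, its CCCB fence
	 * must be set.
	 */
	if (WARN_ON(!job->cccb_fence)) {
		job = NULL;
		goto out;
	}

	cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
	if (WARN_ON(!cccb_fence->queue)) { /* assumed check: fence must be initialized */
		job = NULL;
		goto out;
	}

	/* If the job fits, signal the CCCB fence; this should unblock
	 * submission of the waiting job.
	 */
	native_deps_remaining = job_count_remaining_native_deps(job);
	if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
		job = NULL;
		goto out;
	}

	dma_fence_signal(job->cccb_fence);
	pvr_queue_fence_put(job->cccb_fence);
	job->cccb_fence = NULL;
	queue->cccb_fence_ctx.job = NULL;

out:
	/* assumed: the lock is dropped here; pvr_job_put() tolerates NULL */
	pvr_job_put(job);
}
```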
983 * Signal job fences and check if jobs waiting for CCCB space can be unblocked.
1061 * @job: The job to initialize.
1063 * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
1065 * valid and the job can fit in the CCCB.
1071 int pvr_queue_job_init(struct pvr_job *job) in pvr_queue_job_init() argument
1073 /* Fragment jobs need at least one native fence wait on the geometry job fence. */ in pvr_queue_job_init()
1074 u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0; in pvr_queue_job_init()
1078 if (atomic_read(&job->ctx->faulty)) in pvr_queue_job_init()
1081 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1085 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1088 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
1092 job->cccb_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1093 job->kccb_fence = pvr_kccb_fence_alloc(); in pvr_queue_job_init()
1094 job->done_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1095 if (!job->cccb_fence || !job->kccb_fence || !job->done_fence) in pvr_queue_job_init()
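pvr_queue_job_init() is almost fully visible above. A sketch putting it in order; the specific error codes and the error-unwind path are assumptions:

```c
int pvr_queue_job_init(struct pvr_job *job)
{
	/* Fragment jobs need at least one native fence wait on the geometry job fence. */
	u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
	struct pvr_queue *queue;
	int err;

	/* Refuse new work on a context that has already faulted. */
	if (atomic_read(&job->ctx->faulty))
		return -EIO; /* error code assumed */

	queue = pvr_context_get_queue_for_job(job->ctx, job->type);
	if (!queue)
		return -EINVAL; /* error code assumed */

	/* Reject jobs whose command sequence can never fit in the CCCB. */
	if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
		return -E2BIG; /* error code assumed */

	err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
	if (err)
		return err;

	job->cccb_fence = pvr_queue_fence_alloc();
	job->kccb_fence = pvr_kccb_fence_alloc();
	job->done_fence = pvr_queue_fence_alloc();
	if (!job->cccb_fence || !job->kccb_fence || !job->done_fence)
		return -ENOMEM; /* the real code unwinds the partial setup here */

	return 0;
}
```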
1102 * pvr_queue_job_arm() - Arm a job object.
1103 * @job: The job to arm.
1107 * make sure the job is pushed using pvr_queue_job_push(), or guarantee that
1109 * we do multi-job submission, and something failed when creating/initializing
1110 * a job. In that case, we know the fence didn't leave the driver, and we
1116 struct dma_fence *pvr_queue_job_arm(struct pvr_job *job) in pvr_queue_job_arm() argument
1118 drm_sched_job_arm(&job->base); in pvr_queue_job_arm()
1120 return &job->base.s_fence->finished; in pvr_queue_job_arm()
1124 * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
1125 * @job: The job to cleanup.
1127 * Should be called in the job release path.
1129 void pvr_queue_job_cleanup(struct pvr_job *job) in pvr_queue_job_cleanup() argument
1131 pvr_queue_fence_put(job->done_fence); in pvr_queue_job_cleanup()
1132 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_job_cleanup()
1133 pvr_kccb_fence_put(job->kccb_fence); in pvr_queue_job_cleanup()
1135 if (job->base.s_fence) in pvr_queue_job_cleanup()
1136 drm_sched_job_cleanup(&job->base); in pvr_queue_job_cleanup()
1140 * pvr_queue_job_push() - Push a job to its queue.
1141 * @job: The job to push.
1144 * have been added to the job. This will effectively queue the job to
1146 * the job object, so the caller is free to drop its reference when it's
1147 * done accessing the job object.
1149 void pvr_queue_job_push(struct pvr_job *job) in pvr_queue_job_push() argument
1151 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push()
1153 /* Keep track of the last queued job scheduled fence for combined submit. */ in pvr_queue_job_push()
1155 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
1157 pvr_job_get(job); in pvr_queue_job_push()
1158 drm_sched_entity_push_job(&job->base); in pvr_queue_job_push()
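The last three helpers form the job submission lifecycle: pvr_queue_job_init() binds the job and pre-allocates its fences, pvr_queue_job_arm() produces the finished fence, and pvr_queue_job_push() hands the job to the scheduler while taking its own reference. A hypothetical caller, sketched only to show the ordering; the real submit path lives in the job-submission code and also handles multi-job submission and sync-object wiring, which is omitted here:

```c
/* Hypothetical usage sketch; not the driver's actual submit path. */
static int submit_one_job_example(struct pvr_job *job)
{
	struct dma_fence *finished_fence;
	int err;

	/* Bind the job to a queue and pre-allocate its CCCB/KCCB/done fences. */
	err = pvr_queue_job_init(job);
	if (err)
		return err;

	/* Arm the job: from here on, the finished fence exists and the job must
	 * either be pushed or cleaned up inside the driver.
	 */
	finished_fence = pvr_queue_job_arm(job);

	/* ... export finished_fence / attach it to sync objects here ... */
	(void)finished_fence;

	/* Queue the job to its drm_sched entity. The queue takes its own
	 * reference, so the caller may drop its reference afterwards.
	 */
	pvr_queue_job_push(job);
	pvr_job_put(job); /* assumes the caller held a reference from job creation */

	return 0;
}
```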