Lines Matching full:entity
44 * The jobs in an entity are always scheduled in the order in which they were pushed.
47 * hardware, i.e. the pending queue, the entity must not be referenced anymore
48 * through the job's entity pointer.
115 * @entity: the scheduler entity
117 * Return true if we can push at least one more job from @entity, false
121 struct drm_sched_entity *entity) in drm_sched_can_queue() argument
125 s_job = drm_sched_entity_queue_peek(entity); in drm_sched_can_queue()
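drm_sched_can_queue() is the credit gate used by both selection paths below: it peeks at the entity's next queued job and only reports the entity as queueable if that job's credits still fit into the scheduler's remaining budget. A minimal sketch of the idea, assuming <drm/gpu_scheduler.h> is included and assuming the job-flow-control fields of recent kernels (s_job->credits, sched->credit_limit, sched->credit_count); this is a sketch, not the in-tree body:

/*
 * Sketch only: an entity is queueable when the credits of its next pending
 * job still fit into the scheduler's remaining budget.  Field names are
 * assumed from recent kernels.
 */
static bool can_queue_sketch(struct drm_gpu_scheduler *sched,
			     struct drm_sched_entity *entity)
{
	struct drm_sched_job *s_job = drm_sched_entity_queue_peek(entity);

	if (!s_job)
		return false;

	/* Remaining budget = credit_limit minus credits already in flight. */
	return s_job->credits <=
		sched->credit_limit - atomic_read(&sched->credit_count);
}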
150 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity, in drm_sched_rq_remove_fifo_locked() argument
153 if (!RB_EMPTY_NODE(&entity->rb_tree_node)) { in drm_sched_rq_remove_fifo_locked()
154 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root); in drm_sched_rq_remove_fifo_locked()
155 RB_CLEAR_NODE(&entity->rb_tree_node); in drm_sched_rq_remove_fifo_locked()
159 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, in drm_sched_rq_update_fifo_locked() argument
164 * Both locks need to be grabbed, one to protect from entity->rq change in drm_sched_rq_update_fifo_locked()
165 * for entity from within concurrent drm_sched_entity_select_rq and the in drm_sched_rq_update_fifo_locked()
168 lockdep_assert_held(&entity->lock); in drm_sched_rq_update_fifo_locked()
171 drm_sched_rq_remove_fifo_locked(entity, rq); in drm_sched_rq_update_fifo_locked()
173 entity->oldest_job_waiting = ts; in drm_sched_rq_update_fifo_locked()
175 rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root, in drm_sched_rq_update_fifo_locked()
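drm_sched_rq_update_fifo_locked() keeps the run queue's rb-tree ordered by each entity's oldest_job_waiting timestamp, so the leftmost node is always the longest-waiting entity. That ordering comes from the "less" callback handed to rb_add_cached(); a sketch of what such a callback looks like (the helper name here is hypothetical):

/* Sketch of the comparison callback for rb_add_cached(): an earlier
 * oldest_job_waiting timestamp sorts further to the left.
 */
static bool entity_before_sketch(struct rb_node *a, const struct rb_node *b)
{
	struct drm_sched_entity *ent_a =
		rb_entry(a, struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b =
		rb_entry(b, struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

The truncated rb_add_cached() call above takes a callback of this shape as its third argument.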
198 * drm_sched_rq_add_entity - add an entity
201 * @entity: scheduler entity
203 * Adds a scheduler entity to the run queue.
206 struct drm_sched_entity *entity) in drm_sched_rq_add_entity() argument
208 lockdep_assert_held(&entity->lock); in drm_sched_rq_add_entity()
211 if (!list_empty(&entity->list)) in drm_sched_rq_add_entity()
215 list_add_tail(&entity->list, &rq->entities); in drm_sched_rq_add_entity()
219 * drm_sched_rq_remove_entity - remove an entity
222 * @entity: scheduler entity
224 * Removes a scheduler entity from the run queue.
227 struct drm_sched_entity *entity) in drm_sched_rq_remove_entity() argument
229 lockdep_assert_held(&entity->lock); in drm_sched_rq_remove_entity()
231 if (list_empty(&entity->list)) in drm_sched_rq_remove_entity()
237 list_del_init(&entity->list); in drm_sched_rq_remove_entity()
239 if (rq->current_entity == entity) in drm_sched_rq_remove_entity()
243 drm_sched_rq_remove_fifo_locked(entity, rq); in drm_sched_rq_remove_entity()
249 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
254 * Try to find the next ready entity.
256 * Return an entity if one is found; return an error-pointer (!NULL) if an
257 * entity was ready, but the scheduler had insufficient credits to accommodate
258 * its job; return NULL, if no ready entity was found.
264 struct drm_sched_entity *entity; in drm_sched_rq_select_entity_rr() local
268 entity = rq->current_entity; in drm_sched_rq_select_entity_rr()
269 if (entity) { in drm_sched_rq_select_entity_rr()
270 list_for_each_entry_continue(entity, &rq->entities, list) { in drm_sched_rq_select_entity_rr()
271 if (drm_sched_entity_is_ready(entity)) { in drm_sched_rq_select_entity_rr()
273 * entity in terms of fairness. in drm_sched_rq_select_entity_rr()
275 if (!drm_sched_can_queue(sched, entity)) { in drm_sched_rq_select_entity_rr()
280 rq->current_entity = entity; in drm_sched_rq_select_entity_rr()
281 reinit_completion(&entity->entity_idle); in drm_sched_rq_select_entity_rr()
283 return entity; in drm_sched_rq_select_entity_rr()
288 list_for_each_entry(entity, &rq->entities, list) { in drm_sched_rq_select_entity_rr()
289 if (drm_sched_entity_is_ready(entity)) { in drm_sched_rq_select_entity_rr()
290 /* If we can't queue yet, preserve the current entity in in drm_sched_rq_select_entity_rr()
293 if (!drm_sched_can_queue(sched, entity)) { in drm_sched_rq_select_entity_rr()
298 rq->current_entity = entity; in drm_sched_rq_select_entity_rr()
299 reinit_completion(&entity->entity_idle); in drm_sched_rq_select_entity_rr()
301 return entity; in drm_sched_rq_select_entity_rr()
304 if (entity == rq->current_entity) in drm_sched_rq_select_entity_rr()
314 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
319 * Find oldest waiting ready entity.
321 * Return an entity if one is found; return an error-pointer (!NULL) if an
322 * entity was ready, but the scheduler had insufficient credits to accommodate
323 * its job; return NULL, if no ready entity was found.
333 struct drm_sched_entity *entity; in drm_sched_rq_select_entity_fifo() local
335 entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node); in drm_sched_rq_select_entity_fifo()
336 if (drm_sched_entity_is_ready(entity)) { in drm_sched_rq_select_entity_fifo()
337 /* If we can't queue yet, preserve the current entity in in drm_sched_rq_select_entity_fifo()
340 if (!drm_sched_can_queue(sched, entity)) { in drm_sched_rq_select_entity_fifo()
345 reinit_completion(&entity->entity_idle); in drm_sched_rq_select_entity_fifo()
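Because the tree is keyed as described above, "find oldest waiting ready entity" reduces to an in-order walk from the cached leftmost node. A simplified sketch of the loop shape, with locking and the error-pointer return elided:

/* Sketch of the FIFO walk: rb_first_cached() yields the longest-waiting
 * entity, rb_next() moves towards younger ones; the first ready entity
 * that fits the credit budget wins.
 */
static struct drm_sched_entity *
fifo_pick_sketch(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity =
			rb_entry(rb, struct drm_sched_entity, rb_tree_node);

		if (!drm_sched_entity_is_ready(entity))
			continue;

		/*
		 * Ready but over budget: stop instead of skipping ahead, so
		 * the oldest entity keeps its turn (the in-tree code signals
		 * this case with an error pointer rather than NULL).
		 */
		if (!drm_sched_can_queue(sched, entity))
			return NULL;

		reinit_completion(&entity->entity_idle);
		return entity;
	}

	return NULL;
}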
763 * @entity: scheduler entity to use
779 * has died, which can mean that there's no valid runqueue for @entity.
786 struct drm_sched_entity *entity, in drm_sched_job_init() argument
789 if (!entity->rq) { in drm_sched_job_init()
794 dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__); in drm_sched_job_init()
811 job->entity = entity; in drm_sched_job_init()
813 job->s_fence = drm_sched_fence_alloc(entity, owner); in drm_sched_job_init()
841 struct drm_sched_entity *entity = job->entity; in drm_sched_job_arm() local
843 BUG_ON(!entity); in drm_sched_job_arm()
844 drm_sched_entity_select_rq(entity); in drm_sched_job_arm()
845 sched = entity->rq->sched; in drm_sched_job_arm()
848 job->s_priority = entity->priority; in drm_sched_job_arm()
851 drm_sched_fence_init(job->s_fence, job->entity); in drm_sched_job_arm()
1018 * before it was submitted to an entity with drm_sched_entity_push_job().
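Reading drm_sched_job_init(), drm_sched_job_arm() and the cleanup rule above together gives the usual driver-side submit flow: initialize the job against an entity, arm it only once submission is certain, push it, and clean up on any error in between. A hedged sketch, assuming the four-argument drm_sched_job_init() with a credit count; the job wrapper and dependency helper are hypothetical:

struct my_job {
	struct drm_sched_job base;
	/* driver-specific payload would live here */
};

static int my_driver_add_deps(struct my_job *mjob);	/* hypothetical helper */

static int my_driver_submit(struct my_job *mjob,
			    struct drm_sched_entity *entity, void *owner)
{
	int ret;

	/* Bind the job to an entity; one credit is assumed for simplicity. */
	ret = drm_sched_job_init(&mjob->base, entity, 1, owner);
	if (ret)
		return ret;

	/* Any failure after a successful init but before the push must be
	 * followed by drm_sched_job_cleanup(), per the rule quoted above.
	 */
	ret = my_driver_add_deps(mjob);
	if (ret) {
		drm_sched_job_cleanup(&mjob->base);
		return ret;
	}

	/* Point of no return: arm the job, then hand it to the entity. */
	drm_sched_job_arm(&mjob->base);
	drm_sched_entity_push_job(&mjob->base);

	return 0;
}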
1061 * drm_sched_select_entity - Select next entity to process
1065 * Return an entity to process or NULL if none are found.
1067 * Note that we break out of the for-loop when "entity" is non-null, which can
1074 struct drm_sched_entity *entity; in drm_sched_select_entity() local
1080 entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? in drm_sched_select_entity()
1083 if (entity) in drm_sched_select_entity()
1087 return IS_ERR(entity) ? NULL : entity; in drm_sched_select_entity()
1199 struct drm_sched_entity *entity; in drm_sched_run_job_work() local
1205 /* Find entity with a ready job */ in drm_sched_run_job_work()
1206 entity = drm_sched_select_entity(sched); in drm_sched_run_job_work()
1207 if (!entity) in drm_sched_run_job_work()
1210 sched_job = drm_sched_entity_pop_job(entity); in drm_sched_run_job_work()
1212 complete_all(&entity->entity_idle); in drm_sched_run_job_work()
1222 trace_drm_run_job(sched_job, entity); in drm_sched_run_job_work()
1224 complete_all(&entity->entity_idle); in drm_sched_run_job_work()
1401 * limit of the scheduler, then the respective sched entity is marked guilty and
1408 struct drm_sched_entity *entity; in drm_sched_increase_karma() local
1422 list_for_each_entry_safe(entity, tmp, &rq->entities, list) { in drm_sched_increase_karma()
1424 entity->fence_context) { in drm_sched_increase_karma()
1425 if (entity->guilty) in drm_sched_increase_karma()
1426 atomic_set(entity->guilty, 1); in drm_sched_increase_karma()
1431 if (&entity->list != &rq->entities) in drm_sched_increase_karma()
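The guilty flag set here is driver-owned state: the atomic_t was handed to drm_sched_entity_init() by the driver, which can then refuse further submissions from a context whose jobs exceeded the hang limit. A sketch of that pattern (the context structure and helpers are hypothetical):

struct my_ctx {
	struct drm_sched_entity entity;
	atomic_t guilty;	/* same atomic drm_sched_increase_karma() sets */
};

static int my_ctx_init(struct my_ctx *ctx,
		       struct drm_gpu_scheduler **sched_list,
		       unsigned int num_sched_list)
{
	atomic_set(&ctx->guilty, 0);
	return drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, num_sched_list, &ctx->guilty);
}

/* A driver would typically reject new submissions once this returns true. */
static bool my_ctx_is_guilty(struct my_ctx *ctx)
{
	return atomic_read(&ctx->guilty);
}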