/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS

/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is
 * created, we need to keep track of whether a deadline has already been
 * set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_LOW,

	DRM_SCHED_PRIORITY_COUNT
};
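
/*
 * A minimal usage sketch, assuming the common pattern in which a priority
 * value directly indexes a per-priority array such as
 * &drm_gpu_scheduler.sched_rq (the variable names are illustrative):
 *
 *	struct drm_sched_rq *rq = sched->sched_rq[entity->priority];
 */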

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head list;

	/**
	 * @lock:
	 *
	 * Lock protecting the run-queue (@rq) to which this entity belongs,
	 * @priority and the list of schedulers (@sched_list, @num_sched_list).
	 */
	spinlock_t lock;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @lock, but readers are generally lockless and seem to just race with
	 * not even a READ_ONCE.
	 */
	struct drm_sched_rq *rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has been
	 * set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler **sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by @lock.
	 */
	enum drm_sched_priority priority;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
	 * this doesn't need to be atomic.
	 */
	atomic_t fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job queue.
	 */
	struct dma_fence *dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag, an atomic shared with the
	 * driver. It is set once a job from this entity has crossed the
	 * scheduler's hang limit, so that further jobs from it can be skipped.
	 */
	atomic_t *guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by drm_sched_entity_pop_job(). Can be accessed locklessly from
	 * drm_sched_job_arm() if the queue is empty.
	 */
	struct dma_fence __rcu *last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct *last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when the entity is not in use, used to sequence entity
	 * cleanup in drm_sched_entity_fini().
	 */
	struct completion entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Submission timestamp of the earliest job waiting in the software
	 * queue.
	 */
	ktime_t oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time-based priority
	 * queue used for FIFO scheduling.
	 */
	struct rb_node rb_tree_node;
};
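
/*
 * A minimal sketch of the typical entity lifecycle, assuming one scheduler
 * instance per hardware ring; my_ctx and ring are illustrative driver-side
 * names, not part of this API:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);	// no guilty tracking
 *	if (ret)
 *		return ret;
 *
 *	// on context teardown: flush remaining jobs, then tear the entity down
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */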

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @sched: the scheduler to which this rq belongs.
 * @lock: protects @entities, @rb_tree_root and @current_entity.
 * @current_entity: the entity which is to be scheduled.
 * @entities: list of the entities to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities for FIFO
 *                scheduling
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	struct drm_gpu_scheduler *sched;

	spinlock_t lock;
	/* Following members are protected by the @lock: */
	struct drm_sched_entity *current_entity;
	struct list_head entities;
	struct rb_root_cached rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence *parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;
	/**
	 * @owner: job owner for debugging
	 */
	void *owner;

	/**
	 * @drm_client_id:
	 *
	 * The client_id of the drm_file which owns the job.
	 */
	uint64_t drm_client_id;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: used to add this struct to the scheduler's "pending" list of jobs.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @credits: the number of credits this job contributes to the scheduler
 * @work: Helper to reschedule job kill to different context.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t submit_ts;

	/**
	 * @sched:
	 *
	 * The scheduler this job is or will be scheduled on. Gets set by
	 * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
	 * has finished.
	 */
	struct drm_gpu_scheduler *sched;

	struct drm_sched_fence *s_fence;
	struct drm_sched_entity *entity;

	enum drm_sched_priority s_priority;
	u32 credits;
	/** @last_dependency: tracks @dependencies as they signal */
	unsigned int last_dependency;
	atomic_t karma;

	struct spsc_node queue_node;
	struct list_head list;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb finish_cb;
		struct work_struct work;
	};

	struct dma_fence_cb cb;

	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray dependencies;
};
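
/*
 * A minimal sketch of the submission flow described above, assuming a driver
 * job struct that embeds &struct drm_sched_job as "base"; my_job, bo and
 * my_client_id are illustrative driver-side names:
 *
 *	ret = drm_sched_job_init(&my_job->base, &my_ctx->entity, 1, my_ctx,
 *				 my_client_id);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&my_job->base,
 *						      &bo->base, true);
 *	if (ret)
 *		goto err_cleanup;	// drm_sched_job_cleanup(&my_job->base)
 *
 *	drm_sched_job_arm(&my_job->base);
 *	fence = dma_fence_get(&my_job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&my_job->base);
 */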

/**
 * enum drm_gpu_sched_stat - the scheduler's status
 *
 * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
 * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and was successfully reset.
 * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
 * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to the scheduler's assumption, the GPU
 * did not hang and is still running.
 */
enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE,
	DRM_GPU_SCHED_STAT_RESET,
	DRM_GPU_SCHED_STAT_ENODEV,
	DRM_GPU_SCHED_STAT_NO_HANG,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 * called by the scheduler
 *
 * These functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.
	 *
	 * @sched_job: the job to run
	 *
	 * The deprecated drm_sched_resubmit_jobs() (called by &struct
	 * drm_sched_backend_ops.timedout_job) can invoke this again with the
	 * same parameters. Using this is discouraged because it violates
	 * dma_fence rules, notably dma_fence_init() has to be called on
	 * already initialized fences for a second time. Moreover, this is
	 * dangerous because attempts to allocate memory might deadlock with
	 * memory management code waiting for the reset to complete.
	 *
	 * TODO: Document what drivers should do / use instead.
	 *
	 * This method is called in a workqueue context - either from the
	 * submit_wq the driver passed through drm_sched_init(), or, if the
	 * driver passed NULL, a separate, ordered workqueue the scheduler
	 * allocated.
	 *
	 * Note that the scheduler expects to 'inherit' its own reference to
	 * this fence from the callback. It does not invoke an extra
	 * dma_fence_get() on it. Consequently, this callback must take a
	 * reference for the scheduler, and additional ones for the driver's
	 * respective needs.
	 *
	 * Return:
	 * * On success: dma_fence the driver must signal once the hardware has
	 *   completed the job ("hardware fence").
	 * * On failure: NULL or an ERR_PTR.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * @sched_job: The job that has timed out
	 *
	 * Drivers typically issue a reset to recover from GPU hangs.
	 * This procedure looks very different depending on whether a firmware
	 * or a hardware scheduler is being used.
	 *
	 * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
	 * scheduler has one entity. Hence, the steps taken typically look as
	 * follows:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will pause the
	 *    scheduler workqueues and cancel the timeout work, guaranteeing
	 *    that nothing is queued while the ring is being removed.
	 * 2. Remove the ring. The firmware will make sure that the
	 *    corresponding parts of the hardware are reset, and that other
	 *    rings are not impacted.
	 * 3. Kill the entity and the associated scheduler.
	 *
	 *
	 * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
	 * one or more entities to one ring. This implies that all entities
	 * associated with the affected scheduler cannot be torn down, because
	 * this would effectively also affect innocent userspace processes which
	 * did not submit faulty jobs (for example).
	 *
	 * Consequently, the procedure to recover with a hardware scheduler
	 * should look like this:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Kill the entity the faulty job stems from.
	 * 3. Issue a GPU reset on all faulty rings (driver-specific).
	 * 4. Re-submit jobs on all impacted schedulers by pushing them to the
	 *    entities which are still alive.
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of different schedulers. One way to achieve this
	 * synchronization is to create an ordered workqueue (using
	 * alloc_ordered_workqueue()) at the driver level, and pass this queue
	 * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
	 * that timeout handlers are executed sequentially.
	 *
	 * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);

	/**
	 * @cancel_job: Used by the scheduler to guarantee remaining jobs'
	 * fences get signaled in drm_sched_fini().
	 *
	 * The scheduler calls this to cancel all jobs that have not been
	 * executed with &struct drm_sched_backend_ops.run_job by the time
	 * drm_sched_fini() gets invoked.
	 *
	 * Drivers need to signal the passed job's hardware fence with an
	 * appropriate error code (e.g., -ECANCELED) in this callback. They
	 * must not free the job.
	 *
	 * The scheduler will only call this callback once it has stopped
	 * calling all other callbacks forever, with the exception of &struct
	 * drm_sched_backend_ops.free_job.
	 */
	void (*cancel_job)(struct drm_sched_job *sched_job);
};
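
/*
 * A hedged sketch of a driver-side ops table; my_run_job, my_free_job and
 * my_hw_reset are hypothetical driver functions, shown only to illustrate
 * the contract (run_job returns the hardware fence, timedout_job reports
 * the outcome of recovery). A simple stop/reset/start pattern is assumed:
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = job->sched;
 *
 *		drm_sched_stop(sched, job);	// park the scheduler
 *		my_hw_reset(sched);		// driver-specific recovery
 *		drm_sched_start(sched, 0);	// unpark and requeue work
 *		return DRM_GPU_SCHED_STAT_RESET;
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */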

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @credit_limit: the credit limit of this scheduler
 * @credit_count: the current credit count of this scheduler
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
 *           as there's usually one run-queue per priority, but may be fewer.
 * @sched_rq: An allocated array of run-queues of size @num_rqs.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @job_id_count: used to assign a unique id to each job.
 * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_run_job: work which calls run_job op of each scheduler.
 * @work_free_job: work which calls free_job op of each scheduler.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help the load balancer pick an idle scheduler
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @pause_submit: pause queuing of @work_run_job on @submit_wq
 * @own_submit_wq: scheduler owns allocation of @submit_wq
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	u32 credit_limit;
	atomic_t credit_count;
	long timeout;
	const char *name;
	u32 num_rqs;
	struct drm_sched_rq **sched_rq;
	wait_queue_head_t job_scheduled;
	atomic64_t job_id_count;
	struct workqueue_struct *submit_wq;
	struct workqueue_struct *timeout_wq;
	struct work_struct work_run_job;
	struct work_struct work_free_job;
	struct delayed_work work_tdr;
	struct list_head pending_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t *score;
	atomic_t _score;
	bool ready;
	bool free_guilty;
	bool pause_submit;
	bool own_submit_wq;
	struct device *dev;
};

/**
 * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
 *
 * @ops: backend operations provided by the driver
 * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
 *             allocated and used.
 * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
 *           as there's usually one run-queue per priority, but may be fewer.
 * @credit_limit: the number of credits this scheduler can hold from all jobs
 * @hang_limit: number of times to allow a job to hang before dropping it.
 *              This mechanism is DEPRECATED. Set it to 0.
 * @timeout: timeout value in jiffies for submitted jobs.
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
 * @score: score atomic shared with other schedulers. May be NULL.
 * @name: name (typically the driver's name). Used for debugging
 * @dev: associated device. Used for debugging
 */
struct drm_sched_init_args {
	const struct drm_sched_backend_ops *ops;
	struct workqueue_struct *submit_wq;
	struct workqueue_struct *timeout_wq;
	u32 num_rqs;
	u32 credit_limit;
	unsigned int hang_limit;
	long timeout;
	atomic_t *score;
	const char *name;
	struct device *dev;
};
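
/*
 * A minimal sketch of scheduler creation with these args; the concrete
 * values and the my_sched_ops table are illustrative, not recommendations:
 *
 *	const struct drm_sched_init_args args = {
 *		.ops		= &my_sched_ops,
 *		.submit_wq	= NULL,		// scheduler allocates an ordered wq
 *		.timeout_wq	= NULL,		// system_wq handles timeouts
 *		.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit	= 64,
 *		.hang_limit	= 0,		// deprecated, keep at 0
 *		.timeout	= msecs_to_jiffies(500),
 *		.name		= "my-ring",
 *		.dev		= drm->dev,
 *	};
 *
 *	ret = drm_sched_init(&ring->sched, &args);
 */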

/* Scheduler operations */

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_init_args *args);

void drm_sched_fini(struct drm_gpu_scheduler *sched);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

/* Jobs */

int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       u32 credits, void *owner,
		       u64 drm_client_id);
void drm_sched_job_arm(struct drm_sched_job *job);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);
bool drm_sched_job_has_dependency(struct drm_sched_job *job,
				  struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_increase_karma(struct drm_sched_job *bad);

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
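
/*
 * A hedged sketch of how the karma helper above might be used in a driver's
 * timeout handler (the karma/guilty mechanism is deprecated; my_skip_job is
 * a hypothetical driver helper):
 *
 *	if (drm_sched_invalidate_job(bad_job, sched->hang_limit))
 *		my_skip_job(bad_job);	// karma crossed the hang limit
 */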

/* Entities */

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
int drm_sched_entity_error(struct drm_sched_entity *entity);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

#endif