/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "sched_internal.h"

#include "gpu_scheduler_trace.h"

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm sched in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty)
{
        if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = NULL;
        entity->guilty = guilty;
        entity->num_sched_list = num_sched_list;
        entity->priority = priority;
        /*
         * It's perfectly valid to initialize an entity without having a valid
         * scheduler attached. It's just not valid to use the scheduler before it
         * is initialized itself.
         */
        entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
        RB_CLEAR_NODE(&entity->rb_tree_node);

        if (num_sched_list && !sched_list[0]->sched_rq) {
                /* Every entry covered by num_sched_list should be non-NULL,
                 * so warn drivers not to do this and to fix their DRM
                 * calling order.
                 */
                pr_warn("%s: called with uninitialized scheduler\n", __func__);
        } else if (num_sched_list) {
                /* The "priority" of an entity cannot exceed the number of
                 * run-queues of a scheduler. Protect against num_rqs being 0
                 * by converting to signed. Choose the lowest priority
                 * available.
                 */
                if (entity->priority >= sched_list[0]->num_rqs) {
                        dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
                                entity->priority, sched_list[0]->num_rqs);
                        entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
                                                 (s32) DRM_SCHED_PRIORITY_KERNEL);
                }
                entity->rq = sched_list[0]->sched_rq[entity->priority];
        }

        init_completion(&entity->entity_idle);

        /* We start in an idle state. */
        complete_all(&entity->entity_idle);

        spin_lock_init(&entity->lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
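
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a driver typically embeds a struct drm_sched_entity in its
 * per-context object and initializes it against schedulers it has already
 * set up with drm_sched_init(). The names my_ctx and my_ctx_init() are made
 * up for the example.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	int my_ctx_init(struct my_ctx *ctx,
 *			struct drm_gpu_scheduler **scheds,
 *			unsigned int num_scheds)
 *	{
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     scheds, num_scheds, NULL);
 *	}
 */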

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *              existing entity->sched_list
 * @num_sched_list: number of drm sched in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list)
{
        WARN_ON(!num_sched_list || !sched_list);

        spin_lock(&entity->lock);
        entity->sched_list = sched_list;
        entity->num_sched_list = num_sched_list;
        spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
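
/*
 * Illustrative sketch (hypothetical driver code): switching an entity to a
 * new scheduler list. The mutex named ctx->queue_lock stands in for whatever
 * common lock the driver already takes around drm_sched_job_arm() and
 * drm_sched_entity_push_job(), which is what makes this call safe against
 * concurrent job submission.
 *
 *	mutex_lock(&ctx->queue_lock);
 *	drm_sched_entity_modify_sched(&ctx->entity, new_scheds, num_new_scheds);
 *	mutex_unlock(&ctx->queue_lock);
 */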

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
        rmb(); /* for list_empty to work without lock */

        if (list_empty(&entity->list) ||
            spsc_queue_count(&entity->job_queue) == 0 ||
            entity->stopped)
                return true;

        return false;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change any time new jobs are pushed to the HW.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        int r;

        rcu_read_lock();
        fence = rcu_dereference(entity->last_scheduled);
        r = fence ? fence->error : 0;
        rcu_read_unlock();

        return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
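
/*
 * Illustrative sketch (hypothetical driver code): a submission path can use
 * this to refuse new work on a context whose last scheduled job failed, for
 * example after a GPU reset. Names are made up for the example.
 *
 *	int r = drm_sched_entity_error(&ctx->entity);
 *	if (r)
 *		return r;
 */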

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

        drm_sched_fence_finished(job->s_fence, -ESRCH);
        WARN_ON(job->s_fence->parent);
        job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
                                          struct dma_fence_cb *cb)
{
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
        unsigned long index;

        dma_fence_put(f);

        /* Wait for all dependencies to avoid data corruptions */
        xa_for_each(&job->dependencies, index, f) {
                struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

                if (s_fence && f == &s_fence->scheduled) {
                        /* The dependencies array had a reference on the scheduled
                         * fence, and the finished fence refcount might have
                         * dropped to zero. Use dma_fence_get_rcu() so we get
                         * a NULL fence in that case.
                         */
                        f = dma_fence_get_rcu(&s_fence->finished);

                        /* Now that we have a reference on the finished fence,
                         * we can release the reference the dependencies array
                         * had on the scheduled fence.
                         */
                        dma_fence_put(&s_fence->scheduled);
                }

                xa_erase(&job->dependencies, index);
                if (f && !dma_fence_add_callback(f, &job->finish_cb,
                                                 drm_sched_entity_kill_jobs_cb))
                        return;

                dma_fence_put(f);
        }

        INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
        schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
        struct drm_sched_job *job;
        struct dma_fence *prev;

        if (!entity->rq)
                return;

        spin_lock(&entity->lock);
        entity->stopped = true;
        drm_sched_rq_remove_entity(entity->rq, entity);
        spin_unlock(&entity->lock);

        /* Make sure this entity is not used by the scheduler at the moment */
        wait_for_completion(&entity->entity_idle);

        /* The entity is guaranteed to not be used by the scheduler */
        prev = rcu_dereference_check(entity->last_scheduled, true);
        dma_fence_get(prev);
        while ((job = drm_sched_entity_queue_pop(entity))) {
                struct drm_sched_fence *s_fence = job->s_fence;

                dma_fence_get(&s_fence->finished);
                if (!prev ||
                    dma_fence_add_callback(prev, &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb)) {
                        /*
                         * Adding callback above failed.
                         * dma_fence_put() checks for NULL.
                         */
                        dma_fence_put(prev);
                        drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
                }

                prev = &s_fence->finished;
        }
        dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
        struct drm_gpu_scheduler *sched;
        struct task_struct *last_user;
        long ret = timeout;

        if (!entity->rq)
                return 0;

        sched = entity->rq->sched;
        /*
         * The client will not queue more IBs during this fini; consume
         * existing queued IBs or discard them on SIGKILL.
         */
        if (current->flags & PF_EXITING) {
                if (timeout)
                        ret = wait_event_timeout(
                                        sched->job_scheduled,
                                        drm_sched_entity_is_idle(entity),
                                        timeout);
        } else {
                wait_event_killable(sched->job_scheduled,
                                    drm_sched_entity_is_idle(entity));
        }

        /* For a killed process, disable enqueueing of any more IBs right now */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if ((!last_user || last_user == current->group_leader) &&
            (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                drm_sched_entity_kill(entity);

        return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
        /*
         * If consumption of existing IBs wasn't completed, forcefully remove
         * them here. This also makes sure that the scheduler won't touch this
         * entity any more.
         */
        drm_sched_entity_kill(entity);

        if (entity->dependency) {
                dma_fence_remove_callback(entity->dependency, &entity->cb);
                dma_fence_put(entity->dependency);
                entity->dependency = NULL;
        }

        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
        drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
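
/*
 * Illustrative teardown sketch (hypothetical driver code): on context
 * destruction a driver can either use drm_sched_entity_destroy(), or call
 * drm_sched_entity_flush() and drm_sched_entity_fini() separately if it
 * needs to do work in between.
 *
 *	void my_ctx_fini(struct my_ctx *ctx)
 *	{
 *		drm_sched_entity_destroy(&ctx->entity);
 *	}
 */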

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
                                       struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        entity->dependency = NULL;
        dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        drm_sched_entity_clear_dep(f, cb);
        drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority)
{
        spin_lock(&entity->lock);
        entity->priority = priority;
        spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
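
/*
 * Illustrative sketch (hypothetical driver code): raising the priority of an
 * already initialized entity, e.g. from a userspace ioctl. The new priority
 * is used the next time a run queue is selected for the entity.
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */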

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;

        if (fence->context == entity->fence_context ||
            fence->context == entity->fence_context + 1) {
                /*
                 * Fence is a scheduled/finished fence from a job
                 * which belongs to the same entity; we can ignore
                 * fences from ourselves.
                 */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_drm_sched_fence(fence);
        if (!fence->error && s_fence && s_fence->sched == sched &&
            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
{
        struct dma_fence *f;

        /* We keep the fence around, so we can iterate over all dependencies
         * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
         * before killing the job.
         */
        f = xa_load(&job->dependencies, job->last_dependency);
        if (f) {
                job->last_dependency++;
                return dma_fence_get(f);
        }

        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);

        return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_sched_job *sched_job;

        sched_job = drm_sched_entity_queue_peek(entity);
        if (!sched_job)
                return NULL;

        while ((entity->dependency =
                        drm_sched_job_dependency(sched_job, entity))) {
                trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

                if (drm_sched_entity_add_dependency_cb(entity))
                        return NULL;
        }

        /* Skip jobs from an entity that was marked guilty */
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        rcu_assign_pointer(entity->last_scheduled,
                           dma_fence_get(&sched_job->s_fence->finished));

        /*
         * If the queue is empty we allow drm_sched_entity_select_rq() to
         * locklessly access ->last_scheduled. This only works if we set the
         * pointer before we dequeue and if we use a write barrier here.
         */
        smp_wmb();

        spsc_queue_pop(&entity->job_queue);

        /*
         * Update the entity's location in the min heap according to
         * the timestamp of the next job, if any.
         */
        if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
                struct drm_sched_job *next;

                next = drm_sched_entity_queue_peek(entity);
                if (next) {
                        struct drm_sched_rq *rq;

                        spin_lock(&entity->lock);
                        rq = entity->rq;
                        spin_lock(&rq->lock);
                        drm_sched_rq_update_fifo_locked(entity, rq,
                                                        next->submit_ts);
                        spin_unlock(&rq->lock);
                        spin_unlock(&entity->lock);
                }
        }

        /* Jobs and entities might have different lifecycles. Since we're
         * removing the job from the entity's queue, set the job's entity
         * pointer to NULL to prevent any future access of the entity through
         * this job.
         */
        sched_job->entity = NULL;

        return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;

        /* single possible engine and already selected */
        if (!entity->sched_list)
                return;

        /* queue non-empty, stay on the same engine */
        if (spsc_queue_count(&entity->job_queue))
                return;

        /*
         * Only when the queue is empty are we guaranteed that the scheduler
         * thread cannot change ->last_scheduled. To enforce ordering we need
         * a read barrier here. See drm_sched_entity_pop_job() for the other
         * side.
         */
        smp_rmb();

        fence = rcu_dereference_check(entity->last_scheduled, true);

        /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
                return;

        spin_lock(&entity->lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
        rq = sched ? sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
        }
        spin_unlock(&entity->lock);

        if (entity->num_sched_list == 1)
                entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
        ktime_t submit_ts;

        trace_drm_sched_job(sched_job, entity);
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);

        /*
         * After the sched_job is pushed into the entity queue, it may be
         * completed and freed up at any time. We can no longer access it.
         * Make sure to set the submit_ts first, to avoid a race.
         */
        sched_job->submit_ts = submit_ts = ktime_get();
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

        /* first job wakes up scheduler */
        if (first) {
                struct drm_gpu_scheduler *sched;
                struct drm_sched_rq *rq;

                /* Add the entity to the run queue */
                spin_lock(&entity->lock);
                if (entity->stopped) {
                        spin_unlock(&entity->lock);

                        DRM_ERROR("Trying to push to a killed entity\n");
                        return;
                }

                rq = entity->rq;
                sched = rq->sched;

                spin_lock(&rq->lock);
                drm_sched_rq_add_entity(rq, entity);

                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

                spin_unlock(&rq->lock);
                spin_unlock(&entity->lock);

                drm_sched_wakeup(sched);
        }
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
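
/*
 * Illustrative submission sketch (hypothetical driver code): arming and
 * pushing a job under the same lock so that fence sequence numbers match the
 * queue order, as required by the note above. The remaining arguments of
 * drm_sched_job_init() are elided; ctx->queue_lock and the job type are made
 * up for the example.
 *
 *	r = drm_sched_job_init(&job->base, &ctx->entity, ...);
 *	if (r)
 *		return r;
 *
 *	mutex_lock(&ctx->queue_lock);
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&ctx->queue_lock);
 */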