// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

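/* MMIO accessors for the Job manager register file. */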
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

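/*
 * Allocate a dma_fence on the timeline of job slot @js_num. Each slot has
 * its own fence context, and seqnos increase monotonically per slot.
 */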
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/*
	 * JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

	/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

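/*
 * Program a job chain into the hardware: power the GPU up, take an
 * address space for the job's file, write the chain head, affinity and
 * configuration into the slot's _NEXT registers, then kick the slot with
 * JS_COMMAND_START. If we return early the job never starts; cleanup of
 * the PM and devfreq references taken here is then left to the scheduler
 * timeout (see panfrost_job_timedout()).
 */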
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
		return;

	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/*
	 * start MMU, medium priority, cache clean/flush on end, clean/flush
	 * on start
	 */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

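/*
 * Implicit synchronization: collect the exclusive (write) fence of each
 * BO before queueing the job, and attach the job's render-done fence as
 * the new exclusive fence once it has been pushed.
 */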
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

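/*
 * Queue a job on the scheduler entity for its slot: take the reservations
 * on all BOs under pfdev->sched_lock, set up the scheduler fences, snapshot
 * the implicit dependencies, push the job, then attach the finished fence
 * to every BO before dropping the reservations.
 */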
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

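/*
 * kref release callback for a job: drop the in/implicit fences, the done
 * and render-done fences, the mapping references (and their gpu_usecount),
 * and the BO references, then free the job itself.
 */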
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

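/*
 * Scheduler dependency callback, invoked repeatedly until it returns NULL:
 * hand the job's remaining dependencies to the scheduler one fence at a
 * time, explicit in-fences first, then the implicit per-BO fences.
 */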
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

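/*
 * Scheduler run_job callback: all dependencies have signaled, so publish
 * the job for the IRQ handler, create the done fence for its slot and
 * submit to the hardware. Returns NULL if the job was canceled (the
 * finished fence carries an error) or fence allocation fails.
 */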
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

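/*
 * Acknowledge any pending job interrupts and unmask the per-slot
 * interrupt sources (MK_JS_MASK() covers both the completion and failure
 * bits of a slot) for every job slot.
 */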
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

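/*
 * Scheduler timeout handling: ignore spurious timeouts where the done
 * fence already signaled, then stop all slot schedulers, drop the
 * PM/devfreq references of every in-flight job, reset the whole GPU and
 * restart the schedulers. reset_lock ensures only one slot performs the
 * reset; the others bail out and are cleaned up by that reset.
 */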
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	unsigned long flags;
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	if (!mutex_trylock(&pfdev->reset_lock))
		return;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

		drm_sched_stop(sched, sched_job);
		if (js != i)
			/* Ensure any timeouts on other slots have finished */
			cancel_delayed_work_sync(&sched->work_tdr);
	}

	drm_sched_increase_karma(sched_job);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* restart scheduler after GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free,
};

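/*
 * Job IRQ handler: for each slot with a pending interrupt, acknowledge
 * it; report faults to the scheduler, and on completion signal the job's
 * done fence, release the address space and drop the PM/devfreq
 * references taken at submit.
 */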
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
				panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

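/*
 * One-time setup at probe: allocate the job slot state, hook up the "job"
 * interrupt and create one drm_gpu_scheduler (with a 500ms job timeout)
 * per slot before enabling the job interrupts.
 */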
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}

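/*
 * Per-file-descriptor setup: give each file its own scheduler entity on
 * every job slot so per-client jobs are queued independently.
 */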
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

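/*
 * Report whether every slot's hardware queue is empty; the suspend path
 * uses this to avoid powering down a busy GPU.
 */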
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}