// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/hardirq.h>
#include <linux/iosys-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
#include "lima_trace.h"

struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;
};

static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;

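/*
 * The fence slab is shared by all lima devices, so creation and
 * destruction are refcounted: the slab is allocated on the first
 * init call and freed when the last user calls fini.
 */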
int lima_sched_slab_init(void)
{
	if (!lima_fence_slab) {
		lima_fence_slab = kmem_cache_create(
			"lima_fence", sizeof(struct lima_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!lima_fence_slab)
			return -ENOMEM;
	}

	lima_fence_slab_refcnt++;
	return 0;
}

void lima_sched_slab_fini(void)
{
	if (!--lima_fence_slab_refcnt) {
		kmem_cache_destroy(lima_fence_slab);
		lima_fence_slab = NULL;
	}
}

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
	return "lima";
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	return f->pipe->base.name;
}

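/*
 * A dma_fence may still be accessed by RCU readers after its last
 * reference is dropped, so freeing back to the slab is deferred
 * through call_rcu() until a grace period has elapsed.
 */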
static void lima_fence_release_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct lima_fence *fence = to_lima_fence(f);

	kmem_cache_free(lima_fence_slab, fence);
}

static void lima_fence_release(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	call_rcu(&f->base.rcu, lima_fence_release_rcu);
}

static const struct dma_fence_ops lima_fence_ops = {
	.get_driver_name = lima_fence_get_driver_name,
	.get_timeline_name = lima_fence_get_timeline_name,
	.release = lima_fence_release,
};

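/*
 * Note: pipe->fence_seqno is incremented without locking; this appears
 * safe because fences are only created from run_job, which the
 * scheduler serializes per pipe.
 */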
static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}

static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
	return container_of(job, struct lima_sched_task, base);
}

static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct lima_sched_pipe, base);
}

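/*
 * Initialize a task and arm it for submission. On success the task
 * holds a reference on each BO and on the VM; once the job is armed
 * it must either be pushed to the entity or torn down with
 * lima_sched_task_fini().
 */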
int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm,
			 u64 drm_client_id)
{
	int err, i;

	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
	if (!task->bos)
		return -ENOMEM;

	for (i = 0; i < num_bos; i++)
		drm_gem_object_get(&bos[i]->base.base);

	err = drm_sched_job_init(&task->base, &context->base, 1, vm,
				 drm_client_id);
	if (err) {
		/* drop the BO references taken above */
		for (i = 0; i < num_bos; i++)
			drm_gem_object_put(&bos[i]->base.base);
		kfree(task->bos);
		return err;
	}

	drm_sched_job_arm(&task->base);

	task->num_bos = num_bos;
	task->vm = lima_vm_get(vm);

	return 0;
}

void lima_sched_task_fini(struct lima_sched_task *task)
{
	int i;

	drm_sched_job_cleanup(&task->base);

	if (task->bos) {
		for (i = 0; i < task->num_bos; i++)
			drm_gem_object_put(&task->bos[i]->base.base);
		kfree(task->bos);
	}

	lima_vm_put(task->vm);
}

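/*
 * Each lima_sched_context wraps one scheduler entity bound to a single
 * pipe, so a userspace context ends up with one entity per pipe (GP
 * and PP).
 */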
int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context)
{
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_destroy(&context->base);
}

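/*
 * Take a reference on the finished fence before pushing the job: once
 * pushed, the job may run and be freed at any time, so the fence must
 * be grabbed first to be safely returned to the caller.
 */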
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

	trace_lima_task_submit(task);
	drm_sched_entity_push_job(&task->base);
	return fence;
}

static int lima_pm_busy(struct lima_device *ldev)
{
	int ret;

	/* resume the GPU if it has been suspended by runtime PM */
	ret = pm_runtime_resume_and_get(ldev->dev);
	if (ret < 0)
		return ret;

	lima_devfreq_record_busy(&ldev->devfreq);
	return 0;
}

static void lima_pm_idle(struct lima_device *ldev)
{
	lima_devfreq_record_idle(&ldev->devfreq);

	/* allow the GPU to runtime-suspend automatically */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}

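/*
 * run_job is called by the scheduler with at most one job in flight
 * per pipe (credit_limit is 1 in lima_sched_pipe_init), which is why
 * pipe->current_task can be tracked as a single pointer.
 */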
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	struct lima_fence *fence;
	int i, err;

	/* skip jobs whose fence was already marked with an error
	 * after a GPU reset
	 */
	if (job->s_fence->finished.error < 0)
		return NULL;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;

	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;

	/* take an extra reference for the caller, otherwise the irq
	 * handler may consume the fence before the caller uses it
	 */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* this is needed for the MMU to work correctly, otherwise GP/PP
	 * will hang or page fault for an unknown reason after running
	 * for a while.
	 *
	 * Need to investigate:
	 * 1. is it related to the TLB?
	 * 2. how much performance is affected by the L2 cache flush?
	 * 3. can we reduce the calls to this function, given that all
	 *    GP/PP share the same L2 cache on mali400?
	 *
	 * TODO:
	 * 1. move this to task fini to save some wait time?
	 * 2. when GP/PP use different L2 caches, does PP need to wait
	 *    for the GP L2 cache flush?
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu) {
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	trace_lima_task_run(task);

	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}

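/*
 * Capture the state of a failed task into the device's error task
 * list, in the lima_dump format: a lima_dump_task header followed by
 * chunks for the frame registers, the process name, the process id,
 * and the contents of every BO attached to the task.
 */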
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
	struct lima_sched_error_task *et;
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	struct lima_ctx *ctx =
		container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
	struct lima_dump_task *dt;
	struct lima_dump_chunk *chunk;
	struct lima_dump_chunk_pid *pid_chunk;
	struct lima_dump_chunk_buffer *buffer_chunk;
	u32 size, task_size, mem_size;
	int i;
	struct iosys_map map;
	int ret;

	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "failed to save task state from %s pid %d: "
			 "error task list is full\n", ctx->pname, ctx->pid);
		goto out;
	}

	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	/* pid chunk */
	size += sizeof(struct lima_dump_chunk);
	/* buffer chunks */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}

	task_size = size + sizeof(struct lima_dump_task);
	mem_size = task_size + sizeof(*et);
	et = kvmalloc(mem_size, GFP_KERNEL);
	if (!et) {
		dev_err(dev->dev, "failed to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	}

	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	memset(dt, 0, sizeof(*dt));
	dt->id = pipe_id;
	dt->size = size;

	chunk = (struct lima_dump_chunk *)(dt + 1);
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	memset(pid_chunk, 0, sizeof(*pid_chunk));
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		void *data;

		memset(buffer_chunk, 0, sizeof(*buffer_chunk));
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			if (!data) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			vunmap(data);
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			ret = drm_gem_vmap(&bo->base.base, &map);
			if (ret) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);

			drm_gem_vunmap(&bo->base.base, &map);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "error task state saved successfully\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
}

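/*
 * Timeout handling: confirm the hang is real (not just a slow IRQ),
 * then stop the scheduler, dump the guilty task if requested, reset
 * the processors and MMUs, and restart the scheduler so queued jobs
 * can be resubmitted.
 */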
static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;
	struct lima_ip *ip = pipe->processor[0];
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_RESET;
	}

	/*
	 * The lima IRQ handler may take a long time to process an interrupt
	 * if another IRQ handler hogs the processing.
	 * In order to catch such cases and not report spurious lima job
	 * timeouts, synchronize the IRQ handler and re-check the fence
	 * status.
	 */
	for (i = 0; i < pipe->num_processor; i++)
		synchronize_irq(pipe->processor[i]->irq);
	if (pipe->bcast_processor)
		synchronize_irq(pipe->bcast_processor->irq);

	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_RESET;
	}

	/*
	 * The task might still finish while this timeout handler runs.
	 * To prevent a race condition on its completion, mask all irqs
	 * on the running core until the next hard reset completes.
	 */
	pipe->task_mask_irq(pipe);

	if (!pipe->error)
		DRM_ERROR("%s job timeout\n", lima_ip_name(ip));

	drm_sched_stop(&pipe->base, &task->base);

	drm_sched_increase_karma(&task->base);

	if (lima_max_error_tasks)
		lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu) {
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, 0);

	return DRM_GPU_SCHED_STAT_RESET;
}

static void lima_sched_free_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_vm *vm = task->vm;
	struct lima_bo **bos = task->bos;
	int i;

	dma_fence_put(task->fence);

	for (i = 0; i < task->num_bos; i++)
		lima_vm_bo_del(vm, bos[i]);

	lima_sched_task_fini(task);
	kmem_cache_free(pipe->task_slab, task);
}

static const struct drm_sched_backend_ops lima_sched_ops = {
	.run_job = lima_sched_run_job,
	.timedout_job = lima_sched_timedout_job,
	.free_job = lima_sched_free_job,
};

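/*
 * Recovery path for tasks marked recoverable (e.g. a PP task that ran
 * out of heap buffer space): flush caches and TLBs, then let the pipe
 * retry the task; if task_recover() fails, fall back to a full
 * scheduler fault.
 */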
static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}

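/*
 * The job timeout defaults to 10 seconds and can be overridden with
 * lima_sched_timeout_ms; non-positive values fall back to the default.
 */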
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 10000;
	const struct drm_sched_init_args args = {
		.ops = &lima_sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 1,
		.hang_limit = lima_job_hang_limit,
		.timeout = msecs_to_jiffies(timeout),
		.name = name,
		.dev = pipe->ldev->dev,
	};

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	return drm_sched_init(&pipe->base, &args);
}

void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
	drm_sched_fini(&pipe->base);
}

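/*
 * Called from the IRQ handler when the current task finishes. On
 * error, either schedule the recovery work for recoverable tasks or
 * report a fault to the scheduler (which triggers the timeout path);
 * otherwise signal the hardware fence and drop the PM reference.
 */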
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}
}