xref: /linux/drivers/gpu/drm/i915/i915_gpu_error.c (revision 334fbe734e687404f346eba7d5d96ed2b44d35ab)
/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/folio_batch.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_display_snapshot.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_sysfs.h"
#include "i915_utils.h"

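/*
 * Error capture can run in awkward contexts (e.g. while handling a GPU
 * hang), so all allocations are best-effort: __GFP_NOWARN keeps failures
 * quiet and __GFP_RETRY_MAYFAIL avoids invoking the OOM killer. A failed
 * allocation simply truncates the capture rather than harming the system.
 */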
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

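/*
 * Seal a filled buffer into one scatterlist entry. The otherwise unused
 * dma_address field is borrowed to record the entry's logical offset into
 * the error stream, so readers can seek without walking the whole list.
 */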
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

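/*
 * Ensure at least len+1 bytes of room in the current buffer. If the buffer
 * is full it is sealed into a scatterlist entry and a fresh one allocated;
 * when the scatterlist table itself fills up, a new page of entries is
 * chained on. Returns false (and sets e->err) if no memory is available.
 */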
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

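/*
 * Two-pass printf: first a sizing vsnprintf() against a NULL buffer so
 * that __i915_error_grow() can make room, then the real vscnprintf()
 * into the now large enough buffer.
 */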
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct folio_batch *fbatch)
{
	folio_batch_release(fbatch);
}

static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	while (folio_batch_space(fbatch)) {
		struct folio *folio;

		folio = folio_alloc(gfp, 0);
		if (!folio)
			return -ENOMEM;

		folio_batch_add(fbatch, folio);
	}

	return 0;
}

static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
	int err;

	folio_batch_init(fbatch);

	err = pool_refill(fbatch, gfp);
	if (err)
		pool_fini(fbatch);

	return err;
}

static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
	struct folio *folio;

	folio = folio_alloc(gfp, 0);
	if (!folio && folio_batch_count(fbatch))
		folio = fbatch->folios[--fbatch->nr];

	return folio ? folio_address(folio) : NULL;
}

static void pool_free(struct folio_batch *fbatch, void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (folio_batch_space(fbatch))
		folio_batch_add(fbatch, folio);
	else
		folio_put(folio);
}

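/*
 * Two interchangeable backends for capturing vma contents: with
 * CONFIG_DRM_I915_COMPRESS_ERROR the pages are zlib-deflated and the
 * dump is prefixed with ":", otherwise they are copied verbatim and
 * prefixed with "~", so the decoder can tell the formats apart.
 */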
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct folio_batch pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

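/*
 * Deflate one source page into dst, pulling fresh output pages from the
 * pool as needed. Reads from write-combining memory are uncached and slow,
 * so where possible the page is first staged into the cached c->tmp buffer
 * via i915_memcpy_from_wc() before zlib reads it.
 */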
static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct folio_batch pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
	err_printf(m, "  context timeline seqno %u\n", ctx->hwsp_seqno);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
				      const struct intel_engine_cs *engine,
				      const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

/* This list includes registers that are useful in debugging GuC hangs. */
static const struct {
	u32 start;
	u32 count;
} guc_hw_reg_state[] = {
	{ 0xc0b0, 2 },
	{ 0xc000, 65 },
	{ 0xc140, 1 },
	{ 0xc180, 16 },
	{ 0xc1dc, 10 },
	{ 0xc300, 79 },
	{ 0xc4b4, 47 },
	{ 0xc574, 1 },
	{ 0xc57c, 1 },
	{ 0xc584, 11 },
	{ 0xc5c0, 8 },
	{ 0xc5e4, 1 },
	{ 0xc5ec, 103 },
	{ 0xc7c0, 1 },
	{ 0xc0b0, 2 }
};

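/*
 * Print up to eight dwords of a register range on one line, and return
 * how many were consumed so the caller can advance through the range.
 */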
static u32 print_range_line(struct drm_i915_error_state_buf *m, u32 start, u32 *dump, u32 count)
{
	if (count >= 8) {
		err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   start, dump[0], dump[1], dump[2], dump[3],
			   dump[4], dump[5], dump[6], dump[7]);
		return 8;
	} else if (count >= 4) {
		err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   start, dump[0], dump[1], dump[2], dump[3]);
		return 4;
	} else if (count >= 2) {
		err_printf(m, "[0x%04x] 0x%08x 0x%08x\n", start, dump[0], dump[1]);
		return 2;
	}

	err_printf(m, "[0x%04x] 0x%08x\n", start, dump[0]);
	return 1;
}

static void err_print_guc_hw_state(struct drm_i915_error_state_buf *m, u32 *hw_state)
{
	u32 total = 0;
	int i;

	if (!hw_state)
		return;

	err_printf(m, "GuC Register State:\n");

	for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++) {
		u32 entry = 0;

		while (entry < guc_hw_reg_state[i].count) {
			u32 start = guc_hw_reg_state[i].start + entry * sizeof(u32);
			u32 count = guc_hw_reg_state[i].count - entry;
			u32 *values = hw_state + total + entry;

			entry += print_range_line(m, start, values, count);
		}

		GEM_BUG_ON(entry != guc_hw_reg_state[i].count);
		total += entry;
	}
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	err_print_guc_hw_state(m, error_uc->guc.hw_state);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

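/*
 * Free a chained error-state scatterlist: release each entry's kmalloc'ed
 * buffer, then the page backing that block of entries, before following
 * the chain link to the next block.
 */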
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (gt->uc && gt->uc->guc.is_guc_capture) {
			if (ee->guc_capture_node)
				intel_guc_capture_print_engine_node(m, ee);
			else
				err_printf(m, "  Missing GuC capture node for %s\n",
					   ee->engine->name);
		} else {
			error_print_engine(m, ee);
		}

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * The GuC dumps the global, engine-class and engine-instance
		 * registers together as part of the engine state dump, so we
		 * print them in err_print_gt_engines() instead.
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);

	intel_display_snapshot_print(error->display_snapshot, &p);
}

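/*
 * Render the whole coredump into a scatterlist of buffers exactly once;
 * concurrent renderers race via cmpxchg() to publish their result, and
 * the losers free their copy.
 */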
static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

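/*
 * pread()-style copy out of the rendered scatterlist. The last entry
 * touched is cached in error->fit so that the common case of a sequential
 * read does not rescan the list from the start on every call.
 */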
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);
	kfree(uc->guc.hw_state);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	intel_display_snapshot_free(error->display_snapshot);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

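/*
 * Snapshot the contents of a vma page by page, via one of three paths:
 * through the reserved GGTT error-capture slot (remapping each page in
 * turn), through a direct lmem iomap, or by kmap'ing system pages with
 * explicit clflushes around the copy.
 */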
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc_obj(*dst, ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strscpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 i915_gem_get_pat_index(gt->i915,
										I915_CACHE_NONE),
							 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     i915_gem_get_pat_index(gt->i915,
									    I915_CACHE_NONE),
						     0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > resource_size(&mem->io)) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap_local_page(page);
			ret = compress_page(compress, s, dst, false);
			kunmap_local(s);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		/*
		 * For the media GT, this ring fault register is not replicated,
		 * so don't do multicast/replicated register read/write
		 * operation on it.
		 */
		if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  XELPMP_RING_FAULT_REG);
		else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

static bool record_context(struct i915_gem_context_coredump *e,
			   struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(ce->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strscpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
	e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
				*ce->timeline->hwsp_seqno : ~0U;

	e->total_runtime = intel_context_get_total_runtime_ns(ce);
	e->avg_runtime = intel_context_get_avg_runtime_ns(ce);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc_obj(*c, gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strscpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc_obj(*ee, gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

static struct intel_engine_capture_vma *
engine_coredump_add_context(struct intel_engine_coredump *ee,
			    struct intel_context *ce,
			    gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, ce);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
	vma = capture_vma(vma, ce->state, "HW context", gfp);

	return vma;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma;

	vma = engine_coredump_add_context(ee, rq->context, gfp);
	if (!vma)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

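/*
 * Capture one engine: allocate the coredump, find the hung context and
 * request (if any), snapshot their vmas, and for GuC submission attach
 * the matching GuC register-capture node. Returns NULL if there is
 * nothing worth reporting.
 */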
1689 static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs * engine,struct i915_vma_compress * compress,u32 dump_flags)1690 capture_engine(struct intel_engine_cs *engine,
1691 	       struct i915_vma_compress *compress,
1692 	       u32 dump_flags)
1693 {
1694 	struct intel_engine_capture_vma *capture = NULL;
1695 	struct intel_engine_coredump *ee;
1696 	struct intel_context *ce = NULL;
1697 	struct i915_request *rq = NULL;
1698 
1699 	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
1700 	if (!ee)
1701 		return NULL;
1702 
1703 	intel_engine_get_hung_entity(engine, &ce, &rq);
1704 	if (rq && !i915_request_started(rq)) {
1705 		/*
1706 		 * We want to know also what is the guc_id of the context,
1707 		 * but if we don't have the context reference, then skip
1708 		 * printing it.
1709 		 */
1710 		if (ce)
1711 			drm_info(&engine->gt->i915->drm,
1712 				 "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
1713 				 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
1714 		else
1715 			drm_info(&engine->gt->i915->drm,
1716 				 "Got hung context on %s with active request %lld:%lld not yet started\n",
1717 				 engine->name, rq->fence.context, rq->fence.seqno);
1718 	}
1719 
1720 	if (rq) {
1721 		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
1722 		i915_request_put(rq);
1723 	} else if (ce) {
1724 		capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
1725 	}
1726 
1727 	if (capture) {
1728 		intel_engine_coredump_add_vma(ee, capture, compress);
1729 
1730 		if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
1731 			intel_guc_capture_get_matching_node(engine->gt, ee, ce);
1732 	} else {
1733 		kfree(ee);
1734 		ee = NULL;
1735 	}
1736 
1737 	return ee;
1738 }
1739 
1740 static void
gt_record_engines(struct intel_gt_coredump * gt,intel_engine_mask_t engine_mask,struct i915_vma_compress * compress,u32 dump_flags)1741 gt_record_engines(struct intel_gt_coredump *gt,
1742 		  intel_engine_mask_t engine_mask,
1743 		  struct i915_vma_compress *compress,
1744 		  u32 dump_flags)
1745 {
1746 	struct intel_engine_cs *engine;
1747 	enum intel_engine_id id;
1748 
1749 	for_each_engine(engine, gt->_gt, id) {
1750 		struct intel_engine_coredump *ee;
1751 
1752 		/* Refill our page pool before entering atomic section */
1753 		pool_refill(&compress->pool, ALLOW_FAIL);
1754 
1755 		ee = capture_engine(engine, compress, dump_flags);
1756 		if (!ee)
1757 			continue;
1758 
1759 		ee->hung = engine->mask & engine_mask;
1760 
1761 		gt->simulated |= ee->simulated;
1762 		if (ee->simulated) {
1763 			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
1764 				intel_guc_capture_free_node(ee);
1765 			kfree(ee);
1766 			continue;
1767 		}
1768 
1769 		ee->next = gt->engine;
1770 		gt->engine = ee;
1771 	}
1772 }
1773 
1774 static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
1775 			      const struct intel_guc_ct_buffer *ctb,
1776 			      const void *blob_ptr, struct intel_guc *guc)
1777 {
1778 	if (!ctb || !ctb->desc)
1779 		return;
1780 
1781 	saved->raw_status = ctb->desc->status;
1782 	saved->raw_head = ctb->desc->head;
1783 	saved->raw_tail = ctb->desc->tail;
1784 	saved->head = ctb->head;
1785 	saved->tail = ctb->tail;
1786 	saved->size = ctb->size;
1787 	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
1788 	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
1789 }
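
/*
 * A hedged sketch of why offsets rather than pointers are saved above: the
 * descriptors and command buffers live inside one shared GuC CTB blob (the
 * send descriptor sits at its start, which is why gt_record_uc() below
 * passes it as blob_ptr for both directions). A post-mortem reader of the
 * dumped "GuC CT buffer" vma can then locate the structures with, e.g.:
 *
 *	const void *desc = blob + saved->desc_offset;
 *	const void *cmds = blob + saved->cmds_offset;
 *
 * where "blob" stands in for the decompressed payload of
 * error_uc->guc.vma_ctb captured below.
 */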
1790 
1791 static u32 read_guc_state_reg(struct intel_uncore *uncore, int range, int count)
1792 {
1793 	GEM_BUG_ON(range >= ARRAY_SIZE(guc_hw_reg_state));
1794 	GEM_BUG_ON(count >= guc_hw_reg_state[range].count);
1795 
1796 	return intel_uncore_read(uncore,
1797 				 _MMIO(guc_hw_reg_state[range].start + count * sizeof(u32)));
1798 }
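
/*
 * Worked example of the indexing above, assuming a hypothetical table
 * entry guc_hw_reg_state[i] = { .start = 0xC000, .count = 4 }:
 *
 *	read_guc_state_reg(uncore, i, 0) -> MMIO 0xC000
 *	read_guc_state_reg(uncore, i, 1) -> MMIO 0xC004
 *	read_guc_state_reg(uncore, i, 2) -> MMIO 0xC008
 *	read_guc_state_reg(uncore, i, 3) -> MMIO 0xC00C
 *
 * i.e. each table entry describes a run of consecutive 32-bit registers.
 */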
1799 
1800 static void gt_record_guc_hw_state(struct intel_uncore *uncore,
1801 				   struct intel_uc_coredump *error_uc)
1802 {
1803 	u32 *hw_state;
1804 	u32 count = 0;
1805 	int i, j;
1806 
1807 	for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
1808 		count += guc_hw_reg_state[i].count;
1809 
1810 	hw_state = kcalloc(count, sizeof(u32), ALLOW_FAIL);
1811 	if (!hw_state)
1812 		return;
1813 
1814 	count = 0;
1815 	for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
1816 		for (j = 0; j < guc_hw_reg_state[i].count; j++)
1817 			hw_state[count++] = read_guc_state_reg(uncore, i, j);
1818 
1819 	error_uc->guc.hw_state = hw_state;
1820 }
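
/*
 * A hedged sketch of how a consumer could walk the flat hw_state array
 * saved above, reusing the same static table to recover each register's
 * address (the decode loop is illustrative, not an existing helper):
 *
 *	u32 idx = 0;
 *
 *	for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
 *		for (j = 0; j < guc_hw_reg_state[i].count; j++)
 *			pr_info("0x%08x: 0x%08x\n",
 *				guc_hw_reg_state[i].start + j * 4,
 *				error_uc->guc.hw_state[idx++]);
 */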
1821 
1822 static struct intel_uc_coredump *
1823 gt_record_uc(struct intel_gt_coredump *gt,
1824 	     struct i915_vma_compress *compress)
1825 {
1826 	const struct intel_uc *uc = &gt->_gt->uc;
1827 	struct intel_uc_coredump *error_uc;
1828 
1829 	error_uc = kzalloc_obj(*error_uc, ALLOW_FAIL);
1830 	if (!error_uc)
1831 		return NULL;
1832 
1833 	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1834 	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
1835 
1836 	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
1837 	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
1838 	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
1839 	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
1840 
1841 	/*
1842 	 * Save the GuC log and include a timestamp reference for converting the
1843 	 * log times to system times (in conjunction with the error->boottime and
1844 	 * gt->clock_frequency fields saved elsewhere).
1845 	 */
1846 	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
1847 	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
1848 						    "GuC log buffer", compress);
1849 	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
1850 						    "GuC CT buffer", compress);
1851 	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
1852 	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
1853 			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1854 	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
1855 			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1856 	gt_record_guc_hw_state(gt->_gt->uncore, error_uc);
1857 
1858 	return error_uc;
1859 }
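
/*
 * Sketch of the time correlation the saved log timestamp enables
 * (illustrative arithmetic under the assumptions stated in the comment
 * above, not necessarily the exact formula used by decode tools): a raw
 * GuC log timestamp guc_log_ts can be rebased onto the system clock
 * captured at error time with something like
 *
 *	delta_ns   = (s64)(guc_log_ts - error_uc->guc.timestamp) *
 *		     NSEC_PER_SEC / gt->clock_frequency;
 *	event_time = ktime_add_ns(error->boottime, delta_ns);
 *
 * using the clock_frequency saved by gt_record_info() below.
 */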
1860 
1861 /* Capture all other registers that GuC doesn't capture. */
1862 static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
1863 {
1864 	struct intel_uncore *uncore = gt->_gt->uncore;
1865 	struct drm_i915_private *i915 = uncore->i915;
1866 	int i;
1867 
1868 	if (IS_VALLEYVIEW(i915)) {
1869 		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1870 		gt->ngtier = 1;
1871 	} else if (GRAPHICS_VER(i915) >= 11) {
1872 		gt->gtier[0] =
1873 			intel_uncore_read(uncore,
1874 					  GEN11_RENDER_COPY_INTR_ENABLE);
1875 		gt->gtier[1] =
1876 			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
1877 		gt->gtier[2] =
1878 			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
1879 		gt->gtier[3] =
1880 			intel_uncore_read(uncore,
1881 					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1882 		gt->gtier[4] =
1883 			intel_uncore_read(uncore,
1884 					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
1885 		gt->gtier[5] =
1886 			intel_uncore_read(uncore,
1887 					  GEN11_GUNIT_CSME_INTR_ENABLE);
1888 		gt->ngtier = 6;
1889 	} else if (GRAPHICS_VER(i915) >= 8) {
1890 		for (i = 0; i < 4; i++)
1891 			gt->gtier[i] =
1892 				intel_uncore_read(uncore, GEN8_GT_IER(i));
1893 		gt->ngtier = 4;
1894 	} else if (GRAPHICS_VER(i915) >= 5) {
1895 		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1896 		gt->ngtier = 1;
1897 	} else {
1898 		gt->gtier[0] = intel_uncore_read(uncore, GEN2_IER);
1899 		gt->ngtier = 1;
1900 	}
1901 
1902 	gt->eir = intel_uncore_read(uncore, EIR);
1903 	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1904 }
1905 
1906 /*
1907  * Capture all registers that relate to workload submission.
1908  * NOTE: in GuC submission, when GuC resets an engine, it can dump these for us.
1909  */
1910 static void gt_record_global_regs(struct intel_gt_coredump *gt)
1911 {
1912 	struct intel_uncore *uncore = gt->_gt->uncore;
1913 	struct drm_i915_private *i915 = uncore->i915;
1914 	int i;
1915 
1916 	/*
1917 	 * General organization
1918 	 * 1. Registers specific to a single generation
1919 	 * 2. Registers which belong to multiple generations
1920 	 * 3. Feature specific registers.
1921 	 * 4. Everything else
1922 	 * Please try to follow the order.
1923 	 */
1924 
1925 	/* 1: Registers specific to a single generation */
1926 	if (IS_VALLEYVIEW(i915))
1927 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
1928 
1929 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
1930 		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1931 							XEHP_FAULT_TLB_DATA0);
1932 		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1933 							XEHP_FAULT_TLB_DATA1);
1934 	} else if (GRAPHICS_VER(i915) >= 12) {
1935 		gt->fault_data0 = intel_uncore_read(uncore,
1936 						    GEN12_FAULT_TLB_DATA0);
1937 		gt->fault_data1 = intel_uncore_read(uncore,
1938 						    GEN12_FAULT_TLB_DATA1);
1939 	} else if (GRAPHICS_VER(i915) >= 8) {
1940 		gt->fault_data0 = intel_uncore_read(uncore,
1941 						    GEN8_FAULT_TLB_DATA0);
1942 		gt->fault_data1 = intel_uncore_read(uncore,
1943 						    GEN8_FAULT_TLB_DATA1);
1944 	}
1945 
1946 	if (GRAPHICS_VER(i915) == 6) {
1947 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1948 		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1949 		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
1950 	}
1951 
1952 	/* 2: Registers which belong to multiple generations */
1953 	if (GRAPHICS_VER(i915) >= 7)
1954 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
1955 
1956 	if (GRAPHICS_VER(i915) >= 6) {
1957 		if (GRAPHICS_VER(i915) < 12) {
1958 			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
1959 			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
1960 		}
1961 	}
1962 
1963 	/* 3: Feature specific registers */
1964 	if (IS_GRAPHICS_VER(i915, 6, 7)) {
1965 		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1966 		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
1967 	}
1968 
1969 	if (IS_GRAPHICS_VER(i915, 8, 11))
1970 		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
1971 
1972 	if (GRAPHICS_VER(i915) == 12)
1973 		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
1974 
1975 	if (GRAPHICS_VER(i915) >= 12) {
1976 		for (i = 0; i < I915_MAX_SFC; i++) {
1977 			/*
1978 			 * SFC_DONE resides in the VD forcewake domain, so it
1979 			 * only exists if the corresponding VCS engine is
1980 			 * present.
1981 			 */
1982 			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
1983 			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
1984 				continue;
1985 
1986 			gt->sfc_done[i] =
1987 				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1988 		}
1989 
1990 		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
1991 	}
1992 }
1993 
1994 static void gt_record_info(struct intel_gt_coredump *gt)
1995 {
1996 	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
1997 	gt->clock_frequency = gt->_gt->clock_frequency;
1998 	gt->clock_period_ns = gt->_gt->clock_period_ns;
1999 }
2000 
2001 /*
2002  * Generate a semi-unique error code. The code is not meant to have meaning; its
2003  * only purpose is to try to prevent falsely duplicated bug reports by
2004  * grossly estimating a GPU error state.
2005  *
2006  * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
2007  * the hang if we could strip the GTT offset information from it.
2008  *
2009  * It's only a small step better than a random number in its current form.
2010  */
2011 static u32 generate_ecode(const struct intel_engine_coredump *ee)
2012 {
2013 	/*
2014 	 * IPEHR would be an ideal way to detect errors, as it's the gross
2015 	 * measure of "the command that hung." However, it can hold some very
2016 	 * common synchronization commands which almost always appear when the
2017 	 * hang is strictly a client bug. Use instdone to differentiate those.
2018 	 */
2019 	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
2020 }
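
/*
 * Worked example with made-up register values: for ee->ipehr == 0x0b160001
 * and ee->instdone.instdone == 0xffffffc0, the ecode is
 *
 *	0x0b160001 ^ 0xffffffc0 == 0xf4e9ffc1
 *
 * Hangs at the same command but with different execution progress thus tend
 * to yield different codes, while truly identical hangs collide, which is
 * the de-duplication property wanted here.
 */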
2021 
2022 static const char *error_msg(struct i915_gpu_coredump *error)
2023 {
2024 	struct intel_engine_coredump *first = NULL;
2025 	unsigned int hung_classes = 0;
2026 	struct intel_gt_coredump *gt;
2027 	int len;
2028 
2029 	for (gt = error->gt; gt; gt = gt->next) {
2030 		struct intel_engine_coredump *cs;
2031 
2032 		for (cs = gt->engine; cs; cs = cs->next) {
2033 			if (cs->hung) {
2034 				hung_classes |= BIT(cs->engine->uabi_class);
2035 				if (!first)
2036 					first = cs;
2037 			}
2038 		}
2039 	}
2040 
2041 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
2042 			"GPU HANG: ecode %d:%x:%08x",
2043 			GRAPHICS_VER(error->i915), hung_classes,
2044 			generate_ecode(first));
2045 	if (first && first->context.pid) {
2046 		/* Just show the first executing process; more is confusing */
2047 		len += scnprintf(error->error_msg + len,
2048 				 sizeof(error->error_msg) - len,
2049 				 ", in %s [%d]",
2050 				 first->context.comm, first->context.pid);
2051 	}
2052 
2053 	return error->error_msg;
2054 }
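
/*
 * Example of the resulting message with illustrative values: on a
 * GRAPHICS_VER 12 device with a hung render-class engine
 * (BIT(I915_ENGINE_CLASS_RENDER) == 0x1) and a known offending process,
 * error->error_msg ends up as
 *
 *	GPU HANG: ecode 12:1:f4e9ffc1, in glxgears [1234]
 */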
2055 
2056 static void capture_gen(struct i915_gpu_coredump *error)
2057 {
2058 	struct drm_i915_private *i915 = error->i915;
2059 
2060 	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
2061 	error->suspended = pm_runtime_suspended(i915->drm.dev);
2062 
2063 	error->iommu = i915_vtd_active(i915);
2064 	error->reset_count = i915_reset_count(&i915->gpu_error);
2065 	error->suspend_count = i915->suspend_count;
2066 
2067 	i915_params_copy(&error->params, &i915->params);
2068 	memcpy(&error->device_info,
2069 	       INTEL_INFO(i915),
2070 	       sizeof(error->device_info));
2071 	memcpy(&error->runtime_info,
2072 	       RUNTIME_INFO(i915),
2073 	       sizeof(error->runtime_info));
2074 	error->driver_caps = i915->caps;
2075 }
2076 
2077 struct i915_gpu_coredump *
2078 i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
2079 {
2080 	struct i915_gpu_coredump *error;
2081 
2082 	if (!i915->params.error_capture)
2083 		return NULL;
2084 
2085 	error = kzalloc_obj(*error, gfp);
2086 	if (!error)
2087 		return NULL;
2088 
2089 	kref_init(&error->ref);
2090 	error->i915 = i915;
2091 
2092 	error->time = ktime_get_real();
2093 	error->boottime = ktime_get_boottime();
2094 	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
2095 	error->capture = jiffies;
2096 
2097 	capture_gen(error);
2098 
2099 	return error;
2100 }
2101 
2102 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
2103 
2104 struct intel_gt_coredump *
2105 intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
2106 {
2107 	struct intel_gt_coredump *gc;
2108 
2109 	gc = kzalloc_obj(*gc, gfp);
2110 	if (!gc)
2111 		return NULL;
2112 
2113 	gc->_gt = gt;
2114 	gc->awake = intel_gt_pm_is_awake(gt);
2115 
2116 	gt_record_global_nonguc_regs(gc);
2117 
2118 	/*
2119 	 * GuC dumps global, eng-class and eng-instance registers
2120 	 * (that can change as part of engine state during execution)
2121 	 * before an engine is reset due to a hung context.
2122 	 * GuC captures and reports all three groups of registers
2123 	 * together as a single set before the engine is reset.
2124 	 * Thus, if GuC triggered the context reset we retrieve
2125 	 * the register values as part of gt_record_engines.
2126 	 */
2127 	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
2128 		gt_record_global_regs(gc);
2129 
2130 	gt_record_fences(gc);
2131 
2132 	return gc;
2133 }
2134 
2135 struct i915_vma_compress *
2136 i915_vma_capture_prepare(struct intel_gt_coredump *gt)
2137 {
2138 	struct i915_vma_compress *compress;
2139 
2140 	compress = kmalloc_obj(*compress, ALLOW_FAIL);
2141 	if (!compress)
2142 		return NULL;
2143 
2144 	if (!compress_init(compress)) {
2145 		kfree(compress);
2146 		return NULL;
2147 	}
2148 
2149 	return compress;
2150 }
2151 
2152 void i915_vma_capture_finish(struct intel_gt_coredump *gt,
2153 			     struct i915_vma_compress *compress)
2154 {
2155 	if (!compress)
2156 		return;
2157 
2158 	compress_fini(compress);
2159 	kfree(compress);
2160 }
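
/*
 * The prepare/finish pair brackets one capture, as __i915_gpu_coredump()
 * below demonstrates. Reduced to the bare pattern (error paths elided):
 *
 *	struct i915_vma_compress *compress;
 *
 *	compress = i915_vma_capture_prepare(gt);
 *	if (compress) {
 *		... create vma coredumps against "compress" ...
 *		i915_vma_capture_finish(gt, compress);
 *	}
 */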
2161 
2162 static struct i915_gpu_coredump *
2163 __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2164 {
2165 	struct drm_i915_private *i915 = gt->i915;
2166 	struct intel_display *display = i915->display;
2167 	struct i915_gpu_coredump *error;
2168 
2169 	/* Check if GPU capture has been disabled */
2170 	error = READ_ONCE(i915->gpu_error.first_error);
2171 	if (IS_ERR(error))
2172 		return error;
2173 
2174 	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
2175 	if (!error)
2176 		return ERR_PTR(-ENOMEM);
2177 
2178 	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
2179 	if (error->gt) {
2180 		struct i915_vma_compress *compress;
2181 
2182 		compress = i915_vma_capture_prepare(error->gt);
2183 		if (!compress) {
2184 			kfree(error->gt);
2185 			kfree(error);
2186 			return ERR_PTR(-ENOMEM);
2187 		}
2188 
2189 		if (INTEL_INFO(i915)->has_gt_uc) {
2190 			error->gt->uc = gt_record_uc(error->gt, compress);
2191 			if (error->gt->uc) {
2192 				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
2193 					error->gt->uc->guc.is_guc_capture = true;
2194 				else
2195 					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
2196 			}
2197 		}
2198 
2199 		gt_record_info(error->gt);
2200 		gt_record_engines(error->gt, engine_mask, compress, dump_flags);
2201 
2202 
2203 		i915_vma_capture_finish(error->gt, compress);
2204 
2205 		error->simulated |= error->gt->simulated;
2206 	}
2207 
2208 	error->display_snapshot = intel_display_snapshot_capture(display);
2209 
2210 	return error;
2211 }
2212 
2213 static struct i915_gpu_coredump *
2214 i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2215 {
2216 	static DEFINE_MUTEX(capture_mutex);
2217 	int ret = mutex_lock_interruptible(&capture_mutex);
2218 	struct i915_gpu_coredump *dump;
2219 
2220 	if (ret)
2221 		return ERR_PTR(ret);
2222 
2223 	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
2224 	mutex_unlock(&capture_mutex);
2225 
2226 	return dump;
2227 }
2228 
2229 void i915_error_state_store(struct i915_gpu_coredump *error)
2230 {
2231 	struct drm_i915_private *i915;
2232 
2233 	if (IS_ERR_OR_NULL(error))
2234 		return;
2235 
2236 	i915 = error->i915;
2237 	drm_info(&i915->drm, "%s\n", error_msg(error));
2238 
2239 	if (error->simulated ||
2240 	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
2241 		return;
2242 
2243 	i915_gpu_coredump_get(error);
2244 
2245 	drm_info(&i915->drm, "GPU error state saved to /sys/class/drm/card%d/error\n",
2246 		 i915->drm.primary->index);
2247 }
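
/*
 * Sketch of the "first error wins" semantics above: cmpxchg() installs the
 * new dump only when the slot is empty, so later hangs are dropped until
 * userspace clears the slot:
 *
 *	cmpxchg(&slot, NULL, error)	// slot was NULL: error stored,
 *					// returns NULL, we keep the dump
 *	cmpxchg(&slot, NULL, error)	// slot occupied: returns old value,
 *					// nothing stored, dump discarded
 *
 * The i915_gpu_coredump_get() transfers one reference to the slot; it is
 * dropped again from i915_reset_error_state().
 */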
2248 
2249 /**
2250  * i915_capture_error_state - capture an error record for later analysis
2251  * @gt: intel_gt which originated the hang
2252  * @engine_mask: hung engines
2253  * @dump_flags: dump flags
2254  *
2255  * Should be called when an error is detected (either a hang or an error
2256  * interrupt) to capture error state from the time of the error.  Fills
2257  * out a structure which becomes available in debugfs for user level tools
2258  * to pick up.
2259  */
2260 void i915_capture_error_state(struct intel_gt *gt,
2261 			      intel_engine_mask_t engine_mask, u32 dump_flags)
2262 {
2263 	struct i915_gpu_coredump *error;
2264 
2265 	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
2266 	if (IS_ERR(error)) {
2267 		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
2268 		return;
2269 	}
2270 
2271 	i915_error_state_store(error);
2272 	i915_gpu_coredump_put(error);
2273 }
2274 
2275 static struct i915_gpu_coredump *
2276 i915_first_error_state(struct drm_i915_private *i915)
2277 {
2278 	struct i915_gpu_coredump *error;
2279 
2280 	spin_lock_irq(&i915->gpu_error.lock);
2281 	error = i915->gpu_error.first_error;
2282 	if (!IS_ERR_OR_NULL(error))
2283 		i915_gpu_coredump_get(error);
2284 	spin_unlock_irq(&i915->gpu_error.lock);
2285 
2286 	return error;
2287 }
2288 
2289 void i915_reset_error_state(struct drm_i915_private *i915)
2290 {
2291 	struct i915_gpu_coredump *error;
2292 
2293 	spin_lock_irq(&i915->gpu_error.lock);
2294 	error = i915->gpu_error.first_error;
2295 	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
2296 		i915->gpu_error.first_error = NULL;
2297 	spin_unlock_irq(&i915->gpu_error.lock);
2298 
2299 	if (!IS_ERR_OR_NULL(error))
2300 		i915_gpu_coredump_put(error);
2301 }
2302 
2303 void i915_disable_error_state(struct drm_i915_private *i915, int err)
2304 {
2305 	spin_lock_irq(&i915->gpu_error.lock);
2306 	if (!i915->gpu_error.first_error)
2307 		i915->gpu_error.first_error = ERR_PTR(err);
2308 	spin_unlock_irq(&i915->gpu_error.lock);
2309 }
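
/*
 * For reference, the three states of gpu_error.first_error used across
 * this file:
 *
 *	NULL		no dump stored, capture enabled
 *	valid pointer	a coredump waiting to be read from sysfs/debugfs
 *	ERR_PTR(err)	capture disabled; reads report err, and only
 *			non -ENODEV values are cleared again by
 *			i915_reset_error_state()
 */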
2310 
2311 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
2312 void intel_klog_error_capture(struct intel_gt *gt,
2313 			      intel_engine_mask_t engine_mask)
2314 {
2315 	static int g_count;
2316 	struct drm_i915_private *i915 = gt->i915;
2317 	struct i915_gpu_coredump *error;
2318 	intel_wakeref_t wakeref;
2319 	size_t buf_size = PAGE_SIZE * 128;
2320 	size_t pos_err;
2321 	char *buf, *ptr, *next;
2322 	int l_count = g_count++;
2323 	int line = 0;
2324 
2325 	/* Can't allocate memory during a reset */
2326 	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
2327 		drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
2328 			l_count, line++);
2329 		return;
2330 	}
2331 
2332 	error = READ_ONCE(i915->gpu_error.first_error);
2333 	if (error) {
2334 		drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
2335 			l_count, line++);
2336 		i915_reset_error_state(i915);
2337 	}
2338 
2339 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2340 		error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);
2341 
2342 	if (IS_ERR(error)) {
2343 		drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error state: %ld!\n",
2344 			l_count, line++, PTR_ERR(error));
2345 		return;
2346 	}
2347 
2348 	buf = kvmalloc(buf_size, GFP_KERNEL);
2349 	if (!buf) {
2350 		drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
2351 			l_count, line++);
2352 		i915_gpu_coredump_put(error);
2353 		return;
2354 	}
2355 
2356 	drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
2357 		 l_count, line++, __builtin_return_address(0));
2358 
2359 	/* Largest string length safe to print via dmesg */
2360 #	define MAX_CHUNK	800
2361 
2362 	pos_err = 0;
2363 	while (1) {
2364 		ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);
2365 
2366 		if (got <= 0)
2367 			break;
2368 
2369 		buf[got] = 0;
2370 		pos_err += got;
2371 
2372 		ptr = buf;
2373 		while (got > 0) {
2374 			size_t count;
2375 			char tag[2];
2376 
2377 			next = strnchr(ptr, got, '\n');
2378 			if (next) {
2379 				count = next - ptr;
2380 				*next = 0;
2381 				tag[0] = '>';
2382 				tag[1] = '<';
2383 			} else {
2384 				count = got;
2385 				tag[0] = '}';
2386 				tag[1] = '{';
2387 			}
2388 
2389 			if (count > MAX_CHUNK) {
2390 				size_t pos;
2391 				char *ptr2 = ptr;
2392 
2393 				for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
2394 					char chr = ptr[pos];
2395 
2396 					ptr[pos] = 0;
2397 					drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
2398 						 l_count, line++, ptr2);
2399 					ptr[pos] = chr;
2400 					ptr2 = ptr + pos;
2401 
2402 					/*
2403 					 * If spewing large amounts of data via a serial console,
2404 					 * this can be a very slow process. So be friendly and try
2405 					 * not to cause 'softlockup on CPU' problems.
2406 					 */
2407 					cond_resched();
2408 				}
2409 
2410 				if (ptr2 < (ptr + count))
2411 					drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
2412 						 l_count, line++, tag[0], ptr2, tag[1]);
2413 				else if (tag[0] == '>')
2414 					drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
2415 						 l_count, line++);
2416 			} else {
2417 				drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
2418 					 l_count, line++, tag[0], ptr, tag[1]);
2419 			}
2420 
2421 			ptr = next;
2422 			got -= count;
2423 			if (next) {
2424 				ptr++;
2425 				got--;
2426 			}
2427 
2428 			/* As above. */
2429 			cond_resched();
2430 		}
2431 
2432 		if (got)
2433 			drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
2434 				 l_count, line++, got);
2435 	}
2436 
2437 	kvfree(buf);
2438 
2439 	drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
2440 }
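
/*
 * Illustration of the chunking markers emitted above: a captured line
 * longer than MAX_CHUNK is split across several dmesg lines, e.g.
 *
 *	[Capture/0.5] }first 800 bytes...{
 *	[Capture/0.6] }next 800 bytes...{
 *	[Capture/0.7] >final bytes of the line<
 *
 * '>' and '<' bracket a completed source line, '}' and '{' mark a
 * continuation, so a post-processor can stitch the log back together.
 */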
2441 #endif
2442 
2443 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
2444 			      size_t count, loff_t *pos)
2445 {
2446 	struct i915_gpu_coredump *error;
2447 	ssize_t ret;
2448 	void *buf;
2449 
2450 	error = file->private_data;
2451 	if (!error)
2452 		return 0;
2453 
2454 	/* Bounce buffer required because of kernfs __user API convenience. */
2455 	buf = kmalloc(count, GFP_KERNEL);
2456 	if (!buf)
2457 		return -ENOMEM;
2458 
2459 	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
2460 	if (ret <= 0)
2461 		goto out;
2462 
2463 	if (!copy_to_user(ubuf, buf, ret))
2464 		*pos += ret;
2465 	else
2466 		ret = -EFAULT;
2467 
2468 out:
2469 	kfree(buf);
2470 	return ret;
2471 }
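
/*
 * Userspace view of the read() contract implemented above (illustrative,
 * error handling elided; the debugfs path depends on the DRM minor):
 *
 *	int fd = open("/sys/kernel/debug/dri/0/i915_gpu_info", O_RDONLY);
 *	char chunk[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, chunk, sizeof(chunk))) > 0)
 *		fwrite(chunk, 1, n, stdout);
 *
 * The bounce buffer is needed because i915_gpu_coredump_copy_to_buffer()
 * writes into kernel memory, not directly into __user pointers.
 */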
2472 
2473 static int gpu_state_release(struct inode *inode, struct file *file)
2474 {
2475 	i915_gpu_coredump_put(file->private_data);
2476 	return 0;
2477 }
2478 
2479 static int i915_gpu_info_open(struct inode *inode, struct file *file)
2480 {
2481 	struct drm_i915_private *i915 = inode->i_private;
2482 	struct i915_gpu_coredump *gpu;
2483 	intel_wakeref_t wakeref;
2484 
2485 	gpu = NULL;
2486 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2487 		gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
2488 
2489 	if (IS_ERR(gpu))
2490 		return PTR_ERR(gpu);
2491 
2492 	file->private_data = gpu;
2493 	return 0;
2494 }
2495 
2496 static const struct file_operations i915_gpu_info_fops = {
2497 	.owner = THIS_MODULE,
2498 	.open = i915_gpu_info_open,
2499 	.read = gpu_state_read,
2500 	.llseek = default_llseek,
2501 	.release = gpu_state_release,
2502 };
2503 
2504 static ssize_t
2505 i915_error_state_write(struct file *filp,
2506 		       const char __user *ubuf,
2507 		       size_t cnt,
2508 		       loff_t *ppos)
2509 {
2510 	struct i915_gpu_coredump *error = filp->private_data;
2511 
2512 	if (!error)
2513 		return 0;
2514 
2515 	drm_dbg(&error->i915->drm, "Resetting error state\n");
2516 	i915_reset_error_state(error->i915);
2517 
2518 	return cnt;
2519 }
2520 
2521 static int i915_error_state_open(struct inode *inode, struct file *file)
2522 {
2523 	struct i915_gpu_coredump *error;
2524 
2525 	error = i915_first_error_state(inode->i_private);
2526 	if (IS_ERR(error))
2527 		return PTR_ERR(error);
2528 
2529 	file->private_data = error;
2530 	return 0;
2531 }
2532 
2533 static const struct file_operations i915_error_state_fops = {
2534 	.owner = THIS_MODULE,
2535 	.open = i915_error_state_open,
2536 	.read = gpu_state_read,
2537 	.write = i915_error_state_write,
2538 	.llseek = default_llseek,
2539 	.release = gpu_state_release,
2540 };
2541 
2542 void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
2543 {
2544 	struct dentry *debugfs_root = i915->drm.debugfs_root;
2545 
2546 	debugfs_create_file("i915_error_state", 0644, debugfs_root, i915,
2547 			    &i915_error_state_fops);
2548 	debugfs_create_file("i915_gpu_info", 0644, debugfs_root, i915,
2549 			    &i915_gpu_info_fops);
2550 }
2551 
2552 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
2553 				const struct bin_attribute *attr, char *buf,
2554 				loff_t off, size_t count)
2555 {
2556 
2557 	struct device *kdev = kobj_to_dev(kobj);
2558 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
2559 	struct i915_gpu_coredump *gpu;
2560 	ssize_t ret = 0;
2561 
2562 	/*
2563 	 * FIXME: Concurrent clients triggering resets and reading + clearing
2564 	 * dumps can cause inconsistent sysfs reads when a user calls in with a
2565 	 * non-zero offset to complete a prior partial read but the
2566 	 * gpu_coredump has been cleared or replaced.
2567 	 */
2568 
2569 	gpu = i915_first_error_state(i915);
2570 	if (IS_ERR(gpu)) {
2571 		ret = PTR_ERR(gpu);
2572 	} else if (gpu) {
2573 		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
2574 		i915_gpu_coredump_put(gpu);
2575 	} else {
2576 		const char *str = "No error state collected\n";
2577 		size_t len = strlen(str);
2578 
2579 		if (off < len) {
2580 			ret = min_t(size_t, count, len - off);
2581 			memcpy(buf, str + off, ret);
2582 		}
2583 	}
2584 
2585 	return ret;
2586 }
2587 
2588 static ssize_t error_state_write(struct file *file, struct kobject *kobj,
2589 				 const struct bin_attribute *attr, char *buf,
2590 				 loff_t off, size_t count)
2591 {
2592 	struct device *kdev = kobj_to_dev(kobj);
2593 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
2594 
2595 	drm_dbg(&dev_priv->drm, "Resetting error state\n");
2596 	i915_reset_error_state(dev_priv);
2597 
2598 	return count;
2599 }
2600 
2601 static const struct bin_attribute error_state_attr = {
2602 	.attr.name = "error",
2603 	.attr.mode = S_IRUSR | S_IWUSR,
2604 	.size = 0,
2605 	.read = error_state_read,
2606 	.write = error_state_write,
2607 };
2608 
2609 void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
2610 {
2611 	struct device *kdev = i915->drm.primary->kdev;
2612 
2613 	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
2614 		drm_err(&i915->drm, "error_state sysfs setup failed\n");
2615 }
2616 
2617 void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
2618 {
2619 	struct device *kdev = i915->drm.primary->kdev;
2620 
2621 	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
2622 }
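
/*
 * Userspace view of the sysfs node registered above (illustrative shell
 * transcript; the card index varies):
 *
 *	# cat /sys/class/drm/card0/error > gpu_hang.log		collect
 *	# echo 1 > /sys/class/drm/card0/error			clear
 *
 * Any write clears the stored dump via error_state_write(); the payload
 * itself is ignored.
 */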
2623