// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>

#include "instructions/xe_alu_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_stats.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_guc_rc.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

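/**
 * xe_gt_alloc - allocate and minimally initialize a GT
 * @tile: &xe_tile to which the new GT belongs
 *
 * The GT is allocated as a DRM-managed object. SR-IOV VFs that need a shared
 * workqueue reuse the primary GT's ordered workqueue; otherwise a new one is
 * allocated.
 *
 * Return: pointer to the new &xe_gt, or an ERR_PTR() on failure.
 */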
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_device *drm = &xe->drm;
	bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
		IS_SRIOV_VF(xe);
	struct workqueue_struct *ordered_wq;
	struct xe_gt *gt;

	gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	if (shared_wq && tile->primary_gt->ordered_wq)
		ordered_wq = tile->primary_gt->ordered_wq;
	else
		ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
							  WQ_MEM_RECLAIM);
	if (IS_ERR(ordered_wq))
		return ERR_CAST(ordered_wq);

	gt->ordered_wq = ordered_wq;

	return gt;
}

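/*
 * Put the GT's software state back into a known state ahead of a reset or
 * reload; currently this only disables GuC submission.
 */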
void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, the driver will not
	 * reload on TGL
	 */
	xe_guc_submit_disable(&gt->uc.guc);
}

static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	if (xe_gt_is_main_type(gt)) {
		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}

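/*
 * On graphics IP version 30+ with flat CCS, enable 1-way coherent compression
 * in the GAM request stream and GAM walker controls (per the register field
 * names programmed below).
 */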
static void xe_gt_enable_comp_1wcoh(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	if (IS_SRIOV_VF(xe))
		return;

	if (GRAPHICS_VER(xe) >= 30 && xe->info.has_flat_ccs) {
		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref.domains)
			return;

		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= EN_CMP_1WCOH;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

		if (xe_gt_is_media_type(gt)) {
			xe_mmio_rmw32(&gt->mmio, XE2_GAMWALK_CTRL_MEDIA, 0, EN_CMP_1WCOH_GW);
		} else {
			reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMWALK_CTRL_3D);
			reg |= EN_CMP_1WCOH_GW;
			xe_gt_mcr_multicast_write(gt, XE2_GAMWALK_CTRL_3D, reg);
		}
	}
}

static void gt_reset_worker(struct work_struct *w);

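/* Submit @bb on @q and synchronously wait up to @timeout_jiffies for completion */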
static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
			 long timeout_jiffies, bool force_reset)
{
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job))
		return PTR_ERR(job);

	job->ring_ops_force_reset = force_reset;

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
	dma_fence_put(fence);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

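/* Submit an empty batch so the hardware switches away from the current LRC */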
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_bb *bb;
	int ret;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	ret = emit_job_sync(q, bb, HZ, false);
	xe_bb_free(bb, NULL);

	return ret;
}

/* Dwords required to emit a RMW of a register */
#define EMIT_RMW_DW 20

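/*
 * Build and submit a batch that applies this engine's LRC workarounds: a
 * single big LRI covers masked/clear-all registers, while the rest are
 * handled with a GPR + MI_MATH read-modify-write sequence, so the values get
 * baked into the recorded default LRC.
 */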
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_hw_engine *hwe = q->hwe;
	struct xe_reg_sr *sr = &hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	int count_rmw = 0, count_rmw_mcr = 0, count = 0, ret;
	unsigned long idx;
	struct xe_bb *bb;
	size_t bb_len = 0;
	u32 *cs;

	/* count RMW registers as those will be handled separately */
	xa_for_each(&sr->xa, idx, entry) {
		if (entry->reg.masked || entry->clr_bits == ~0)
			++count;
		else if (entry->reg.mcr)
			++count_rmw_mcr;
		else
			++count_rmw;
	}

	if (count)
		bb_len += count * 2 + 1;

	/*
	 * RMW of MCR registers is the same as a normal RMW, except an
	 * additional LRI (3 dwords) is required per register to steer the read
	 * to a non-terminated instance.
	 *
	 * We could probably shorten the batch slightly by eliding the
	 * steering for consecutive MCR registers that have the same
	 * group/instance target, but it's not worth the extra complexity to do
	 * so.
	 */
	bb_len += count_rmw * EMIT_RMW_DW;
	bb_len += count_rmw_mcr * (EMIT_RMW_DW + 3);

	/*
	 * After doing all RMW, we need 7 trailing dwords to clean up,
	 * plus an additional 3 dwords to reset steering if any of the
	 * registers were MCR.
	 */
	if (count_rmw || count_rmw_mcr)
		bb_len += 7 + (count_rmw_mcr ? 3 : 0);

	if (hwe->class == XE_ENGINE_CLASS_RENDER)
		/*
		 * Big enough to emit all of the context's 3DSTATE via
		 * xe_lrc_emit_hwe_state_instructions()
		 */
		bb_len += xe_gt_lrc_size(gt, hwe->class) / sizeof(u32);

	xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", hwe->name, bb_len);

	bb = xe_bb_new(gt, bb_len, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	cs = bb->cs;

	if (count) {
		/*
		 * Emit a single LRI with all non-RMW regs: 1 leading dw +
		 * 2 dw per reg
		 */

		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			u32 val;

			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits == ~0)
				val = 0;
			else
				continue;

			val |= entry->set_bits;

			*cs++ = reg.addr;
			*cs++ = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	if (count_rmw || count_rmw_mcr) {
		xa_for_each(&sr->xa, idx, entry) {
			if (entry->reg.masked || entry->clr_bits == ~0)
				continue;

			if (entry->reg.mcr) {
				struct xe_reg_mcr reg = { .__reg.raw = entry->reg.raw };
				u8 group, instance;

				xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);
				*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
				*cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(hwe->mmio_base).addr;
				*cs++ = SELECTIVE_READ_ADDRESSING |
					REG_FIELD_PREP(SELECTIVE_READ_GROUP, group) |
					REG_FIELD_PREP(SELECTIVE_READ_INSTANCE, instance);
			}

			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
			*cs++ = entry->reg.addr;
			*cs++ = CS_GPR_REG(0, 0).addr;

			*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
				MI_LRI_LRM_CS_MMIO;
			*cs++ = CS_GPR_REG(0, 1).addr;
			*cs++ = entry->clr_bits;
			*cs++ = CS_GPR_REG(0, 2).addr;
			*cs++ = entry->set_bits;

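			/* GPR0 = (reg & ~clr_bits) | set_bits */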
			*cs++ = MI_MATH(8);
			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
			*cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
			*cs++ = CS_ALU_INSTR_AND;
			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
			*cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
			*cs++ = CS_ALU_INSTR_OR;
			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);

			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
			*cs++ = CS_GPR_REG(0, 0).addr;
			*cs++ = entry->reg.addr;

			xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x%s\n",
				  entry->reg.addr, entry->clr_bits, entry->set_bits,
				  entry->reg.mcr ? " (MCR)" : "");
		}

		/* reset used GPR */
		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
			MI_LRI_LRM_CS_MMIO;
		*cs++ = CS_GPR_REG(0, 0).addr;
		*cs++ = 0;
		*cs++ = CS_GPR_REG(0, 1).addr;
		*cs++ = 0;
		*cs++ = CS_GPR_REG(0, 2).addr;
		*cs++ = 0;

		/* reset steering */
		if (count_rmw_mcr) {
			*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
			*cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr;
			*cs++ = 0;
		}
	}

	cs = xe_lrc_emit_hwe_state_instructions(q, cs);

	bb->len = cs - bb->cs;

	/* only VFs need to trigger reset to get a clean NULL context */
	ret = emit_job_sync(q, bb, HZ, IS_SRIOV_VF(gt_to_xe(gt)));

	xe_bb_free(bb, NULL);

	return ret;
}

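/**
 * xe_gt_record_default_lrcs - record the default ("golden") LRC state
 * @gt: the GT object
 *
 * For each engine class: run the LRC workaround batch on a fresh exec queue,
 * switch the hardware to a different context with a nop job, then snapshot
 * the resulting context image as the class's default LRC.
 *
 * Return: 0 on success, negative error code on failure.
 */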
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

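/*
 * Apply device workaround 14026539277 on the primary GT by updating the
 * CECTRL field of L2COMPUTESIDECTRL.
 */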
static void wa_14026539277(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 val;

	/*
	 * FIXME: We currently can't use FUNC(xe_rtp_match_not_sriov_vf) in the
	 * rules for Wa_14026539277 due to xe_wa_process_device_oob() being
	 * called before xe_sriov_probe_early(); and we can't move the call to
	 * the former to happen after the latter because MMIO read functions
	 * already depend on a device OOB workaround.  This needs to be fixed by
	 * allowing workaround checks to happen at different stages of driver
	 * initialization.
	 */
	if (IS_SRIOV_VF(xe))
		return;

	if (!XE_DEVICE_WA(xe, 14026539277))
		return;

	if (!xe_gt_is_main_type(gt))
		return;

	val = xe_gt_mcr_unicast_read_any(gt, L2COMPUTESIDECTRL);
	val &= ~CECTRL;
	val |= CECTRL_CENODATA_ALWAYS;
	xe_gt_mcr_multicast_write(gt, L2COMPUTESIDECTRL, val);
}

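/**
 * xe_gt_init_early - early GT initialization
 * @gt: the GT object
 *
 * Covers SR-IOV PF/VF early setup, workaround and tuning processing,
 * force-wake and TLB-invalidation bring-up, and the first GT MMIO accesses
 * (MCR init and PAT programming).
 *
 * Return: 0 on success, negative error code on failure.
 */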
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		err = xe_gt_sriov_vf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_gt_init(gt);
	if (err)
		return err;

	err = xe_tuning_init(gt);
	if (err)
		return err;

	xe_wa_process_gt_oob(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	err = xe_gt_tlb_inval_init_early(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	/*
	 * Only after this point can GT-specific MMIO operations
	 * (including things like communication with the GuC)
	 * be performed.
	 */
	xe_gt_mmio_init(gt);

	err = xe_uc_init_noalloc(&gt->uc);
	if (err)
		return err;

	err = xe_gt_stats_init(gt);
	if (err)
		return err;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return -ETIMEDOUT;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

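/* Initialization steps that only require the GT force-wake domain to be held */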
static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
	int err;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return -ETIMEDOUT;

	err = xe_uc_init(&gt->uc);
	if (err)
		return err;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_gt_enable_host_l2_vram(gt);
	xe_gt_enable_comp_1wcoh(gt);

	if (xe_gt_is_main_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			return err;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err) {
		dump_pat_on_error(gt);
		return err;
	}

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		return err;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		return err;

	/*
	 * Stash hardware-reported version.  Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);

	/*
	 * Wa_14026539277 can't be implemented as a regular GT workaround (i.e.
	 * as an entry in gt_was[]) for two reasons: it is actually a device
	 * workaround that happens to involve programming a GT register; and it
	 * needs to be applied early to avoid getting the hardware in a bad
	 * state before we have a chance to do the necessary programming.
	 */
	wa_14026539277(gt);

	return 0;
}

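/* Initialization steps that require all force-wake domains to be held */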
static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
	int err;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
		return -ETIMEDOUT;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_wa_process_gt(gt);
	xe_tuning_process_gt(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		return err;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		return err;

	err = xe_hw_engines_init(gt);
	if (err)
		return err;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		return err;

	if (xe_gt_is_main_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool))
				return PTR_ERR(gt->usm.bb_pool);
		}
	}

	if (xe_gt_is_main_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_migrate_init(tile->migrate);
		if (err)
			return err;
	}

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	return 0;
}

static void xe_gt_fini(void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	if (disable_work_sync(&gt->reset.worker))
		/*
		 * If gt_reset_worker was halted from executing, take care of
		 * releasing the rpm reference here.
		 */
		xe_pm_runtime_put(gt_to_xe(gt));

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

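/**
 * xe_gt_init - main GT initialization
 * @gt: the GT object
 *
 * Sets up the reset worker and fence IRQs, registers the cleanup action, and
 * then runs the staged force-wake init, GT idle and frequency support, EU
 * stall sampling, and (on VFs) SR-IOV VF init.
 *
 * Return: 0 on success, negative error code on failure.
 */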
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
	if (err)
		return err;

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_init_with_gt_forcewake(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = gt_init_with_all_forcewake(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	err = xe_eu_stall_init(gt);
	if (err)
		return err;

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		err = xe_gt_sriov_vf_init(gt);
		if (err)
			return err;
	}

	return 0;
}

/**
 * xe_gt_mmio_init() - Initialize GT's MMIO access
 * @gt: the GT object
 *
 * Initialize GT's MMIO accessor, which will be used to access registers inside
 * this GT.
 */
void xe_gt_mmio_init(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
	} else {
		gt->mmio.adj_offset = 0;
		gt->mmio.adj_limit = 0;
	}

	if (IS_SRIOV_VF(xe))
		gt->mmio.sriov_vf_gt = gt;
}

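/*
 * Recompute the mask and per-class counts of engines exposed to userspace,
 * skipping engines reserved for kernel use.
 */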
void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

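/*
 * Trigger a full GT reset through GDRST and wait for the hardware to clear
 * GRDOM_FULL; VFs delegate to the SR-IOV VF reset path instead.
 */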
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_reset(gt);

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

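/*
 * Bring the GT back to an operational state after a reset: reprogram PAT and
 * workarounds, re-enable the rings, reload and restart the uC, and restore
 * MOCS, CCS mode and frequencies.
 */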
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);
	xe_gt_enable_comp_1wcoh(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);

	for_each_hw_engine(hwe, gt, id)
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

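/*
 * Worker that performs the actual GT reset: quiesce the uC and TLB
 * invalidation, reset, then restart. On failure the device is declared
 * wedged. Releases the runtime PM reference taken in xe_gt_reset_async().
 */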
static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
	unsigned int fw_ref;
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		goto err_pm_put;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		goto err_pm_put;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_out;
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_stop_prepare(gt);

	xe_guc_rc_disable(&gt->uc.guc);
	xe_uc_stop_prepare(&gt->uc);
	xe_pagefault_reset(gt_to_xe(gt), gt);

	xe_uc_stop(&gt->uc);

	xe_tlb_inval_reset(&gt->tlb_inval);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	/* Pair with get while enqueueing the work in xe_gt_reset_async() */
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return;

err_out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	XE_WARN_ON(xe_uc_start(&gt->uc));

err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
	xe_device_declare_wedged(gt_to_xe(gt));
err_pm_put:
	xe_pm_runtime_put(gt_to_xe(gt));
}

void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");

	/* Pair with put in gt_reset_worker() if work is enqueued */
	xe_pm_runtime_get_noresume(gt_to_xe(gt));
	if (!queue_work(gt->ordered_wq, &gt->reset.worker))
		xe_pm_runtime_put(gt_to_xe(gt));
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_uc_suspend_prepare(&gt->uc);
}

int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	err = xe_uc_suspend(&gt->uc);
	if (err) {
		xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
		return err;
	}

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	xe_gt_dbg(gt, "suspended\n");

	return 0;
}

void xe_gt_shutdown(struct xe_gt *gt)
{
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	do_gt_reset(gt);
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
	    XE_GT_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

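/*
 * Restore the GT after suspend: rerun the GT restart sequence under full
 * force-wake and re-enable power gating.
 */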
int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "resuming\n");
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	err = do_gt_restart(gt);
	if (err)
		return err;

	xe_gt_idle_enable_pg(gt);

	xe_gt_dbg(gt, "resumed\n");

	return 0;
}

/**
 * xe_gt_runtime_suspend() - GT runtime suspend
 * @gt: the GT object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_runtime_suspend(struct xe_gt *gt)
{
	xe_gt_dbg(gt, "runtime suspending\n");

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "runtime suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	xe_uc_runtime_suspend(&gt->uc);
	xe_gt_disable_host_l2_vram(gt);

	xe_gt_dbg(gt, "runtime suspended\n");

	return 0;
}

/**
 * xe_gt_runtime_resume() - GT runtime resume
 * @gt: the GT object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_runtime_resume(struct xe_gt *gt)
{
	xe_gt_dbg(gt, "runtime resuming\n");

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "runtime resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	xe_gt_enable_host_l2_vram(gt);
	xe_uc_runtime_resume(&gt->uc);

	xe_gt_dbg(gt, "runtime resumed\n");

	return 0;
}

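/*
 * Look up a hardware engine on @gt by class and either its physical or
 * logical instance number, depending on @logical.
 */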
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		    (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

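/*
 * Return any engine that shares a reset domain with @class; render and
 * compute engines share one, so either class matches the other.
 */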
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							 enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

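/* Return the first available hardware engine on @gt, or NULL if there is none */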
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT, which stops all submission, saves desired debug state, and
 * cleans up anything which could time out.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_tlb_inval_reset(&gt->tlb_inval);
}