xref: /linux/drivers/gpu/drm/xe/xe_irq.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_irq.h"
7 
8 #include <linux/sched/clock.h>
9 
10 #include <drm/drm_managed.h>
11 
12 #include "display/xe_display.h"
13 #include "regs/xe_guc_regs.h"
14 #include "regs/xe_irq_regs.h"
15 #include "xe_device.h"
16 #include "xe_drv.h"
17 #include "xe_gsc_proxy.h"
18 #include "xe_gt.h"
19 #include "xe_guc.h"
20 #include "xe_hw_engine.h"
21 #include "xe_i2c.h"
22 #include "xe_memirq.h"
23 #include "xe_mmio.h"
24 #include "xe_pxp.h"
25 #include "xe_sriov.h"
26 #include "xe_tile.h"
27 
28 /*
29  * Interrupt registers for a unit are always consecutive and ordered
30  * ISR, IMR, IIR, IER.
31  */
32 #define IMR(offset)				XE_REG(offset + 0x4)
33 #define IIR(offset)				XE_REG(offset + 0x8)
34 #define IER(offset)				XE_REG(offset + 0xc)
35 
36 static int xe_irq_msix_init(struct xe_device *xe);
37 static void xe_irq_msix_free(struct xe_device *xe);
38 static int xe_irq_msix_request_irqs(struct xe_device *xe);
39 static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
40 
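/*
 * Sanity check used before enabling an interrupt source: the IIR is expected
 * to be idle.  If it is not, warn and clear it (twice, since the IIR can
 * queue up a second event).
 */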
41 static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
42 {
43 	u32 val = xe_mmio_read32(mmio, reg);
44 
45 	if (val == 0)
46 		return;
47 
48 	drm_WARN(&mmio->tile->xe->drm, 1,
49 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
50 		 reg.addr, val);
51 	xe_mmio_write32(mmio, reg, 0xffffffff);
52 	xe_mmio_read32(mmio, reg);
53 	xe_mmio_write32(mmio, reg, 0xffffffff);
54 	xe_mmio_read32(mmio, reg);
55 }
56 
57 /*
58  * Unmask and enable the specified interrupts.  Does not check current state,
59  * so any bits not specified here will become masked and disabled.
60  */
61 static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
62 {
63 	struct xe_mmio *mmio = &tile->mmio;
64 
65 	/*
66 	 * If we're just enabling an interrupt now, it shouldn't already
67 	 * be raised in the IIR.
68 	 */
69 	assert_iir_is_zero(mmio, IIR(irqregs));
70 
71 	xe_mmio_write32(mmio, IER(irqregs), bits);
72 	xe_mmio_write32(mmio, IMR(irqregs), ~bits);
73 
74 	/* Posting read */
75 	xe_mmio_read32(mmio, IMR(irqregs));
76 }
77 
78 /* Mask and disable all interrupts. */
79 static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
80 {
81 	struct xe_mmio *mmio = &tile->mmio;
82 
83 	xe_mmio_write32(mmio, IMR(irqregs), ~0);
84 	/* Posting read */
85 	xe_mmio_read32(mmio, IMR(irqregs));
86 
87 	xe_mmio_write32(mmio, IER(irqregs), 0);
88 
89 	/* IIR can theoretically queue up two events. Be paranoid. */
90 	xe_mmio_write32(mmio, IIR(irqregs), ~0);
91 	xe_mmio_read32(mmio, IIR(irqregs));
92 	xe_mmio_write32(mmio, IIR(irqregs), ~0);
93 	xe_mmio_read32(mmio, IIR(irqregs));
94 }
95 
96 static u32 xelp_intr_disable(struct xe_device *xe)
97 {
98 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
99 
100 	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);
101 
102 	/*
103 	 * Now with master disabled, get a sample of level indications
104 	 * for this interrupt. Indications will be cleared on related acks.
105 	 * New indications can and will light up during processing,
106 	 * and will generate a new interrupt after enabling the master.
107 	 */
108 	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
109 }
110 
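/*
 * Ack a pending GU MISC interrupt and return its IIR so the caller can pass
 * it on to the display code once the master interrupt has been re-enabled.
 */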
111 static u32
112 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
113 {
114 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
115 	u32 iir;
116 
117 	if (!(master_ctl & GU_MISC_IRQ))
118 		return 0;
119 
120 	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
121 	if (likely(iir))
122 		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);
123 
124 	return iir;
125 }
126 
127 static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
128 {
129 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
130 
131 	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
132 	if (stall)
133 		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
134 }
135 
136 /* Enable/unmask the HWE interrupts for a specific GT's engines. */
137 void xe_irq_enable_hwe(struct xe_gt *gt)
138 {
139 	struct xe_device *xe = gt_to_xe(gt);
140 	struct xe_mmio *mmio = &gt->mmio;
141 	u32 ccs_mask, bcs_mask;
142 	u32 irqs, dmask, smask;
143 	u32 gsc_mask = 0;
144 	u32 heci_mask = 0;
145 
146 	if (xe_device_uses_memirq(xe))
147 		return;
148 
149 	if (xe_device_uc_enabled(xe)) {
150 		irqs = GT_RENDER_USER_INTERRUPT |
151 			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
152 	} else {
153 		irqs = GT_RENDER_USER_INTERRUPT |
154 		       GT_CS_MASTER_ERROR_INTERRUPT |
155 		       GT_CONTEXT_SWITCH_INTERRUPT |
156 		       GT_WAIT_SEMAPHORE_INTERRUPT;
157 	}
158 
159 	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
160 	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
161 
162 	dmask = irqs << 16 | irqs;
163 	smask = irqs << 16;
164 
165 	if (xe_gt_is_main_type(gt)) {
166 		/* Enable interrupts for each engine class */
167 		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
168 		if (ccs_mask)
169 			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
170 
171 		/* Unmask interrupts for each engine instance */
172 		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
173 		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
174 		if (bcs_mask & (BIT(1)|BIT(2)))
175 			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
176 		if (bcs_mask & (BIT(3)|BIT(4)))
177 			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
178 		if (bcs_mask & (BIT(5)|BIT(6)))
179 			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
180 		if (bcs_mask & (BIT(7)|BIT(8)))
181 			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
182 		if (ccs_mask & (BIT(0)|BIT(1)))
183 			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
184 		if (ccs_mask & (BIT(2)|BIT(3)))
185 			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
186 	}
187 
188 	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
189 		/* Enable interrupts for each engine class */
190 		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
191 
192 		/* Unmask interrupts for each engine instance */
193 		xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
194 		xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
195 		xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
196 
197 		/*
198 		 * The HECI2 interrupt is enabled via the same register as the
199 		 * GSCCS interrupts, but it has its own mask register.
200 		 */
201 		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
202 			gsc_mask = irqs | GSC_ER_COMPLETE;
203 			heci_mask = GSC_IRQ_INTF(1);
204 		} else if (xe->info.has_heci_gscfi) {
205 			gsc_mask = GSC_IRQ_INTF(1);
206 		}
207 
208 		if (gsc_mask) {
209 			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
210 			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
211 		}
212 		if (heci_mask)
213 			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
214 
215 		if (xe_pxp_is_supported(xe)) {
216 			u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
217 				       KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
218 				       KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;
219 
220 			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
221 			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
222 		}
223 	}
224 }
225 
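/*
 * Resolve the identity of a single GT interrupt bit: select the bit via
 * IIR_REG_SELECTOR, poll INTR_IDENTITY_REG until the hardware marks the data
 * valid (or ~100us elapse), then ack and return the identity (0 on timeout).
 */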
226 static u32
227 gt_engine_identity(struct xe_device *xe,
228 		   struct xe_mmio *mmio,
229 		   const unsigned int bank,
230 		   const unsigned int bit)
231 {
232 	u32 timeout_ts;
233 	u32 ident;
234 
235 	lockdep_assert_held(&xe->irq.lock);
236 
237 	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));
238 
239 	/*
240 	 * NB: Specs do not specify how long to spin wait,
241 	 * so we do ~100us as an educated guess.
242 	 */
243 	timeout_ts = (local_clock() >> 10) + 100;
244 	do {
245 		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
246 	} while (!(ident & INTR_DATA_VALID) &&
247 		 !time_after32(local_clock() >> 10, timeout_ts));
248 
249 	if (unlikely(!(ident & INTR_DATA_VALID))) {
250 		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
251 			bank, bit, ident);
252 		return 0;
253 	}
254 
255 	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);
256 
257 	return ident;
258 }
259 
260 #define   OTHER_MEDIA_GUC_INSTANCE           16
261 
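/*
 * Dispatch XE_ENGINE_CLASS_OTHER interrupts: GuC instances go to the GuC
 * handler of the matching GT, the GSC/HECI2 instance goes to the GSC proxy
 * handler, and anything else that is not a GuC instance triggers a one-time
 * warning.
 */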
262 static void
263 gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
264 {
265 	if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
266 		return xe_guc_irq_handler(&gt->uc.guc, iir);
267 	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
268 		return xe_guc_irq_handler(&gt->uc.guc, iir);
269 	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
270 		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);
271 
272 	if (instance != OTHER_GUC_INSTANCE &&
273 	    instance != OTHER_MEDIA_GUC_INSTANCE) {
274 		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
275 			  instance, iir);
276 	}
277 }
278 
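/*
 * Map an engine class/instance reported in the identity register to the GT
 * that owns it.  Platforms with media version < 13 have no standalone media
 * GT, so everything is handled by the primary GT; otherwise video engines and
 * the media GuC/GSC instances belong to the media GT.
 */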
279 static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
280 				    enum xe_engine_class class,
281 				    unsigned int instance)
282 {
283 	struct xe_device *xe = tile_to_xe(tile);
284 
285 	if (MEDIA_VER(xe) < 13)
286 		return tile->primary_gt;
287 
288 	switch (class) {
289 	case XE_ENGINE_CLASS_VIDEO_DECODE:
290 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
291 		return tile->media_gt;
292 	case XE_ENGINE_CLASS_OTHER:
293 		switch (instance) {
294 		case OTHER_MEDIA_GUC_INSTANCE:
295 		case OTHER_GSC_INSTANCE:
296 		case OTHER_GSC_HECI2_INSTANCE:
297 			return tile->media_gt;
298 		default:
299 			break;
300 		}
301 		fallthrough;
302 	default:
303 		return tile->primary_gt;
304 	}
305 }
306 
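/*
 * Process the GT-level interrupts of a tile: for each pending bank, latch
 * GT_INTR_DW, resolve an identity for every set bit, ack the bank, and then
 * dispatch each identity to its hardware engine or, for the OTHER class, to
 * the HECI/KCR/GuC/GSC handlers.  The whole walk is done under xe->irq.lock.
 */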
307 static void gt_irq_handler(struct xe_tile *tile,
308 			   u32 master_ctl, unsigned long *intr_dw,
309 			   u32 *identity)
310 {
311 	struct xe_device *xe = tile_to_xe(tile);
312 	struct xe_mmio *mmio = &tile->mmio;
313 	unsigned int bank, bit;
314 	u16 instance, intr_vec;
315 	enum xe_engine_class class;
316 	struct xe_hw_engine *hwe;
317 
318 	spin_lock(&xe->irq.lock);
319 
320 	for (bank = 0; bank < 2; bank++) {
321 		if (!(master_ctl & GT_DW_IRQ(bank)))
322 			continue;
323 
324 		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
325 		for_each_set_bit(bit, intr_dw + bank, 32)
326 			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
327 		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);
328 
329 		for_each_set_bit(bit, intr_dw + bank, 32) {
330 			struct xe_gt *engine_gt;
331 
332 			class = INTR_ENGINE_CLASS(identity[bit]);
333 			instance = INTR_ENGINE_INSTANCE(identity[bit]);
334 			intr_vec = INTR_ENGINE_INTR(identity[bit]);
335 
336 			engine_gt = pick_engine_gt(tile, class, instance);
337 
338 			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
339 			if (hwe) {
340 				xe_hw_engine_handle_irq(hwe, intr_vec);
341 				continue;
342 			}
343 
344 			if (class == XE_ENGINE_CLASS_OTHER) {
345 				/*
346 				 * HECI GSCFI interrupts come from outside of GT.
347 				 * KCR irqs come from inside GT but are handled
348 				 * by the global PXP subsystem.
349 				 */
350 				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
351 					xe_heci_gsc_irq_handler(xe, intr_vec);
352 				else if (instance == OTHER_KCR_INSTANCE)
353 					xe_pxp_irq_handler(xe, intr_vec);
354 				else
355 					gt_other_irq_handler(engine_gt, instance, intr_vec);
356 			}
357 		}
358 	}
359 
360 	spin_unlock(&xe->irq.lock);
361 }
362 
363 /*
364  * Top-level interrupt handler for Xe_LP platforms (which do not have
365  * a "master tile" interrupt register).
366  */
367 static irqreturn_t xelp_irq_handler(int irq, void *arg)
368 {
369 	struct xe_device *xe = arg;
370 	struct xe_tile *tile = xe_device_get_root_tile(xe);
371 	u32 master_ctl, gu_misc_iir;
372 	unsigned long intr_dw[2];
373 	u32 identity[32];
374 
375 	if (!atomic_read(&xe->irq.enabled))
376 		return IRQ_NONE;
377 
378 	master_ctl = xelp_intr_disable(xe);
379 	if (!master_ctl) {
380 		xelp_intr_enable(xe, false);
381 		return IRQ_NONE;
382 	}
383 
384 	gt_irq_handler(tile, master_ctl, intr_dw, identity);
385 
386 	xe_display_irq_handler(xe, master_ctl);
387 
388 	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
389 
390 	xelp_intr_enable(xe, false);
391 
392 	xe_display_irq_enable(xe, gu_misc_iir);
393 
394 	return IRQ_HANDLED;
395 }
396 
397 static u32 dg1_intr_disable(struct xe_device *xe)
398 {
399 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
400 	u32 val;
401 
402 	/* First disable interrupts */
403 	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);
404 
405 	/* Get the indication levels and ack the master unit */
406 	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
407 	if (unlikely(!val))
408 		return 0;
409 
410 	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);
411 
412 	return val;
413 }
414 
415 static void dg1_intr_enable(struct xe_device *xe, bool stall)
416 {
417 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
418 
419 	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
420 	if (stall)
421 		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
422 }
423 
424 /*
425  * Top-level interrupt handler for Xe_LP+ and beyond.  These platforms have
426  * a "master tile" interrupt register which must be consulted before the
427  * "graphics master" interrupt register.
428  */
429 static irqreturn_t dg1_irq_handler(int irq, void *arg)
430 {
431 	struct xe_device *xe = arg;
432 	struct xe_tile *tile;
433 	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
434 	unsigned long intr_dw[2];
435 	u32 identity[32];
436 	u8 id;
437 
438 	/* TODO: This really shouldn't be copied+pasted */
439 
440 	if (!atomic_read(&xe->irq.enabled))
441 		return IRQ_NONE;
442 
443 	master_tile_ctl = dg1_intr_disable(xe);
444 	if (!master_tile_ctl) {
445 		dg1_intr_enable(xe, false);
446 		return IRQ_NONE;
447 	}
448 
449 	for_each_tile(tile, xe, id) {
450 		struct xe_mmio *mmio = &tile->mmio;
451 
452 		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
453 			continue;
454 
455 		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);
456 
457 		/*
458 		 * We might be in the IRQ handler just as PCIe DPC is initiated,
459 		 * in which case all MMIO reads return all 1's. Ignore this
460 		 * IRQ as the device is inaccessible.
461 		 */
462 		if (master_ctl == REG_GENMASK(31, 0)) {
463 			drm_dbg(&tile_to_xe(tile)->drm,
464 				"Ignore this IRQ as device might be in DPC containment.\n");
465 			return IRQ_HANDLED;
466 		}
467 
468 		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);
469 
470 		gt_irq_handler(tile, master_ctl, intr_dw, identity);
471 
472 		/*
473 		 * Display interrupts (including display backlight operations
474 		 * that get reported as Gunit GSE) would only be hooked up to
475 		 * the primary tile.
476 		 */
477 		if (id == 0) {
478 			if (xe->info.has_heci_cscfi)
479 				xe_heci_csc_irq_handler(xe, master_ctl);
480 			xe_display_irq_handler(xe, master_ctl);
481 			xe_i2c_irq_handler(xe, master_ctl);
482 			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
483 		}
484 	}
485 
486 	dg1_intr_enable(xe, false);
487 	xe_display_irq_enable(xe, gu_misc_iir);
488 
489 	return IRQ_HANDLED;
490 }
491 
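/*
 * Disable and mask all GT interrupt sources of a tile: the engine classes,
 * the GSC/HECI/KCR block where present, and the GPM and GuC interrupts.
 */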
492 static void gt_irq_reset(struct xe_tile *tile)
493 {
494 	struct xe_mmio *mmio = &tile->mmio;
495 
496 	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
497 						   XE_ENGINE_CLASS_COMPUTE);
498 	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
499 						   XE_ENGINE_CLASS_COPY);
500 
501 	/* Disable interrupts for the RCS, BCS, VCS and VECS engine classes. */
502 	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
503 	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
504 	if (ccs_mask)
505 		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);
506 
507 	/* Restore interrupt masks on the RCS, BCS, VCS and VECS engines. */
508 	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK,	~0);
509 	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK,	~0);
510 	if (bcs_mask & (BIT(1)|BIT(2)))
511 		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
512 	if (bcs_mask & (BIT(3)|BIT(4)))
513 		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
514 	if (bcs_mask & (BIT(5)|BIT(6)))
515 		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
516 	if (bcs_mask & (BIT(7)|BIT(8)))
517 		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
518 	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK,	~0);
519 	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK,	~0);
520 	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,	~0);
521 	if (ccs_mask & (BIT(0)|BIT(1)))
522 		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
523 	if (ccs_mask & (BIT(2)|BIT(3)))
524 		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
525 
526 	if ((tile->media_gt &&
527 	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
528 	    tile_to_xe(tile)->info.has_heci_gscfi) {
529 		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
530 		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
531 		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
532 		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
533 		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
534 	}
535 
536 	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
537 	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK,  ~0);
538 	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE,	 0);
539 	xe_mmio_write32(mmio, GUC_SG_INTR_MASK,		~0);
540 }
541 
542 static void xelp_irq_reset(struct xe_tile *tile)
543 {
544 	xelp_intr_disable(tile_to_xe(tile));
545 
546 	gt_irq_reset(tile);
547 
548 	if (IS_SRIOV_VF(tile_to_xe(tile)))
549 		return;
550 
551 	mask_and_disable(tile, PCU_IRQ_OFFSET);
552 }
553 
554 static void dg1_irq_reset(struct xe_tile *tile)
555 {
556 	if (xe_tile_is_root(tile))
557 		dg1_intr_disable(tile_to_xe(tile));
558 
559 	gt_irq_reset(tile);
560 
561 	if (IS_SRIOV_VF(tile_to_xe(tile)))
562 		return;
563 
564 	mask_and_disable(tile, PCU_IRQ_OFFSET);
565 }
566 
567 static void dg1_irq_reset_mstr(struct xe_tile *tile)
568 {
569 	struct xe_mmio *mmio = &tile->mmio;
570 
571 	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
572 }
573 
574 static void vf_irq_reset(struct xe_device *xe)
575 {
576 	struct xe_tile *tile;
577 	unsigned int id;
578 
579 	xe_assert(xe, IS_SRIOV_VF(xe));
580 
581 	if (GRAPHICS_VERx100(xe) < 1210)
582 		xelp_intr_disable(xe);
583 	else
584 		xe_assert(xe, xe_device_has_memirq(xe));
585 
586 	for_each_tile(tile, xe, id) {
587 		if (xe_device_has_memirq(xe))
588 			xe_memirq_reset(&tile->memirq);
589 		else
590 			gt_irq_reset(tile);
591 	}
592 }
593 
594 static void xe_irq_reset(struct xe_device *xe)
595 {
596 	struct xe_tile *tile;
597 	u8 id;
598 
599 	if (IS_SRIOV_VF(xe))
600 		return vf_irq_reset(xe);
601 
602 	if (xe_device_uses_memirq(xe)) {
603 		for_each_tile(tile, xe, id)
604 			xe_memirq_reset(&tile->memirq);
605 	}
606 
607 	for_each_tile(tile, xe, id) {
608 		if (GRAPHICS_VERx100(xe) >= 1210)
609 			dg1_irq_reset(tile);
610 		else
611 			xelp_irq_reset(tile);
612 	}
613 
614 	tile = xe_device_get_root_tile(xe);
615 	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
616 	xe_display_irq_reset(xe);
617 
618 	/*
619 	 * The tile's top-level status register should be the last one
620 	 * to be reset to avoid possible bit re-latching from lower
621 	 * level interrupts.
622 	 */
623 	if (GRAPHICS_VERx100(xe) >= 1210) {
624 		for_each_tile(tile, xe, id)
625 			dg1_irq_reset_mstr(tile);
626 	}
627 }
628 
629 static void vf_irq_postinstall(struct xe_device *xe)
630 {
631 	struct xe_tile *tile;
632 	unsigned int id;
633 
634 	for_each_tile(tile, xe, id)
635 		if (xe_device_has_memirq(xe))
636 			xe_memirq_postinstall(&tile->memirq);
637 
638 	if (GRAPHICS_VERx100(xe) < 1210)
639 		xelp_intr_enable(xe, true);
640 	else
641 		xe_assert(xe, xe_device_has_memirq(xe));
642 }
643 
644 static void xe_irq_postinstall(struct xe_device *xe)
645 {
646 	if (IS_SRIOV_VF(xe))
647 		return vf_irq_postinstall(xe);
648 
649 	if (xe_device_uses_memirq(xe)) {
650 		struct xe_tile *tile;
651 		unsigned int id;
652 
653 		for_each_tile(tile, xe, id)
654 			xe_memirq_postinstall(&tile->memirq);
655 	}
656 
657 	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
658 
659 	/*
660 	 * ASLE backlight operations are reported via GUnit GSE interrupts
661 	 * on the root tile.
662 	 */
663 	unmask_and_enable(xe_device_get_root_tile(xe),
664 			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);
665 
666 	/* Enable top-level interrupts */
667 	if (GRAPHICS_VERx100(xe) >= 1210)
668 		dg1_intr_enable(xe, true);
669 	else
670 		xelp_intr_enable(xe, true);
671 }
672 
673 static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
674 {
675 	struct xe_device *xe = arg;
676 	struct xe_tile *tile;
677 	unsigned int id;
678 
679 	if (!atomic_read(&xe->irq.enabled))
680 		return IRQ_NONE;
681 
682 	for_each_tile(tile, xe, id)
683 		xe_memirq_handler(&tile->memirq);
684 
685 	return IRQ_HANDLED;
686 }
687 
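/*
 * Pick the top-level interrupt handler: memirq-based VFs use the memory-based
 * handler, graphics IP 12.10+ uses the "master tile" (DG1-style) handler, and
 * older platforms use the Xe_LP handler.
 */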
688 static irq_handler_t xe_irq_handler(struct xe_device *xe)
689 {
690 	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
691 		return vf_mem_irq_handler;
692 
693 	if (GRAPHICS_VERx100(xe) >= 1210)
694 		return dg1_irq_handler;
695 	else
696 		return xelp_irq_handler;
697 }
698 
699 static int xe_irq_msi_request_irqs(struct xe_device *xe)
700 {
701 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
702 	irq_handler_t irq_handler;
703 	int irq, err;
704 
705 	irq_handler = xe_irq_handler(xe);
706 	if (!irq_handler) {
707 		drm_err(&xe->drm, "No supported interrupt handler");
708 		return -EINVAL;
709 	}
710 
711 	irq = pci_irq_vector(pdev, 0);
712 	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
713 	if (err < 0) {
714 		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
715 		return err;
716 	}
717 
718 	return 0;
719 }
720 
721 static void xe_irq_msi_free(struct xe_device *xe)
722 {
723 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
724 	int irq;
725 
726 	irq = pci_irq_vector(pdev, 0);
727 	free_irq(irq, xe);
728 }
729 
730 static void irq_uninstall(void *arg)
731 {
732 	struct xe_device *xe = arg;
733 
734 	if (!atomic_xchg(&xe->irq.enabled, 0))
735 		return;
736 
737 	xe_irq_reset(xe);
738 
739 	if (xe_device_has_msix(xe))
740 		xe_irq_msix_free(xe);
741 	else
742 		xe_irq_msi_free(xe);
743 }
744 
745 int xe_irq_init(struct xe_device *xe)
746 {
747 	spin_lock_init(&xe->irq.lock);
748 
749 	return xe_irq_msix_init(xe);
750 }
751 
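/*
 * Reset the interrupt hardware, allocate MSI or MSI-X vectors, request the
 * handler(s), and arm the interrupts; teardown is registered as a devm
 * action (irq_uninstall).
 */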
752 int xe_irq_install(struct xe_device *xe)
753 {
754 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
755 	unsigned int irq_flags = PCI_IRQ_MSI;
756 	int nvec = 1;
757 	int err;
758 
759 	xe_irq_reset(xe);
760 
761 	if (xe_device_has_msix(xe)) {
762 		nvec = xe->irq.msix.nvec;
763 		irq_flags = PCI_IRQ_MSIX;
764 	}
765 
766 	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
767 	if (err < 0) {
768 		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
769 		return err;
770 	}
771 
772 	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
773 					xe_irq_msi_request_irqs(xe);
774 	if (err)
775 		return err;
776 
777 	atomic_set(&xe->irq.enabled, 1);
778 
779 	xe_irq_postinstall(xe);
780 
781 	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
782 }
783 
784 static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
785 {
786 	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
787 }
788 
789 void xe_irq_suspend(struct xe_device *xe)
790 {
791 	atomic_set(&xe->irq.enabled, 0); /* no new irqs */
792 
793 	/* flush irqs */
794 	if (xe_device_has_msix(xe))
795 		xe_irq_msix_synchronize_irq(xe);
796 	else
797 		xe_irq_msi_synchronize_irq(xe);
798 	xe_irq_reset(xe); /* turn irqs off */
799 }
800 
801 void xe_irq_resume(struct xe_device *xe)
802 {
803 	struct xe_gt *gt;
804 	int id;
805 
806 	/*
807 	 * lock not needed:
808 	 * 1. no irq will arrive before the postinstall
809 	 * 2. display is not yet resumed
810 	 */
811 	atomic_set(&xe->irq.enabled, 1);
812 	xe_irq_reset(xe);
813 	xe_irq_postinstall(xe); /* turn irqs on */
814 
815 	for_each_gt(gt, xe, id)
816 		xe_irq_enable_hwe(gt);
817 }
818 
819 /* MSI-X related definitions and functions below. */
820 
821 enum xe_irq_msix_static {
822 	GUC2HOST_MSIX = 0,
823 	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
824 	/* Must be last */
825 	NUM_OF_STATIC_MSIX,
826 };
827 
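/*
 * pci_msix_vec_count() returns -EINVAL when the device has no MSI-X
 * capability; treat that as "fall back to MSI" rather than as an error.
 */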
828 static int xe_irq_msix_init(struct xe_device *xe)
829 {
830 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
831 	int nvec = pci_msix_vec_count(pdev);
832 
833 	if (nvec == -EINVAL)
834 		return 0;  /* MSI */
835 
836 	if (nvec < 0) {
837 		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
838 		return nvec;
839 	}
840 
841 	xe->irq.msix.nvec = nvec;
842 	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
843 	return 0;
844 }
845 
846 static irqreturn_t guc2host_irq_handler(int irq, void *arg)
847 {
848 	struct xe_device *xe = arg;
849 	struct xe_tile *tile;
850 	u8 id;
851 
852 	if (!atomic_read(&xe->irq.enabled))
853 		return IRQ_NONE;
854 
855 	for_each_tile(tile, xe, id)
856 		xe_guc_irq_handler(&tile->primary_gt->uc.guc,
857 				   GUC_INTR_GUC2HOST);
858 
859 	return IRQ_HANDLED;
860 }
861 
862 static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
863 {
864 	unsigned int tile_id, gt_id;
865 	struct xe_device *xe = arg;
866 	struct xe_memirq *memirq;
867 	struct xe_hw_engine *hwe;
868 	enum xe_hw_engine_id id;
869 	struct xe_tile *tile;
870 	struct xe_gt *gt;
871 
872 	if (!atomic_read(&xe->irq.enabled))
873 		return IRQ_NONE;
874 
875 	for_each_tile(tile, xe, tile_id) {
876 		memirq = &tile->memirq;
877 		if (!memirq->bo)
878 			continue;
879 
880 		for_each_gt(gt, xe, gt_id) {
881 			if (gt->tile != tile)
882 				continue;
883 
884 			for_each_hw_engine(hwe, gt, id)
885 				xe_memirq_hwe_handler(memirq, hwe);
886 		}
887 	}
888 
889 	return IRQ_HANDLED;
890 }
891 
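/*
 * Reserve an MSI-X index in the xarray: static vectors pass their fixed index
 * (an exact-match limit), dynamic ones get the first free index above the
 * static range and have *msix updated accordingly.
 */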
892 static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
893 				    bool dynamic_msix, u16 *msix)
894 {
895 	struct xa_limit limit;
896 	int ret;
897 	u32 id;
898 
899 	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
900 				 XA_LIMIT(*msix, *msix);
901 	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
902 	if (ret)
903 		return ret;
904 
905 	if (dynamic_msix)
906 		*msix = id;
907 
908 	return 0;
909 }
910 
911 static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
912 {
913 	xa_erase(&xe->irq.msix.indexes, msix);
914 }
915 
916 static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
917 					    void *irq_buf, const char *name, u16 msix)
918 {
919 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
920 	int ret, irq;
921 
922 	irq = pci_irq_vector(pdev, msix);
923 	if (irq < 0)
924 		return irq;
925 
926 	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
927 	if (ret < 0)
928 		return ret;
929 
930 	return 0;
931 }
932 
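/*
 * Request an MSI-X interrupt: reserve a vector (either the fixed index in
 * *msix or a dynamically allocated one), then install @handler on it with
 * @irq_buf as context.  The vector is released again if the request fails.
 */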
933 int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
934 			    const char *name, bool dynamic_msix, u16 *msix)
935 {
936 	int ret;
937 
938 	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
939 	if (ret)
940 		return ret;
941 
942 	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
943 	if (ret) {
944 		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
945 		xe_irq_msix_release_vector(xe, *msix);
946 		return ret;
947 	}
948 
949 	return 0;
950 }
951 
952 void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
953 {
954 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
955 	int irq;
956 	void *irq_buf;
957 
958 	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
959 	if (!irq_buf)
960 		return;
961 
962 	irq = pci_irq_vector(pdev, msix);
963 	if (irq < 0) {
964 		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
965 		return;
966 	}
967 
968 	free_irq(irq, irq_buf);
969 	xe_irq_msix_release_vector(xe, msix);
970 }
971 
972 int xe_irq_msix_request_irqs(struct xe_device *xe)
973 {
974 	int err;
975 	u16 msix;
976 
977 	msix = GUC2HOST_MSIX;
978 	err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
979 				      DRIVER_NAME "-guc2host", false, &msix);
980 	if (err)
981 		return err;
982 
983 	msix = DEFAULT_MSIX;
984 	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
985 				      DRIVER_NAME "-default-msix", false, &msix);
986 	if (err) {
987 		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
988 		return err;
989 	}
990 
991 	return 0;
992 }
993 
994 void xe_irq_msix_free(struct xe_device *xe)
995 {
996 	unsigned long msix;
997 	u32 *dummy;
998 
999 	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
1000 		xe_irq_msix_free_irq(xe, msix);
1001 	xa_destroy(&xe->irq.msix.indexes);
1002 }
1003 
1004 void xe_irq_msix_synchronize_irq(struct xe_device *xe)
1005 {
1006 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
1007 	unsigned long msix;
1008 	u32 *dummy;
1009 
1010 	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
1011 		synchronize_irq(pci_irq_vector(pdev, msix));
1012 }
1013