xref: /linux/drivers/gpu/drm/i915/intel_uncore.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612) !
1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/pm_runtime.h>
25 
26 #include <drm/drm_managed.h>
27 #include <drm/drm_print.h>
28 
29 #include "display/intel_display_core.h"
30 #include "gt/intel_engine_regs.h"
31 #include "gt/intel_gt.h"
32 #include "gt/intel_gt_regs.h"
33 
34 #include "i915_drv.h"
35 #include "i915_iosf_mbi.h"
36 #include "i915_reg.h"
37 #include "i915_vgpu.h"
38 #include "i915_wait_util.h"
39 #include "i915_mmio_range.h"
40 #include "intel_uncore_trace.h"
41 
/* Max time to wait for a forcewake ack before declaring failure */
#define FORCEWAKE_ACK_TIMEOUT_MS 50
/* Max time to wait for free GT FIFO entries before giving up on a write */
#define GT_FIFO_TIMEOUT_MS	 10
44 
45 struct intel_uncore *to_intel_uncore(struct drm_device *drm)
46 {
47 	return &to_i915(drm)->uncore;
48 }
49 
/* Read a register purely for its posting/ordering side effect; discard value */
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
51 
/* Acquire @fw_domains through the platform-specific forcewake-get vfunc. */
static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}
57 
58 void
59 intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
60 {
61 	spin_lock_init(&i915->mmio_debug.lock);
62 	i915->mmio_debug.unclaimed_mmio_check = 1;
63 
64 	i915->uncore.debug = &i915->mmio_debug;
65 }
66 
67 static void mmio_debug_suspend(struct intel_uncore *uncore)
68 {
69 	if (!uncore->debug)
70 		return;
71 
72 	spin_lock(&uncore->debug->lock);
73 
74 	/* Save and disable mmio debugging for the user bypass */
75 	if (!uncore->debug->suspend_count++) {
76 		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
77 		uncore->debug->unclaimed_mmio_check = 0;
78 	}
79 
80 	spin_unlock(&uncore->debug->lock);
81 }
82 
83 static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
84 
85 static void mmio_debug_resume(struct intel_uncore *uncore)
86 {
87 	if (!uncore->debug)
88 		return;
89 
90 	spin_lock(&uncore->debug->lock);
91 
92 	if (!--uncore->debug->suspend_count)
93 		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
94 
95 	if (check_for_unclaimed_mmio(uncore))
96 		drm_info(&uncore->i915->drm,
97 			 "Invalid mmio detected during user access\n");
98 
99 	spin_unlock(&uncore->debug->lock);
100 }
101 
/*
 * Indexed by enum forcewake_domain_id; order and size must match the enum
 * (enforced by the BUILD_BUG_ON in intel_uncore_forcewake_domain_to_str()).
 */
static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
	"gsc",
};
120 
121 const char *
122 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
123 {
124 	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
125 
126 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
127 		return forcewake_domain_names[id];
128 
129 	WARN_ON(id);
130 
131 	return "unknown";
132 }
133 
/* Per-domain register accessors: read ack, set/clear masked request bits */
#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(REG_MASKED_FIELD_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(REG_MASKED_FIELD_DISABLE((val)), (d)->reg_set)
137 
138 static inline void
139 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
140 {
141 	/*
142 	 * We don't really know if the powerwell for the forcewake domain we are
143 	 * trying to reset here does exist at this point (engines could be fused
144 	 * off in ICL+), so no waiting for acks
145 	 */
146 	/* WaRsClearFWBitsAtReset */
147 	if (GRAPHICS_VER(d->uncore->i915) >= 12)
148 		fw_clear(d, 0xefff);
149 	else
150 		fw_clear(d, 0xffff);
151 }
152 
/*
 * Arm the ~1ms auto-release hrtimer for domain @d, taking an extra wake
 * reference that intel_uncore_fw_release_timer() will drop.  The domain
 * must not already have a pending timer (tracked in fw_domains_timer).
 */
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
164 
/* Spin until the masked ack bits read back as @value, or time out */
static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}
173 
/* Wait for @ack bits in the domain's ack register to read back as 0 */
static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}
180 
/* Wait for @ack bits in the domain's ack register to become set */
static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}
187 
/*
 * Wait for the kernel forcewake ack to clear before a new wake request.
 * An all-ones readback means the MMIO BAR itself has become unreliable,
 * in which case the GT is wedged; either way a timeout taints the kernel
 * so CI results are flagged as unreliable.
 */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
		return;

	if (fw_ack(d) == ~0) {
		drm_err(&d->uncore->i915->drm,
			"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		intel_gt_set_wedged_async(d->uncore->gt);
	} else {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack to clear.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
	}

	add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
207 
/* Which ack state fw_domain_wait_ack_with_fallback() is waiting for */
enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};
212 
/*
 * Wait for the kernel ack bit to reach the state implied by @type, nudging
 * the hardware via the FORCEWAKE_KERNEL_FALLBACK bit (see workaround notes
 * below).  Returns 0 on success, -ETIMEDOUT if the ack never arrived after
 * all retries.
 */
static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	drm_dbg(&d->uncore->i915->drm,
		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
		intel_uncore_forcewake_domain_to_str(d->id),
		type == ACK_SET ? "set" : "clear",
		fw_ack(d),
		pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
259 
/*
 * Wait for the ack to clear; on timeout retry via the fallback toggle,
 * and only report failure through fw_domain_wait_ack_clear().
 */
static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}
269 
/* Issue the wake request for this domain (does not wait for the ack) */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}
275 
/*
 * Wait for the kernel forcewake ack after a wake request; a timeout
 * taints the kernel so CI results are flagged as unreliable.
 */
static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack request.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}
286 
/*
 * Wait for the ack to be set; on timeout retry via the fallback toggle,
 * and only report failure through fw_domain_wait_ack_set().
 */
static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}
296 
/* Release the kernel forcewake request for this domain */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}
302 
/*
 * Wake every domain in @fw_domains and add them to the active mask.
 * Wake requests are issued for all domains before any set-ack is awaited.
 * Called with uncore->lock held by its callers.
 */
static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}
321 
/*
 * As fw_domains_get_normal(), but use the fallback-toggle ack waits for
 * platforms affected by WaRsForcewakeAddDelayForAck.
 */
static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}
341 
/* Drop the wake request for @fw_domains and remove them from the active mask */
static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}
355 
/*
 * Clear all forcewake request bits for @fw_domains without waiting for
 * acks (see fw_domain_reset() for why waiting is not possible here).
 */
static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}
371 
372 static inline u32 gt_thread_status(struct intel_uncore *uncore)
373 {
374 	u32 val;
375 
376 	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
377 	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
378 
379 	return val;
380 }
381 
/* Poll up to 5ms for the GT thread status to read zero; warn once on timeout */
static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}
392 
/* Normal forcewake get, plus the gen6-style GT thread wake workaround */
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}
401 
402 static void
403 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
404 {
405 	u32 fifodbg;
406 
407 	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
408 
409 	if (unlikely(fifodbg)) {
410 		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
411 		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
412 	}
413 }
414 
/* As fw_domains_get_normal(), preceded by a GTFIFODBG error check/clear */
static void
fw_domains_get_normal_fifo(struct intel_uncore *uncore,
			   enum forcewake_domains fw_domains)
{
	gen6_check_for_fifo_debug(uncore);
	fw_domains_get_normal(uncore, fw_domains);
}
422 
/* As fw_domains_get_with_thread_status(), preceded by a GTFIFODBG check */
static void
fw_domains_get_with_thread_status_fifo(struct intel_uncore *uncore,
				       enum forcewake_domains fw_domains)
{
	gen6_check_for_fifo_debug(uncore);
	fw_domains_get_with_thread_status(uncore, fw_domains);
}
430 
431 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
432 {
433 	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
434 
435 	return count & GT_FIFO_FREE_ENTRIES_MASK;
436 }
437 
/*
 * Ensure at least one GT FIFO entry above the reserved watermark is free
 * before a write, waiting up to GT_FIFO_TIMEOUT_MS if necessary, and
 * account for the entry this write consumes in uncore->fifo_count.  On
 * timeout the count is left untouched and the write proceeds anyway.
 */
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}
461 
/*
 * hrtimer callback dropping the auto wake reference taken by
 * fw_domain_arm_timer().  If the domain was marked active since the timer
 * was armed, keep it awake and restart the timer for another period.
 */
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	/* Recent use: defer the release and run for one more period */
	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
487 
/*
 * Note callers must have acquired the PUNIT->PMIC bus, before calling this.
 *
 * Cancel all pending auto-release timers, drop every active forcewake and
 * clear the hardware request bits.  Returns the mask of domains that were
 * active so callers (suspend/sanitize) can re-acquire them later.
 */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			/* Timer had already fired/was running: finish its work */
			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		/* Drop the lock so the outstanding timers can run, then retry */
		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}
548 
/*
 * Check FPGA_DBG for an unclaimed MMIO access, clearing the flag if set.
 * Returns true if an unclaimed access was detected.
 */
static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR.  When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly.  Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}
578 
/*
 * Check the VLV-style CLAIM_ER register for unclaimed accesses, clearing
 * it if anything was recorded.  Returns true on detection.
 */
static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}
592 
/*
 * Run every unclaimed-mmio detection mechanism the platform has.  Each
 * check also clears the hardware error state as a side effect, so both
 * are always executed (|= on separate statements, not short-circuit).
 * No-op while mmio debugging is suspended for a user bypass.
 */
static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	return ret;
}
611 
/*
 * Bring the forcewake hardware to a known state during init/resume:
 * apply the chv FIFO policy workaround, clear FIFO errors, reset all
 * forcewake requests and re-acquire the @restore_forcewake domains that
 * were held before suspend (if any).
 */
static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	if (intel_uncore_has_fifo(uncore))
		gen6_check_for_fifo_debug(uncore);

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}
640 
/*
 * Prepare uncore for suspend: stop listening for PMIC bus accesses and
 * drop all forcewake, remembering which domains were active so that
 * intel_uncore_resume_early() can restore them.
 */
void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}
652 
/*
 * Early resume: report/clear stale unclaimed-mmio state, sanitize the
 * forcewake hardware restoring the domains saved at suspend, and
 * re-register the PMIC bus access notifier.
 */
void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
668 
/* Runtime resume only needs to re-register the PMIC bus access notifier */
void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
676 
/*
 * Take a reference on each requested domain; only domains whose refcount
 * was zero need an actual hardware wake.  Already-awake domains are just
 * marked active, deferring any pending auto release.  Caller holds
 * uncore->lock.
 */
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}
695 
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by symmetric
 * call to intel_uncore_forcewake_put(). Usually caller wants all the domains
 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	/* No forcewake domains on this platform: nothing to pin */
	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
723 
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	/* Only the first user reference pins the domains and pauses debug */
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		mmio_debug_suspend(uncore);
	}
	spin_unlock_irq(&uncore->lock);
}
741 
/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	/* Only the last user reference resumes debug and drops the domains */
	if (!--uncore->user_forcewake_count) {
		mmio_debug_resume(uncore);
		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
758 
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	/* No forcewake domains on this platform: nothing to pin */
	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}
777 
/*
 * Drop a reference on each domain; when the last reference goes away,
 * either release the hardware immediately or, if @delayed, hand the last
 * reference to the auto-release timer (unless one is already pending).
 * Caller holds uncore->lock.
 */
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}
802 
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	/* No forcewake domains on this platform: nothing to release */
	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
823 
/**
 * intel_uncore_forcewake_put_delayed - release forcewake via the auto timer
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * Like intel_uncore_forcewake_put(), but the final release of each domain
 * is deferred to the auto-release hrtimer instead of happening immediately.
 */
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
836 
/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 *
 * Cancel any pending auto-release timer for @fw_domains and perform the
 * deferred release immediately.
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		/* Prevent the timer from re-arming, then run its release now */
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}
858 
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	/* No forcewake domains on this platform: nothing to release */
	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}
877 
/* Warn if any forcewake domain is still marked active in fw_domains_active */
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}
887 
/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_RUNTIME_PM) sanity check that the
 * caller explicitly holds a wakeref on every domain in @fw_domains, not
 * merely the timer-held auto reference.
 */
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}
928 
/*
 * We give fast paths for the really cool registers.  The second range includes
 * media domains (and the GSC starting from Xe_LPM+).  Registers in
 * [0x40000, 0x116000) never need forcewake.
 */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= 0x116000; \
})
937 
938 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
939 {
940 	if (offset < entry->start)
941 		return -1;
942 	else if (offset > entry->end)
943 		return 1;
944 	else
945 		return 0;
946 }
947 
/*
 * Copied and "macroized" from lib/bsearch.c.  Evaluates to a pointer to
 * the matching entry, or NULL if @key is not found.
 */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
966 
/*
 * Look up which forcewake domains cover @offset in the platform's sorted
 * range table.  Returns 0 (no forcewake needed) when the offset is not
 * listed.
 */
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}
997 
998 /*
999  * Shadowed register tables describe special register ranges that i915 is
1000  * allowed to write to without acquiring forcewake.  If these registers' power
1001  * wells are down, the hardware will save values written by i915 to a shadow
1002  * copy and automatically transfer them into the real register the next time
1003  * the power well is woken up.  Shadowing only applies to writes; forcewake
1004  * must still be acquired when reading from registers in these ranges.
1005  *
1006  * The documentation for shadowed registers is somewhat spotty on older
1007  * platforms.  However missing registers from these lists is non-fatal; it just
1008  * means we'll wake up the hardware for some register accesses where we didn't
1009  * really need to.
1010  *
1011  * The ranges listed in these tables must be sorted by offset.
1012  *
1013  * When adding new tables here, please also add them to
1014  * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
1015  * scanned for obvious mistakes or typos by the selftests.
1016  */
1017 
/* gen8 shadowed register ranges (sorted by offset; see comment above) */
static const struct i915_mmio_range gen8_shadowed_regs[] = {
	{ .start =  0x2030, .end =  0x2030 },
	{ .start =  0xA008, .end =  0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
};
1025 
/*
 * Shadowed (forcewake-exempt write) register ranges for gen11.  Must stay
 * sorted by offset for the BSEARCH() in is_shadowed() to work.
 */
static const struct i915_mmio_range gen11_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2550, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22230, .end =  0x22230 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};
1052 
/*
 * Shadowed (forcewake-exempt write) register ranges for gen12.  Must stay
 * sorted by offset for the BSEARCH() in is_shadowed() to work.
 */
static const struct i915_mmio_range gen12_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4D4, .end =   0xC4D4 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },

	/*
	 * The rest of these ranges are specific to Xe_HP and beyond, but
	 * are reserved/unused ranges on earlier gen12 platforms, so they can
	 * be safely added to the gen12 table.
	 */
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};
1096 
/*
 * Shadowed (forcewake-exempt write) register ranges for DG2.  Must stay
 * sorted by offset for the BSEARCH() in is_shadowed() to work.
 */
static const struct i915_mmio_range dg2_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4E0, .end =   0xC4E0 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =   0xC658, .end =   0xC658 },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};
1135 
/*
 * Shadowed (forcewake-exempt write) register ranges for the MTL primary
 * (render) GT.  Must stay sorted by offset for the BSEARCH() in
 * is_shadowed() to work.
 */
static const struct i915_mmio_range mtl_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC050, .end =   0xC050 },
	{ .start =   0xC340, .end =   0xC340 },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4E0, .end =   0xC4E0 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =   0xC658, .end =   0xC658 },
	{ .start =   0xCFD4, .end =   0xCFDC },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
};
1153 
/*
 * Shadowed (forcewake-exempt write) register ranges for the Xe_LPM+ media
 * GT.  Offsets are post-GSI-translation (0x38xxxx); must stay sorted by
 * offset for the BSEARCH() in is_shadowed() to work.
 */
static const struct i915_mmio_range xelpmp_shadowed_regs[] = {
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x38A008, .end = 0x38A00C },
	{ .start = 0x38A188, .end = 0x38A188 },
	{ .start = 0x38A278, .end = 0x38A278 },
	{ .start = 0x38A540, .end = 0x38A56C },
	{ .start = 0x38A618, .end = 0x38A618 },
	{ .start = 0x38C050, .end = 0x38C050 },
	{ .start = 0x38C340, .end = 0x38C340 },
	{ .start = 0x38C4C8, .end = 0x38C4C8 },
	{ .start = 0x38C4E0, .end = 0x38C4E4 },
	{ .start = 0x38C600, .end = 0x38C600 },
	{ .start = 0x38C658, .end = 0x38C658 },
	{ .start = 0x38CFD4, .end = 0x38CFDC },
};
1174 
1175 static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range)
1176 {
1177 	if (key < range->start)
1178 		return -1;
1179 	else if (key > range->end)
1180 		return 1;
1181 	else
1182 		return 0;
1183 }
1184 
1185 static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1186 {
1187 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1188 		return false;
1189 
1190 	if (IS_GSI_REG(offset))
1191 		offset += uncore->gsi_offset;
1192 
1193 	return BSEARCH(offset,
1194 		       uncore->shadowed_reg_table,
1195 		       uncore->shadowed_reg_table_entries,
1196 		       mmio_range_cmp);
1197 }
1198 
/*
 * On gen6-style platforms there is only the single render forcewake well
 * (see __gen6_fw_ranges below), so every write maps to FORCEWAKE_RENDER
 * regardless of the register offset; @reg is intentionally unused.
 */
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}
1204 
/*
 * Determine the forcewake domains needed to read @offset: none if the
 * register is outside forcewake-managed space, otherwise the result of the
 * forcewake table lookup.  Cache @offset in a local so the macro argument
 * is expanded only once (avoids the classic multiple-evaluation hazard and
 * matches __fwtable_reg_write_fw_domains() below).
 */
#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset))) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
1212 
/*
 * Determine the forcewake domains needed to write @offset: none if the
 * register is outside forcewake-managed space or is shadowed (the hardware
 * latches the write itself, see the shadowed-register comment above),
 * otherwise the result of the forcewake table lookup.  @offset is cached
 * in a local so the macro argument is expanded only once.
 */
#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
1221 
/* Declare one entry of a (sorted) intel_forcewake_range table. */
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }
1224 
1225 /*
1226  * All platforms' forcewake tables below must be sorted by offset ranges.
1227  * Furthermore, new forcewake tables added should be "watertight" and have
1228  * no gaps between ranges.
1229  *
1230  * When there are multiple consecutive ranges listed in the bspec with
1231  * the same forcewake domain, it is customary to combine them into a single
1232  * row in the tables below to keep the tables small and lookups fast.
1233  * Likewise, reserved/unused ranges may be combined with the preceding and/or
1234  * following ranges since the driver will never be making MMIO accesses in
1235  * those ranges.
1236  *
1237  * For example, if the bspec were to list:
1238  *
1239  *    ...
1240  *    0x1000 - 0x1fff:  GT
1241  *    0x2000 - 0x2cff:  GT
1242  *    0x2d00 - 0x2fff:  unused/reserved
1243  *    0x3000 - 0xffff:  GT
1244  *    ...
1245  *
1246  * these could all be represented by a single line in the code:
1247  *
1248  *   GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1249  *
1250  * When adding new forcewake tables here, please also add them to
1251  * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1252  * scanned for obvious mistakes or typos by the selftests.
1253  */
1254 
/* gen6: the whole GT register space hangs off the single render well. */
static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};
1258 
/* Valleyview: separate render and media wells. */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
1268 
/* Cherryview: some ranges require both the render and media wells at once. */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
1287 
/* gen9: first table with the dedicated GT ("blitter") well. */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
1322 
/* gen11: media well split into per-engine VDBOX/VEBOX domains. */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};
1360 
/*
 * gen12 (TGL-class): sub-range annotations in the comments record the
 * bspec breakdown that was merged into each table row.
 */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0   -  0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		 0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};
1461 
/*
 * DG2: up to eight VDBOX and four VEBOX wells; sub-range annotations in
 * the comments record the bspec breakdown merged into each table row.
 */
static const struct intel_forcewake_range __dg2_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		  0x0 -  0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
		0x4b00 - 0x4fff: reserved
		0x5000 - 0x51ff: always on */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8c7f: reserved
		0x8c80 - 0x8cff: gt (DG2 only) */
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
		0x8d00 - 0x8dff: render (DG2 only)
		0x8e00 - 0x8fff: reserved */
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdfff: render
		0xe000 - 0xe0ff: reserved
		0xe100 - 0xe8ff: render */
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
		0xe900 - 0xe9ff: gt
		0xea00 - 0xefff: reserved
		0xf000 - 0xffff: gt */
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
		0x10000 - 0x11fff: reserved
		0x12000 - 0x127ff: always on
		0x12800 - 0x12fff: reserved */
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x13200 - 0x133ff: VD2 (DG2 only)
		0x13400 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
		0x15000 - 0x15fff: gt (DG2 only)
		0x16000 - 0x16dff: reserved */
	GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
		0x16e00 - 0x1ffff: render
		0x20000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x25fff: reserved */
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
		0x26000 - 0x27fff: render
		0x28000 - 0x29fff: reserved
		0x2a000 - 0x2ffff: undocumented */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: VD0
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
		0x1c4000 - 0x1c6bff: VD1
		0x1c6c00 - 0x1c6cff: reserved
		0x1c6d00 - 0x1c6dff: VD1
		0x1c6e00 - 0x1c7fff: reserved */
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbfff: reserved */
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3dff: VD2
		0x1d3e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
		0x1d4000 - 0x1d6bff: VD3
		0x1d6c00 - 0x1d6cff: reserved
		0x1d6d00 - 0x1d6dff: VD3
		0x1d6e00 - 0x1d7fff: reserved */
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
		0x1d8000 - 0x1da0ff: VE1
		0x1da100 - 0x1dffff: reserved */
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
		0x1e0000 - 0x1e2bff: VD4
		0x1e2c00 - 0x1e2cff: reserved
		0x1e2d00 - 0x1e2dff: VD4
		0x1e2e00 - 0x1e3eff: reserved
		0x1e3f00 - 0x1e3fff: VD4 */
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
		0x1e4000 - 0x1e6bff: VD5
		0x1e6c00 - 0x1e6cff: reserved
		0x1e6d00 - 0x1e6dff: VD5
		0x1e6e00 - 0x1e7fff: reserved */
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
		0x1e8000 - 0x1ea0ff: VE2
		0x1ea100 - 0x1effff: reserved */
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
		0x1f0000 - 0x1f2bff: VD6
		0x1f2c00 - 0x1f2cff: reserved
		0x1f2d00 - 0x1f2dff: VD6
		0x1f2e00 - 0x1f3eff: reserved
		0x1f3f00 - 0x1f3fff: VD6 */
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
		0x1f4000 - 0x1f6bff: VD7
		0x1f6c00 - 0x1f6cff: reserved
		0x1f6d00 - 0x1f6dff: VD7
		0x1f6e00 - 0x1f7fff: reserved */
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
};
1609 
/*
 * MTL primary (render) GT; the media engines live on the separate
 * Xe_LPM+ media GT covered by __xelpmp_fw_ranges below.
 */
static const struct intel_forcewake_range __mtl_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: render
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
		0x8140 - 0x815f: render
		0x8160 - 0x817f: reserved */
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x87ff: gt
		0x8800 - 0x8dff: reserved
		0x8e00 - 0x8f7f: gt
		0x8f80 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
		0xd000 - 0xd3ff: always on
		0xd400 - 0xd7ff: reserved */
	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdfff: render
		0xe000 - 0xe0ff: reserved
		0xe100 - 0xe8ff: render */
	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
		 0xea00 - 0x11fff: reserved
		0x12000 - 0x127ff: always on
		0x12800 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1bfff: render
		0x1c000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1901ef, 0),
	GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
		/* FIXME: WA to wake GT while triggering H2G */
};
1681 
1682 /*
1683  * Note that the register ranges here are the final offsets after
1684  * translation of the GSI block to the 0x380000 offset.
1685  *
1686  * NOTE:  There are a couple MCR ranges near the bottom of this table
1687  * that need to power up either VD0 or VD2 depending on which replicated
1688  * instance of the register we're trying to access.  Our forcewake logic
1689  * at the moment doesn't have a good way to take steering into consideration,
1690  * and the driver doesn't even access any registers in those ranges today,
1691  * so for now we just mark those ranges as FORCEWAKE_ALL.  That will ensure
1692  * proper operation if we do start using the ranges in the future, and we
1693  * can determine at that time whether it's worth adding extra complexity to
1694  * the forcewake handling to take steering into consideration.
1695  */
static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
		0x116000 - 0x117fff: gsc
		0x118000 - 0x119fff: reserved
		0x11a000 - 0x11efff: gsc
		0x11f000 - 0x11ffff: reserved */
	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c3dff: VD0
		0x1c3e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0
		0x1c4000 - 0x1c7fff: reserved */
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbfff: reserved */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1cdfff: VD0
		0x1ce000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d3dff: VD2
		0x1d3e00 - 0x1d3eff: reserved
		0x1d4000 - 0x1d7fff: VD2 */
	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
		0x1da100 - 0x23ffff: reserved
		0x240000 - 0x37ffff: non-GT range
		0x380000 - 0x380aff: reserved */
	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
		0x381000 - 0x381fff: gt
		0x382000 - 0x383fff: reserved
		0x384000 - 0x384aff: gt
		0x384b00 - 0x3851ff: reserved
		0x385200 - 0x3871ff: gt
		0x387200 - 0x387fff: reserved
		0x388000 - 0x38813f: gt
		0x388140 - 0x38817f: reserved */
	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
		0x388180 - 0x3881ff: always on
		0x388200 - 0x3882ff: reserved */
	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
		0x388300 - 0x38887f: gt
		0x388880 - 0x388fff: reserved
		0x389000 - 0x38947f: gt
		0x389480 - 0x38955f: reserved */
	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
		0x389560 - 0x3895ff: always on
		0x389600 - 0x389fff: reserved */
	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
		0x38a000 - 0x38afff: gt
		0x38b000 - 0x38bfff: reserved
		0x38c000 - 0x38cfff: gt */
	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
		0x38d120 - 0x38dfff: gt
		0x38e000 - 0x38efff: reserved
		0x38f000 - 0x38ffff: gt
		0x390000 - 0x391fff: reserved */
	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
		0x392000 - 0x3927ff: always on
		0x392800 - 0x392fff: reserved */
	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
		0x393500 - 0x393bff: reserved
		0x393c00 - 0x393c7f: always on */
	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
};
1768 
/*
 * Poke the hardware with a harmless register write so that a subsequent
 * real access does not hit a sleeping (RC6) chip.
 */
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}
1777 
/*
 * Warn if the hardware flagged an unclaimed access after the MMIO
 * operation on @reg completed; each warning decrements the module
 * parameter so only the first N failures are reported.
 */
static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore),
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}
1791 
/*
 * Report (debug level only) an unclaimed access that was already pending
 * *before* we touch @reg, so it is not blamed on the upcoming access.
 */
static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{
	if (check_for_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"Unclaimed access detected before %s register 0x%x\n",
			read ? "read from" : "write to",
			i915_mmio_reg_offset(reg));
}
1803 
/*
 * Begin unclaimed-mmio debug bracketing around a register access.
 *
 * Returns true if the debug lock was taken, in which case the caller
 * must pair this with unclaimed_reg_debug_footer() after the access
 * (hence __must_check). Returns false when mmio_debug is disabled or
 * this uncore has no debug state.
 */
static inline bool __must_check
unclaimed_reg_debug_header(struct intel_uncore *uncore,
			   const i915_reg_t reg, const bool read)
{
	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
		return false;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	spin_lock(&uncore->debug->lock);
	__unclaimed_previous_reg_debug(uncore, reg, read);

	return true;
}
1819 
/*
 * Finish the debug bracket opened by unclaimed_reg_debug_header():
 * check for an unclaimed access caused by the access itself, then drop
 * the debug lock taken in the header.
 */
static inline void
unclaimed_reg_debug_footer(struct intel_uncore *uncore,
			   const i915_reg_t reg, const bool read)
{
	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	__unclaimed_reg_debug(uncore, reg, read);
	spin_unlock(&uncore->debug->lock);
}
1830 
/*
 * vGPU read handlers: a plain raw read plus tracing. No forcewake or
 * locking is done here — under a hypervisor the host mediates register
 * access.
 */
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
1841 __vgpu_read(64)
1842 
/*
 * gen2-gen5 read side: these platforms have no forcewake, so a read only
 * needs a runtime-PM wakelock assertion and tracing.
 */
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

/* Ironlake variant: issue the RC6 wakeup dummy write before reading. */
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

/* Instantiate handlers for all access widths. */
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
1882 
/*
 * gen6+ read side: the access runs under uncore->lock with optional
 * unclaimed-mmio debug bracketing (header/footer pair) around the raw
 * read; the trace fires after the lock is dropped.
 */
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	bool unclaimed_reg_debug; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)

#define GEN6_READ_FOOTER \
	if (unclaimed_reg_debug) \
		unclaimed_reg_debug_footer(uncore, reg, true);	\
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
1898 
/*
 * Slow path of __force_wake_auto(): arm each requested domain's
 * auto-release timer (intel_uncore_fw_release_timer) and then actually
 * take the forcewake. noinline keeps the common "already awake" path in
 * the caller small.
 */
static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}
1912 
1913 static inline void __force_wake_auto(struct intel_uncore *uncore,
1914 				     enum forcewake_domains fw_domains)
1915 {
1916 	GEM_BUG_ON(!fw_domains);
1917 
1918 	/* Turn on all requested but inactive supported forcewake domains. */
1919 	fw_domains &= uncore->fw_domains;
1920 	fw_domains &= ~uncore->fw_domains_active;
1921 
1922 	if (fw_domains)
1923 		___force_wake_auto(uncore, fw_domains);
1924 }
1925 
/*
 * Forcewake-table reads: look up which domains the register offset needs
 * in the per-platform range table, wake them (auto-released later by the
 * domain timers), then perform the raw read under uncore->lock (taken by
 * GEN6_READ_HEADER).
 */
#define __gen_fwtable_read(x) \
static u##x \
fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
{ \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}
1938 
1939 static enum forcewake_domains
1940 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1941 	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1942 }
1943 
/* Instantiate the forcewake-table read handlers for all access widths. */
__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
1952 
/*
 * gen2-gen5 write side: no forcewake on these platforms, so a write only
 * needs tracing and a runtime-PM wakelock assertion.
 */
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

/* Ironlake variant: issue the RC6 wakeup dummy write before writing. */
#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
1988 
/*
 * gen6+ write side: mirrors GEN6_READ_HEADER/FOOTER — the write runs
 * under uncore->lock with optional unclaimed-mmio debug bracketing, but
 * the trace fires before the access rather than after.
 */
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	bool unclaimed_reg_debug; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)

#define GEN6_WRITE_FOOTER \
	if (unclaimed_reg_debug) \
		unclaimed_reg_debug_footer(uncore, reg, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)
2002 
/*
 * gen6/7 writes: for registers in the forcewake range, wait for room in
 * the GT write FIFO before issuing the raw write.
 */
#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
2015 
/*
 * Forcewake-table writes: look up the required domains for the offset in
 * the per-platform write table, wake them (auto-released by the domain
 * timers), then perform the raw write under uncore->lock.
 */
#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
2027 
/*
 * Report which forcewake domains a write to @reg would require, without
 * taking them; installed as funcs.write_fw_domains via
 * ASSIGN_WRITE_MMIO_VFUNCS().
 */
static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}
2033 
/* Instantiate forcewake-table write handlers (no 64-bit writes used). */
__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

/*
 * vGPU write handlers: a plain raw write plus tracing; under a hypervisor
 * the host mediates register access, so no forcewake or locking here.
 */
#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
2051 
/* Install raw (no fw_domains query) write vfuncs for backend @x. */
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

/* Install raw (no fw_domains query) read vfuncs for backend @x. */
#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

/* Raw write vfuncs plus the backend's write_fw_domains query. */
#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

/* Raw read vfuncs plus the backend's read_fw_domains query. */
#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)
2078 
/*
 * Allocate and register a single forcewake domain: precompute its set/ack
 * register iomem addresses and hook up the auto-release timer.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	d = kzalloc_obj(*d);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	/* Both addresses include the uncore's GSI offset, if any. */
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;

	d->id = domain_id;

	/* The FORCEWAKE_* mask bits must map 1:1 onto the domain id enum. */
	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));

	d->mask = BIT(domain_id);

	/* Timer used to drop the forcewake reference after a grace period. */
	hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}
ALLOW_ERROR_INJECTION(__fw_domain_init, ERRNO);
2133 
2134 static void fw_domain_fini(struct intel_uncore *uncore,
2135 			   enum forcewake_domain_id domain_id)
2136 {
2137 	struct intel_uncore_forcewake_domain *d;
2138 
2139 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2140 
2141 	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2142 	if (!d)
2143 		return;
2144 
2145 	uncore->fw_domains &= ~BIT(domain_id);
2146 	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2147 	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2148 	kfree(d);
2149 }
2150 
2151 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2152 {
2153 	struct intel_uncore_forcewake_domain *d;
2154 	int tmp;
2155 
2156 	for_each_fw_domain(d, uncore, tmp)
2157 		fw_domain_fini(uncore, d->id);
2158 }
2159 
2160 static const struct intel_uncore_fw_get uncore_get_fallback = {
2161 	.force_wake_get = fw_domains_get_with_fallback
2162 };
2163 
2164 static const struct intel_uncore_fw_get uncore_get_normal = {
2165 	.force_wake_get = fw_domains_get_normal,
2166 };
2167 
2168 static const struct intel_uncore_fw_get uncore_get_thread_status = {
2169 	.force_wake_get = fw_domains_get_with_thread_status
2170 };
2171 
2172 static const struct intel_uncore_fw_get uncore_get_normal_fifo = {
2173 	.force_wake_get = fw_domains_get_normal_fifo,
2174 };
2175 
2176 static const struct intel_uncore_fw_get uncore_get_thread_status_fifo = {
2177 	.force_wake_get = fw_domains_get_with_thread_status_fifo
2178 };
2179 
/*
 * Create the forcewake domains appropriate for this platform and pick the
 * force_wake_get strategy. On failure, any domains created so far are
 * torn down again.
 */
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

/*
 * Error-short-circuiting wrapper: once ret is non-zero, subsequent
 * fw_domain_init() calls become no-ops and the first error is kept.
 */
#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		intel_engine_mask_t emask;
		int i;

		/* we'll prune the domains of missing engines later */
		emask = uncore->gt->info.engine_mask;

		uncore->fw_get_funcs = &uncore_get_fallback;
		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_MTL);
		else
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_GEN9);

		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_RENDER_GEN9,
				       FORCEWAKE_ACK_RENDER_GEN9);

		/* One domain per present video decode / enhance engine. */
		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}

		if (uncore->gt->type == GT_MEDIA)
			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		if (intel_uncore_has_fifo(uncore))
			uncore->fw_get_funcs = &uncore_get_normal_fifo;
		else
			uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		if (intel_uncore_has_fifo(uncore))
			uncore->fw_get_funcs = &uncore_get_thread_status_fifo;
		else
			uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			/* Fall back to the legacy single-threaded registers. */
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status_fifo;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}
2317 
/*
 * Point the uncore at a platform forcewake range table. Wrapped in
 * do/while(0) so the macro expands to a single statement (safe inside
 * unbraced if/else), matching the other ASSIGN_* helpers in this file.
 */
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
do { \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
} while (0)

/* Point the uncore at a platform shadowed-register table. */
#define ASSIGN_SHADOW_TABLE(uncore, d) \
do { \
	(uncore)->shadowed_reg_table = (d); \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
} while (0)
2330 
2331 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2332 					 unsigned long action, void *data)
2333 {
2334 	struct intel_uncore *uncore = container_of(nb,
2335 			struct intel_uncore, pmic_bus_access_nb);
2336 
2337 	switch (action) {
2338 	case MBI_PMIC_BUS_ACCESS_BEGIN:
2339 		/*
2340 		 * forcewake all now to make sure that we don't need to do a
2341 		 * forcewake later which on systems where this notifier gets
2342 		 * called requires the punit to access to the shared pmic i2c
2343 		 * bus, which will be busy after this notification, leading to:
2344 		 * "render: timed out waiting for forcewake ack request."
2345 		 * errors.
2346 		 *
2347 		 * The notifier is unregistered during intel_runtime_suspend(),
2348 		 * so it's ok to access the HW here without holding a RPM
2349 		 * wake reference -> disable wakeref asserts for the time of
2350 		 * the access.
2351 		 */
2352 		disable_rpm_wakeref_asserts(uncore->rpm);
2353 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2354 		enable_rpm_wakeref_asserts(uncore->rpm);
2355 		break;
2356 	case MBI_PMIC_BUS_ACCESS_END:
2357 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2358 		break;
2359 	}
2360 
2361 	return NOTIFY_OK;
2362 }
2363 
/* drmm action: undo the ioremap performed in intel_uncore_setup_mmio(). */
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
	iounmap((void __iomem *)regs);
}
2368 
2369 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2370 {
2371 	struct drm_i915_private *i915 = uncore->i915;
2372 	int mmio_size;
2373 
2374 	/*
2375 	 * Before gen4, the registers and the GTT are behind different BARs.
2376 	 * However, from gen4 onwards, the registers and the GTT are shared
2377 	 * in the same BAR, so we want to restrict this ioremap from
2378 	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
2379 	 * the register BAR remains the same size for all the earlier
2380 	 * generations up to Ironlake.
2381 	 * For dgfx chips register range is expanded to 4MB, and this larger
2382 	 * range is also used for integrated gpus beginning with Meteor Lake.
2383 	 */
2384 	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2385 		mmio_size = 4 * 1024 * 1024;
2386 	else if (GRAPHICS_VER(i915) >= 5)
2387 		mmio_size = 2 * 1024 * 1024;
2388 	else
2389 		mmio_size = 512 * 1024;
2390 
2391 	uncore->regs = ioremap(phys_addr, mmio_size);
2392 	if (uncore->regs == NULL) {
2393 		drm_err(&i915->drm, "failed to map registers\n");
2394 		return -EIO;
2395 	}
2396 
2397 	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2398 					(void __force *)uncore->regs);
2399 }
2400 
/*
 * Early, allocation-free initialization of the uncore: wire up the
 * back-pointers and the lock before any MMIO machinery is configured.
 */
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = gt->i915;
	uncore->gt = gt;
	uncore->rpm = &gt->i915->runtime_pm;
}
2409 
/* Install MMIO accessors for configurations without forcewake. */
static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		/* Virtualized: thin tracing wrappers, host handles the rest. */
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		/* Ironlake: variants that issue the RC6 dummy write first. */
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}
2425 
/*
 * Select forcewake/shadow tables and write vfuncs for a standalone media
 * GT. Returns -ENODEV for media IP versions this code does not know.
 */
static int uncore_media_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	if (MEDIA_VER(i915) >= 13) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else {
		MISSING_CASE(MEDIA_VER(i915));
		return -ENODEV;
	}

	return 0;
}
2441 
/*
 * Full forcewake setup: create the domains, then select the per-platform
 * forcewake range table, shadowed register table and MMIO vfuncs, and
 * register the PMIC bus access notifier.
 */
static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	/* All forcewake platforms share the fwtable read handlers. */
	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	if (uncore->gt->type == GT_MEDIA)
		return uncore_media_forcewake_init(uncore);

	/* Ordered newest-first; each branch picks the matching tables. */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}
2500 
2501 static int sanity_check_mmio_access(struct intel_uncore *uncore)
2502 {
2503 	struct drm_i915_private *i915 = uncore->i915;
2504 
2505 	if (GRAPHICS_VER(i915) < 8)
2506 		return 0;
2507 
2508 	/*
2509 	 * Sanitycheck that MMIO access to the device is working properly.  If
2510 	 * the CPU is unable to communicate with a PCI device, BAR reads will
2511 	 * return 0xFFFFFFFF.  Let's make sure the device isn't in this state
2512 	 * before we start trying to access registers.
2513 	 *
2514 	 * We use the primary GT's forcewake register as our guinea pig since
2515 	 * it's been around since HSW and it's a masked register so the upper
2516 	 * 16 bits can never read back as 1's if device access is operating
2517 	 * properly.
2518 	 *
2519 	 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2520 	 * recovers, then give up.
2521 	 */
2522 #define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2523 	if (wait_for(COND, 2000) == -ETIMEDOUT) {
2524 		drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2525 		return -EIO;
2526 	}
2527 
2528 	return 0;
2529 }
2530 
/*
 * Main uncore MMIO bring-up: sanity-check device access, decide whether
 * forcewake is needed, install the matching accessor vfuncs and set the
 * capability flags.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct intel_display *display = i915->display;
	int ret;

	ret = sanity_check_mmio_access(uncore);
	if (ret)
		return ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	/* gen6+ needs forcewake; a vGPU's host handles it for us. */
	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(display))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}
2584 
2585 /*
2586  * We might have detected that some engines are fused off after we initialized
2587  * the forcewake domains. Prune them, to make sure they only reference existing
2588  * engines.
2589  */
2590 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2591 					  struct intel_gt *gt)
2592 {
2593 	enum forcewake_domains fw_domains = uncore->fw_domains;
2594 	enum forcewake_domain_id domain_id;
2595 	int i;
2596 
2597 	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2598 		return;
2599 
2600 	for (i = 0; i < I915_MAX_VCS; i++) {
2601 		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2602 
2603 		if (HAS_ENGINE(gt, _VCS(i)))
2604 			continue;
2605 
2606 		/*
2607 		 * Starting with XeHP, the power well for an even-numbered
2608 		 * VDBOX is also used for shared units within the
2609 		 * media slice such as SFC.  So even if the engine
2610 		 * itself is fused off, we still need to initialize
2611 		 * the forcewake domain if any of the other engines
2612 		 * in the same media slice are present.
2613 		 */
2614 		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
2615 			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2616 				continue;
2617 
2618 			if (HAS_ENGINE(gt, _VECS(i / 2)))
2619 				continue;
2620 		}
2621 
2622 		if (fw_domains & BIT(domain_id))
2623 			fw_domain_fini(uncore, domain_id);
2624 	}
2625 
2626 	for (i = 0; i < I915_MAX_VECS; i++) {
2627 		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2628 
2629 		if (HAS_ENGINE(gt, _VECS(i)))
2630 			continue;
2631 
2632 		if (fw_domains & BIT(domain_id))
2633 			fw_domain_fini(uncore, domain_id);
2634 	}
2635 
2636 	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
2637 		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2638 }
2639 
2640 /*
2641  * The driver-initiated FLR is the highest level of reset that we can trigger
2642  * from within the driver. It is different from the PCI FLR in that it doesn't
2643  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
2644  * it doesn't require a re-enumeration of the PCI BARs. However, the
2645  * driver-initiated FLR does still cause a reset of both GT and display and a
2646  * memory wipe of local and stolen memory, so recovery would require a full HW
2647  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
2648  * perform the FLR as the very last action before releasing access to the HW
2649  * during the driver release flow, we don't attempt recovery at all, because
2650  * if/when a new instance of i915 is bound to the device it will do a full
2651  * re-init anyway.
2652  */
2653 static void driver_initiated_flr(struct intel_uncore *uncore)
2654 {
2655 	struct drm_i915_private *i915 = uncore->i915;
2656 	unsigned int flr_timeout_ms;
2657 	int ret;
2658 
2659 	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
2660 
2661 	/*
2662 	 * The specification recommends a 3 seconds FLR reset timeout. To be
2663 	 * cautious, we will extend this to 9 seconds, three times the specified
2664 	 * timeout.
2665 	 */
2666 	flr_timeout_ms = 9000;
2667 
2668 	/*
2669 	 * Make sure any pending FLR requests have cleared by waiting for the
2670 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
2671 	 * to make sure it's not still set from a prior attempt (it's a write to
2672 	 * clear bit).
2673 	 * Note that we should never be in a situation where a previous attempt
2674 	 * is still pending (unless the HW is totally dead), but better to be
2675 	 * safe in case something unexpected happens
2676 	 */
2677 	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
2678 	if (ret) {
2679 		drm_err(&i915->drm,
2680 			"Failed to wait for Driver-FLR bit to clear! %d\n",
2681 			ret);
2682 		return;
2683 	}
2684 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2685 
2686 	/* Trigger the actual Driver-FLR */
2687 	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2688 
2689 	/* Wait for hardware teardown to complete */
2690 	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2691 					 DRIVERFLR, 0,
2692 					 flr_timeout_ms, NULL);
2693 	if (ret) {
2694 		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
2695 		return;
2696 	}
2697 
2698 	/* Wait for hardware/firmware re-init to complete */
2699 	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2700 					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
2701 					 flr_timeout_ms, NULL);
2702 	if (ret) {
2703 		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
2704 		return;
2705 	}
2706 
2707 	/* Clear sticky completion status */
2708 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2709 }
2710 
/*
 * Called via drm-managed action during driver release: tear down the
 * forcewake machinery and, when required, perform the driver-initiated FLR
 * as the very last hardware access (see the comment above
 * driver_initiated_flr()).
 */
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
	struct intel_uncore *uncore = data;

	if (intel_uncore_has_forcewake(uncore)) {
		/*
		 * The _unlocked notifier unregister presumably requires the
		 * punit lock to be held by the caller — hence the
		 * acquire/release pair around the whole teardown; confirm
		 * against the iosf_mbi API.
		 */
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	/* FLR must come last: it resets GT/display and wipes local memory */
	if (intel_uncore_needs_flr_on_fini(uncore))
		driver_initiated_flr(uncore);
}
2728 
2729 /**
2730  * __intel_wait_for_register_fw - wait until register matches expected state
2731  * @uncore: the struct intel_uncore
2732  * @reg: the register to read
2733  * @mask: mask to apply to register value
2734  * @value: expected value
2735  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2736  * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
2738  *
2739  * This routine waits until the target register @reg contains the expected
2740  * @value after applying the @mask, i.e. it waits until ::
2741  *
2742  *     (intel_uncore_read_fw(uncore, reg) & mask) == value
2743  *
2744  * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2745  * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2746  * must be not larger than 20,0000 microseconds.
2747  *
2748  * Note that this routine assumes the caller holds forcewake asserted, it is
2749  * not suitable for very long waits. See intel_wait_for_register() if you
2750  * wish to wait without holding forcewake for the duration (i.e. you expect
2751  * the wait to be slow).
2752  *
2753  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2754  */
2755 int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2756 				 i915_reg_t reg,
2757 				 u32 mask,
2758 				 u32 value,
2759 				 unsigned int fast_timeout_us,
2760 				 unsigned int slow_timeout_ms,
2761 				 u32 *out_value)
2762 {
2763 	u32 reg_value = 0;
2764 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2765 	int ret;
2766 
2767 	/* Catch any overuse of this function */
2768 	might_sleep_if(slow_timeout_ms);
2769 	GEM_BUG_ON(fast_timeout_us > 20000);
2770 	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2771 
2772 	ret = -ETIMEDOUT;
2773 	if (fast_timeout_us && fast_timeout_us <= 20000)
2774 		ret = _wait_for_atomic(done, fast_timeout_us, 0);
2775 	if (ret && slow_timeout_ms)
2776 		ret = wait_for(done, slow_timeout_ms);
2777 
2778 	if (out_value)
2779 		*out_value = reg_value;
2780 
2781 	return ret;
2782 #undef done
2783 }
2784 
2785 /**
2786  * __intel_wait_for_register - wait until register matches expected state
2787  * @uncore: the struct intel_uncore
2788  * @reg: the register to read
2789  * @mask: mask to apply to register value
2790  * @value: expected value
2791  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2792  * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
2794  *
2795  * This routine waits until the target register @reg contains the expected
2796  * @value after applying the @mask, i.e. it waits until ::
2797  *
2798  *     (intel_uncore_read(uncore, reg) & mask) == value
2799  *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2801  *
2802  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2803  */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	/* Forcewake domains needed to read @reg with the raw accessors */
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	/*
	 * Fast path: hold the uncore lock and forcewake only for the short
	 * atomic poll (slow_timeout_ms is forced to 0 here).
	 */
	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	/*
	 * Slow path: poll without keeping forcewake asserted for the whole
	 * duration; each intel_uncore_read_notrace() handles any required
	 * wakeup itself. NOTE(review): the trailing __wait_for() arguments
	 * look like total-usec / min / max poll intervals — confirm against
	 * the macro definition.
	 */
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
2843 
2844 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2845 {
2846 	bool ret;
2847 
2848 	if (!uncore->debug)
2849 		return false;
2850 
2851 	spin_lock_irq(&uncore->debug->lock);
2852 	ret = check_for_unclaimed_mmio(uncore);
2853 	spin_unlock_irq(&uncore->debug->lock);
2854 
2855 	return ret;
2856 }
2857 
2858 bool
2859 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2860 {
2861 	bool ret = false;
2862 
2863 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2864 		return false;
2865 
2866 	spin_lock_irq(&uncore->debug->lock);
2867 
2868 	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2869 		goto out;
2870 
2871 	if (unlikely(check_for_unclaimed_mmio(uncore))) {
2872 		if (!uncore->i915->params.mmio_debug) {
2873 			drm_dbg(&uncore->i915->drm,
2874 				"Unclaimed register detected, "
2875 				"enabling oneshot unclaimed register reporting. "
2876 				"Please use i915.mmio_debug=N for more information.\n");
2877 			uncore->i915->params.mmio_debug++;
2878 		}
2879 		uncore->debug->unclaimed_mmio_check--;
2880 		ret = true;
2881 	}
2882 
2883 out:
2884 	spin_unlock_irq(&uncore->debug->lock);
2885 
2886 	return ret;
2887 }
2888 
2889 /**
2890  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2891  * 				    a register
2892  * @uncore: pointer to struct intel_uncore
2893  * @reg: register in question
2894  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2895  *
2896  * Returns a set of forcewake domains required to be taken with for example
2897  * intel_uncore_forcewake_get for the specified register to be accessible in the
2898  * specified mode (read, write or read/write) with raw mmio accessors.
2899  *
2900  * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2901  * callers to do FIFO management on their own or risk losing writes.
2902  */
2903 enum forcewake_domains
2904 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2905 			       i915_reg_t reg, unsigned int op)
2906 {
2907 	enum forcewake_domains fw_domains = 0;
2908 
2909 	drm_WARN_ON(&uncore->i915->drm, !op);
2910 
2911 	if (!intel_uncore_has_forcewake(uncore))
2912 		return 0;
2913 
2914 	if (op & FW_REG_READ)
2915 		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2916 
2917 	if (op & FW_REG_WRITE)
2918 		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2919 
2920 	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2921 
2922 	return fw_domains;
2923 }
2924 
2925 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2926 #include "selftests/mock_uncore.c"
2927 #include "selftests/intel_uncore.c"
2928 #endif
2929