1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Copyright © 2019 Intel Corporation
4 */
5
6 #include <linux/string_helpers.h>
7
8 #include "i915_drv.h"
9 #include "i915_irq.h"
10 #include "i915_reg.h"
11 #include "intel_backlight_regs.h"
12 #include "intel_cdclk.h"
13 #include "intel_clock_gating.h"
14 #include "intel_combo_phy.h"
15 #include "intel_de.h"
16 #include "intel_display_power.h"
17 #include "intel_display_power_map.h"
18 #include "intel_display_power_well.h"
19 #include "intel_display_types.h"
20 #include "intel_dmc.h"
21 #include "intel_mchbar_regs.h"
22 #include "intel_pch_refclk.h"
23 #include "intel_pcode.h"
24 #include "intel_pmdemand.h"
25 #include "intel_pps_regs.h"
26 #include "intel_snps_phy.h"
27 #include "skl_watermark.h"
28 #include "skl_watermark_regs.h"
29 #include "vlv_sideband.h"
30
31 #define for_each_power_domain_well(__display, __power_well, __domain) \
32 for_each_power_well((__display), __power_well) \
33 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
34
35 #define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
36 for_each_power_well_reverse((__display), __power_well) \
37 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
38
39 static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)40 intel_display_power_domain_str(enum intel_display_power_domain domain)
41 {
42 switch (domain) {
43 case POWER_DOMAIN_DISPLAY_CORE:
44 return "DISPLAY_CORE";
45 case POWER_DOMAIN_PIPE_A:
46 return "PIPE_A";
47 case POWER_DOMAIN_PIPE_B:
48 return "PIPE_B";
49 case POWER_DOMAIN_PIPE_C:
50 return "PIPE_C";
51 case POWER_DOMAIN_PIPE_D:
52 return "PIPE_D";
53 case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
54 return "PIPE_PANEL_FITTER_A";
55 case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
56 return "PIPE_PANEL_FITTER_B";
57 case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
58 return "PIPE_PANEL_FITTER_C";
59 case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
60 return "PIPE_PANEL_FITTER_D";
61 case POWER_DOMAIN_TRANSCODER_A:
62 return "TRANSCODER_A";
63 case POWER_DOMAIN_TRANSCODER_B:
64 return "TRANSCODER_B";
65 case POWER_DOMAIN_TRANSCODER_C:
66 return "TRANSCODER_C";
67 case POWER_DOMAIN_TRANSCODER_D:
68 return "TRANSCODER_D";
69 case POWER_DOMAIN_TRANSCODER_EDP:
70 return "TRANSCODER_EDP";
71 case POWER_DOMAIN_TRANSCODER_DSI_A:
72 return "TRANSCODER_DSI_A";
73 case POWER_DOMAIN_TRANSCODER_DSI_C:
74 return "TRANSCODER_DSI_C";
75 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
76 return "TRANSCODER_VDSC_PW2";
77 case POWER_DOMAIN_PORT_DDI_LANES_A:
78 return "PORT_DDI_LANES_A";
79 case POWER_DOMAIN_PORT_DDI_LANES_B:
80 return "PORT_DDI_LANES_B";
81 case POWER_DOMAIN_PORT_DDI_LANES_C:
82 return "PORT_DDI_LANES_C";
83 case POWER_DOMAIN_PORT_DDI_LANES_D:
84 return "PORT_DDI_LANES_D";
85 case POWER_DOMAIN_PORT_DDI_LANES_E:
86 return "PORT_DDI_LANES_E";
87 case POWER_DOMAIN_PORT_DDI_LANES_F:
88 return "PORT_DDI_LANES_F";
89 case POWER_DOMAIN_PORT_DDI_LANES_TC1:
90 return "PORT_DDI_LANES_TC1";
91 case POWER_DOMAIN_PORT_DDI_LANES_TC2:
92 return "PORT_DDI_LANES_TC2";
93 case POWER_DOMAIN_PORT_DDI_LANES_TC3:
94 return "PORT_DDI_LANES_TC3";
95 case POWER_DOMAIN_PORT_DDI_LANES_TC4:
96 return "PORT_DDI_LANES_TC4";
97 case POWER_DOMAIN_PORT_DDI_LANES_TC5:
98 return "PORT_DDI_LANES_TC5";
99 case POWER_DOMAIN_PORT_DDI_LANES_TC6:
100 return "PORT_DDI_LANES_TC6";
101 case POWER_DOMAIN_PORT_DDI_IO_A:
102 return "PORT_DDI_IO_A";
103 case POWER_DOMAIN_PORT_DDI_IO_B:
104 return "PORT_DDI_IO_B";
105 case POWER_DOMAIN_PORT_DDI_IO_C:
106 return "PORT_DDI_IO_C";
107 case POWER_DOMAIN_PORT_DDI_IO_D:
108 return "PORT_DDI_IO_D";
109 case POWER_DOMAIN_PORT_DDI_IO_E:
110 return "PORT_DDI_IO_E";
111 case POWER_DOMAIN_PORT_DDI_IO_F:
112 return "PORT_DDI_IO_F";
113 case POWER_DOMAIN_PORT_DDI_IO_TC1:
114 return "PORT_DDI_IO_TC1";
115 case POWER_DOMAIN_PORT_DDI_IO_TC2:
116 return "PORT_DDI_IO_TC2";
117 case POWER_DOMAIN_PORT_DDI_IO_TC3:
118 return "PORT_DDI_IO_TC3";
119 case POWER_DOMAIN_PORT_DDI_IO_TC4:
120 return "PORT_DDI_IO_TC4";
121 case POWER_DOMAIN_PORT_DDI_IO_TC5:
122 return "PORT_DDI_IO_TC5";
123 case POWER_DOMAIN_PORT_DDI_IO_TC6:
124 return "PORT_DDI_IO_TC6";
125 case POWER_DOMAIN_PORT_DSI:
126 return "PORT_DSI";
127 case POWER_DOMAIN_PORT_CRT:
128 return "PORT_CRT";
129 case POWER_DOMAIN_PORT_OTHER:
130 return "PORT_OTHER";
131 case POWER_DOMAIN_VGA:
132 return "VGA";
133 case POWER_DOMAIN_AUDIO_MMIO:
134 return "AUDIO_MMIO";
135 case POWER_DOMAIN_AUDIO_PLAYBACK:
136 return "AUDIO_PLAYBACK";
137 case POWER_DOMAIN_AUX_IO_A:
138 return "AUX_IO_A";
139 case POWER_DOMAIN_AUX_IO_B:
140 return "AUX_IO_B";
141 case POWER_DOMAIN_AUX_IO_C:
142 return "AUX_IO_C";
143 case POWER_DOMAIN_AUX_IO_D:
144 return "AUX_IO_D";
145 case POWER_DOMAIN_AUX_IO_E:
146 return "AUX_IO_E";
147 case POWER_DOMAIN_AUX_IO_F:
148 return "AUX_IO_F";
149 case POWER_DOMAIN_AUX_A:
150 return "AUX_A";
151 case POWER_DOMAIN_AUX_B:
152 return "AUX_B";
153 case POWER_DOMAIN_AUX_C:
154 return "AUX_C";
155 case POWER_DOMAIN_AUX_D:
156 return "AUX_D";
157 case POWER_DOMAIN_AUX_E:
158 return "AUX_E";
159 case POWER_DOMAIN_AUX_F:
160 return "AUX_F";
161 case POWER_DOMAIN_AUX_USBC1:
162 return "AUX_USBC1";
163 case POWER_DOMAIN_AUX_USBC2:
164 return "AUX_USBC2";
165 case POWER_DOMAIN_AUX_USBC3:
166 return "AUX_USBC3";
167 case POWER_DOMAIN_AUX_USBC4:
168 return "AUX_USBC4";
169 case POWER_DOMAIN_AUX_USBC5:
170 return "AUX_USBC5";
171 case POWER_DOMAIN_AUX_USBC6:
172 return "AUX_USBC6";
173 case POWER_DOMAIN_AUX_TBT1:
174 return "AUX_TBT1";
175 case POWER_DOMAIN_AUX_TBT2:
176 return "AUX_TBT2";
177 case POWER_DOMAIN_AUX_TBT3:
178 return "AUX_TBT3";
179 case POWER_DOMAIN_AUX_TBT4:
180 return "AUX_TBT4";
181 case POWER_DOMAIN_AUX_TBT5:
182 return "AUX_TBT5";
183 case POWER_DOMAIN_AUX_TBT6:
184 return "AUX_TBT6";
185 case POWER_DOMAIN_GMBUS:
186 return "GMBUS";
187 case POWER_DOMAIN_INIT:
188 return "INIT";
189 case POWER_DOMAIN_GT_IRQ:
190 return "GT_IRQ";
191 case POWER_DOMAIN_DC_OFF:
192 return "DC_OFF";
193 case POWER_DOMAIN_TC_COLD_OFF:
194 return "TC_COLD_OFF";
195 default:
196 MISSING_CASE(domain);
197 return "?";
198 }
199 }
200
__intel_display_power_is_enabled(struct intel_display * display,enum intel_display_power_domain domain)201 static bool __intel_display_power_is_enabled(struct intel_display *display,
202 enum intel_display_power_domain domain)
203 {
204 struct i915_power_well *power_well;
205 bool is_enabled;
206
207 if (pm_runtime_suspended(display->drm->dev))
208 return false;
209
210 is_enabled = true;
211
212 for_each_power_domain_well_reverse(display, power_well, domain) {
213 if (intel_power_well_is_always_on(power_well))
214 continue;
215
216 if (!intel_power_well_is_enabled_cached(power_well)) {
217 is_enabled = false;
218 break;
219 }
220 }
221
222 return is_enabled;
223 }
224
225 /**
226 * intel_display_power_is_enabled - check for a power domain
227 * @display: display device instance
228 * @domain: power domain to check
229 *
230 * This function can be used to check the hw power domain state. It is mostly
231 * used in hardware state readout functions. Everywhere else code should rely
232 * upon explicit power domain reference counting to ensure that the hardware
233 * block is powered up before accessing it.
234 *
235 * Callers must hold the relevant modesetting locks to ensure that concurrent
236 * threads can't disable the power well while the caller tries to read a few
237 * registers.
238 *
239 * Returns:
240 * True when the power domain is enabled, false otherwise.
241 */
intel_display_power_is_enabled(struct intel_display * display,enum intel_display_power_domain domain)242 bool intel_display_power_is_enabled(struct intel_display *display,
243 enum intel_display_power_domain domain)
244 {
245 struct i915_power_domains *power_domains = &display->power.domains;
246 bool ret;
247
248 mutex_lock(&power_domains->lock);
249 ret = __intel_display_power_is_enabled(display, domain);
250 mutex_unlock(&power_domains->lock);
251
252 return ret;
253 }
254
255 static u32
sanitize_target_dc_state(struct intel_display * display,u32 target_dc_state)256 sanitize_target_dc_state(struct intel_display *display,
257 u32 target_dc_state)
258 {
259 struct i915_power_domains *power_domains = &display->power.domains;
260 static const u32 states[] = {
261 DC_STATE_EN_UPTO_DC6,
262 DC_STATE_EN_UPTO_DC5,
263 DC_STATE_EN_DC3CO,
264 DC_STATE_DISABLE,
265 };
266 int i;
267
268 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
269 if (target_dc_state != states[i])
270 continue;
271
272 if (power_domains->allowed_dc_mask & target_dc_state)
273 break;
274
275 target_dc_state = states[i + 1];
276 }
277
278 return target_dc_state;
279 }
280
281 /**
282 * intel_display_power_set_target_dc_state - Set target dc state.
283 * @display: display device
284 * @state: state which needs to be set as target_dc_state.
285 *
286 * This function set the "DC off" power well target_dc_state,
287 * based upon this target_dc_stste, "DC off" power well will
288 * enable desired DC state.
289 */
intel_display_power_set_target_dc_state(struct intel_display * display,u32 state)290 void intel_display_power_set_target_dc_state(struct intel_display *display,
291 u32 state)
292 {
293 struct i915_power_well *power_well;
294 bool dc_off_enabled;
295 struct i915_power_domains *power_domains = &display->power.domains;
296
297 mutex_lock(&power_domains->lock);
298 power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
299
300 if (drm_WARN_ON(display->drm, !power_well))
301 goto unlock;
302
303 state = sanitize_target_dc_state(display, state);
304
305 if (state == power_domains->target_dc_state)
306 goto unlock;
307
308 dc_off_enabled = intel_power_well_is_enabled(display, power_well);
309 /*
310 * If DC off power well is disabled, need to enable and disable the
311 * DC off power well to effect target DC state.
312 */
313 if (!dc_off_enabled)
314 intel_power_well_enable(display, power_well);
315
316 power_domains->target_dc_state = state;
317
318 if (!dc_off_enabled)
319 intel_power_well_disable(display, power_well);
320
321 unlock:
322 mutex_unlock(&power_domains->lock);
323 }
324
__async_put_domains_mask(struct i915_power_domains * power_domains,struct intel_power_domain_mask * mask)325 static void __async_put_domains_mask(struct i915_power_domains *power_domains,
326 struct intel_power_domain_mask *mask)
327 {
328 bitmap_or(mask->bits,
329 power_domains->async_put_domains[0].bits,
330 power_domains->async_put_domains[1].bits,
331 POWER_DOMAIN_NUM);
332 }
333
334 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
335
336 static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains * power_domains)337 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
338 {
339 struct intel_display *display = container_of(power_domains,
340 struct intel_display,
341 power.domains);
342
343 return !drm_WARN_ON(display->drm,
344 bitmap_intersects(power_domains->async_put_domains[0].bits,
345 power_domains->async_put_domains[1].bits,
346 POWER_DOMAIN_NUM));
347 }
348
349 static bool
__async_put_domains_state_ok(struct i915_power_domains * power_domains)350 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
351 {
352 struct intel_display *display = container_of(power_domains,
353 struct intel_display,
354 power.domains);
355 struct intel_power_domain_mask async_put_mask;
356 enum intel_display_power_domain domain;
357 bool err = false;
358
359 err |= !assert_async_put_domain_masks_disjoint(power_domains);
360 __async_put_domains_mask(power_domains, &async_put_mask);
361 err |= drm_WARN_ON(display->drm,
362 !!power_domains->async_put_wakeref !=
363 !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
364
365 for_each_power_domain(domain, &async_put_mask)
366 err |= drm_WARN_ON(display->drm,
367 power_domains->domain_use_count[domain] != 1);
368
369 return !err;
370 }
371
print_power_domains(struct i915_power_domains * power_domains,const char * prefix,struct intel_power_domain_mask * mask)372 static void print_power_domains(struct i915_power_domains *power_domains,
373 const char *prefix, struct intel_power_domain_mask *mask)
374 {
375 struct intel_display *display = container_of(power_domains,
376 struct intel_display,
377 power.domains);
378 enum intel_display_power_domain domain;
379
380 drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
381 for_each_power_domain(domain, mask)
382 drm_dbg_kms(display->drm, "%s use_count %d\n",
383 intel_display_power_domain_str(domain),
384 power_domains->domain_use_count[domain]);
385 }
386
387 static void
print_async_put_domains_state(struct i915_power_domains * power_domains)388 print_async_put_domains_state(struct i915_power_domains *power_domains)
389 {
390 struct intel_display *display = container_of(power_domains,
391 struct intel_display,
392 power.domains);
393
394 drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
395 str_yes_no(power_domains->async_put_wakeref));
396
397 print_power_domains(power_domains, "async_put_domains[0]",
398 &power_domains->async_put_domains[0]);
399 print_power_domains(power_domains, "async_put_domains[1]",
400 &power_domains->async_put_domains[1]);
401 }
402
403 static void
verify_async_put_domains_state(struct i915_power_domains * power_domains)404 verify_async_put_domains_state(struct i915_power_domains *power_domains)
405 {
406 if (!__async_put_domains_state_ok(power_domains))
407 print_async_put_domains_state(power_domains);
408 }
409
410 #else
411
412 static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains * power_domains)413 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
414 {
415 }
416
417 static void
verify_async_put_domains_state(struct i915_power_domains * power_domains)418 verify_async_put_domains_state(struct i915_power_domains *power_domains)
419 {
420 }
421
422 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
423
async_put_domains_mask(struct i915_power_domains * power_domains,struct intel_power_domain_mask * mask)424 static void async_put_domains_mask(struct i915_power_domains *power_domains,
425 struct intel_power_domain_mask *mask)
426
427 {
428 assert_async_put_domain_masks_disjoint(power_domains);
429
430 __async_put_domains_mask(power_domains, mask);
431 }
432
433 static void
async_put_domains_clear_domain(struct i915_power_domains * power_domains,enum intel_display_power_domain domain)434 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
435 enum intel_display_power_domain domain)
436 {
437 assert_async_put_domain_masks_disjoint(power_domains);
438
439 clear_bit(domain, power_domains->async_put_domains[0].bits);
440 clear_bit(domain, power_domains->async_put_domains[1].bits);
441 }
442
443 static void
cancel_async_put_work(struct i915_power_domains * power_domains,bool sync)444 cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
445 {
446 if (sync)
447 cancel_delayed_work_sync(&power_domains->async_put_work);
448 else
449 cancel_delayed_work(&power_domains->async_put_work);
450
451 power_domains->async_put_next_delay = 0;
452 }
453
454 static bool
intel_display_power_grab_async_put_ref(struct intel_display * display,enum intel_display_power_domain domain)455 intel_display_power_grab_async_put_ref(struct intel_display *display,
456 enum intel_display_power_domain domain)
457 {
458 struct drm_i915_private *dev_priv = to_i915(display->drm);
459 struct i915_power_domains *power_domains = &display->power.domains;
460 struct intel_power_domain_mask async_put_mask;
461 bool ret = false;
462
463 async_put_domains_mask(power_domains, &async_put_mask);
464 if (!test_bit(domain, async_put_mask.bits))
465 goto out_verify;
466
467 async_put_domains_clear_domain(power_domains, domain);
468
469 ret = true;
470
471 async_put_domains_mask(power_domains, &async_put_mask);
472 if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
473 goto out_verify;
474
475 cancel_async_put_work(power_domains, false);
476 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
477 fetch_and_zero(&power_domains->async_put_wakeref));
478 out_verify:
479 verify_async_put_domains_state(power_domains);
480
481 return ret;
482 }
483
484 static void
__intel_display_power_get_domain(struct intel_display * display,enum intel_display_power_domain domain)485 __intel_display_power_get_domain(struct intel_display *display,
486 enum intel_display_power_domain domain)
487 {
488 struct i915_power_domains *power_domains = &display->power.domains;
489 struct i915_power_well *power_well;
490
491 if (intel_display_power_grab_async_put_ref(display, domain))
492 return;
493
494 for_each_power_domain_well(display, power_well, domain)
495 intel_power_well_get(display, power_well);
496
497 power_domains->domain_use_count[domain]++;
498 }
499
500 /**
501 * intel_display_power_get - grab a power domain reference
502 * @display: display device instance
503 * @domain: power domain to reference
504 *
505 * This function grabs a power domain reference for @domain and ensures that the
506 * power domain and all its parents are powered up. Therefore users should only
507 * grab a reference to the innermost power domain they need.
508 *
509 * Any power domain reference obtained by this function must have a symmetric
510 * call to intel_display_power_put() to release the reference again.
511 */
intel_display_power_get(struct intel_display * display,enum intel_display_power_domain domain)512 intel_wakeref_t intel_display_power_get(struct intel_display *display,
513 enum intel_display_power_domain domain)
514 {
515 struct drm_i915_private *dev_priv = to_i915(display->drm);
516 struct i915_power_domains *power_domains = &display->power.domains;
517 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
518
519 mutex_lock(&power_domains->lock);
520 __intel_display_power_get_domain(display, domain);
521 mutex_unlock(&power_domains->lock);
522
523 return wakeref;
524 }
525
526 /**
527 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
528 * @display: display device instance
529 * @domain: power domain to reference
530 *
531 * This function grabs a power domain reference for @domain and ensures that the
532 * power domain and all its parents are powered up. Therefore users should only
533 * grab a reference to the innermost power domain they need.
534 *
535 * Any power domain reference obtained by this function must have a symmetric
536 * call to intel_display_power_put() to release the reference again.
537 */
538 intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display * display,enum intel_display_power_domain domain)539 intel_display_power_get_if_enabled(struct intel_display *display,
540 enum intel_display_power_domain domain)
541 {
542 struct drm_i915_private *dev_priv = to_i915(display->drm);
543 struct i915_power_domains *power_domains = &display->power.domains;
544 intel_wakeref_t wakeref;
545 bool is_enabled;
546
547 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
548 if (!wakeref)
549 return NULL;
550
551 mutex_lock(&power_domains->lock);
552
553 if (__intel_display_power_is_enabled(display, domain)) {
554 __intel_display_power_get_domain(display, domain);
555 is_enabled = true;
556 } else {
557 is_enabled = false;
558 }
559
560 mutex_unlock(&power_domains->lock);
561
562 if (!is_enabled) {
563 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
564 wakeref = NULL;
565 }
566
567 return wakeref;
568 }
569
570 static void
__intel_display_power_put_domain(struct intel_display * display,enum intel_display_power_domain domain)571 __intel_display_power_put_domain(struct intel_display *display,
572 enum intel_display_power_domain domain)
573 {
574 struct i915_power_domains *power_domains = &display->power.domains;
575 struct i915_power_well *power_well;
576 const char *name = intel_display_power_domain_str(domain);
577 struct intel_power_domain_mask async_put_mask;
578
579 drm_WARN(display->drm, !power_domains->domain_use_count[domain],
580 "Use count on domain %s is already zero\n",
581 name);
582 async_put_domains_mask(power_domains, &async_put_mask);
583 drm_WARN(display->drm,
584 test_bit(domain, async_put_mask.bits),
585 "Async disabling of domain %s is pending\n",
586 name);
587
588 power_domains->domain_use_count[domain]--;
589
590 for_each_power_domain_well_reverse(display, power_well, domain)
591 intel_power_well_put(display, power_well);
592 }
593
__intel_display_power_put(struct intel_display * display,enum intel_display_power_domain domain)594 static void __intel_display_power_put(struct intel_display *display,
595 enum intel_display_power_domain domain)
596 {
597 struct i915_power_domains *power_domains = &display->power.domains;
598
599 mutex_lock(&power_domains->lock);
600 __intel_display_power_put_domain(display, domain);
601 mutex_unlock(&power_domains->lock);
602 }
603
604 static void
queue_async_put_domains_work(struct i915_power_domains * power_domains,intel_wakeref_t wakeref,int delay_ms)605 queue_async_put_domains_work(struct i915_power_domains *power_domains,
606 intel_wakeref_t wakeref,
607 int delay_ms)
608 {
609 struct intel_display *display = container_of(power_domains,
610 struct intel_display,
611 power.domains);
612 drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
613 power_domains->async_put_wakeref = wakeref;
614 drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
615 &power_domains->async_put_work,
616 msecs_to_jiffies(delay_ms)));
617 }
618
619 static void
release_async_put_domains(struct i915_power_domains * power_domains,struct intel_power_domain_mask * mask)620 release_async_put_domains(struct i915_power_domains *power_domains,
621 struct intel_power_domain_mask *mask)
622 {
623 struct intel_display *display = container_of(power_domains,
624 struct intel_display,
625 power.domains);
626 struct drm_i915_private *dev_priv = to_i915(display->drm);
627 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
628 enum intel_display_power_domain domain;
629 intel_wakeref_t wakeref;
630
631 wakeref = intel_runtime_pm_get_noresume(rpm);
632
633 for_each_power_domain(domain, mask) {
634 /* Clear before put, so put's sanity check is happy. */
635 async_put_domains_clear_domain(power_domains, domain);
636 __intel_display_power_put_domain(display, domain);
637 }
638
639 intel_runtime_pm_put(rpm, wakeref);
640 }
641
642 static void
intel_display_power_put_async_work(struct work_struct * work)643 intel_display_power_put_async_work(struct work_struct *work)
644 {
645 struct intel_display *display = container_of(work, struct intel_display,
646 power.domains.async_put_work.work);
647 struct drm_i915_private *dev_priv = to_i915(display->drm);
648 struct i915_power_domains *power_domains = &display->power.domains;
649 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
650 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
651 intel_wakeref_t old_work_wakeref = NULL;
652
653 mutex_lock(&power_domains->lock);
654
655 /*
656 * Bail out if all the domain refs pending to be released were grabbed
657 * by subsequent gets or a flush_work.
658 */
659 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
660 if (!old_work_wakeref)
661 goto out_verify;
662
663 release_async_put_domains(power_domains,
664 &power_domains->async_put_domains[0]);
665
666 /*
667 * Cancel the work that got queued after this one got dequeued,
668 * since here we released the corresponding async-put reference.
669 */
670 cancel_async_put_work(power_domains, false);
671
672 /* Requeue the work if more domains were async put meanwhile. */
673 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
674 bitmap_copy(power_domains->async_put_domains[0].bits,
675 power_domains->async_put_domains[1].bits,
676 POWER_DOMAIN_NUM);
677 bitmap_zero(power_domains->async_put_domains[1].bits,
678 POWER_DOMAIN_NUM);
679 queue_async_put_domains_work(power_domains,
680 fetch_and_zero(&new_work_wakeref),
681 power_domains->async_put_next_delay);
682 power_domains->async_put_next_delay = 0;
683 }
684
685 out_verify:
686 verify_async_put_domains_state(power_domains);
687
688 mutex_unlock(&power_domains->lock);
689
690 if (old_work_wakeref)
691 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
692 if (new_work_wakeref)
693 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
694 }
695
696 /**
697 * __intel_display_power_put_async - release a power domain reference asynchronously
698 * @display: display device instance
699 * @domain: power domain to reference
700 * @wakeref: wakeref acquired for the reference that is being released
701 * @delay_ms: delay of powering down the power domain
702 *
703 * This function drops the power domain reference obtained by
704 * intel_display_power_get*() and schedules a work to power down the
705 * corresponding hardware block if this is the last reference.
706 * The power down is delayed by @delay_ms if this is >= 0, or by a default
707 * 100 ms otherwise.
708 */
__intel_display_power_put_async(struct intel_display * display,enum intel_display_power_domain domain,intel_wakeref_t wakeref,int delay_ms)709 void __intel_display_power_put_async(struct intel_display *display,
710 enum intel_display_power_domain domain,
711 intel_wakeref_t wakeref,
712 int delay_ms)
713 {
714 struct drm_i915_private *i915 = to_i915(display->drm);
715 struct i915_power_domains *power_domains = &display->power.domains;
716 struct intel_runtime_pm *rpm = &i915->runtime_pm;
717 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
718
719 delay_ms = delay_ms >= 0 ? delay_ms : 100;
720
721 mutex_lock(&power_domains->lock);
722
723 if (power_domains->domain_use_count[domain] > 1) {
724 __intel_display_power_put_domain(display, domain);
725
726 goto out_verify;
727 }
728
729 drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);
730
731 /* Let a pending work requeue itself or queue a new one. */
732 if (power_domains->async_put_wakeref) {
733 set_bit(domain, power_domains->async_put_domains[1].bits);
734 power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
735 delay_ms);
736 } else {
737 set_bit(domain, power_domains->async_put_domains[0].bits);
738 queue_async_put_domains_work(power_domains,
739 fetch_and_zero(&work_wakeref),
740 delay_ms);
741 }
742
743 out_verify:
744 verify_async_put_domains_state(power_domains);
745
746 mutex_unlock(&power_domains->lock);
747
748 if (work_wakeref)
749 intel_runtime_pm_put_raw(rpm, work_wakeref);
750
751 intel_runtime_pm_put(rpm, wakeref);
752 }
753
754 /**
755 * intel_display_power_flush_work - flushes the async display power disabling work
756 * @display: display device instance
757 *
758 * Flushes any pending work that was scheduled by a preceding
759 * intel_display_power_put_async() call, completing the disabling of the
760 * corresponding power domains.
761 *
762 * Note that the work handler function may still be running after this
763 * function returns; to ensure that the work handler isn't running use
764 * intel_display_power_flush_work_sync() instead.
765 */
intel_display_power_flush_work(struct intel_display * display)766 void intel_display_power_flush_work(struct intel_display *display)
767 {
768 struct drm_i915_private *i915 = to_i915(display->drm);
769 struct i915_power_domains *power_domains = &display->power.domains;
770 struct intel_power_domain_mask async_put_mask;
771 intel_wakeref_t work_wakeref;
772
773 mutex_lock(&power_domains->lock);
774
775 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
776 if (!work_wakeref)
777 goto out_verify;
778
779 async_put_domains_mask(power_domains, &async_put_mask);
780 release_async_put_domains(power_domains, &async_put_mask);
781 cancel_async_put_work(power_domains, false);
782
783 out_verify:
784 verify_async_put_domains_state(power_domains);
785
786 mutex_unlock(&power_domains->lock);
787
788 if (work_wakeref)
789 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
790 }
791
792 /**
793 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
794 * @display: display device instance
795 *
796 * Like intel_display_power_flush_work(), but also ensure that the work
797 * handler function is not running any more when this function returns.
798 */
799 static void
intel_display_power_flush_work_sync(struct intel_display * display)800 intel_display_power_flush_work_sync(struct intel_display *display)
801 {
802 struct i915_power_domains *power_domains = &display->power.domains;
803
804 intel_display_power_flush_work(display);
805 cancel_async_put_work(power_domains, true);
806
807 verify_async_put_domains_state(power_domains);
808
809 drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
810 }
811
812 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
813 /**
814 * intel_display_power_put - release a power domain reference
815 * @display: display device instance
816 * @domain: power domain to reference
817 * @wakeref: wakeref acquired for the reference that is being released
818 *
819 * This function drops the power domain reference obtained by
820 * intel_display_power_get() and might power down the corresponding hardware
821 * block right away if this is the last reference.
822 */
intel_display_power_put(struct intel_display * display,enum intel_display_power_domain domain,intel_wakeref_t wakeref)823 void intel_display_power_put(struct intel_display *display,
824 enum intel_display_power_domain domain,
825 intel_wakeref_t wakeref)
826 {
827 struct drm_i915_private *dev_priv = to_i915(display->drm);
828
829 __intel_display_power_put(display, domain);
830 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
831 }
832 #else
833 /**
834 * intel_display_power_put_unchecked - release an unchecked power domain reference
835 * @display: display device instance
836 * @domain: power domain to reference
837 *
838 * This function drops the power domain reference obtained by
839 * intel_display_power_get() and might power down the corresponding hardware
840 * block right away if this is the last reference.
841 *
842 * This function is only for the power domain code's internal use to suppress wakeref
843 * tracking when the corresponding debug kconfig option is disabled, should not
844 * be used otherwise.
845 */
intel_display_power_put_unchecked(struct intel_display * display,enum intel_display_power_domain domain)846 void intel_display_power_put_unchecked(struct intel_display *display,
847 enum intel_display_power_domain domain)
848 {
849 struct drm_i915_private *dev_priv = to_i915(display->drm);
850
851 __intel_display_power_put(display, domain);
852 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
853 }
854 #endif
855
856 void
intel_display_power_get_in_set(struct intel_display * display,struct intel_display_power_domain_set * power_domain_set,enum intel_display_power_domain domain)857 intel_display_power_get_in_set(struct intel_display *display,
858 struct intel_display_power_domain_set *power_domain_set,
859 enum intel_display_power_domain domain)
860 {
861 intel_wakeref_t __maybe_unused wf;
862
863 drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
864
865 wf = intel_display_power_get(display, domain);
866 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
867 power_domain_set->wakerefs[domain] = wf;
868 #endif
869 set_bit(domain, power_domain_set->mask.bits);
870 }
871
872 bool
intel_display_power_get_in_set_if_enabled(struct intel_display * display,struct intel_display_power_domain_set * power_domain_set,enum intel_display_power_domain domain)873 intel_display_power_get_in_set_if_enabled(struct intel_display *display,
874 struct intel_display_power_domain_set *power_domain_set,
875 enum intel_display_power_domain domain)
876 {
877 intel_wakeref_t wf;
878
879 drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
880
881 wf = intel_display_power_get_if_enabled(display, domain);
882 if (!wf)
883 return false;
884
885 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
886 power_domain_set->wakerefs[domain] = wf;
887 #endif
888 set_bit(domain, power_domain_set->mask.bits);
889
890 return true;
891 }
892
893 void
intel_display_power_put_mask_in_set(struct intel_display * display,struct intel_display_power_domain_set * power_domain_set,struct intel_power_domain_mask * mask)894 intel_display_power_put_mask_in_set(struct intel_display *display,
895 struct intel_display_power_domain_set *power_domain_set,
896 struct intel_power_domain_mask *mask)
897 {
898 enum intel_display_power_domain domain;
899
900 drm_WARN_ON(display->drm,
901 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
902
903 for_each_power_domain(domain, mask) {
904 intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;
905
906 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
907 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
908 #endif
909 intel_display_power_put(display, domain, wf);
910 clear_bit(domain, power_domain_set->mask.bits);
911 }
912 }
913
914 static int
sanitize_disable_power_well_option(int disable_power_well)915 sanitize_disable_power_well_option(int disable_power_well)
916 {
917 if (disable_power_well >= 0)
918 return !!disable_power_well;
919
920 return 1;
921 }
922
get_allowed_dc_mask(struct intel_display * display,int enable_dc)923 static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
924 {
925 u32 mask;
926 int requested_dc;
927 int max_dc;
928
929 if (!HAS_DISPLAY(display))
930 return 0;
931
932 if (DISPLAY_VER(display) >= 20)
933 max_dc = 2;
934 else if (display->platform.dg2)
935 max_dc = 1;
936 else if (display->platform.dg1)
937 max_dc = 3;
938 else if (DISPLAY_VER(display) >= 12)
939 max_dc = 4;
940 else if (display->platform.geminilake || display->platform.broxton)
941 max_dc = 1;
942 else if (DISPLAY_VER(display) >= 9)
943 max_dc = 2;
944 else
945 max_dc = 0;
946
947 /*
948 * DC9 has a separate HW flow from the rest of the DC states,
949 * not depending on the DMC firmware. It's needed by system
950 * suspend/resume, so allow it unconditionally.
951 */
952 mask = display->platform.geminilake || display->platform.broxton ||
953 DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;
954
955 if (!display->params.disable_power_well)
956 max_dc = 0;
957
958 if (enable_dc >= 0 && enable_dc <= max_dc) {
959 requested_dc = enable_dc;
960 } else if (enable_dc == -1) {
961 requested_dc = max_dc;
962 } else if (enable_dc > max_dc && enable_dc <= 4) {
963 drm_dbg_kms(display->drm,
964 "Adjusting requested max DC state (%d->%d)\n",
965 enable_dc, max_dc);
966 requested_dc = max_dc;
967 } else {
968 drm_err(display->drm,
969 "Unexpected value for enable_dc (%d)\n", enable_dc);
970 requested_dc = max_dc;
971 }
972
973 switch (requested_dc) {
974 case 4:
975 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
976 break;
977 case 3:
978 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
979 break;
980 case 2:
981 mask |= DC_STATE_EN_UPTO_DC6;
982 break;
983 case 1:
984 mask |= DC_STATE_EN_UPTO_DC5;
985 break;
986 }
987
988 drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);
989
990 return mask;
991 }
992
993 /**
994 * intel_power_domains_init - initializes the power domain structures
995 * @display: display device instance
996 *
997 * Initializes the power domain structures for @display depending upon the
998 * supported platform.
999 */
intel_power_domains_init(struct intel_display * display)1000 int intel_power_domains_init(struct intel_display *display)
1001 {
1002 struct i915_power_domains *power_domains = &display->power.domains;
1003
1004 display->params.disable_power_well =
1005 sanitize_disable_power_well_option(display->params.disable_power_well);
1006 power_domains->allowed_dc_mask =
1007 get_allowed_dc_mask(display, display->params.enable_dc);
1008
1009 power_domains->target_dc_state =
1010 sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1011
1012 mutex_init(&power_domains->lock);
1013
1014 INIT_DELAYED_WORK(&power_domains->async_put_work,
1015 intel_display_power_put_async_work);
1016
1017 return intel_display_power_map_init(power_domains);
1018 }
1019
1020 /**
1021 * intel_power_domains_cleanup - clean up power domains resources
1022 * @display: display device instance
1023 *
1024 * Release any resources acquired by intel_power_domains_init()
1025 */
intel_power_domains_cleanup(struct intel_display * display)1026 void intel_power_domains_cleanup(struct intel_display *display)
1027 {
1028 intel_display_power_map_cleanup(&display->power.domains);
1029 }
1030
intel_power_domains_sync_hw(struct intel_display * display)1031 static void intel_power_domains_sync_hw(struct intel_display *display)
1032 {
1033 struct i915_power_domains *power_domains = &display->power.domains;
1034 struct i915_power_well *power_well;
1035
1036 mutex_lock(&power_domains->lock);
1037 for_each_power_well(display, power_well)
1038 intel_power_well_sync_hw(display, power_well);
1039 mutex_unlock(&power_domains->lock);
1040 }
1041
gen9_dbuf_slice_set(struct intel_display * display,enum dbuf_slice slice,bool enable)1042 static void gen9_dbuf_slice_set(struct intel_display *display,
1043 enum dbuf_slice slice, bool enable)
1044 {
1045 i915_reg_t reg = DBUF_CTL_S(slice);
1046 bool state;
1047
1048 intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
1049 enable ? DBUF_POWER_REQUEST : 0);
1050 intel_de_posting_read(display, reg);
1051 udelay(10);
1052
1053 state = intel_de_read(display, reg) & DBUF_POWER_STATE;
1054 drm_WARN(display->drm, enable != state,
1055 "DBuf slice %d power %s timeout!\n",
1056 slice, str_enable_disable(enable));
1057 }
1058
gen9_dbuf_slices_update(struct intel_display * display,u8 req_slices)1059 void gen9_dbuf_slices_update(struct intel_display *display,
1060 u8 req_slices)
1061 {
1062 struct i915_power_domains *power_domains = &display->power.domains;
1063 u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
1064 enum dbuf_slice slice;
1065
1066 drm_WARN(display->drm, req_slices & ~slice_mask,
1067 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
1068 req_slices, slice_mask);
1069
1070 drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
1071 req_slices);
1072
1073 /*
1074 * Might be running this in parallel to gen9_dc_off_power_well_enable
1075 * being called from intel_dp_detect for instance,
1076 * which causes assertion triggered by race condition,
1077 * as gen9_assert_dbuf_enabled might preempt this when registers
1078 * were already updated, while dev_priv was not.
1079 */
1080 mutex_lock(&power_domains->lock);
1081
1082 for_each_dbuf_slice(display, slice)
1083 gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));
1084
1085 display->dbuf.enabled_slices = req_slices;
1086
1087 mutex_unlock(&power_domains->lock);
1088 }
1089
gen9_dbuf_enable(struct intel_display * display)1090 static void gen9_dbuf_enable(struct intel_display *display)
1091 {
1092 u8 slices_mask;
1093
1094 display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(display);
1095
1096 slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
1097
1098 if (DISPLAY_VER(display) >= 14)
1099 intel_pmdemand_program_dbuf(display, slices_mask);
1100
1101 /*
1102 * Just power up at least 1 slice, we will
1103 * figure out later which slices we have and what we need.
1104 */
1105 gen9_dbuf_slices_update(display, slices_mask);
1106 }
1107
gen9_dbuf_disable(struct intel_display * display)1108 static void gen9_dbuf_disable(struct intel_display *display)
1109 {
1110 gen9_dbuf_slices_update(display, 0);
1111
1112 if (DISPLAY_VER(display) >= 14)
1113 intel_pmdemand_program_dbuf(display, 0);
1114 }
1115
gen12_dbuf_slices_config(struct intel_display * display)1116 static void gen12_dbuf_slices_config(struct intel_display *display)
1117 {
1118 enum dbuf_slice slice;
1119
1120 for_each_dbuf_slice(display, slice)
1121 intel_de_rmw(display, DBUF_CTL_S(slice),
1122 DBUF_TRACKER_STATE_SERVICE_MASK,
1123 DBUF_TRACKER_STATE_SERVICE(8));
1124 }
1125
icl_mbus_init(struct intel_display * display)1126 static void icl_mbus_init(struct intel_display *display)
1127 {
1128 unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
1129 u32 mask, val, i;
1130
1131 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1132 return;
1133
1134 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1135 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1136 MBUS_ABOX_B_CREDIT_MASK |
1137 MBUS_ABOX_BW_CREDIT_MASK;
1138 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1139 MBUS_ABOX_BT_CREDIT_POOL2(16) |
1140 MBUS_ABOX_B_CREDIT(1) |
1141 MBUS_ABOX_BW_CREDIT(1);
1142
1143 /*
1144 * gen12 platforms that use abox1 and abox2 for pixel data reads still
1145 * expect us to program the abox_ctl0 register as well, even though
1146 * we don't have to program other instance-0 registers like BW_BUDDY.
1147 */
1148 if (DISPLAY_VER(display) == 12)
1149 abox_regs |= BIT(0);
1150
1151 for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
1152 intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
1153 }
1154
hsw_assert_cdclk(struct intel_display * display)1155 static void hsw_assert_cdclk(struct intel_display *display)
1156 {
1157 u32 val = intel_de_read(display, LCPLL_CTL);
1158
1159 /*
1160 * The LCPLL register should be turned on by the BIOS. For now
1161 * let's just check its state and print errors in case
1162 * something is wrong. Don't even try to turn it on.
1163 */
1164
1165 if (val & LCPLL_CD_SOURCE_FCLK)
1166 drm_err(display->drm, "CDCLK source is not LCPLL\n");
1167
1168 if (val & LCPLL_PLL_DISABLE)
1169 drm_err(display->drm, "LCPLL is disabled\n");
1170
1171 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
1172 drm_err(display->drm, "LCPLL not using non-SSC reference\n");
1173 }
1174
assert_can_disable_lcpll(struct intel_display * display)1175 static void assert_can_disable_lcpll(struct intel_display *display)
1176 {
1177 struct drm_i915_private *dev_priv = to_i915(display->drm);
1178 struct intel_crtc *crtc;
1179
1180 for_each_intel_crtc(display->drm, crtc)
1181 INTEL_DISPLAY_STATE_WARN(display, crtc->active,
1182 "CRTC for pipe %c enabled\n",
1183 pipe_name(crtc->pipe));
1184
1185 INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2),
1186 "Display power well on\n");
1187 INTEL_DISPLAY_STATE_WARN(display,
1188 intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE,
1189 "SPLL enabled\n");
1190 INTEL_DISPLAY_STATE_WARN(display,
1191 intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
1192 "WRPLL1 enabled\n");
1193 INTEL_DISPLAY_STATE_WARN(display,
1194 intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
1195 "WRPLL2 enabled\n");
1196 INTEL_DISPLAY_STATE_WARN(display,
1197 intel_de_read(display, PP_STATUS(display, 0)) & PP_ON,
1198 "Panel power on\n");
1199 INTEL_DISPLAY_STATE_WARN(display,
1200 intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
1201 "CPU PWM1 enabled\n");
1202 if (display->platform.haswell)
1203 INTEL_DISPLAY_STATE_WARN(display,
1204 intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
1205 "CPU PWM2 enabled\n");
1206 INTEL_DISPLAY_STATE_WARN(display,
1207 intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
1208 "PCH PWM1 enabled\n");
1209 INTEL_DISPLAY_STATE_WARN(display,
1210 (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
1211 "Utility pin enabled in PWM mode\n");
1212 INTEL_DISPLAY_STATE_WARN(display,
1213 intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE,
1214 "PCH GTC enabled\n");
1215
1216 /*
1217 * In theory we can still leave IRQs enabled, as long as only the HPD
1218 * interrupts remain enabled. We used to check for that, but since it's
1219 * gen-specific and since we only disable LCPLL after we fully disable
1220 * the interrupts, the check below should be enough.
1221 */
1222 INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
1223 "IRQs enabled\n");
1224 }
1225
hsw_read_dcomp(struct intel_display * display)1226 static u32 hsw_read_dcomp(struct intel_display *display)
1227 {
1228 if (display->platform.haswell)
1229 return intel_de_read(display, D_COMP_HSW);
1230 else
1231 return intel_de_read(display, D_COMP_BDW);
1232 }
1233
hsw_write_dcomp(struct intel_display * display,u32 val)1234 static void hsw_write_dcomp(struct intel_display *display, u32 val)
1235 {
1236 struct drm_i915_private *dev_priv = to_i915(display->drm);
1237
1238 if (display->platform.haswell) {
1239 if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
1240 drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
1241 } else {
1242 intel_de_write(display, D_COMP_BDW, val);
1243 intel_de_posting_read(display, D_COMP_BDW);
1244 }
1245 }
1246
1247 /*
1248 * This function implements pieces of two sequences from BSpec:
1249 * - Sequence for display software to disable LCPLL
1250 * - Sequence for display software to allow package C8+
1251 * The steps implemented here are just the steps that actually touch the LCPLL
1252 * register. Callers should take care of disabling all the display engine
1253 * functions, doing the mode unset, fixing interrupts, etc.
1254 */
hsw_disable_lcpll(struct intel_display * display,bool switch_to_fclk,bool allow_power_down)1255 static void hsw_disable_lcpll(struct intel_display *display,
1256 bool switch_to_fclk, bool allow_power_down)
1257 {
1258 u32 val;
1259
1260 assert_can_disable_lcpll(display);
1261
1262 val = intel_de_read(display, LCPLL_CTL);
1263
1264 if (switch_to_fclk) {
1265 val |= LCPLL_CD_SOURCE_FCLK;
1266 intel_de_write(display, LCPLL_CTL, val);
1267
1268 if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
1269 LCPLL_CD_SOURCE_FCLK_DONE, 1))
1270 drm_err(display->drm, "Switching to FCLK failed\n");
1271
1272 val = intel_de_read(display, LCPLL_CTL);
1273 }
1274
1275 val |= LCPLL_PLL_DISABLE;
1276 intel_de_write(display, LCPLL_CTL, val);
1277 intel_de_posting_read(display, LCPLL_CTL);
1278
1279 if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
1280 drm_err(display->drm, "LCPLL still locked\n");
1281
1282 val = hsw_read_dcomp(display);
1283 val |= D_COMP_COMP_DISABLE;
1284 hsw_write_dcomp(display, val);
1285 ndelay(100);
1286
1287 if (wait_for((hsw_read_dcomp(display) &
1288 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
1289 drm_err(display->drm, "D_COMP RCOMP still in progress\n");
1290
1291 if (allow_power_down) {
1292 intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
1293 intel_de_posting_read(display, LCPLL_CTL);
1294 }
1295 }
1296
1297 /*
1298 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1299 * source.
1300 */
hsw_restore_lcpll(struct intel_display * display)1301 static void hsw_restore_lcpll(struct intel_display *display)
1302 {
1303 struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
1304 u32 val;
1305
1306 val = intel_de_read(display, LCPLL_CTL);
1307
1308 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
1309 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
1310 return;
1311
1312 /*
1313 * Make sure we're not on PC8 state before disabling PC8, otherwise
1314 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
1315 */
1316 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1317
1318 if (val & LCPLL_POWER_DOWN_ALLOW) {
1319 val &= ~LCPLL_POWER_DOWN_ALLOW;
1320 intel_de_write(display, LCPLL_CTL, val);
1321 intel_de_posting_read(display, LCPLL_CTL);
1322 }
1323
1324 val = hsw_read_dcomp(display);
1325 val |= D_COMP_COMP_FORCE;
1326 val &= ~D_COMP_COMP_DISABLE;
1327 hsw_write_dcomp(display, val);
1328
1329 val = intel_de_read(display, LCPLL_CTL);
1330 val &= ~LCPLL_PLL_DISABLE;
1331 intel_de_write(display, LCPLL_CTL, val);
1332
1333 if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
1334 drm_err(display->drm, "LCPLL not locked yet\n");
1335
1336 if (val & LCPLL_CD_SOURCE_FCLK) {
1337 intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
1338
1339 if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
1340 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
1341 drm_err(display->drm,
1342 "Switching back to LCPLL failed\n");
1343 }
1344
1345 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1346
1347 intel_update_cdclk(display);
1348 intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
1349 }
1350
1351 /*
1352 * Package states C8 and deeper are really deep PC states that can only be
1353 * reached when all the devices on the system allow it, so even if the graphics
1354 * device allows PC8+, it doesn't mean the system will actually get to these
1355 * states. Our driver only allows PC8+ when going into runtime PM.
1356 *
1357 * The requirements for PC8+ are that all the outputs are disabled, the power
1358 * well is disabled and most interrupts are disabled, and these are also
1359 * requirements for runtime PM. When these conditions are met, we manually do
1360 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1361 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
1362 * hang the machine.
1363 *
1364 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1365 * the state of some registers, so when we come back from PC8+ we need to
1366 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1367 * need to take care of the registers kept by RC6. Notice that this happens even
1368 * if we don't put the device in PCI D3 state (which is what currently happens
1369 * because of the runtime PM support).
1370 *
1371 * For more, read "Display Sequences for Package C8" on the hardware
1372 * documentation.
1373 */
hsw_enable_pc8(struct intel_display * display)1374 static void hsw_enable_pc8(struct intel_display *display)
1375 {
1376 struct drm_i915_private *dev_priv = to_i915(display->drm);
1377
1378 drm_dbg_kms(display->drm, "Enabling package C8+\n");
1379
1380 if (HAS_PCH_LPT_LP(dev_priv))
1381 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1382 PCH_LP_PARTITION_LEVEL_DISABLE, 0);
1383
1384 lpt_disable_clkout_dp(dev_priv);
1385 hsw_disable_lcpll(display, true, true);
1386 }
1387
hsw_disable_pc8(struct intel_display * display)1388 static void hsw_disable_pc8(struct intel_display *display)
1389 {
1390 struct drm_i915_private *dev_priv = to_i915(display->drm);
1391
1392 drm_dbg_kms(display->drm, "Disabling package C8+\n");
1393
1394 hsw_restore_lcpll(display);
1395 intel_init_pch_refclk(dev_priv);
1396
1397 /* Many display registers don't survive PC8+ */
1398 #ifdef I915 /* FIXME */
1399 intel_clock_gating_init(dev_priv);
1400 #endif
1401 }
1402
intel_pch_reset_handshake(struct intel_display * display,bool enable)1403 static void intel_pch_reset_handshake(struct intel_display *display,
1404 bool enable)
1405 {
1406 i915_reg_t reg;
1407 u32 reset_bits;
1408
1409 if (display->platform.ivybridge) {
1410 reg = GEN7_MSG_CTL;
1411 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
1412 } else {
1413 reg = HSW_NDE_RSTWRN_OPT;
1414 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
1415 }
1416
1417 if (DISPLAY_VER(display) >= 14)
1418 reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
1419
1420 intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
1421 }
1422
skl_display_core_init(struct intel_display * display,bool resume)1423 static void skl_display_core_init(struct intel_display *display,
1424 bool resume)
1425 {
1426 struct drm_i915_private *dev_priv = to_i915(display->drm);
1427 struct i915_power_domains *power_domains = &display->power.domains;
1428 struct i915_power_well *well;
1429
1430 gen9_set_dc_state(display, DC_STATE_DISABLE);
1431
1432 /* enable PCH reset handshake */
1433 intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
1434
1435 if (!HAS_DISPLAY(display))
1436 return;
1437
1438 /* enable PG1 and Misc I/O */
1439 mutex_lock(&power_domains->lock);
1440
1441 well = lookup_power_well(display, SKL_DISP_PW_1);
1442 intel_power_well_enable(display, well);
1443
1444 well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
1445 intel_power_well_enable(display, well);
1446
1447 mutex_unlock(&power_domains->lock);
1448
1449 intel_cdclk_init_hw(display);
1450
1451 gen9_dbuf_enable(display);
1452
1453 if (resume)
1454 intel_dmc_load_program(display);
1455 }
1456
skl_display_core_uninit(struct intel_display * display)1457 static void skl_display_core_uninit(struct intel_display *display)
1458 {
1459 struct i915_power_domains *power_domains = &display->power.domains;
1460 struct i915_power_well *well;
1461
1462 if (!HAS_DISPLAY(display))
1463 return;
1464
1465 gen9_disable_dc_states(display);
1466 /* TODO: disable DMC program */
1467
1468 gen9_dbuf_disable(display);
1469
1470 intel_cdclk_uninit_hw(display);
1471
1472 /* The spec doesn't call for removing the reset handshake flag */
1473 /* disable PG1 and Misc I/O */
1474
1475 mutex_lock(&power_domains->lock);
1476
1477 /*
1478 * BSpec says to keep the MISC IO power well enabled here, only
1479 * remove our request for power well 1.
1480 * Note that even though the driver's request is removed power well 1
1481 * may stay enabled after this due to DMC's own request on it.
1482 */
1483 well = lookup_power_well(display, SKL_DISP_PW_1);
1484 intel_power_well_disable(display, well);
1485
1486 mutex_unlock(&power_domains->lock);
1487
1488 usleep_range(10, 30); /* 10 us delay per Bspec */
1489 }
1490
bxt_display_core_init(struct intel_display * display,bool resume)1491 static void bxt_display_core_init(struct intel_display *display, bool resume)
1492 {
1493 struct i915_power_domains *power_domains = &display->power.domains;
1494 struct i915_power_well *well;
1495
1496 gen9_set_dc_state(display, DC_STATE_DISABLE);
1497
1498 /*
1499 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
1500 * or else the reset will hang because there is no PCH to respond.
1501 * Move the handshake programming to initialization sequence.
1502 * Previously was left up to BIOS.
1503 */
1504 intel_pch_reset_handshake(display, false);
1505
1506 if (!HAS_DISPLAY(display))
1507 return;
1508
1509 /* Enable PG1 */
1510 mutex_lock(&power_domains->lock);
1511
1512 well = lookup_power_well(display, SKL_DISP_PW_1);
1513 intel_power_well_enable(display, well);
1514
1515 mutex_unlock(&power_domains->lock);
1516
1517 intel_cdclk_init_hw(display);
1518
1519 gen9_dbuf_enable(display);
1520
1521 if (resume)
1522 intel_dmc_load_program(display);
1523 }
1524
bxt_display_core_uninit(struct intel_display * display)1525 static void bxt_display_core_uninit(struct intel_display *display)
1526 {
1527 struct i915_power_domains *power_domains = &display->power.domains;
1528 struct i915_power_well *well;
1529
1530 if (!HAS_DISPLAY(display))
1531 return;
1532
1533 gen9_disable_dc_states(display);
1534 /* TODO: disable DMC program */
1535
1536 gen9_dbuf_disable(display);
1537
1538 intel_cdclk_uninit_hw(display);
1539
1540 /* The spec doesn't call for removing the reset handshake flag */
1541
1542 /*
1543 * Disable PW1 (PG1).
1544 * Note that even though the driver's request is removed power well 1
1545 * may stay enabled after this due to DMC's own request on it.
1546 */
1547 mutex_lock(&power_domains->lock);
1548
1549 well = lookup_power_well(display, SKL_DISP_PW_1);
1550 intel_power_well_disable(display, well);
1551
1552 mutex_unlock(&power_domains->lock);
1553
1554 usleep_range(10, 30); /* 10 us delay per Bspec */
1555 }
1556
1557 struct buddy_page_mask {
1558 u32 page_mask;
1559 u8 type;
1560 u8 num_channels;
1561 };
1562
1563 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1564 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
1565 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
1566 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1567 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1568 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
1569 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
1570 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1571 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1572 {}
1573 };
1574
1575 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1576 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1577 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
1578 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
1579 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1580 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1581 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
1582 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
1583 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1584 {}
1585 };
1586
tgl_bw_buddy_init(struct intel_display * display)1587 static void tgl_bw_buddy_init(struct intel_display *display)
1588 {
1589 struct drm_i915_private *dev_priv = to_i915(display->drm);
1590 enum intel_dram_type type = dev_priv->dram_info.type;
1591 u8 num_channels = dev_priv->dram_info.num_channels;
1592 const struct buddy_page_mask *table;
1593 unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
1594 int config, i;
1595
1596 /* BW_BUDDY registers are not used on dGPUs beyond DG1 */
1597 if (display->platform.dgfx && !display->platform.dg1)
1598 return;
1599
1600 if (display->platform.alderlake_s ||
1601 (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
1602 /* Wa_1409767108 */
1603 table = wa_1409767108_buddy_page_masks;
1604 else
1605 table = tgl_buddy_page_masks;
1606
1607 for (config = 0; table[config].page_mask != 0; config++)
1608 if (table[config].num_channels == num_channels &&
1609 table[config].type == type)
1610 break;
1611
1612 if (table[config].page_mask == 0) {
1613 drm_dbg_kms(display->drm,
1614 "Unknown memory configuration; disabling address buddy logic.\n");
1615 for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
1616 intel_de_write(display, BW_BUDDY_CTL(i),
1617 BW_BUDDY_DISABLE);
1618 } else {
1619 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
1620 intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
1621 table[config].page_mask);
1622
1623 /* Wa_22010178259:tgl,dg1,rkl,adl-s */
1624 if (DISPLAY_VER(display) == 12)
1625 intel_de_rmw(display, BW_BUDDY_CTL(i),
1626 BW_BUDDY_TLB_REQ_TIMER_MASK,
1627 BW_BUDDY_TLB_REQ_TIMER(0x8));
1628 }
1629 }
1630 }
1631
1632 static void icl_display_core_init(struct intel_display *display,
1633 bool resume)
1634 {
1635 struct drm_i915_private *dev_priv = to_i915(display->drm);
1636 struct i915_power_domains *power_domains = &display->power.domains;
1637 struct i915_power_well *well;
1638
1639 gen9_set_dc_state(display, DC_STATE_DISABLE);
1640
1641 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
1642 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
1643 INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
1644 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
1645 PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
1646
1647 /* 1. Enable PCH reset handshake. */
1648 intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
1649
1650 if (!HAS_DISPLAY(display))
1651 return;
1652
1653 /* 2. Initialize all combo phys */
1654 intel_combo_phy_init(display);
1655
1656 /*
1657 * 3. Enable Power Well 1 (PG1).
1658 * The AUX IO power wells will be enabled on demand.
1659 */
1660 mutex_lock(&power_domains->lock);
1661 well = lookup_power_well(display, SKL_DISP_PW_1);
1662 intel_power_well_enable(display, well);
1663 mutex_unlock(&power_domains->lock);
1664
1665 if (DISPLAY_VER(display) == 14)
1666 intel_de_rmw(display, DC_STATE_EN,
1667 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
1668
1669 /* 4. Enable CDCLK. */
1670 intel_cdclk_init_hw(display);
1671
1672 if (DISPLAY_VER(display) == 12 || display->platform.dg2)
1673 gen12_dbuf_slices_config(display);
1674
1675 /* 5. Enable DBUF. */
1676 gen9_dbuf_enable(display);
1677
1678 /* 6. Setup MBUS. */
1679 icl_mbus_init(display);
1680
1681 /* 7. Program arbiter BW_BUDDY registers */
1682 if (DISPLAY_VER(display) >= 12)
1683 tgl_bw_buddy_init(display);
1684
1685 /* 8. Ensure PHYs have completed calibration and adaptation */
1686 if (display->platform.dg2)
1687 intel_snps_phy_wait_for_calibration(display);
1688
1689 /* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
1690 if (DISPLAY_VERx100(display) == 1401)
1691 intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
1692
1693 if (resume)
1694 intel_dmc_load_program(display);
1695
1696 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
1697 if (IS_DISPLAY_VERx100(display, 1200, 1300))
1698 intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
1699 DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
1700 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
1701
1702 /* Wa_14011503030:xelpd */
1703 if (DISPLAY_VER(display) == 13)
1704 intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
1705
1706 /* Wa_15013987218 */
1707 if (DISPLAY_VER(display) == 20) {
1708 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1709 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
1710 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1711 PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
1712 }
1713 }
1714
1715 static void icl_display_core_uninit(struct intel_display *display)
1716 {
1717 struct i915_power_domains *power_domains = &display->power.domains;
1718 struct i915_power_well *well;
1719
1720 if (!HAS_DISPLAY(display))
1721 return;
1722
1723 gen9_disable_dc_states(display);
1724 intel_dmc_disable_program(display);
1725
1726 /* 1. Disable all display engine functions -> already done */
1727
1728 /* 2. Disable DBUF */
1729 gen9_dbuf_disable(display);
1730
1731 /* 3. Disable CD clock */
1732 intel_cdclk_uninit_hw(display);
1733
1734 if (DISPLAY_VER(display) == 14)
1735 intel_de_rmw(display, DC_STATE_EN, 0,
1736 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
1737
1738 /*
1739 * 4. Disable Power Well 1 (PG1).
1740 * The AUX IO power wells are toggled on demand, so they are already
1741 * disabled at this point.
1742 */
1743 mutex_lock(&power_domains->lock);
1744 well = lookup_power_well(display, SKL_DISP_PW_1);
1745 intel_power_well_disable(display, well);
1746 mutex_unlock(&power_domains->lock);
1747
1748 /* 5. Disable all combo phys */
1749 intel_combo_phy_uninit(display);
1750 }
1751
1752 static void chv_phy_control_init(struct intel_display *display)
1753 {
1754 struct i915_power_well *cmn_bc =
1755 lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
1756 struct i915_power_well *cmn_d =
1757 lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
1758
1759 /*
1760 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1761 * workaround never ever read DISPLAY_PHY_CONTROL, and
1762 * instead maintain a shadow copy ourselves. Use the actual
1763 * power well state and lane status to reconstruct the
1764 * expected initial value.
1765 */
1766 display->power.chv_phy_control =
1767 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1768 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1769 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1770 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1771 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1772
1773 /*
1774 * If all lanes are disabled we leave the override disabled
1775 * with all power down bits cleared to match the state we
1776 * would use after disabling the port. Otherwise enable the
1777 * override and set the lane powerdown bits according to the
1778 * current lane status.
1779 */
1780 if (intel_power_well_is_enabled(display, cmn_bc)) {
1781 u32 status = intel_de_read(display, DPLL(display, PIPE_A));
1782 unsigned int mask;
1783
1784 mask = status & DPLL_PORTB_READY_MASK;
1785 if (mask == 0xf)
1786 mask = 0x0;
1787 else
1788 display->power.chv_phy_control |=
1789 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1790
1791 display->power.chv_phy_control |=
1792 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
1793
1794 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
1795 if (mask == 0xf)
1796 mask = 0x0;
1797 else
1798 display->power.chv_phy_control |=
1799 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
1800
1801 display->power.chv_phy_control |=
1802 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
1803
1804 display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
1805
1806 display->power.chv_phy_assert[DPIO_PHY0] = false;
1807 } else {
1808 display->power.chv_phy_assert[DPIO_PHY0] = true;
1809 }
1810
1811 if (intel_power_well_is_enabled(display, cmn_d)) {
1812 u32 status = intel_de_read(display, DPIO_PHY_STATUS);
1813 unsigned int mask;
1814
1815 mask = status & DPLL_PORTD_READY_MASK;
1816
1817 if (mask == 0xf)
1818 mask = 0x0;
1819 else
1820 display->power.chv_phy_control |=
1821 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
1822
1823 display->power.chv_phy_control |=
1824 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
1825
1826 display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
1827
1828 display->power.chv_phy_assert[DPIO_PHY1] = false;
1829 } else {
1830 display->power.chv_phy_assert[DPIO_PHY1] = true;
1831 }
1832
1833 drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
1834 display->power.chv_phy_control);
1835
1836 /* Defer application of initial phy_control to enabling the powerwell */
1837 }
1838
1839 static void vlv_cmnlane_wa(struct intel_display *display)
1840 {
1841 struct i915_power_well *cmn =
1842 lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
1843 struct i915_power_well *disp2d =
1844 lookup_power_well(display, VLV_DISP_PW_DISP2D);
1845
1846 /* If the display might already be active, skip this */
1847 if (intel_power_well_is_enabled(display, cmn) &&
1848 intel_power_well_is_enabled(display, disp2d) &&
1849 intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
1850 return;
1851
1852 drm_dbg_kms(display->drm, "toggling display PHY side reset\n");
1853
1854 /* cmnlane needs DPLL registers */
1855 intel_power_well_enable(display, disp2d);
1856
1857 /*
1858 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1859 * Need to assert and de-assert PHY SB reset by gating the
1860 * common lane power, then un-gating it.
1861 * Simply ungating isn't sufficient to reset the PHY far enough to get
1862 * ports and lanes running.
1863 */
1864 intel_power_well_disable(display, cmn);
1865 }
1866
1867 static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
1868 {
1869 struct drm_i915_private *dev_priv = to_i915(display->drm);
1870 bool ret;
1871
1872 vlv_punit_get(dev_priv);
1873 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1874 vlv_punit_put(dev_priv);
1875
1876 return ret;
1877 }
1878
1879 static void assert_ved_power_gated(struct intel_display *display)
1880 {
1881 drm_WARN(display->drm,
1882 !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
1883 "VED not power gated\n");
1884 }
1885
1886 static void assert_isp_power_gated(struct intel_display *display)
1887 {
1888 static const struct pci_device_id isp_ids[] = {
1889 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
1890 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
1891 {}
1892 };
1893
1894 drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
1895 !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
1896 "ISP not power gated\n");
1897 }
1898
1899 static void intel_power_domains_verify_state(struct intel_display *display);
1900
1901 /**
1902 * intel_power_domains_init_hw - initialize hardware power domain state
1903 * @display: display device instance
1904 * @resume: Called from resume code paths or not
1905 *
1906 * This function initializes the hardware power domain state and enables all
1907 * power wells belonging to the INIT power domain. Power wells in other
1908 * domains (and not in the INIT domain) are referenced or disabled by
1909 * intel_modeset_readout_hw_state(). After that the reference count of each
1910 * power well must match its HW enabled state, see
1911 * intel_power_domains_verify_state().
1912 *
1913 * It will return with power domains disabled (to be enabled later by
1914 * intel_power_domains_enable()) and must be paired with
1915 * intel_power_domains_driver_remove().
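 *
 * An illustrative pairing of these entry points over the driver's lifetime
 * (a sketch of the ordering documented above, not a verbatim copy of the
 * i915 probe/remove paths):
 *
 *   intel_power_domains_init_hw(display, false);
 *   ... display HW state readout and sanitization ...
 *   intel_power_domains_enable(display);
 *   ... normal operation ...
 *   intel_power_domains_disable(display);
 *   intel_power_domains_driver_remove(display);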
1916 */
1917 void intel_power_domains_init_hw(struct intel_display *display, bool resume)
1918 {
1919 struct drm_i915_private *i915 = to_i915(display->drm);
1920 struct i915_power_domains *power_domains = &display->power.domains;
1921
1922 power_domains->initializing = true;
1923
1924 if (DISPLAY_VER(display) >= 11) {
1925 icl_display_core_init(display, resume);
1926 } else if (display->platform.geminilake || display->platform.broxton) {
1927 bxt_display_core_init(display, resume);
1928 } else if (DISPLAY_VER(display) == 9) {
1929 skl_display_core_init(display, resume);
1930 } else if (display->platform.cherryview) {
1931 mutex_lock(&power_domains->lock);
1932 chv_phy_control_init(display);
1933 mutex_unlock(&power_domains->lock);
1934 assert_isp_power_gated(display);
1935 } else if (display->platform.valleyview) {
1936 mutex_lock(&power_domains->lock);
1937 vlv_cmnlane_wa(display);
1938 mutex_unlock(&power_domains->lock);
1939 assert_ved_power_gated(display);
1940 assert_isp_power_gated(display);
1941 } else if (display->platform.broadwell || display->platform.haswell) {
1942 hsw_assert_cdclk(display);
1943 intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
1944 } else if (display->platform.ivybridge) {
1945 intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
1946 }
1947
1948 /*
1949 * Keep all power wells enabled for any dependent HW access during
1950 * initialization and to make sure we keep BIOS-enabled display HW
1951 * resources powered until display HW readout is complete. We drop
1952 * this reference in intel_power_domains_enable().
1953 */
1954 drm_WARN_ON(display->drm, power_domains->init_wakeref);
1955 power_domains->init_wakeref =
1956 intel_display_power_get(display, POWER_DOMAIN_INIT);
1957
1958 /* Disable power well support if the user asked for it. */
1959 if (!display->params.disable_power_well) {
1960 drm_WARN_ON(display->drm, power_domains->disable_wakeref);
1961 display->power.domains.disable_wakeref = intel_display_power_get(display,
1962 POWER_DOMAIN_INIT);
1963 }
1964 intel_power_domains_sync_hw(display);
1965
1966 power_domains->initializing = false;
1967 }
1968
1969 /**
1970 * intel_power_domains_driver_remove - deinitialize hw power domain state
1971 * @display: display device instance
1972 *
1973 * De-initializes the display power domain HW state. It also ensures that the
1974 * device stays powered up so that the driver can be reloaded.
1975 *
1976 * It must be called with power domains already disabled (after a call to
1977 * intel_power_domains_disable()) and must be paired with
1978 * intel_power_domains_init_hw().
1979 */
1980 void intel_power_domains_driver_remove(struct intel_display *display)
1981 {
1982 struct drm_i915_private *i915 = to_i915(display->drm);
1983 intel_wakeref_t wakeref __maybe_unused =
1984 fetch_and_zero(&display->power.domains.init_wakeref);
1985
1986 /* Remove the refcount we took to keep power well support disabled. */
1987 if (!display->params.disable_power_well)
1988 intel_display_power_put(display, POWER_DOMAIN_INIT,
1989 fetch_and_zero(&display->power.domains.disable_wakeref));
1990
1991 intel_display_power_flush_work_sync(display);
1992
1993 intel_power_domains_verify_state(display);
1994
1995 /* Keep the power well enabled, but cancel its rpm wakeref. */
1996 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1997 }
1998
1999 /**
2000 * intel_power_domains_sanitize_state - sanitize power domains state
2001 * @display: display device instance
2002 *
2003 * Sanitize the power domains state during driver loading and system resume.
2004 * The function will disable all display power wells that BIOS has enabled
2005 * without a user for them (any user of a power well has taken a reference
2006 * on it by the time this function is called, after the state of all the
2007 * pipe, encoder, etc. HW resources has been sanitized).
2008 */
2009 void intel_power_domains_sanitize_state(struct intel_display *display)
2010 {
2011 struct i915_power_domains *power_domains = &display->power.domains;
2012 struct i915_power_well *power_well;
2013
2014 mutex_lock(&power_domains->lock);
2015
2016 for_each_power_well_reverse(display, power_well) {
2017 if (power_well->desc->always_on || power_well->count ||
2018 !intel_power_well_is_enabled(display, power_well))
2019 continue;
2020
2021 drm_dbg_kms(display->drm,
2022 "BIOS left unused %s power well enabled, disabling it\n",
2023 intel_power_well_name(power_well));
2024 intel_power_well_disable(display, power_well);
2025 }
2026
2027 mutex_unlock(&power_domains->lock);
2028 }
2029
2030 /**
2031 * intel_power_domains_enable - enable toggling of display power wells
2032 * @display: display device instance
2033 *
2034 * Enable the on-demand enabling/disabling of the display power wells. Note that
2035 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2036 * only at specific points of the display modeset sequence, thus they are not
2037 * affected by the intel_power_domains_enable()/disable() calls. The purpose
2038 * of these functions is to keep the rest of the power wells enabled until the
2039 * end of display HW readout (which will acquire the power references reflecting
2040 * the current HW state).
2041 */
2042 void intel_power_domains_enable(struct intel_display *display)
2043 {
2044 intel_wakeref_t wakeref __maybe_unused =
2045 fetch_and_zero(&display->power.domains.init_wakeref);
2046
2047 intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
2048 intel_power_domains_verify_state(display);
2049 }
2050
2051 /**
2052 * intel_power_domains_disable - disable toggling of display power wells
2053 * @display: display device instance
2054 *
2055 * Disable the on-demand enabling/disabling of the display power wells. See
2056 * intel_power_domains_enable() for which power wells this call controls.
2057 */
2058 void intel_power_domains_disable(struct intel_display *display)
2059 {
2060 struct i915_power_domains *power_domains = &display->power.domains;
2061
2062 drm_WARN_ON(display->drm, power_domains->init_wakeref);
2063 power_domains->init_wakeref =
2064 intel_display_power_get(display, POWER_DOMAIN_INIT);
2065
2066 intel_power_domains_verify_state(display);
2067 }
2068
2069 /**
2070 * intel_power_domains_suspend - suspend power domain state
2071 * @display: display device instance
2072 * @s2idle: specifies whether we go to idle, or deeper sleep
2073 *
2074 * This function prepares the hardware power domain state before entering
2075 * system suspend.
2076 *
2077 * It must be called with power domains already disabled (after a call to
2078 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
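 *
 * Sketch of the system suspend/resume ordering implied by the pairing rules
 * above (illustrative only):
 *
 *   intel_power_domains_disable(display);
 *   intel_power_domains_suspend(display, s2idle);
 *   ... system sleep ...
 *   intel_power_domains_resume(display);
 *   intel_power_domains_enable(display);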
2079 */
2080 void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
2081 {
2082 struct i915_power_domains *power_domains = &display->power.domains;
2083 intel_wakeref_t wakeref __maybe_unused =
2084 fetch_and_zero(&power_domains->init_wakeref);
2085
2086 intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
2087
2088 /*
2089 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2090 * support, don't manually deinit the power domains. This also means the
2091 * DMC firmware will stay active; it will power down any HW
2092 * resources as required and also enable deeper system power states
2093 * that would be blocked if the firmware was inactive.
2094 */
2095 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
2096 intel_dmc_has_payload(display)) {
2097 intel_display_power_flush_work(display);
2098 intel_power_domains_verify_state(display);
2099 return;
2100 }
2101
2102 /*
2103 * Even if power well support was disabled, we still want to disable the
2104 * power wells if the power domains must be deinitialized for suspend.
2105 */
2106 if (!display->params.disable_power_well)
2107 intel_display_power_put(display, POWER_DOMAIN_INIT,
2108 fetch_and_zero(&display->power.domains.disable_wakeref));
2109
2110 intel_display_power_flush_work(display);
2111 intel_power_domains_verify_state(display);
2112
2113 if (DISPLAY_VER(display) >= 11)
2114 icl_display_core_uninit(display);
2115 else if (display->platform.geminilake || display->platform.broxton)
2116 bxt_display_core_uninit(display);
2117 else if (DISPLAY_VER(display) == 9)
2118 skl_display_core_uninit(display);
2119
2120 power_domains->display_core_suspended = true;
2121 }
2122
2123 /**
2124 * intel_power_domains_resume - resume power domain state
2125 * @display: display device instance
2126 *
2127 * This function resumes the hardware power domain state during system resume.
2128 *
2129 * It will return with power domain support disabled (to be enabled later by
2130 * intel_power_domains_enable()) and must be paired with
2131 * intel_power_domains_suspend().
2132 */
2133 void intel_power_domains_resume(struct intel_display *display)
2134 {
2135 struct i915_power_domains *power_domains = &display->power.domains;
2136
2137 if (power_domains->display_core_suspended) {
2138 intel_power_domains_init_hw(display, true);
2139 power_domains->display_core_suspended = false;
2140 } else {
2141 drm_WARN_ON(display->drm, power_domains->init_wakeref);
2142 power_domains->init_wakeref =
2143 intel_display_power_get(display, POWER_DOMAIN_INIT);
2144 }
2145
2146 intel_power_domains_verify_state(display);
2147 }
2148
2149 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2150
2151 static void intel_power_domains_dump_info(struct intel_display *display)
2152 {
2153 struct i915_power_domains *power_domains = &display->power.domains;
2154 struct i915_power_well *power_well;
2155
2156 for_each_power_well(display, power_well) {
2157 enum intel_display_power_domain domain;
2158
2159 drm_dbg_kms(display->drm, "%-25s %d\n",
2160 intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2161
2162 for_each_power_domain(domain, intel_power_well_domains(power_well))
2163 drm_dbg_kms(display->drm, " %-23s %d\n",
2164 intel_display_power_domain_str(domain),
2165 power_domains->domain_use_count[domain]);
2166 }
2167 }
2168
2169 /**
2170 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2171 * @display: display device instance
2172 *
2173 * Verify if the reference count of each power well matches its HW enabled
2174 * state and the total refcount of the domains it belongs to. This must be
2175 * called after modeset HW state sanitization, which is responsible for
2176 * acquiring reference counts for any power wells in use and disabling the
2177 * ones left on by BIOS but not required by any active output.
2178 */
2179 static void intel_power_domains_verify_state(struct intel_display *display)
2180 {
2181 struct i915_power_domains *power_domains = &display->power.domains;
2182 struct i915_power_well *power_well;
2183 bool dump_domain_info;
2184
2185 mutex_lock(&power_domains->lock);
2186
2187 verify_async_put_domains_state(power_domains);
2188
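	/*
	 * Two invariants are checked for every power well: its HW enabled
	 * state must match "refcount != 0 || always-on", and its refcount
	 * must equal the sum of the use counts of all domains it belongs to.
	 */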
2189 dump_domain_info = false;
2190 for_each_power_well(display, power_well) {
2191 enum intel_display_power_domain domain;
2192 int domains_count;
2193 bool enabled;
2194
2195 enabled = intel_power_well_is_enabled(display, power_well);
2196 if ((intel_power_well_refcount(power_well) ||
2197 intel_power_well_is_always_on(power_well)) !=
2198 enabled)
2199 drm_err(display->drm,
2200 "power well %s state mismatch (refcount %d/enabled %d)",
2201 intel_power_well_name(power_well),
2202 intel_power_well_refcount(power_well), enabled);
2203
2204 domains_count = 0;
2205 for_each_power_domain(domain, intel_power_well_domains(power_well))
2206 domains_count += power_domains->domain_use_count[domain];
2207
2208 if (intel_power_well_refcount(power_well) != domains_count) {
2209 drm_err(display->drm,
2210 "power well %s refcount/domain refcount mismatch "
2211 "(refcount %d/domains refcount %d)\n",
2212 intel_power_well_name(power_well),
2213 intel_power_well_refcount(power_well),
2214 domains_count);
2215 dump_domain_info = true;
2216 }
2217 }
2218
2219 if (dump_domain_info) {
2220 static bool dumped;
2221
2222 if (!dumped) {
2223 intel_power_domains_dump_info(display);
2224 dumped = true;
2225 }
2226 }
2227
2228 mutex_unlock(&power_domains->lock);
2229 }
2230
2231 #else
2232
2233 static void intel_power_domains_verify_state(struct intel_display *display)
2234 {
2235 }
2236
2237 #endif
2238
2239 void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
2240 {
2241 struct drm_i915_private *i915 = to_i915(display->drm);
2242
2243 intel_power_domains_suspend(display, s2idle);
2244
2245 if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
2246 display->platform.broxton) {
2247 bxt_enable_dc9(display);
2248 } else if (display->platform.haswell || display->platform.broadwell) {
2249 hsw_enable_pc8(display);
2250 }
2251
2252 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2253 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2254 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2255 }
2256
2257 void intel_display_power_resume_early(struct intel_display *display)
2258 {
2259 struct drm_i915_private *i915 = to_i915(display->drm);
2260
2261 if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
2262 display->platform.broxton) {
2263 gen9_sanitize_dc_state(display);
2264 bxt_disable_dc9(display);
2265 } else if (display->platform.haswell || display->platform.broadwell) {
2266 hsw_disable_pc8(display);
2267 }
2268
2269 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2270 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2271 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2272
2273 intel_power_domains_resume(display);
2274 }
2275
2276 void intel_display_power_suspend(struct intel_display *display)
2277 {
2278 if (DISPLAY_VER(display) >= 11) {
2279 icl_display_core_uninit(display);
2280 bxt_enable_dc9(display);
2281 } else if (display->platform.geminilake || display->platform.broxton) {
2282 bxt_display_core_uninit(display);
2283 bxt_enable_dc9(display);
2284 } else if (display->platform.haswell || display->platform.broadwell) {
2285 hsw_enable_pc8(display);
2286 }
2287 }
2288
2289 void intel_display_power_resume(struct intel_display *display)
2290 {
2291 struct i915_power_domains *power_domains = &display->power.domains;
2292
2293 if (DISPLAY_VER(display) >= 11) {
2294 bxt_disable_dc9(display);
2295 icl_display_core_init(display, true);
2296 if (intel_dmc_has_payload(display)) {
2297 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
2298 skl_enable_dc6(display);
2299 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
2300 gen9_enable_dc5(display);
2301 }
2302 } else if (display->platform.geminilake || display->platform.broxton) {
2303 bxt_disable_dc9(display);
2304 bxt_display_core_init(display, true);
2305 if (intel_dmc_has_payload(display) &&
2306 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2307 gen9_enable_dc5(display);
2308 } else if (display->platform.haswell || display->platform.broadwell) {
2309 hsw_disable_pc8(display);
2310 }
2311 }
2312
2313 void intel_display_power_debug(struct intel_display *display, struct seq_file *m)
2314 {
2315 struct i915_power_domains *power_domains = &display->power.domains;
2316 int i;
2317
2318 mutex_lock(&power_domains->lock);
2319
2320 seq_printf(m, "Runtime power status: %s\n",
2321 str_enabled_disabled(!power_domains->init_wakeref));
2322
2323 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2324 for (i = 0; i < power_domains->power_well_count; i++) {
2325 struct i915_power_well *power_well;
2326 enum intel_display_power_domain power_domain;
2327
2328 power_well = &power_domains->power_wells[i];
2329 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
2330 intel_power_well_refcount(power_well));
2331
2332 for_each_power_domain(power_domain, intel_power_well_domains(power_well))
2333 seq_printf(m, " %-23s %d\n",
2334 intel_display_power_domain_str(power_domain),
2335 power_domains->domain_use_count[power_domain]);
2336 }
2337
2338 mutex_unlock(&power_domains->lock);
2339 }
2340
2341 struct intel_ddi_port_domains {
2342 enum port port_start;
2343 enum port port_end;
2344 enum aux_ch aux_ch_start;
2345 enum aux_ch aux_ch_end;
2346
2347 enum intel_display_power_domain ddi_lanes;
2348 enum intel_display_power_domain ddi_io;
2349 enum intel_display_power_domain aux_io;
2350 enum intel_display_power_domain aux_legacy_usbc;
2351 enum intel_display_power_domain aux_tbt;
2352 };
2353
2354 static const struct intel_ddi_port_domains
2355 i9xx_port_domains[] = {
2356 {
2357 .port_start = PORT_A,
2358 .port_end = PORT_F,
2359 .aux_ch_start = AUX_CH_A,
2360 .aux_ch_end = AUX_CH_F,
2361
2362 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2363 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2364 .aux_io = POWER_DOMAIN_AUX_IO_A,
2365 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2366 .aux_tbt = POWER_DOMAIN_INVALID,
2367 },
2368 };
2369
2370 static const struct intel_ddi_port_domains
2371 d11_port_domains[] = {
2372 {
2373 .port_start = PORT_A,
2374 .port_end = PORT_B,
2375 .aux_ch_start = AUX_CH_A,
2376 .aux_ch_end = AUX_CH_B,
2377
2378 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2379 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2380 .aux_io = POWER_DOMAIN_AUX_IO_A,
2381 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2382 .aux_tbt = POWER_DOMAIN_INVALID,
2383 }, {
2384 .port_start = PORT_C,
2385 .port_end = PORT_F,
2386 .aux_ch_start = AUX_CH_C,
2387 .aux_ch_end = AUX_CH_F,
2388
2389 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
2390 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
2391 .aux_io = POWER_DOMAIN_AUX_IO_C,
2392 .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
2393 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2394 },
2395 };
2396
2397 static const struct intel_ddi_port_domains
2398 d12_port_domains[] = {
2399 {
2400 .port_start = PORT_A,
2401 .port_end = PORT_C,
2402 .aux_ch_start = AUX_CH_A,
2403 .aux_ch_end = AUX_CH_C,
2404
2405 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2406 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2407 .aux_io = POWER_DOMAIN_AUX_IO_A,
2408 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2409 .aux_tbt = POWER_DOMAIN_INVALID,
2410 }, {
2411 .port_start = PORT_TC1,
2412 .port_end = PORT_TC6,
2413 .aux_ch_start = AUX_CH_USBC1,
2414 .aux_ch_end = AUX_CH_USBC6,
2415
2416 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2417 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2418 .aux_io = POWER_DOMAIN_INVALID,
2419 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2420 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2421 },
2422 };
2423
2424 static const struct intel_ddi_port_domains
2425 d13_port_domains[] = {
2426 {
2427 .port_start = PORT_A,
2428 .port_end = PORT_C,
2429 .aux_ch_start = AUX_CH_A,
2430 .aux_ch_end = AUX_CH_C,
2431
2432 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2433 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2434 .aux_io = POWER_DOMAIN_AUX_IO_A,
2435 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2436 .aux_tbt = POWER_DOMAIN_INVALID,
2437 }, {
2438 .port_start = PORT_TC1,
2439 .port_end = PORT_TC4,
2440 .aux_ch_start = AUX_CH_USBC1,
2441 .aux_ch_end = AUX_CH_USBC4,
2442
2443 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2444 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2445 .aux_io = POWER_DOMAIN_INVALID,
2446 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2447 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2448 }, {
2449 .port_start = PORT_D_XELPD,
2450 .port_end = PORT_E_XELPD,
2451 .aux_ch_start = AUX_CH_D_XELPD,
2452 .aux_ch_end = AUX_CH_E_XELPD,
2453
2454 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
2455 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
2456 .aux_io = POWER_DOMAIN_AUX_IO_D,
2457 .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
2458 .aux_tbt = POWER_DOMAIN_INVALID,
2459 },
2460 };
2461
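/*
 * Each table above maps contiguous [port_start, port_end] and
 * [aux_ch_start, aux_ch_end] ranges onto contiguous power domain enum ranges.
 * The helpers below locate the entry covering a given port/AUX CH and compute
 * the domain as base + offset within the range, e.g. on a display version 11
 * platform PORT_E resolves to POWER_DOMAIN_PORT_DDI_LANES_C + (PORT_E - PORT_C)
 * == POWER_DOMAIN_PORT_DDI_LANES_E.
 */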
2462 static void
2463 intel_port_domains_for_platform(struct intel_display *display,
2464 const struct intel_ddi_port_domains **domains,
2465 int *domains_size)
2466 {
2467 if (DISPLAY_VER(display) >= 13) {
2468 *domains = d13_port_domains;
2469 *domains_size = ARRAY_SIZE(d13_port_domains);
2470 } else if (DISPLAY_VER(display) >= 12) {
2471 *domains = d12_port_domains;
2472 *domains_size = ARRAY_SIZE(d12_port_domains);
2473 } else if (DISPLAY_VER(display) >= 11) {
2474 *domains = d11_port_domains;
2475 *domains_size = ARRAY_SIZE(d11_port_domains);
2476 } else {
2477 *domains = i9xx_port_domains;
2478 *domains_size = ARRAY_SIZE(i9xx_port_domains);
2479 }
2480 }
2481
2482 static const struct intel_ddi_port_domains *
2483 intel_port_domains_for_port(struct intel_display *display, enum port port)
2484 {
2485 const struct intel_ddi_port_domains *domains;
2486 int domains_size;
2487 int i;
2488
2489 intel_port_domains_for_platform(display, &domains, &domains_size);
2490 for (i = 0; i < domains_size; i++)
2491 if (port >= domains[i].port_start && port <= domains[i].port_end)
2492 return &domains[i];
2493
2494 return NULL;
2495 }
2496
2497 enum intel_display_power_domain
2498 intel_display_power_ddi_io_domain(struct intel_display *display, enum port port)
2499 {
2500 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
2501
2502 if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
2503 return POWER_DOMAIN_PORT_DDI_IO_A;
2504
2505 return domains->ddi_io + (int)(port - domains->port_start);
2506 }
2507
2508 enum intel_display_power_domain
2509 intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port)
2510 {
2511 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
2512
2513 if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
2514 return POWER_DOMAIN_PORT_DDI_LANES_A;
2515
2516 return domains->ddi_lanes + (int)(port - domains->port_start);
2517 }
2518
2519 static const struct intel_ddi_port_domains *
2520 intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
2521 {
2522 const struct intel_ddi_port_domains *domains;
2523 int domains_size;
2524 int i;
2525
2526 intel_port_domains_for_platform(display, &domains, &domains_size);
2527 for (i = 0; i < domains_size; i++)
2528 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
2529 return &domains[i];
2530
2531 return NULL;
2532 }
2533
2534 enum intel_display_power_domain
2535 intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch)
2536 {
2537 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
2538
2539 if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
2540 return POWER_DOMAIN_AUX_IO_A;
2541
2542 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
2543 }
2544
2545 enum intel_display_power_domain
2546 intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
2547 {
2548 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
2549
2550 if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
2551 return POWER_DOMAIN_AUX_A;
2552
2553 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
2554 }
2555
2556 enum intel_display_power_domain
2557 intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
2558 {
2559 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
2560
2561 if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
2562 return POWER_DOMAIN_AUX_TBT1;
2563
2564 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
2565 }
2566