1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include <linux/debugfs.h>
7
8 #include "g4x_dp.h"
9 #include "i915_drv.h"
10 #include "i915_reg.h"
11 #include "intel_de.h"
12 #include "intel_display_power_well.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_dpio_phy.h"
16 #include "intel_dpll.h"
17 #include "intel_lvds.h"
18 #include "intel_lvds_regs.h"
19 #include "intel_pps.h"
20 #include "intel_pps_regs.h"
21 #include "intel_quirks.h"
22
23 static void vlv_steal_power_sequencer(struct intel_display *display,
24 enum pipe pipe);
25
26 static void pps_init_delays(struct intel_dp *intel_dp);
27 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
28
29 static const char *pps_name(struct intel_dp *intel_dp)
30 {
31 struct intel_display *display = to_intel_display(intel_dp);
32 struct intel_pps *pps = &intel_dp->pps;
33
34 if (display->platform.valleyview || display->platform.cherryview) {
35 switch (pps->vlv_pps_pipe) {
36 case INVALID_PIPE:
37 /*
38 * FIXME would be nice if we could guarantee
39 * to always have a valid PPS when calling this.
40 */
41 return "PPS <none>";
42 case PIPE_A:
43 return "PPS A";
44 case PIPE_B:
45 return "PPS B";
46 default:
47 MISSING_CASE(pps->vlv_pps_pipe);
48 break;
49 }
50 } else {
51 switch (pps->pps_idx) {
52 case 0:
53 return "PPS 0";
54 case 1:
55 return "PPS 1";
56 default:
57 MISSING_CASE(pps->pps_idx);
58 break;
59 }
60 }
61
62 return "PPS <invalid>";
63 }
64
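/*
 * Grab a display core power reference and the PPS mutex. The returned
 * wakeref must be handed back to intel_pps_unlock().
 */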
65 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
66 {
67 struct intel_display *display = to_intel_display(intel_dp);
68 intel_wakeref_t wakeref;
69
70 /*
71 * See vlv_pps_reset_all() for why we need a power domain reference here.
72 */
73 wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
74 mutex_lock(&display->pps.mutex);
75
76 return wakeref;
77 }
78
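/*
 * Counterpart to intel_pps_lock(): drop the PPS mutex and release the
 * display core power reference.
 */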
79 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
80 intel_wakeref_t wakeref)
81 {
82 struct intel_display *display = to_intel_display(intel_dp);
83
84 mutex_unlock(&display->pps.mutex);
85 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
86
87 return NULL;
88 }
89
90 static void
91 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
92 {
93 struct intel_display *display = to_intel_display(intel_dp);
94 struct drm_i915_private *dev_priv = to_i915(display->drm);
95 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
96 enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
97 bool pll_enabled, release_cl_override = false;
98 enum dpio_phy phy = vlv_pipe_to_phy(pipe);
99 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
100 u32 DP;
101
102 if (drm_WARN(display->drm,
103 intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
104 "skipping %s kick due to [ENCODER:%d:%s] being active\n",
105 pps_name(intel_dp),
106 dig_port->base.base.base.id, dig_port->base.base.name))
107 return;
108
109 drm_dbg_kms(display->drm,
110 "kicking %s for [ENCODER:%d:%s]\n",
111 pps_name(intel_dp),
112 dig_port->base.base.base.id, dig_port->base.base.name);
113
114 /* Preserve the BIOS-computed detected bit. This is
115 * supposed to be read-only.
116 */
117 DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
118 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
119 DP |= DP_PORT_WIDTH(1);
120 DP |= DP_LINK_TRAIN_PAT_1;
121
122 if (display->platform.cherryview)
123 DP |= DP_PIPE_SEL_CHV(pipe);
124 else
125 DP |= DP_PIPE_SEL(pipe);
126
127 pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
128
129 /*
130 * The DPLL for the pipe must be enabled for this to work.
131 * So enable it temporarily if it's not already enabled.
132 */
133 if (!pll_enabled) {
134 release_cl_override = display->platform.cherryview &&
135 !chv_phy_powergate_ch(display, phy, ch, true);
136
137 if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
138 drm_err(display->drm,
139 "Failed to force on PLL for pipe %c!\n",
140 pipe_name(pipe));
141 return;
142 }
143 }
144
145 /*
146 * Similar magic as in intel_dp_enable_port().
147 * We _must_ do this port enable + disable trick
148 * to make this power sequencer lock onto the port.
149 * Otherwise even VDD force bit won't work.
150 */
151 intel_de_write(display, intel_dp->output_reg, DP);
152 intel_de_posting_read(display, intel_dp->output_reg);
153
154 intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
155 intel_de_posting_read(display, intel_dp->output_reg);
156
157 intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
158 intel_de_posting_read(display, intel_dp->output_reg);
159
160 if (!pll_enabled) {
161 vlv_force_pll_off(dev_priv, pipe);
162
163 if (release_cl_override)
164 chv_phy_powergate_ch(display, phy, ch, false);
165 }
166 }
167
168 static enum pipe vlv_find_free_pps(struct intel_display *display)
169 {
170 struct intel_encoder *encoder;
171 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
172
173 /*
174 * We don't have a power sequencer currently.
175 * Pick one that's not used by other ports.
176 */
177 for_each_intel_dp(display->drm, encoder) {
178 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
179
180 if (encoder->type == INTEL_OUTPUT_EDP) {
181 drm_WARN_ON(display->drm,
182 intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
183 intel_dp->pps.vlv_active_pipe !=
184 intel_dp->pps.vlv_pps_pipe);
185
186 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
187 pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
188 } else {
189 drm_WARN_ON(display->drm,
190 intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);
191
192 if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
193 pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
194 }
195 }
196
197 if (pipes == 0)
198 return INVALID_PIPE;
199
200 return ffs(pipes) - 1;
201 }
202
203 static enum pipe
204 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
205 {
206 struct intel_display *display = to_intel_display(intel_dp);
207 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
208 enum pipe pipe;
209
210 lockdep_assert_held(&display->pps.mutex);
211
212 /* We should never land here with regular DP ports */
213 drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
214
215 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
216 intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);
217
218 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
219 return intel_dp->pps.vlv_pps_pipe;
220
221 pipe = vlv_find_free_pps(display);
222
223 /*
224 * Didn't find one. This should not happen since there
225 * are two power sequencers and up to two eDP ports.
226 */
227 if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
228 pipe = PIPE_A;
229
230 vlv_steal_power_sequencer(display, pipe);
231 intel_dp->pps.vlv_pps_pipe = pipe;
232
233 drm_dbg_kms(display->drm,
234 "picked %s for [ENCODER:%d:%s]\n",
235 pps_name(intel_dp),
236 dig_port->base.base.base.id, dig_port->base.base.name);
237
238 /* init power sequencer on this pipe and port */
239 pps_init_delays(intel_dp);
240 pps_init_registers(intel_dp, true);
241
242 /*
243 * Even vdd force doesn't work until we've made
244 * the power sequencer lock in on the port.
245 */
246 vlv_power_sequencer_kick(intel_dp);
247
248 return intel_dp->pps.vlv_pps_pipe;
249 }
250
251 static int
252 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
253 {
254 struct intel_display *display = to_intel_display(intel_dp);
255 int pps_idx = intel_dp->pps.pps_idx;
256
257 lockdep_assert_held(&display->pps.mutex);
258
259 /* We should never land here with regular DP ports */
260 drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
261
262 if (!intel_dp->pps.bxt_pps_reset)
263 return pps_idx;
264
265 intel_dp->pps.bxt_pps_reset = false;
266
267 /*
268 * Only the HW needs to be reprogrammed, the SW state is fixed and
269 * has been set up during connector init.
270 */
271 pps_init_registers(intel_dp, false);
272
273 return pps_idx;
274 }
275
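/*
 * Predicates used when probing for the power sequencer instance the BIOS
 * left enabled: panel power on, VDD force on, or any instance at all.
 */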
276 typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
277
278 static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
279 {
280 return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
281 }
282
283 static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
284 {
285 return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
286 }
287
288 static bool pps_any(struct intel_display *display, int pps_idx)
289 {
290 return true;
291 }
292
293 static enum pipe
294 vlv_initial_pps_pipe(struct intel_display *display,
295 enum port port, pps_check check)
296 {
297 enum pipe pipe;
298
299 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
300 u32 port_sel = intel_de_read(display,
301 PP_ON_DELAYS(display, pipe)) &
302 PANEL_PORT_SELECT_MASK;
303
304 if (port_sel != PANEL_PORT_SELECT_VLV(port))
305 continue;
306
307 if (!check(display, pipe))
308 continue;
309
310 return pipe;
311 }
312
313 return INVALID_PIPE;
314 }
315
316 static void
317 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
318 {
319 struct intel_display *display = to_intel_display(intel_dp);
320 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
321 enum port port = dig_port->base.port;
322
323 lockdep_assert_held(&display->pps.mutex);
324
325 /* try to find a pipe with this port selected */
326 /* first pick one where the panel is on */
327 intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
328 pps_has_pp_on);
329 /* didn't find one? pick one where vdd is on */
330 if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
331 intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
332 pps_has_vdd_on);
333 /* didn't find one? pick one with just the correct port */
334 if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
335 intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
336 pps_any);
337
338 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
339 if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
340 drm_dbg_kms(display->drm,
341 "[ENCODER:%d:%s] no initial power sequencer\n",
342 dig_port->base.base.base.id, dig_port->base.base.name);
343 return;
344 }
345
346 drm_dbg_kms(display->drm,
347 "[ENCODER:%d:%s] initial power sequencer: %s\n",
348 dig_port->base.base.base.id, dig_port->base.base.name,
349 pps_name(intel_dp));
350 }
351
352 static int intel_num_pps(struct intel_display *display)
353 {
354 struct drm_i915_private *i915 = to_i915(display->drm);
355
356 if (display->platform.valleyview || display->platform.cherryview)
357 return 2;
358
359 if (display->platform.geminilake || display->platform.broxton)
360 return 2;
361
362 if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
363 return 2;
364
365 if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
366 return 1;
367
368 if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
369 return 2;
370
371 return 1;
372 }
373
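/*
 * On ICP..ADP PCHs the second PPS instance is only usable when
 * ICP_SECOND_PPS_IO_SELECT is set in SOUTH_CHICKEN1.
 */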
374 static bool intel_pps_is_valid(struct intel_dp *intel_dp)
375 {
376 struct intel_display *display = to_intel_display(intel_dp);
377 struct drm_i915_private *i915 = to_i915(display->drm);
378
379 if (intel_dp->pps.pps_idx == 1 &&
380 INTEL_PCH_TYPE(i915) >= PCH_ICP &&
381 INTEL_PCH_TYPE(i915) <= PCH_ADP)
382 return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
383
384 return true;
385 }
386
387 static int
388 bxt_initial_pps_idx(struct intel_display *display, pps_check check)
389 {
390 int pps_idx, pps_num = intel_num_pps(display);
391
392 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
393 if (check(display, pps_idx))
394 return pps_idx;
395 }
396
397 return -1;
398 }
399
400 static bool
401 pps_initial_setup(struct intel_dp *intel_dp)
402 {
403 struct intel_display *display = to_intel_display(intel_dp);
404 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
405 struct intel_connector *connector = intel_dp->attached_connector;
406
407 lockdep_assert_held(&display->pps.mutex);
408
409 if (display->platform.valleyview || display->platform.cherryview) {
410 vlv_initial_power_sequencer_setup(intel_dp);
411 return true;
412 }
413
414 /* first ask the VBT */
415 if (intel_num_pps(display) > 1)
416 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
417 else
418 intel_dp->pps.pps_idx = 0;
419
420 if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
421 intel_dp->pps.pps_idx = -1;
422
423 /* VBT wasn't parsed yet? pick one where the panel is on */
424 if (intel_dp->pps.pps_idx < 0)
425 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
426 /* didn't find one? pick one where vdd is on */
427 if (intel_dp->pps.pps_idx < 0)
428 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
429 /* didn't find one? pick any */
430 if (intel_dp->pps.pps_idx < 0) {
431 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
432
433 drm_dbg_kms(display->drm,
434 "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
435 encoder->base.base.id, encoder->base.name,
436 pps_name(intel_dp));
437 } else {
438 drm_dbg_kms(display->drm,
439 "[ENCODER:%d:%s] initial power sequencer: %s\n",
440 encoder->base.base.id, encoder->base.name,
441 pps_name(intel_dp));
442 }
443
444 return intel_pps_is_valid(intel_dp);
445 }
446
447 void vlv_pps_reset_all(struct intel_display *display)
448 {
449 struct intel_encoder *encoder;
450
451 if (!HAS_DISPLAY(display))
452 return;
453
454 /*
455 * We can't grab pps_mutex here due to deadlock with power_domain
456 * mutex when power_domain functions are called while holding pps_mutex.
457 * That also means that in order to use vlv_pps_pipe the code needs to
458 * hold both a power domain reference and pps_mutex, and the power domain
459 * reference get/put must be done while _not_ holding pps_mutex.
460 * pps_{lock,unlock}() do these steps in the correct order, so one
461 * should use them always.
462 */
463
464 for_each_intel_dp(display->drm, encoder) {
465 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
466
467 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
468
469 if (encoder->type == INTEL_OUTPUT_EDP)
470 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
471 }
472 }
473
474 void bxt_pps_reset_all(struct intel_display *display)
475 {
476 struct intel_encoder *encoder;
477
478 if (!HAS_DISPLAY(display))
479 return;
480
481 /* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */
482
483 for_each_intel_dp(display->drm, encoder) {
484 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
485
486 if (encoder->type == INTEL_OUTPUT_EDP)
487 intel_dp->pps.bxt_pps_reset = true;
488 }
489 }
490
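/* MMIO registers belonging to one panel power sequencer instance. */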
491 struct pps_registers {
492 i915_reg_t pp_ctrl;
493 i915_reg_t pp_stat;
494 i915_reg_t pp_on;
495 i915_reg_t pp_off;
496 i915_reg_t pp_div;
497 };
498
499 static void intel_pps_get_registers(struct intel_dp *intel_dp,
500 struct pps_registers *regs)
501 {
502 struct intel_display *display = to_intel_display(intel_dp);
503 struct drm_i915_private *dev_priv = to_i915(display->drm);
504 int pps_idx;
505
506 memset(regs, 0, sizeof(*regs));
507
508 if (display->platform.valleyview || display->platform.cherryview)
509 pps_idx = vlv_power_sequencer_pipe(intel_dp);
510 else if (display->platform.geminilake || display->platform.broxton)
511 pps_idx = bxt_power_sequencer_idx(intel_dp);
512 else
513 pps_idx = intel_dp->pps.pps_idx;
514
515 regs->pp_ctrl = PP_CONTROL(display, pps_idx);
516 regs->pp_stat = PP_STATUS(display, pps_idx);
517 regs->pp_on = PP_ON_DELAYS(display, pps_idx);
518 regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
519
520 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
521 if (display->platform.geminilake || display->platform.broxton ||
522 INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
523 regs->pp_div = INVALID_MMIO_REG;
524 else
525 regs->pp_div = PP_DIVISOR(display, pps_idx);
526 }
527
528 static i915_reg_t
529 _pp_ctrl_reg(struct intel_dp *intel_dp)
530 {
531 struct pps_registers regs;
532
533 intel_pps_get_registers(intel_dp, &regs);
534
535 return regs.pp_ctrl;
536 }
537
538 static i915_reg_t
539 _pp_stat_reg(struct intel_dp *intel_dp)
540 {
541 struct pps_registers regs;
542
543 intel_pps_get_registers(intel_dp, &regs);
544
545 return regs.pp_stat;
546 }
547
548 static bool edp_have_panel_power(struct intel_dp *intel_dp)
549 {
550 struct intel_display *display = to_intel_display(intel_dp);
551
552 lockdep_assert_held(&display->pps.mutex);
553
554 if ((display->platform.valleyview || display->platform.cherryview) &&
555 intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
556 return false;
557
558 return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
559 }
560
561 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
562 {
563 struct intel_display *display = to_intel_display(intel_dp);
564
565 lockdep_assert_held(&display->pps.mutex);
566
567 if ((display->platform.valleyview || display->platform.cherryview) &&
568 intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
569 return false;
570
571 return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
572 }
573
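/*
 * Warn if an AUX transfer is attempted while the panel has neither
 * panel power nor VDD enabled.
 */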
574 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
575 {
576 struct intel_display *display = to_intel_display(intel_dp);
577 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
578
579 if (!intel_dp_is_edp(intel_dp))
580 return;
581
582 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
583 drm_WARN(display->drm, 1,
584 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
585 dig_port->base.base.base.id, dig_port->base.base.name,
586 pps_name(intel_dp));
587 drm_dbg_kms(display->drm,
588 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
589 dig_port->base.base.base.id, dig_port->base.base.name,
590 pps_name(intel_dp),
591 intel_de_read(display, _pp_stat_reg(intel_dp)),
592 intel_de_read(display, _pp_ctrl_reg(intel_dp)));
593 }
594 }
595
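/*
 * Mask/value pairs for wait_panel_status(): panel on and sequencer idle,
 * panel off and sequencer idle, and power cycle delay completed.
 */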
596 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
597 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
598
599 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
600 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
601
602 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
603 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
604
605 static void intel_pps_verify_state(struct intel_dp *intel_dp);
606
607 static void wait_panel_status(struct intel_dp *intel_dp,
608 u32 mask, u32 value)
609 {
610 struct intel_display *display = to_intel_display(intel_dp);
611 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
612 i915_reg_t pp_stat_reg, pp_ctrl_reg;
613
614 lockdep_assert_held(&display->pps.mutex);
615
616 intel_pps_verify_state(intel_dp);
617
618 pp_stat_reg = _pp_stat_reg(intel_dp);
619 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
620
621 drm_dbg_kms(display->drm,
622 "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
623 dig_port->base.base.base.id, dig_port->base.base.name,
624 pps_name(intel_dp),
625 mask, value,
626 intel_de_read(display, pp_stat_reg),
627 intel_de_read(display, pp_ctrl_reg));
628
629 if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
630 drm_err(display->drm,
631 "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
632 dig_port->base.base.base.id, dig_port->base.base.name,
633 pps_name(intel_dp),
634 intel_de_read(display, pp_stat_reg),
635 intel_de_read(display, pp_ctrl_reg));
636
637 drm_dbg_kms(display->drm, "Wait complete\n");
638 }
639
640 static void wait_panel_on(struct intel_dp *intel_dp)
641 {
642 struct intel_display *display = to_intel_display(intel_dp);
643 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
644
645 drm_dbg_kms(display->drm,
646 "[ENCODER:%d:%s] %s wait for panel power on\n",
647 dig_port->base.base.base.id, dig_port->base.base.name,
648 pps_name(intel_dp));
649 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
650 }
651
652 static void wait_panel_off(struct intel_dp *intel_dp)
653 {
654 struct intel_display *display = to_intel_display(intel_dp);
655 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
656
657 drm_dbg_kms(display->drm,
658 "[ENCODER:%d:%s] %s wait for panel power off time\n",
659 dig_port->base.base.base.id, dig_port->base.base.name,
660 pps_name(intel_dp));
661 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
662 }
663
664 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
665 {
666 struct intel_display *display = to_intel_display(intel_dp);
667 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
668 ktime_t panel_power_on_time;
669 s64 panel_power_off_duration, remaining;
670
671 /* Take the difference of the current time and the panel power off time,
672 * and then make the panel wait for the power cycle delay if needed. */
673 panel_power_on_time = ktime_get_boottime();
674 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
675
676 remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
677
678 drm_dbg_kms(display->drm,
679 "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
680 dig_port->base.base.base.id, dig_port->base.base.name,
681 pps_name(intel_dp), remaining);
682
683 /* When we disable the VDD override bit last we have to do the manual
684 * wait. */
685 if (remaining)
686 wait_remaining_ms_from_jiffies(jiffies, remaining);
687
688 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
689 }
690
691 void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
692 {
693 intel_wakeref_t wakeref;
694
695 if (!intel_dp_is_edp(intel_dp))
696 return;
697
698 with_intel_pps_lock(intel_dp, wakeref)
699 wait_panel_power_cycle(intel_dp);
700 }
701
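/*
 * Enforce the backlight on/off delays (T8/T9) relative to the last panel
 * power on and the last backlight off timestamps.
 */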
702 static void wait_backlight_on(struct intel_dp *intel_dp)
703 {
704 wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
705 intel_dp->pps.backlight_on_delay);
706 }
707
708 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
709 {
710 wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
711 intel_dp->pps.backlight_off_delay);
712 }
713
714 /* Read the current pp_control value, unlocking the register if it
715 * is locked
716 */
717
718 static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
719 {
720 struct intel_display *display = to_intel_display(intel_dp);
721 u32 control;
722
723 lockdep_assert_held(&display->pps.mutex);
724
725 control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
726 if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
727 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
728 control &= ~PANEL_UNLOCK_MASK;
729 control |= PANEL_UNLOCK_REGS;
730 }
731 return control;
732 }
733
734 /*
735 * Must be paired with intel_pps_vdd_off_unlocked().
736 * Must hold pps_mutex around the whole on/off sequence.
737 * Can be nested with intel_pps_vdd_{on,off}() calls.
738 */
739 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
740 {
741 struct intel_display *display = to_intel_display(intel_dp);
742 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
743 u32 pp;
744 i915_reg_t pp_stat_reg, pp_ctrl_reg;
745 bool need_to_disable = !intel_dp->pps.want_panel_vdd;
746
747 lockdep_assert_held(&display->pps.mutex);
748
749 if (!intel_dp_is_edp(intel_dp))
750 return false;
751
752 cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
753 intel_dp->pps.want_panel_vdd = true;
754
755 if (edp_have_panel_vdd(intel_dp))
756 return need_to_disable;
757
758 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
759 intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
760 intel_aux_power_domain(dig_port));
761
762 pp_stat_reg = _pp_stat_reg(intel_dp);
763 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
764
765 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
766 dig_port->base.base.base.id, dig_port->base.base.name,
767 pps_name(intel_dp));
768
769 if (!edp_have_panel_power(intel_dp))
770 wait_panel_power_cycle(intel_dp);
771
772 pp = ilk_get_pp_control(intel_dp);
773 pp |= EDP_FORCE_VDD;
774
775 intel_de_write(display, pp_ctrl_reg, pp);
776 intel_de_posting_read(display, pp_ctrl_reg);
777 drm_dbg_kms(display->drm,
778 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
779 dig_port->base.base.base.id, dig_port->base.base.name,
780 pps_name(intel_dp),
781 intel_de_read(display, pp_stat_reg),
782 intel_de_read(display, pp_ctrl_reg));
783 /*
784 * If the panel wasn't on, delay before accessing aux channel
785 */
786 if (!edp_have_panel_power(intel_dp)) {
787 drm_dbg_kms(display->drm,
788 "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
789 dig_port->base.base.base.id, dig_port->base.base.name,
790 pps_name(intel_dp));
791 msleep(intel_dp->pps.panel_power_up_delay);
792 }
793
794 return need_to_disable;
795 }
796
797 /*
798 * Must be paired with intel_pps_vdd_off() or - to disable
799 * both VDD and panel power - intel_pps_off().
800 * Nested calls to these functions are not allowed since
801 * we drop the lock. Caller must use some higher level
802 * locking to prevent nested calls from other threads.
803 */
804 void intel_pps_vdd_on(struct intel_dp *intel_dp)
805 {
806 struct intel_display *display = to_intel_display(intel_dp);
807 intel_wakeref_t wakeref;
808 bool vdd;
809
810 if (!intel_dp_is_edp(intel_dp))
811 return;
812
813 vdd = false;
814 with_intel_pps_lock(intel_dp, wakeref)
815 vdd = intel_pps_vdd_on_unlocked(intel_dp);
816 INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
817 dp_to_dig_port(intel_dp)->base.base.base.id,
818 dp_to_dig_port(intel_dp)->base.base.name,
819 pps_name(intel_dp));
820 }
821
822 static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
823 {
824 struct intel_display *display = to_intel_display(intel_dp);
825 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
826 u32 pp;
827 i915_reg_t pp_stat_reg, pp_ctrl_reg;
828
829 lockdep_assert_held(&display->pps.mutex);
830
831 drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
832
833 if (!edp_have_panel_vdd(intel_dp))
834 return;
835
836 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
837 dig_port->base.base.base.id, dig_port->base.base.name,
838 pps_name(intel_dp));
839
840 pp = ilk_get_pp_control(intel_dp);
841 pp &= ~EDP_FORCE_VDD;
842
843 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
844 pp_stat_reg = _pp_stat_reg(intel_dp);
845
846 intel_de_write(display, pp_ctrl_reg, pp);
847 intel_de_posting_read(display, pp_ctrl_reg);
848
849 /* Make sure sequencer is idle before allowing subsequent activity */
850 drm_dbg_kms(display->drm,
851 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
852 dig_port->base.base.base.id, dig_port->base.base.name,
853 pps_name(intel_dp),
854 intel_de_read(display, pp_stat_reg),
855 intel_de_read(display, pp_ctrl_reg));
856
857 if ((pp & PANEL_POWER_ON) == 0) {
858 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
859 intel_dp_invalidate_source_oui(intel_dp);
860 }
861
862 intel_display_power_put(display,
863 intel_aux_power_domain(dig_port),
864 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
865 }
866
867 void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
868 {
869 intel_wakeref_t wakeref;
870
871 if (!intel_dp_is_edp(intel_dp))
872 return;
873
874 cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
875 /*
876 * vdd might still be enabled due to the delayed vdd off.
877 * Make sure vdd is actually turned off here.
878 */
879 with_intel_pps_lock(intel_dp, wakeref)
880 intel_pps_vdd_off_sync_unlocked(intel_dp);
881 }
882
883 static void edp_panel_vdd_work(struct work_struct *__work)
884 {
885 struct intel_pps *pps = container_of(to_delayed_work(__work),
886 struct intel_pps, panel_vdd_work);
887 struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
888 intel_wakeref_t wakeref;
889
890 with_intel_pps_lock(intel_dp, wakeref) {
891 if (!intel_dp->pps.want_panel_vdd)
892 intel_pps_vdd_off_sync_unlocked(intel_dp);
893 }
894 }
895
896 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
897 {
898 struct intel_display *display = to_intel_display(intel_dp);
899 struct drm_i915_private *i915 = to_i915(display->drm);
900 unsigned long delay;
901
902 /*
903 * We may not yet know the real power sequencing delays,
904 * so keep VDD enabled until we're done with init.
905 */
906 if (intel_dp->pps.initializing)
907 return;
908
909 /*
910 * Queue the timer to fire a long time from now (relative to the power
911 * down delay) to keep the panel power up across a sequence of
912 * operations.
913 */
914 delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
915 queue_delayed_work(i915->unordered_wq,
916 &intel_dp->pps.panel_vdd_work, delay);
917 }
918
919 /*
920 * Must be paired with intel_pps_vdd_on_unlocked().
921 * Must hold pps_mutex around the whole on/off sequence.
922 * Can be nested with intel_pps_vdd_{on,off}() calls.
923 */
924 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
925 {
926 struct intel_display *display = to_intel_display(intel_dp);
927
928 lockdep_assert_held(&display->pps.mutex);
929
930 if (!intel_dp_is_edp(intel_dp))
931 return;
932
933 INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
934 "[ENCODER:%d:%s] %s VDD not forced on",
935 dp_to_dig_port(intel_dp)->base.base.base.id,
936 dp_to_dig_port(intel_dp)->base.base.name,
937 pps_name(intel_dp));
938
939 intel_dp->pps.want_panel_vdd = false;
940
941 if (sync)
942 intel_pps_vdd_off_sync_unlocked(intel_dp);
943 else
944 edp_panel_vdd_schedule_off(intel_dp);
945 }
946
947 void intel_pps_vdd_off(struct intel_dp *intel_dp)
948 {
949 intel_wakeref_t wakeref;
950
951 if (!intel_dp_is_edp(intel_dp))
952 return;
953
954 with_intel_pps_lock(intel_dp, wakeref)
955 intel_pps_vdd_off_unlocked(intel_dp, false);
956 }
957
958 void intel_pps_on_unlocked(struct intel_dp *intel_dp)
959 {
960 struct intel_display *display = to_intel_display(intel_dp);
961 u32 pp;
962 i915_reg_t pp_ctrl_reg;
963
964 lockdep_assert_held(&display->pps.mutex);
965
966 if (!intel_dp_is_edp(intel_dp))
967 return;
968
969 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
970 dp_to_dig_port(intel_dp)->base.base.base.id,
971 dp_to_dig_port(intel_dp)->base.base.name,
972 pps_name(intel_dp));
973
974 if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
975 "[ENCODER:%d:%s] %s panel power already on\n",
976 dp_to_dig_port(intel_dp)->base.base.base.id,
977 dp_to_dig_port(intel_dp)->base.base.name,
978 pps_name(intel_dp)))
979 return;
980
981 wait_panel_power_cycle(intel_dp);
982
983 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
984 pp = ilk_get_pp_control(intel_dp);
985 if (display->platform.ironlake) {
986 /* ILK workaround: disable reset around power sequence */
987 pp &= ~PANEL_POWER_RESET;
988 intel_de_write(display, pp_ctrl_reg, pp);
989 intel_de_posting_read(display, pp_ctrl_reg);
990 }
991
992 /*
993 * WA: 22019252566
994 * Disable DPLS gating around power sequence.
995 */
996 if (IS_DISPLAY_VER(display, 13, 14))
997 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
998 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
999
1000 pp |= PANEL_POWER_ON;
1001 if (!display->platform.ironlake)
1002 pp |= PANEL_POWER_RESET;
1003
1004 intel_de_write(display, pp_ctrl_reg, pp);
1005 intel_de_posting_read(display, pp_ctrl_reg);
1006
1007 wait_panel_on(intel_dp);
1008 intel_dp->pps.last_power_on = jiffies;
1009
1010 if (IS_DISPLAY_VER(display, 13, 14))
1011 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1012 PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
1013
1014 if (display->platform.ironlake) {
1015 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1016 intel_de_write(display, pp_ctrl_reg, pp);
1017 intel_de_posting_read(display, pp_ctrl_reg);
1018 }
1019 }
1020
1021 void intel_pps_on(struct intel_dp *intel_dp)
1022 {
1023 intel_wakeref_t wakeref;
1024
1025 if (!intel_dp_is_edp(intel_dp))
1026 return;
1027
1028 with_intel_pps_lock(intel_dp, wakeref)
1029 intel_pps_on_unlocked(intel_dp);
1030 }
1031
1032 void intel_pps_off_unlocked(struct intel_dp *intel_dp)
1033 {
1034 struct intel_display *display = to_intel_display(intel_dp);
1035 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1036 u32 pp;
1037 i915_reg_t pp_ctrl_reg;
1038
1039 lockdep_assert_held(&display->pps.mutex);
1040
1041 if (!intel_dp_is_edp(intel_dp))
1042 return;
1043
1044 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
1045 dig_port->base.base.base.id, dig_port->base.base.name,
1046 pps_name(intel_dp));
1047
1048 drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
1049 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
1050 dig_port->base.base.base.id, dig_port->base.base.name,
1051 pps_name(intel_dp));
1052
1053 pp = ilk_get_pp_control(intel_dp);
1054 /* We need to switch off panel power _and_ force vdd, for otherwise some
1055 * panels get very unhappy and cease to work. */
1056 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1057 EDP_BLC_ENABLE);
1058
1059 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1060
1061 intel_dp->pps.want_panel_vdd = false;
1062
1063 intel_de_write(display, pp_ctrl_reg, pp);
1064 intel_de_posting_read(display, pp_ctrl_reg);
1065
1066 wait_panel_off(intel_dp);
1067 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1068
1069 intel_dp_invalidate_source_oui(intel_dp);
1070
1071 /* We got a reference when we enabled the VDD. */
1072 intel_display_power_put(display,
1073 intel_aux_power_domain(dig_port),
1074 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
1075 }
1076
1077 void intel_pps_off(struct intel_dp *intel_dp)
1078 {
1079 intel_wakeref_t wakeref;
1080
1081 if (!intel_dp_is_edp(intel_dp))
1082 return;
1083
1084 with_intel_pps_lock(intel_dp, wakeref)
1085 intel_pps_off_unlocked(intel_dp);
1086 }
1087
1088 /* Enable backlight in the panel power control. */
1089 void intel_pps_backlight_on(struct intel_dp *intel_dp)
1090 {
1091 struct intel_display *display = to_intel_display(intel_dp);
1092 intel_wakeref_t wakeref;
1093
1094 /*
1095 * If we enable the backlight right away following a panel power
1096 * on, we may see slight flicker as the panel syncs with the eDP
1097 * link. So delay a bit to make sure the image is solid before
1098 * allowing it to appear.
1099 */
1100 wait_backlight_on(intel_dp);
1101
1102 with_intel_pps_lock(intel_dp, wakeref) {
1103 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1104 u32 pp;
1105
1106 pp = ilk_get_pp_control(intel_dp);
1107 pp |= EDP_BLC_ENABLE;
1108
1109 intel_de_write(display, pp_ctrl_reg, pp);
1110 intel_de_posting_read(display, pp_ctrl_reg);
1111 }
1112 }
1113
1114 /* Disable backlight in the panel power control. */
1115 void intel_pps_backlight_off(struct intel_dp *intel_dp)
1116 {
1117 struct intel_display *display = to_intel_display(intel_dp);
1118 intel_wakeref_t wakeref;
1119
1120 if (!intel_dp_is_edp(intel_dp))
1121 return;
1122
1123 with_intel_pps_lock(intel_dp, wakeref) {
1124 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1125 u32 pp;
1126
1127 pp = ilk_get_pp_control(intel_dp);
1128 pp &= ~EDP_BLC_ENABLE;
1129
1130 intel_de_write(display, pp_ctrl_reg, pp);
1131 intel_de_posting_read(display, pp_ctrl_reg);
1132 }
1133
1134 intel_dp->pps.last_backlight_off = jiffies;
1135 edp_wait_backlight_off(intel_dp);
1136 }
1137
1138 /*
1139 * Hook for controlling the panel power control backlight through the bl_power
1140 * sysfs attribute. Take care to handle multiple calls.
1141 */
1142 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
1143 {
1144 struct intel_display *display = to_intel_display(connector);
1145 struct intel_dp *intel_dp = intel_attached_dp(connector);
1146 intel_wakeref_t wakeref;
1147 bool is_enabled;
1148
1149 is_enabled = false;
1150 with_intel_pps_lock(intel_dp, wakeref)
1151 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1152 if (is_enabled == enable)
1153 return;
1154
1155 drm_dbg_kms(display->drm, "panel power control backlight %s\n",
1156 str_enable_disable(enable));
1157
1158 if (enable)
1159 intel_pps_backlight_on(intel_dp);
1160 else
1161 intel_pps_backlight_off(intel_dp);
1162 }
1163
1164 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
1165 {
1166 struct intel_display *display = to_intel_display(intel_dp);
1167 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1168 enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
1169 i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
1170
1171 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
1172
1173 if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
1174 return;
1175
1176 intel_pps_vdd_off_sync_unlocked(intel_dp);
1177
1178 /*
1179 * VLV seems to get confused when multiple power sequencers
1180 * have the same port selected (even if only one has power/vdd
1181 * enabled). The failure manifests as vlv_wait_port_ready() failing.
1182 * CHV on the other hand doesn't seem to mind having the same port
1183 * selected in multiple power sequencers, but let's clear the
1184 * port select always when logically disconnecting a power sequencer
1185 * from a port.
1186 */
1187 drm_dbg_kms(display->drm,
1188 "detaching %s from [ENCODER:%d:%s]\n",
1189 pps_name(intel_dp),
1190 dig_port->base.base.base.id, dig_port->base.base.name);
1191 intel_de_write(display, pp_on_reg, 0);
1192 intel_de_posting_read(display, pp_on_reg);
1193
1194 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
1195 }
1196
1197 static void vlv_steal_power_sequencer(struct intel_display *display,
1198 enum pipe pipe)
1199 {
1200 struct intel_encoder *encoder;
1201
1202 lockdep_assert_held(&display->pps.mutex);
1203
1204 for_each_intel_dp(display->drm, encoder) {
1205 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1206
1207 drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe,
1208 "stealing PPS %c from active [ENCODER:%d:%s]\n",
1209 pipe_name(pipe), encoder->base.base.id,
1210 encoder->base.name);
1211
1212 if (intel_dp->pps.vlv_pps_pipe != pipe)
1213 continue;
1214
1215 drm_dbg_kms(display->drm,
1216 "stealing PPS %c from [ENCODER:%d:%s]\n",
1217 pipe_name(pipe), encoder->base.base.id,
1218 encoder->base.name);
1219
1220 /* make sure vdd is off before we steal it */
1221 vlv_detach_power_sequencer(intel_dp);
1222 }
1223 }
1224
1225 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
1226 {
1227 struct intel_display *display = to_intel_display(intel_dp);
1228 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1229 enum pipe pipe;
1230
1231 if (g4x_dp_port_enabled(display, intel_dp->output_reg,
1232 encoder->port, &pipe))
1233 return pipe;
1234
1235 return INVALID_PIPE;
1236 }
1237
1238 /* Call on all DP, not just eDP */
1239 void vlv_pps_pipe_init(struct intel_dp *intel_dp)
1240 {
1241 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
1242 intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
1243 }
1244
1245 /* Call on all DP, not just eDP */
1246 void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
1247 {
1248 intel_wakeref_t wakeref;
1249
1250 with_intel_pps_lock(intel_dp, wakeref)
1251 intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
1252 }
1253
1254 enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp)
1255 {
1256 enum pipe pipe;
1257
1258 /*
1259 * Figure out the current pipe for the initial backlight setup. If the
1260 * current pipe isn't valid, try the PPS pipe, and if that fails just
1261 * assume pipe A.
1262 */
1263 pipe = vlv_active_pipe(intel_dp);
1264
1265 if (pipe != PIPE_A && pipe != PIPE_B)
1266 pipe = intel_dp->pps.vlv_pps_pipe;
1267
1268 if (pipe != PIPE_A && pipe != PIPE_B)
1269 pipe = PIPE_A;
1270
1271 return pipe;
1272 }
1273
1274 /* Call on all DP, not just eDP */
1275 void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
1276 const struct intel_crtc_state *crtc_state)
1277 {
1278 struct intel_display *display = to_intel_display(encoder);
1279 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1280 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1281
1282 lockdep_assert_held(&display->pps.mutex);
1283
1284 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
1285
1286 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE &&
1287 intel_dp->pps.vlv_pps_pipe != crtc->pipe) {
1288 /*
1289 * If another power sequencer was being used on this
1290 * port previously make sure to turn off vdd there while
1291 * we still have control of it.
1292 */
1293 vlv_detach_power_sequencer(intel_dp);
1294 }
1295
1296 /*
1297 * We may be stealing the power
1298 * sequencer from another port.
1299 */
1300 vlv_steal_power_sequencer(display, crtc->pipe);
1301
1302 intel_dp->pps.vlv_active_pipe = crtc->pipe;
1303
1304 if (!intel_dp_is_edp(intel_dp))
1305 return;
1306
1307 /* now it's all ours */
1308 intel_dp->pps.vlv_pps_pipe = crtc->pipe;
1309
1310 drm_dbg_kms(display->drm,
1311 "initializing %s for [ENCODER:%d:%s]\n",
1312 pps_name(intel_dp),
1313 encoder->base.base.id, encoder->base.name);
1314
1315 /* init power sequencer on this pipe and port */
1316 pps_init_delays(intel_dp);
1317 pps_init_registers(intel_dp, true);
1318 }
1319
1320 /* Call on all DP, not just eDP */
1321 void vlv_pps_port_disable(struct intel_encoder *encoder,
1322 const struct intel_crtc_state *crtc_state)
1323 {
1324 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1325
1326 intel_wakeref_t wakeref;
1327
1328 with_intel_pps_lock(intel_dp, wakeref)
1329 intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
1330 }
1331
1332 static void pps_vdd_init(struct intel_dp *intel_dp)
1333 {
1334 struct intel_display *display = to_intel_display(intel_dp);
1335 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1336
1337 lockdep_assert_held(&display->pps.mutex);
1338
1339 if (!edp_have_panel_vdd(intel_dp))
1340 return;
1341
1342 /*
1343 * The VDD bit needs a power domain reference, so if the bit is
1344 * already enabled when we boot or resume, grab this reference and
1345 * schedule a vdd off, so we don't hold on to the reference
1346 * indefinitely.
1347 */
1348 drm_dbg_kms(display->drm,
1349 "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
1350 dig_port->base.base.base.id, dig_port->base.base.name,
1351 pps_name(intel_dp));
1352 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
1353 intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
1354 intel_aux_power_domain(dig_port));
1355 }
1356
1357 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1358 {
1359 intel_wakeref_t wakeref;
1360 bool have_power = false;
1361
1362 with_intel_pps_lock(intel_dp, wakeref) {
1363 have_power = edp_have_panel_power(intel_dp) ||
1364 edp_have_panel_vdd(intel_dp);
1365 }
1366
1367 return have_power;
1368 }
1369
1370 static void pps_init_timestamps(struct intel_dp *intel_dp)
1371 {
1372 /*
1373 * Initialize panel power off time to 0, assuming panel power could have
1374 * been toggled between kernel boot and now only by a previously loaded
1375 * and removed i915, which has already ensured sufficient power off
1376 * delay at module remove.
1377 */
1378 intel_dp->pps.panel_power_off_time = 0;
1379 intel_dp->pps.last_power_on = jiffies;
1380 intel_dp->pps.last_backlight_off = jiffies;
1381 }
1382
1383 static void
1384 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq)
1385 {
1386 struct intel_display *display = to_intel_display(intel_dp);
1387 u32 pp_on, pp_off, pp_ctl, power_cycle_delay;
1388 struct pps_registers regs;
1389
1390 intel_pps_get_registers(intel_dp, &regs);
1391
1392 pp_ctl = ilk_get_pp_control(intel_dp);
1393
1394 /* Ensure PPS is unlocked */
1395 if (!HAS_DDI(display))
1396 intel_de_write(display, regs.pp_ctrl, pp_ctl);
1397
1398 pp_on = intel_de_read(display, regs.pp_on);
1399 pp_off = intel_de_read(display, regs.pp_off);
1400
1401 /* Pull timing values out of registers */
1402 seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1403 seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1404 seq->backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1405 seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1406
1407 if (i915_mmio_reg_valid(regs.pp_div)) {
1408 u32 pp_div;
1409
1410 pp_div = intel_de_read(display, regs.pp_div);
1411
1412 power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div);
1413 } else {
1414 power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl);
1415 }
1416
1417 /* hardware wants <delay>+1 in 100ms units */
1418 seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0;
1419 }
1420
1421 static void
1422 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1423 const struct intel_pps_delays *seq)
1424 {
1425 struct intel_display *display = to_intel_display(intel_dp);
1426
1427 drm_dbg_kms(display->drm,
1428 "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n",
1429 state_name, seq->power_up, seq->backlight_on,
1430 seq->backlight_off, seq->power_down, seq->power_cycle);
1431 }
1432
1433 static void
1434 intel_pps_verify_state(struct intel_dp *intel_dp)
1435 {
1436 struct intel_display *display = to_intel_display(intel_dp);
1437 struct intel_pps_delays hw;
1438 struct intel_pps_delays *sw = &intel_dp->pps.pps_delays;
1439
1440 intel_pps_readout_hw_state(intel_dp, &hw);
1441
1442 if (hw.power_up != sw->power_up ||
1443 hw.backlight_on != sw->backlight_on ||
1444 hw.backlight_off != sw->backlight_off ||
1445 hw.power_down != sw->power_down ||
1446 hw.power_cycle != sw->power_cycle) {
1447 drm_err(display->drm, "PPS state mismatch\n");
1448 intel_pps_dump_state(intel_dp, "sw", sw);
1449 intel_pps_dump_state(intel_dp, "hw", &hw);
1450 }
1451 }
1452
1453 static bool pps_delays_valid(struct intel_pps_delays *delays)
1454 {
1455 return delays->power_up || delays->backlight_on || delays->backlight_off ||
1456 delays->power_down || delays->power_cycle;
1457 }
1458
1459 static int msecs_to_pps_units(int msecs)
1460 {
1461 /* PPS uses 100us units */
1462 return msecs * 10;
1463 }
1464
1465 static int pps_units_to_msecs(int val)
1466 {
1467 /* PPS uses 100us units */
1468 return DIV_ROUND_UP(val, 10);
1469 }
1470
1471 static void pps_init_delays_bios(struct intel_dp *intel_dp,
1472 struct intel_pps_delays *bios)
1473 {
1474 struct intel_display *display = to_intel_display(intel_dp);
1475
1476 lockdep_assert_held(&display->pps.mutex);
1477
1478 if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
1479 intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1480
1481 *bios = intel_dp->pps.bios_pps_delays;
1482
1483 intel_pps_dump_state(intel_dp, "bios", bios);
1484 }
1485
1486 static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1487 struct intel_pps_delays *vbt)
1488 {
1489 struct intel_display *display = to_intel_display(intel_dp);
1490 struct intel_connector *connector = intel_dp->attached_connector;
1491
1492 *vbt = connector->panel.vbt.edp.pps;
1493
1494 if (!pps_delays_valid(vbt))
1495 return;
1496
1497 /*
1498 * On Toshiba Satellite P50-C-18C system the VBT T12 delay
1499 * of 500ms appears to be too short. Occasionally the panel
1500 * just fails to power back on. Increasing the delay to 800ms
1501 * seems sufficient to avoid this problem.
1502 */
1503 if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
1504 vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300));
1505 drm_dbg_kms(display->drm,
1506 "Increasing T12 panel delay as per the quirk to %d\n",
1507 vbt->power_cycle);
1508 }
1509
1510 intel_pps_dump_state(intel_dp, "vbt", vbt);
1511 }
1512
1513 static void pps_init_delays_spec(struct intel_dp *intel_dp,
1514 struct intel_pps_delays *spec)
1515 {
1516 struct intel_display *display = to_intel_display(intel_dp);
1517
1518 lockdep_assert_held(&display->pps.mutex);
1519
1520 /* Upper limits from eDP 1.3 spec */
1521 spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */
1522 spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */
1523 spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */
1524 spec->power_down = msecs_to_pps_units(500); /* T10 */
1525 spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */
1526
1527 intel_pps_dump_state(intel_dp, "spec", spec);
1528 }
1529
1530 static void pps_init_delays(struct intel_dp *intel_dp)
1531 {
1532 struct intel_display *display = to_intel_display(intel_dp);
1533 struct intel_pps_delays cur, vbt, spec,
1534 *final = &intel_dp->pps.pps_delays;
1535
1536 lockdep_assert_held(&display->pps.mutex);
1537
1538 /* already initialized? */
1539 if (pps_delays_valid(final))
1540 return;
1541
1542 pps_init_delays_bios(intel_dp, &cur);
1543 pps_init_delays_vbt(intel_dp, &vbt);
1544 pps_init_delays_spec(intel_dp, &spec);
1545
1546 /* Use the max of the register settings and vbt. If both are
1547 * unset, fall back to the spec limits. */
1548 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
1549 spec.field : \
1550 max(cur.field, vbt.field))
1551 assign_final(power_up);
1552 assign_final(backlight_on);
1553 assign_final(backlight_off);
1554 assign_final(power_down);
1555 assign_final(power_cycle);
1556 #undef assign_final
1557
1558 intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up);
1559 intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on);
1560 intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off);
1561 intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down);
1562 intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle);
1563
1564 drm_dbg_kms(display->drm,
1565 "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1566 intel_dp->pps.panel_power_up_delay,
1567 intel_dp->pps.panel_power_down_delay,
1568 intel_dp->pps.panel_power_cycle_delay);
1569
1570 drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
1571 intel_dp->pps.backlight_on_delay,
1572 intel_dp->pps.backlight_off_delay);
1573
1574 /*
1575 * We override the HW backlight delays to 1 because we do manual waits
1576 * on them. For backlight_on, even BSpec recommends doing it. For
1577 * backlight_off, if we don't do this, we'll end up waiting for the
1578 * backlight off delay twice: once when we do the manual sleep, and
1579 * once when we disable the panel and wait for the PP_STATUS bit to
1580 * become zero.
1581 */
1582 final->backlight_on = 1;
1583 final->backlight_off = 1;
1584
1585 /*
1586 * HW has only a 100msec granularity for power_cycle so round it up
1587 * accordingly.
1588 */
1589 final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100));
1590 }
1591
1592 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1593 {
1594 struct intel_display *display = to_intel_display(intel_dp);
1595 struct drm_i915_private *dev_priv = to_i915(display->drm);
1596 u32 pp_on, pp_off, port_sel = 0;
1597 int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
1598 struct pps_registers regs;
1599 enum port port = dp_to_dig_port(intel_dp)->base.port;
1600 const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays;
1601
1602 lockdep_assert_held(&display->pps.mutex);
1603
1604 intel_pps_get_registers(intel_dp, &regs);
1605
1606 /*
1607 * On some VLV machines the BIOS can leave the VDD
1608 * enabled even on power sequencers which aren't
1609 * hooked up to any port. This would mess up the
1610 * power domain tracking the first time we pick
1611 * one of these power sequencers for use since
1612 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1613 * already on and therefore wouldn't grab the power
1614 * domain reference. Disable VDD first to avoid this.
1615 * This also avoids spuriously turning the VDD on as
1616 * soon as the new power sequencer gets initialized.
1617 */
1618 if (force_disable_vdd) {
1619 u32 pp = ilk_get_pp_control(intel_dp);
1620
1621 drm_WARN(display->drm, pp & PANEL_POWER_ON,
1622 "Panel power already on\n");
1623
1624 if (pp & EDP_FORCE_VDD)
1625 drm_dbg_kms(display->drm,
1626 "VDD already on, disabling first\n");
1627
1628 pp &= ~EDP_FORCE_VDD;
1629
1630 intel_de_write(display, regs.pp_ctrl, pp);
1631 }
1632
	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down);

	/*
	 * Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more.
	 */
	if (display->platform.valleyview || display->platform.cherryview) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(display, regs.pp_on, pp_on);
	intel_de_write(display, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
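	/*
	 * Illustration, assuming a 24 MHz rawclk: div == 24, so the
	 * reference divider field becomes (100 * 24) / 2 - 1 = 1199.
	 * The power cycle delay is converted from the driver's 100 usec
	 * units to the register's 100 msec units (hence the / 1000),
	 * plus the extra 1 the register encoding expects.
	 */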
	if (i915_mmio_reg_valid(regs.pp_div))
		intel_de_write(display, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK,
					      (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
					      DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
	else
		intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
			     REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
					    DIV_ROUND_UP(seq->power_cycle, 1000) + 1));

	drm_dbg_kms(display->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(display, regs.pp_on),
		    intel_de_read(display, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(display, regs.pp_div) :
		    (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		/*
		 * Reinit the power sequencer also on the resume path, in case
		 * BIOS did something nasty with it.
		 */
		if (display->platform.valleyview || display->platform.cherryview)
			vlv_initial_power_sequencer_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);
		pps_vdd_init(intel_dp);

		if (edp_have_panel_vdd(intel_dp))
			edp_panel_vdd_schedule_off(intel_dp);
	}
}

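/*
 * First-pass PPS setup at encoder init time: probe the initial hardware
 * state and program a sane baseline. The delays are recomputed later in
 * intel_pps_init_late() once the per-panel VBT data has been parsed.
 */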
bool intel_pps_init(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool ret;

	intel_dp->pps.initializing = true;
	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);

	pps_init_timestamps(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		ret = pps_initial_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);
		pps_vdd_init(intel_dp);
	}

	return ret;
}

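/*
 * Cross-check the power sequencer picked during initial setup against the
 * backlight controller index from the VBT, and switch to the VBT value if
 * they disagree (non-VLV/CHV platforms with more than one PPS only).
 */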
static void pps_init_late(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector = intel_dp->attached_connector;

	if (display->platform.valleyview || display->platform.cherryview)
		return;

	if (intel_num_pps(display) < 2)
		return;

	drm_WARN(display->drm,
		 connector->panel.vbt.backlight.controller >= 0 &&
		 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
		 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
		 encoder->base.base.id, encoder->base.name,
		 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);

	if (connector->panel.vbt.backlight.controller >= 0)
		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
}

void intel_pps_init_late(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		/* Reinit delays after per-panel info has been parsed from VBT */
		pps_init_late(intel_dp);

		memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);

		intel_dp->pps.initializing = false;

		if (edp_have_panel_vdd(intel_dp))
			edp_panel_vdd_schedule_off(intel_dp);
	}
}

void intel_pps_unlock_regs_wa(struct intel_display *display)
{
	int pps_num;
	int pps_idx;

	if (!HAS_DISPLAY(display) || HAS_DDI(display))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	pps_num = intel_num_pps(display);

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
		intel_de_rmw(display, PP_CONTROL(display, pps_idx),
			     PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}

void intel_pps_setup(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
		display->pps.mmio_base = PCH_PPS_BASE;
	else if (display->platform.valleyview || display->platform.cherryview)
		display->pps.mmio_base = VLV_PPS_BASE;
	else
		display->pps.mmio_base = PPS_BASE;
}

static int intel_pps_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->pps.panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->pps.panel_power_down_delay);
	seq_printf(m, "Panel power cycle delay: %d\n",
		   intel_dp->pps.panel_power_cycle_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->pps.backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->pps.backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_pps);

void intel_pps_connector_debugfs_add(struct intel_connector *connector)
{
	struct dentry *root = connector->base.debugfs_entry;
	int connector_type = connector->base.connector_type;

	if (connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", 0444, root,
				    connector, &intel_pps_fops);
}
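
/*
 * The timings printed by intel_pps_show() end up under the connector's
 * debugfs directory, e.g. (path assumed, depends on card index and
 * connector name):
 *   cat /sys/kernel/debug/dri/0/eDP-1/i915_panel_timings
 */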

void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(display->drm, HAS_DDI(display)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(display, 0);
		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
			PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(display, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(display, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (display->platform.valleyview || display->platform.cherryview) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(display, pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(display, 0);
		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
			PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(display->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

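	/*
	 * The sequencer registers count as unlocked when the panel is off
	 * or when the unlock key is present in PP_CONTROL.
	 */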
	val = intel_de_read(display, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked,
				 "panel assertion failure, pipe %c regs locked\n",
				 pipe_name(pipe));
}