1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include "i915_drv.h"
7 #include "i915_irq.h"
8 #include "i915_reg.h"
9 #include "intel_backlight_regs.h"
10 #include "intel_combo_phy.h"
11 #include "intel_combo_phy_regs.h"
12 #include "intel_crt.h"
13 #include "intel_de.h"
14 #include "intel_display_irq.h"
15 #include "intel_display_power_well.h"
16 #include "intel_display_types.h"
17 #include "intel_dkl_phy.h"
18 #include "intel_dkl_phy_regs.h"
19 #include "intel_dmc.h"
20 #include "intel_dmc_wl.h"
21 #include "intel_dp_aux_regs.h"
22 #include "intel_dpio_phy.h"
23 #include "intel_dpll.h"
24 #include "intel_hotplug.h"
25 #include "intel_pcode.h"
26 #include "intel_pps.h"
27 #include "intel_tc.h"
28 #include "intel_vga.h"
29 #include "skl_watermark.h"
30 #include "vlv_dpio_phy_regs.h"
31 #include "vlv_sideband.h"
32 #include "vlv_sideband_reg.h"
33
/*
 * The four per-well request/status register instances. Each register holds a
 * request bit and a state bit per power well index; the well stays powered
 * while any requester's bit is set (see hsw_power_well_requesters()).
 */
struct i915_power_well_regs {
	i915_reg_t bios;	/* BIOS request register */
	i915_reg_t driver;	/* driver request register (the one we own) */
	i915_reg_t kvmr;	/* KVMr request register; may be unset (reg == 0) */
	i915_reg_t debug;	/* debug request register */
};
40
struct i915_power_well_ops {
	/* Register set used by the hsw-style enable/disable/is_enabled hooks. */
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct intel_display *display,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct intel_display *display,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct intel_display *display,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct intel_display *display,
			   struct i915_power_well *power_well);
};
68
69 static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well * power_well)70 i915_power_well_instance(const struct i915_power_well *power_well)
71 {
72 return &power_well->desc->instances->list[power_well->instance_idx];
73 }
74
75 struct i915_power_well *
lookup_power_well(struct intel_display * display,enum i915_power_well_id power_well_id)76 lookup_power_well(struct intel_display *display,
77 enum i915_power_well_id power_well_id)
78 {
79 struct i915_power_well *power_well;
80
81 for_each_power_well(display, power_well)
82 if (i915_power_well_instance(power_well)->id == power_well_id)
83 return power_well;
84
85 /*
86 * It's not feasible to add error checking code to the callers since
87 * this condition really shouldn't happen and it doesn't even make sense
88 * to abort things like display initialization sequences. Just return
89 * the first power well and hope the WARN gets reported so we can fix
90 * our driver.
91 */
92 drm_WARN(display->drm, 1,
93 "Power well %d not defined for this platform\n",
94 power_well_id);
95 return &display->power.domains.power_wells[0];
96 }
97
intel_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)98 void intel_power_well_enable(struct intel_display *display,
99 struct i915_power_well *power_well)
100 {
101 drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
102 power_well->desc->ops->enable(display, power_well);
103 power_well->hw_enabled = true;
104 }
105
intel_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)106 void intel_power_well_disable(struct intel_display *display,
107 struct i915_power_well *power_well)
108 {
109 drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
110 power_well->hw_enabled = false;
111 power_well->desc->ops->disable(display, power_well);
112 }
113
intel_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)114 void intel_power_well_sync_hw(struct intel_display *display,
115 struct i915_power_well *power_well)
116 {
117 power_well->desc->ops->sync_hw(display, power_well);
118 power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
119 }
120
intel_power_well_get(struct intel_display * display,struct i915_power_well * power_well)121 void intel_power_well_get(struct intel_display *display,
122 struct i915_power_well *power_well)
123 {
124 if (!power_well->count++)
125 intel_power_well_enable(display, power_well);
126 }
127
intel_power_well_put(struct intel_display * display,struct i915_power_well * power_well)128 void intel_power_well_put(struct intel_display *display,
129 struct i915_power_well *power_well)
130 {
131 drm_WARN(display->drm, !power_well->count,
132 "Use count on power well %s is already zero",
133 i915_power_well_instance(power_well)->name);
134
135 if (!--power_well->count)
136 intel_power_well_disable(display, power_well);
137 }
138
intel_power_well_is_enabled(struct intel_display * display,struct i915_power_well * power_well)139 bool intel_power_well_is_enabled(struct intel_display *display,
140 struct i915_power_well *power_well)
141 {
142 return power_well->desc->ops->is_enabled(display, power_well);
143 }
144
intel_power_well_is_enabled_cached(struct i915_power_well * power_well)145 bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
146 {
147 return power_well->hw_enabled;
148 }
149
intel_display_power_well_is_enabled(struct intel_display * display,enum i915_power_well_id power_well_id)150 bool intel_display_power_well_is_enabled(struct intel_display *display,
151 enum i915_power_well_id power_well_id)
152 {
153 struct i915_power_well *power_well;
154
155 power_well = lookup_power_well(display, power_well_id);
156
157 return intel_power_well_is_enabled(display, power_well);
158 }
159
intel_power_well_is_always_on(struct i915_power_well * power_well)160 bool intel_power_well_is_always_on(struct i915_power_well *power_well)
161 {
162 return power_well->desc->always_on;
163 }
164
intel_power_well_name(struct i915_power_well * power_well)165 const char *intel_power_well_name(struct i915_power_well *power_well)
166 {
167 return i915_power_well_instance(power_well)->name;
168 }
169
intel_power_well_domains(struct i915_power_well * power_well)170 struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
171 {
172 return &power_well->domains;
173 }
174
intel_power_well_refcount(struct i915_power_well * power_well)175 int intel_power_well_refcount(struct i915_power_well *power_well)
176 {
177 return power_well->count;
178 }
179
180 /*
181 * Starting with Haswell, we have a "Power Down Well" that can be turned off
182 * when not needed anymore. We have 4 registers that can request the power well
183 * to be enabled, and it will only be disabled if none of the registers is
184 * requesting it to be enabled.
185 */
/*
 * Post-enable fixups for hsw-style wells: restore VGA I/O memory decoding
 * if the well backs the VGA plane, and re-enable the pipe interrupts that
 * live inside the well.
 */
static void hsw_power_well_post_enable(struct intel_display *display,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (has_vga)
		intel_vga_reset_io_mem(display);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
197
/* Quiesce the pipe interrupts living in the well before it goes down. */
static void hsw_power_well_pre_disable(struct intel_display *display,
				       u8 irq_pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
206
/* Map an ICL AUX power well index to its combo PHY. */
#define ICL_AUX_PW_TO_PHY(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)

/* Map an ICL AUX power well index to its AUX channel. */
#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

/* Map an ICL TBT AUX power well index to its AUX channel (TBT1 -> AUX_CH_C). */
#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

/*
 * Return the AUX channel an AUX power well feeds, using the TBT mapping for
 * Thunderbolt wells and the regular mapping otherwise.
 */
static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
					     ICL_AUX_PW_TO_CH(pw_idx);
}
223
224 static struct intel_digital_port *
aux_ch_to_digital_port(struct intel_display * display,enum aux_ch aux_ch)225 aux_ch_to_digital_port(struct intel_display *display,
226 enum aux_ch aux_ch)
227 {
228 struct intel_encoder *encoder;
229
230 for_each_intel_encoder(display->drm, encoder) {
231 struct intel_digital_port *dig_port;
232
233 /* We'll check the MST primary port */
234 if (encoder->type == INTEL_OUTPUT_DP_MST)
235 continue;
236
237 dig_port = enc_to_dig_port(encoder);
238
239 if (dig_port && dig_port->aux_ch == aux_ch)
240 return dig_port;
241 }
242
243 return NULL;
244 }
245
icl_aux_pw_to_phy(struct intel_display * display,const struct i915_power_well * power_well)246 static enum phy icl_aux_pw_to_phy(struct intel_display *display,
247 const struct i915_power_well *power_well)
248 {
249 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
250 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
251
252 /*
253 * FIXME should we care about the (VBT defined) dig_port->aux_ch
254 * relationship or should this be purely defined by the hardware layout?
255 * Currently if the port doesn't appear in the VBT, or if it's declared
256 * as HDMI-only and routed to a combo PHY, the encoder either won't be
257 * present at all or it will not have an aux_ch assigned.
258 */
259 return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
260 }
261
/*
 * Wait for the well's status bit to report enabled, or — on DG2 wells with
 * a fixed enable delay — just sleep for the documented settle time.
 * @timeout_expected suppresses the WARN for wells where an enable timeout
 * is a legitimate outcome (e.g. TBT AUX with the tunnel down).
 */
static void hsw_wait_for_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	/* Per-well override; default to 1ms when the desc leaves it 0. */
	int timeout = power_well->desc->enable_timeout ? : 1;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
		usleep_range(600, 1200);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(display, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
		drm_dbg_kms(display->drm, "%s power well enable timeout\n",
			    intel_power_well_name(power_well));

		/* An unexpected timeout indicates a HW or sequencing bug. */
		drm_WARN_ON(display->drm, !timeout_expected);
	}
}
290
hsw_power_well_requesters(struct intel_display * display,const struct i915_power_well_regs * regs,int pw_idx)291 static u32 hsw_power_well_requesters(struct intel_display *display,
292 const struct i915_power_well_regs *regs,
293 int pw_idx)
294 {
295 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
296 u32 ret;
297
298 ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0;
299 ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0;
300 if (regs->kvmr.reg)
301 ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0;
302 ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0;
303
304 return ret;
305 }
306
/*
 * Wait briefly for the well's status bit to clear; if another requester
 * (BIOS/KVMr/debug) is holding the well on, skip the wait and log which
 * one is responsible instead of stalling.
 */
static void hsw_wait_for_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* NOTE: both 'disabled' and 'reqs' are assigned inside the condition. */
	wait_for((disabled = !(intel_de_read(display, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(display->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    intel_power_well_name(power_well),
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
335
/*
 * Wait for the power gate's fuse distribution status, WARNing on timeout.
 * NOTE(review): the comment quotes 5us for PG0 but the wait passes 1 (ms
 * for intel_de_wait_for_set) for all PGs — presumably a conservative upper
 * bound; confirm against intel_de_wait_for_set() units.
 */
static void gen9_wait_for_power_well_fuses(struct intel_display *display,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(display->drm,
		    intel_de_wait_for_set(display, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
344
/*
 * Enable an hsw-style power well: wait for prerequisite fuse state, set the
 * driver request bit, wait for the status ack and the well's own fuse state,
 * then run the post-enable fixups (VGA, pipe IRQs). Step order follows the
 * Bspec enable sequence and must not be changed.
 */
static void hsw_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						  SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (display->platform.alderlake_p && pg == SKL_PG1)
			intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(display, SKL_PG0);
	}

	/* Set our request bit; HW starts powering up the well. */
	intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(display, power_well, false);

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						  SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(display, pg);
	}

	hsw_power_well_post_enable(display,
				   power_well->desc->irq_pipe_mask,
				   power_well->desc->has_vga);
}
388
/*
 * Disable an hsw-style power well: quiesce dependent pipe interrupts first,
 * then clear the driver request bit and wait for the well to power down
 * (unless another requester is holding it on).
 */
static void hsw_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	hsw_power_well_pre_disable(display,
				   power_well->desc->irq_pipe_mask);

	intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
	hsw_wait_for_power_well_disable(display, power_well);
}
401
intel_aux_ch_is_edp(struct intel_display * display,enum aux_ch aux_ch)402 static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
403 {
404 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
405
406 return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
407 }
408
409 static void
icl_combo_phy_aux_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)410 icl_combo_phy_aux_power_well_enable(struct intel_display *display,
411 struct i915_power_well *power_well)
412 {
413 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
414 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
415
416 drm_WARN_ON(display->drm, !display->platform.icelake);
417
418 intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
419
420 /*
421 * FIXME not sure if we should derive the PHY from the pw_idx, or
422 * from the VBT defined AUX_CH->DDI->PHY mapping.
423 */
424 intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
425 0, ICL_LANE_ENABLE_AUX);
426
427 hsw_wait_for_power_well_enable(display, power_well, false);
428
429 /* Display WA #1178: icl */
430 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
431 !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
432 intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
433 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
434 }
435
436 static void
icl_combo_phy_aux_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)437 icl_combo_phy_aux_power_well_disable(struct intel_display *display,
438 struct i915_power_well *power_well)
439 {
440 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
441 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
442
443 drm_WARN_ON(display->drm, !display->platform.icelake);
444
445 /*
446 * FIXME not sure if we should derive the PHY from the pw_idx, or
447 * from the VBT defined AUX_CH->DDI->PHY mapping.
448 */
449 intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
450 ICL_LANE_ENABLE_AUX, 0);
451
452 intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
453
454 hsw_wait_for_power_well_disable(display, power_well);
455 }
456
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/*
 * Debug-only assertion that the TypeC port holds its TC reference while its
 * AUX power well is toggled. ICL (display ver 11) legacy ports that need the
 * AUX well for TC-cold exit are exempt, since there the well is taken
 * precisely to get out of TC-cold.
 */
static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(display->drm, !dig_port))
		return;

	if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}

#else

/* No-op stub when runtime PM debugging is compiled out. */
static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif
481
482 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
483
icl_tc_cold_exit(struct intel_display * display)484 static void icl_tc_cold_exit(struct intel_display *display)
485 {
486 struct drm_i915_private *i915 = to_i915(display->drm);
487 int ret, tries = 0;
488
489 while (1) {
490 ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
491 250, 1);
492 if (ret != -EAGAIN || ++tries == 3)
493 break;
494 msleep(1);
495 }
496
497 /* Spec states that TC cold exit can take up to 1ms to complete */
498 if (!ret)
499 msleep(1);
500
501 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
502 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
503 "succeeded");
504 }
505
506 static void
icl_tc_phy_aux_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)507 icl_tc_phy_aux_power_well_enable(struct intel_display *display,
508 struct i915_power_well *power_well)
509 {
510 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
511 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
512 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
513 bool is_tbt = power_well->desc->is_tc_tbt;
514 bool timeout_expected;
515
516 icl_tc_port_assert_ref_held(display, power_well, dig_port);
517
518 intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
519 DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
520
521 intel_de_rmw(display, regs->driver,
522 0,
523 HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
524
525 /*
526 * An AUX timeout is expected if the TBT DP tunnel is down,
527 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
528 * exit sequence.
529 */
530 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
531 if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
532 icl_tc_cold_exit(display);
533
534 hsw_wait_for_power_well_enable(display, power_well, timeout_expected);
535
536 if (DISPLAY_VER(display) >= 12 && !is_tbt) {
537 enum tc_port tc_port;
538
539 tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
540
541 if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) &
542 DKL_CMN_UC_DW27_UC_HEALTH, 1))
543 drm_warn(display->drm,
544 "Timeout waiting TC uC health\n");
545 }
546 }
547
548 static void
icl_aux_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)549 icl_aux_power_well_enable(struct intel_display *display,
550 struct i915_power_well *power_well)
551 {
552 enum phy phy = icl_aux_pw_to_phy(display, power_well);
553
554 if (intel_phy_is_tc(display, phy))
555 return icl_tc_phy_aux_power_well_enable(display, power_well);
556 else if (display->platform.icelake)
557 return icl_combo_phy_aux_power_well_enable(display,
558 power_well);
559 else
560 return hsw_power_well_enable(display, power_well);
561 }
562
563 static void
icl_aux_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)564 icl_aux_power_well_disable(struct intel_display *display,
565 struct i915_power_well *power_well)
566 {
567 enum phy phy = icl_aux_pw_to_phy(display, power_well);
568
569 if (intel_phy_is_tc(display, phy))
570 return hsw_power_well_disable(display, power_well);
571 else if (display->platform.icelake)
572 return icl_combo_phy_aux_power_well_disable(display,
573 power_well);
574 else
575 return hsw_power_well_disable(display, power_well);
576 }
577
578 /*
579 * We should only use the power well if we explicitly asked the hardware to
580 * enable it, so check if it's enabled and also check if we've requested it to
581 * be enabled.
582 */
/*
 * Report whether the well is both enabled by HW and requested by us: only
 * count the well as enabled when the driver's request bit and the status
 * bit are set, so a well enabled behind our back isn't treated as ours.
 */
static bool hsw_power_well_enabled(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(display, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(display, regs->bios);

	return (val & mask) == mask;
}
607
/*
 * Sanity-check the preconditions for entering DC9: DC9 not already enabled,
 * DC5 disabled, power well 2 off and interrupts disabled. Violations only
 * WARN; the caller proceeds regardless.
 */
static void assert_can_enable_dc9(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
634
/*
 * Sanity-check the preconditions for exiting DC9: interrupts still disabled
 * and DC5 not enabled. Violations only WARN.
 */
static void assert_can_disable_dc9(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
654
/*
 * Write DC_STATE_EN, working around the DMC sometimes reverting the value:
 * keep rewriting until the readback matches @state for several consecutive
 * reads, up to 100 rewrites. Logs an error if the value never sticks.
 */
static void gen9_write_dc_state(struct intel_display *display,
				u32 state)
{
	int rewrites = 0;	/* total forced rewrites so far */
	int rereads = 0;	/* consecutive matching readbacks */
	u32 v;

	intel_de_write(display, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = intel_de_read(display, DC_STATE_EN);

		if (v != state) {
			intel_de_write(display, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;	/* restart the stability count */
		} else if (rereads++ > 5) {
			/* Value held stable across enough reads; done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(display->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(display->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}
693
gen9_dc_mask(struct intel_display * display)694 static u32 gen9_dc_mask(struct intel_display *display)
695 {
696 u32 mask;
697
698 mask = DC_STATE_EN_UPTO_DC5;
699
700 if (DISPLAY_VER(display) >= 12)
701 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
702 | DC_STATE_EN_DC9;
703 else if (DISPLAY_VER(display) == 11)
704 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
705 else if (display->platform.geminilake || display->platform.broxton)
706 mask |= DC_STATE_EN_DC9;
707 else
708 mask |= DC_STATE_EN_UPTO_DC6;
709
710 return mask;
711 }
712
gen9_sanitize_dc_state(struct intel_display * display)713 void gen9_sanitize_dc_state(struct intel_display *display)
714 {
715 struct i915_power_domains *power_domains = &display->power.domains;
716 u32 val;
717
718 if (!HAS_DISPLAY(display))
719 return;
720
721 val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display);
722
723 drm_dbg_kms(display->drm,
724 "Resetting DC state tracking from %02x to %02x\n",
725 power_domains->dc_state, val);
726 power_domains->dc_state = val;
727 }
728
/**
 * gen9_set_dc_state - set target display C power state
 * @display: display instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(display))
		return;

	/* Clamp the request to what this platform/config actually allows. */
	if (drm_WARN_ON_ONCE(display->drm,
			     state & ~power_domains->allowed_dc_mask))
		state &= power_domains->allowed_dc_mask;

	val = intel_de_read(display, DC_STATE_EN);
	mask = gen9_dc_mask(display);
	drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != power_domains->dc_state)
		drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			power_domains->dc_state, val & mask);

	/* Replace only the DC-state bits, preserving the rest of the reg. */
	val &= ~mask;
	val |= state;

	gen9_write_dc_state(display, val);

	power_domains->dc_state = val & mask;
}
782
tgl_enable_dc3co(struct intel_display * display)783 static void tgl_enable_dc3co(struct intel_display *display)
784 {
785 drm_dbg_kms(display->drm, "Enabling DC3CO\n");
786 gen9_set_dc_state(display, DC_STATE_EN_DC3CO);
787 }
788
/*
 * Exit DC3CO: clear the sticky status bit, request DC-state disable and
 * give the hardware its documented exit latency.
 */
static void tgl_disable_dc3co(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "Disabling DC3CO\n");
	/* The status bit is write-to-clear. */
	intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
	gen9_set_dc_state(display, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}
799
/*
 * Sanity-check the preconditions for DC5: all power wells at or above the
 * platform's limit (PW3 on display ver 12, PW2 otherwise) disabled, DC5 not
 * already enabled, an RPM wakelock held and DMC firmware loaded.
 */
static void assert_can_enable_dc5(struct intel_display *display)
{
	struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(display) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(display->drm,
		      intel_display_power_well_is_enabled(display, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(display);
}
823
/*
 * Enable DC5: verify preconditions, apply the SKL/KBL/CFL exit workaround,
 * arm the DMC wakelock and request the DC5 state.
 */
void gen9_enable_dc5(struct intel_display *display)
{
	assert_can_enable_dc5(display);

	drm_dbg_kms(display->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);

	gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
}
839
/*
 * Sanity-check the preconditions for DC6: the utility pin must not be in
 * PWM mode (backlight PWM would be lost), DC6 not already enabled, and DMC
 * firmware loaded.
 */
static void assert_can_enable_dc6(struct intel_display *display)
{
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, UTIL_PIN_CTL) &
		       (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
		      (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
		      "Utility pin enabled in PWM mode\n");
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(display);
}
854
/*
 * Enable DC6: verify preconditions, apply the SKL/KBL/CFL exit workaround,
 * arm the DMC wakelock and request the DC6 state.
 */
void skl_enable_dc6(struct intel_display *display)
{
	assert_can_enable_dc6(display);

	drm_dbg_kms(display->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);

	gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
}
870
/* Enter DC9, the deepest display-off state used during runtime suspend. */
void bxt_enable_dc9(struct intel_display *display)
{
	assert_can_enable_dc9(display);

	drm_dbg_kms(display->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is needed on BXT/GLK, because the PPS registers
	 * aren't always on, unlike with South Display Engine on PCH.
	 */
	if (display->platform.broxton || display->platform.geminilake)
		bxt_pps_reset_all(display);
	gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
884
/* Exit DC9 and reapply the PPS register unlock workaround afterwards. */
void bxt_disable_dc9(struct intel_display *display)
{
	assert_can_disable_dc9(display);

	drm_dbg_kms(display->drm, "Disabling DC9\n");

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(display);
}
895
/*
 * Sync the HSW-style power well request bits with the current SW state:
 * if the BIOS left its request bit set, move the request over to the
 * driver's control register before clearing the BIOS one, so the well
 * never glitches off in between.
 */
static void hsw_power_well_sync_hw(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(display, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(display, regs->driver);

		/* Set the driver request before dropping the BIOS request */
		if (!(drv_req & mask))
			intel_de_write(display, regs->driver, drv_req | mask);
		intel_de_write(display, regs->bios, bios_req & ~mask);
	}
}
913
bxt_dpio_cmn_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)914 static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
915 struct i915_power_well *power_well)
916 {
917 bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy);
918 }
919
bxt_dpio_cmn_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)920 static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
921 struct i915_power_well *power_well)
922 {
923 bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy);
924 }
925
bxt_dpio_cmn_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)926 static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
927 struct i915_power_well *power_well)
928 {
929 return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy);
930 }
931
bxt_verify_dpio_phy_power_wells(struct intel_display * display)932 static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
933 {
934 struct i915_power_well *power_well;
935
936 power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A);
937 if (intel_power_well_refcount(power_well) > 0)
938 bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
939
940 power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
941 if (intel_power_well_refcount(power_well) > 0)
942 bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
943
944 if (display->platform.geminilake) {
945 power_well = lookup_power_well(display,
946 GLK_DISP_PW_DPIO_CMN_C);
947 if (intel_power_well_refcount(power_well) > 0)
948 bxt_dpio_phy_verify_state(display,
949 i915_power_well_instance(power_well)->bxt.phy);
950 }
951 }
952
gen9_dc_off_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)953 static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
954 struct i915_power_well *power_well)
955 {
956 return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
957 (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
958 }
959
gen9_assert_dbuf_enabled(struct intel_display * display)960 static void gen9_assert_dbuf_enabled(struct intel_display *display)
961 {
962 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display);
963 u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
964
965 drm_WARN(display->drm,
966 hw_enabled_dbuf_slices != enabled_dbuf_slices,
967 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
968 hw_enabled_dbuf_slices,
969 enabled_dbuf_slices);
970 }
971
/*
 * Exit whatever DC state is currently committed and verify that the HW
 * state (CDCLK, DBuf slices, DPIO PHYs, combo PHYs) survived the DC
 * transition intact.
 */
void gen9_disable_dc_states(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_cdclk_config cdclk_config = {};
	u32 old_state = power_domains->dc_state;

	/* DC3CO exit has its own dedicated sequence */
	if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(display);
		return;
	}

	if (HAS_DISPLAY(display)) {
		/* Hold the DMC wakelock (register-access free variant) around
		 * the DC state change */
		intel_dmc_wl_get_noreg(display);
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		intel_dmc_wl_put_noreg(display);
	} else {
		/* No display: nothing further to verify below */
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		return;
	}

	if (old_state == DC_STATE_EN_UPTO_DC5 ||
	    old_state == DC_STATE_EN_UPTO_DC6)
		intel_dmc_wl_disable(display);

	intel_cdclk_get_cdclk(display, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(display->drm,
		    intel_cdclk_clock_changed(&display->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(display);

	if (display->platform.geminilake || display->platform.broxton)
		bxt_verify_dpio_phy_power_wells(display);

	if (DISPLAY_VER(display) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(display);
}
1015
/* Enabling the "DC off" well means leaving all DC states. */
static void gen9_dc_off_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(display);
}
1021
/*
 * Disabling the "DC off" well means entering the currently selected
 * target DC state, provided DMC firmware is available to handle it.
 */
static void gen9_dc_off_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	/* Without DMC firmware no DC state can be entered */
	if (!intel_dmc_has_payload(display))
		return;

	/*
	 * No default case: target_dc_state is expected to be one of the
	 * states below (or DC_STATE_DISABLE, which needs no action) —
	 * NOTE(review): confirm against where target_dc_state is set.
	 */
	switch (power_domains->target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(display);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(display);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(display);
		break;
	}
}
1042
/* No HW state to sync for always-on style wells. */
static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
					 struct i915_power_well *power_well)
{
}
1047
/* Always-on wells cannot be enabled or disabled. */
static void i9xx_always_on_power_well_noop(struct intel_display *display,
					   struct i915_power_well *power_well)
{
}
1052
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
					      struct i915_power_well *power_well)
{
	return true;
}
1058
i830_pipes_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)1059 static void i830_pipes_power_well_enable(struct intel_display *display,
1060 struct i915_power_well *power_well)
1061 {
1062 if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
1063 i830_enable_pipe(display, PIPE_A);
1064 if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
1065 i830_enable_pipe(display, PIPE_B);
1066 }
1067
/* Turn off both i830 pipes; B is shut down before A. */
static void i830_pipes_power_well_disable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(display, PIPE_B);
	i830_disable_pipe(display, PIPE_A);
}
1074
i830_pipes_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)1075 static bool i830_pipes_power_well_enabled(struct intel_display *display,
1076 struct i915_power_well *power_well)
1077 {
1078 return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
1079 intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
1080 }
1081
i830_pipes_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)1082 static void i830_pipes_power_well_sync_hw(struct intel_display *display,
1083 struct i915_power_well *power_well)
1084 {
1085 if (intel_power_well_refcount(power_well) > 0)
1086 i830_pipes_power_well_enable(display, power_well);
1087 else
1088 i830_pipes_power_well_disable(display, power_well);
1089 }
1090
/*
 * Set a VLV power well on or off via the punit PWRGT registers and wait
 * for the status register to confirm the transition.
 */
static void vlv_set_power_well(struct intel_display *display,
			       struct i915_power_well *power_well, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
		PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

/* True once the punit status reflects the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	/* 100 ms timeout for the punit to ack the state change */
	if (wait_for(COND, 100))
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1128
/* Enable a plain VLV punit-controlled power well. */
static void vlv_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, true);
}
1134
/* Disable a plain VLV punit-controlled power well. */
static void vlv_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, false);
}
1140
/*
 * Read back a VLV power well's state from the punit, warning if the
 * status is neither fully on nor fully gated, or if the control
 * register disagrees with the status (someone else is poking at it).
 */
static bool vlv_power_well_enabled(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(display->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1177
/*
 * Program the VLV display clock gating, arbiter and rawclk registers
 * that must be (re)initialized whenever the display power well comes up.
 */
static void vlv_init_display_clock_gating(struct intel_display *display)
{
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	intel_de_rmw(display, DSPCLK_GATE_D(display),
		     ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(display, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(display, CBR1_VLV, 0);

	/* rawclk must be known to program the HW rawclk frequency (in kHz) */
	drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
	intel_de_write(display, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
					 1000));
}
1201
/*
 * Bring up everything that depends on the VLV/CHV display power well:
 * CRI/ref clocks, clock gating, display IRQs and - outside of driver
 * init - hotplug, CRT ADPA, VGA and PPS state.
 */
static void vlv_display_power_well_init(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(display, pipe) {
		u32 val = intel_de_read(display, DPLL(display, pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(display, DPLL(display, pipe), val);
	}

	vlv_init_display_clock_gating(display);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (display->power.domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(display);

	intel_pps_unlock_regs_wa(display);
}
1252
/*
 * Tear down everything that depends on the VLV/CHV display power well
 * before it is switched off: display IRQs, PPS state and HPD polling.
 */
static void vlv_display_power_well_deinit(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	vlv_pps_reset_all(display);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!display->drm->dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}
1270
/* Power on the display well, then (re)init everything that depends on it. */
static void vlv_display_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, true);

	vlv_display_power_well_init(display);
}
1278
/* Tear down dependent state first, then power off the display well. */
static void vlv_display_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(display);

	vlv_set_power_well(display, power_well, false);
}
1286
/*
 * Power on the VLV DPIO common lane well and de-assert the common lane
 * reset afterwards.
 */
static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(display, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}
1308
/*
 * Assert the DPIO common lane reset and power the well off. All PLLs
 * must already be disabled at this point.
 */
static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(display, pipe)
		assert_pll_disabled(display, pipe);

	/* Assert common reset */
	intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);

	vlv_set_power_well(display, power_well, false);
}
1322
/* True iff all of @bits are set in @val */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Compute the PHY status we expect from the current chv_phy_control
 * value and the common lane well states, then verify DISPLAY_PHY_STATUS
 * matches it (with a 10 ms settle time for PHY calibration).
 */
static void assert_chv_phy_status(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = display->power.chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!display->power.chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (intel_power_well_is_enabled(display, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: 0x3 covers lanes 0-1, 0xc covers lanes 2-3 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (intel_power_well_is_enabled(display, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait(display, DISPLAY_PHY_STATUS,
			  phy_status_mask, phy_status, 10))
		drm_err(display->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, display->power.chv_phy_control);
}

#undef BITS_SET
1430
/*
 * Power on a CHV DPIO common lane well (PHY0 = ports B/C, PHY1 = port D),
 * wait for phypwrgood, enable dynamic power down in the PHY and finally
 * de-assert the common lane reset via DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	u32 tmp;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC)
		phy = DPIO_PHY0;
	else
		phy = DPIO_PHY1;

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(display, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(display->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	assert_chv_phy_status(display);
}
1493
/*
 * Assert the common lane reset for a CHV DPIO PHY and power its well
 * off. The PLLs fed by the PHY must already be disabled.
 */
static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(display, PIPE_A);
		assert_pll_disabled(display, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(display, PIPE_C);
	}

	display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	vlv_set_power_well(display, power_well, false);

	drm_dbg_kms(display->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	display->power.chv_phy_assert[phy] = true;

	assert_chv_phy_status(display);
}
1528
/*
 * Verify that the PHY lane power down status bits match what the
 * current override setting and lane @mask imply.
 */
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = CHV_CMN_DW0_CH0;
	else
		reg = CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, phy, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some but not all lanes enabled: any, but not all, powered down */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
				       DPIO_ALLDL_POWERDOWN_CH0, val);
	else
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
				       DPIO_ALLDL_POWERDOWN_CH1, val);

	drm_WARN(display->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}
1593
/*
 * Set or clear the lane power down override enable for a PHY channel.
 * Returns the previous override state so callers can restore it.
 */
bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Nothing to do if the override is already in the requested state */
	if (override == was_override)
		goto out;

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, display->power.chv_phy_control);

	assert_chv_phy_status(display);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1626
/*
 * Program the per-lane power down override mask (and its enable bit)
 * for the PHY channel driving @encoder, then sanity check the result.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct intel_display *display = to_intel_display(encoder);
	struct i915_power_domains *power_domains = &display->power.domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one */
	display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, display->power.chv_phy_control);

	assert_chv_phy_status(display);

	assert_chv_phy_powergate(display, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1658
/*
 * Read back the CHV pipe A power well state from the punit DSPSSPM
 * register, warning on transient or unexpected states.
 */
static bool chv_pipe_power_well_enabled(struct intel_display *display,
					struct i915_power_well *power_well)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	/* The control field sits 16 bits below the status field */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(display->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1689
/*
 * Set the CHV pipe A power well on or off via the punit DSPSSPM
 * register and wait for the status field to confirm the transition.
 */
static void chv_set_pipe_power_well(struct intel_display *display,
				    struct i915_power_well *power_well,
				    bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

/* True once the punit status reflects the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	/* 100 ms timeout for the punit to ack the state change */
	if (wait_for(COND, 100))
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1725
chv_pipe_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)1726 static void chv_pipe_power_well_sync_hw(struct intel_display *display,
1727 struct i915_power_well *power_well)
1728 {
1729 intel_de_write(display, DISPLAY_PHY_CONTROL,
1730 display->power.chv_phy_control);
1731 }
1732
/*
 * Enable the CHV pipe power well, then (re)initialize the display state
 * that depends on it. Ordering matters: the well must be powered before
 * the init step touches it.
 */
static void chv_pipe_power_well_enable(struct intel_display *display,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(display, power_well, true);

	vlv_display_power_well_init(display);
}
1740
/*
 * Tear down the display state depending on the CHV pipe power well, then
 * power the well down. Mirror image of chv_pipe_power_well_enable().
 */
static void chv_pipe_power_well_disable(struct intel_display *display,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(display);

	chv_set_pipe_power_well(display, power_well, false);
}
1748
1749 static void
tgl_tc_cold_request(struct intel_display * display,bool block)1750 tgl_tc_cold_request(struct intel_display *display, bool block)
1751 {
1752 struct drm_i915_private *i915 = to_i915(display->drm);
1753 u8 tries = 0;
1754 int ret;
1755
1756 while (1) {
1757 u32 low_val;
1758 u32 high_val = 0;
1759
1760 if (block)
1761 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
1762 else
1763 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
1764
1765 /*
1766 * Spec states that we should timeout the request after 200us
1767 * but the function below will timeout after 500us
1768 */
1769 ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
1770 if (ret == 0) {
1771 if (block &&
1772 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
1773 ret = -EIO;
1774 else
1775 break;
1776 }
1777
1778 if (++tries == 3)
1779 break;
1780
1781 msleep(1);
1782 }
1783
1784 if (ret)
1785 drm_err(&i915->drm, "TC cold %sblock failed\n",
1786 block ? "" : "un");
1787 else
1788 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
1789 block ? "" : "un");
1790 }
1791
/* Enabling this power well blocks TC cold entry via pcode. */
static void
tgl_tc_cold_off_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(display, true);
}
1798
/* Disabling this power well unblocks TC cold entry via pcode. */
static void
tgl_tc_cold_off_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(display, false);
}
1805
1806 static void
tgl_tc_cold_off_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)1807 tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display,
1808 struct i915_power_well *power_well)
1809 {
1810 if (intel_power_well_refcount(power_well) > 0)
1811 tgl_tc_cold_off_power_well_enable(display, power_well);
1812 else
1813 tgl_tc_cold_off_power_well_disable(display, power_well);
1814 }
1815
static bool
tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation, but there is no way to just read
	 * the state back from PCODE, so return the refcount instead to
	 * avoid state mismatch errors.
	 */
	return intel_power_well_refcount(power_well);
}
1826
/*
 * Power up the AUX channel associated with this power well by raising
 * the power request bit and waiting the fixed settle time.
 */
static void xelpdp_aux_power_well_enable(struct intel_display *display,
					 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
	enum phy phy = icl_aux_pw_to_phy(display, power_well);

	/* On Type-C PHYs, sanity-check that the required port ref is held. */
	if (intel_phy_is_tc(display, phy))
		icl_tc_port_assert_ref_held(display, power_well,
					    aux_ch_to_digital_port(display, aux_ch));

	intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);

	/*
	 * The power status flag cannot be used to determine whether aux
	 * power wells have finished powering up. Instead we're
	 * expected to just wait a fixed 600us after raising the request
	 * bit.
	 */
	usleep_range(600, 1200);
}
1849
xelpdp_aux_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)1850 static void xelpdp_aux_power_well_disable(struct intel_display *display,
1851 struct i915_power_well *power_well)
1852 {
1853 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1854
1855 intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
1856 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
1857 0);
1858 usleep_range(10, 30);
1859 }
1860
xelpdp_aux_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)1861 static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
1862 struct i915_power_well *power_well)
1863 {
1864 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1865
1866 return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
1867 XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
1868 }
1869
xe2lpd_pica_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)1870 static void xe2lpd_pica_power_well_enable(struct intel_display *display,
1871 struct i915_power_well *power_well)
1872 {
1873 intel_de_write(display, XE2LPD_PICA_PW_CTL,
1874 XE2LPD_PICA_CTL_POWER_REQUEST);
1875
1876 if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
1877 XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
1878 drm_dbg_kms(display->drm, "pica power well enable timeout\n");
1879
1880 drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
1881 }
1882 }
1883
xe2lpd_pica_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)1884 static void xe2lpd_pica_power_well_disable(struct intel_display *display,
1885 struct i915_power_well *power_well)
1886 {
1887 intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
1888
1889 if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
1890 XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
1891 drm_dbg_kms(display->drm, "pica power well disable timeout\n");
1892
1893 drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
1894 }
1895 }
1896
xe2lpd_pica_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)1897 static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
1898 struct i915_power_well *power_well)
1899 {
1900 return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
1901 XE2LPD_PICA_CTL_POWER_STATUS;
1902 }
1903
/* Always-on well: every callback is a no-op; is_enabled always true. */
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe power well, controlled through the Punit (see helpers above). */
const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane well. */
const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* i830 pipes well. */
const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* HSW+ power well control registers, shared by the hsw_* callbacks. */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};

const struct i915_power_well_ops hsw_power_well_ops = {
	.regs = &hsw_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* GEN9+ "DC off" well: enabling it disallows DC states. */
const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* BXT DPIO common lane well. */
const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

/* VLV display power well. */
const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well. */
const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV DPIO well. */
const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* ICL+ AUX power well control registers (no kvmr register on this set). */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};

const struct i915_power_well_ops icl_aux_power_well_ops = {
	.regs = &icl_aux_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* ICL+ DDI power well control registers (no kvmr register on this set). */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};

const struct i915_power_well_ops icl_ddi_power_well_ops = {
	.regs = &icl_ddi_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* TGL TC-cold blocking "well", implemented via pcode requests. */
const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

/* XELPDP per-AUX-channel power wells. */
const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xelpdp_aux_power_well_enable,
	.disable = xelpdp_aux_power_well_disable,
	.is_enabled = xelpdp_aux_power_well_enabled,
};

/* Xe2_LPD PICA power well. */
const struct i915_power_well_ops xe2lpd_pica_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xe2lpd_pica_power_well_enable,
	.disable = xe2lpd_pica_power_well_disable,
	.is_enabled = xe2lpd_pica_power_well_enabled,
};
2030