1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include <linux/string_helpers.h>
7
8 #include <drm/drm_fixed.h>
9
10 #include "i915_drv.h"
11 #include "i915_reg.h"
12 #include "intel_atomic.h"
13 #include "intel_crtc.h"
14 #include "intel_ddi.h"
15 #include "intel_de.h"
16 #include "intel_dp.h"
17 #include "intel_display_types.h"
18 #include "intel_fdi.h"
19 #include "intel_fdi_regs.h"
20 #include "intel_link_bw.h"
21
/*
 * Platform-specific FDI vtable: fdi_link_train points at the
 * ILK/SNB/IVB link training implementation for this device.
 */
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};
26
/*
 * Warn if the FDI TX side of @pipe is not in the expected enabled/disabled
 * @state. On DDI platforms the transcoder function enable bit stands in for
 * the (non-existent) FDI_TX register.
 */
static void assert_fdi_tx(struct intel_display *display,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(display)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(display,
					  TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI TX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}
49
/* Warn unless the FDI TX side of @pipe is enabled. */
void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx(display, pipe, true);
}
54
/* Warn unless the FDI TX side of @pipe is disabled. */
void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx(display, pipe, false);
}
59
/*
 * Warn if the PCH-side FDI RX of @pipe is not in the expected
 * enabled/disabled @state.
 */
static void assert_fdi_rx(struct intel_display *display,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI RX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}
70
/* Warn unless the FDI RX side of @pipe is enabled. */
void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx(display, pipe, true);
}
75
/* Warn unless the FDI RX side of @pipe is disabled. */
void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx(display, pipe, false);
}
80
/*
 * Warn unless the FDI TX PLL feeding @pipe is enabled. No-op on platforms
 * where the PLL is always on (ILK) or managed by the DDI port (HSW+).
 */
void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (display->platform.ironlake)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(display))
		return;

	cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, !cur_state,
				 "FDI TX PLL assertion failure, should be active but is disabled\n");
}
97
/*
 * Warn if the FDI RX PLL of @pipe is not in the expected enabled/disabled
 * @state.
 */
static void assert_fdi_rx_pll(struct intel_display *display,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI RX PLL assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}
108
/* Warn unless the FDI RX PLL of @pipe is enabled. */
void assert_fdi_rx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx_pll(display, pipe, true);
}
113
/* Warn unless the FDI RX PLL of @pipe is disabled. */
void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx_pll(display, pipe, false);
}
118
/* Dispatch to the platform-specific FDI link training implementation. */
void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);

	display->funcs.fdi->fdi_link_train(crtc, crtc_state);
}
126
127 /**
128 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
129 * @state: intel atomic state
130 *
131 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
132 * known to affect the available FDI BW for the former CRTC. In practice this
133 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
134 * CRTC C) and CRTC C is getting disabled.
135 *
136 * Returns 0 in case of success, or a negative error code otherwise.
137 */
intel_fdi_add_affected_crtcs(struct intel_atomic_state * state)138 int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
139 {
140 struct intel_display *display = to_intel_display(state);
141 const struct intel_crtc_state *old_crtc_state;
142 const struct intel_crtc_state *new_crtc_state;
143 struct intel_crtc *crtc;
144
145 if (!display->platform.ivybridge || INTEL_NUM_PIPES(display) != 3)
146 return 0;
147
148 crtc = intel_crtc_for_pipe(display, PIPE_C);
149 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
150 if (!new_crtc_state)
151 return 0;
152
153 if (!intel_crtc_needs_modeset(new_crtc_state))
154 return 0;
155
156 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
157 if (!old_crtc_state->fdi_lanes)
158 return 0;
159
160 crtc = intel_crtc_for_pipe(display, PIPE_B);
161 new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
162 if (IS_ERR(new_crtc_state))
163 return PTR_ERR(new_crtc_state);
164
165 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
166 if (!old_crtc_state->fdi_lanes)
167 return 0;
168
169 return intel_modeset_pipes_in_mask_early(state,
170 "FDI link BW decrease on pipe C",
171 BIT(PIPE_B));
172 }
173
174 /* units of 100MHz */
pipe_required_fdi_lanes(struct intel_crtc_state * crtc_state)175 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
176 {
177 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
178 return crtc_state->fdi_lanes;
179
180 return 0;
181 }
182
/*
 * Validate the FDI lane count for @pipe against per-platform limits and the
 * IVB 3-pipe lane sharing constraints (pipes B and C share 4 lanes).
 *
 * On failure returns -EINVAL and sets *@pipe_to_reduce to the pipe whose
 * link bpp should be lowered to resolve the conflict (usually @pipe itself,
 * but pipe B when it hogs the lanes pipe C needs). May also return an error
 * from acquiring the other pipe's atomic state.
 */
static int ilk_check_fdi_lanes(struct intel_display *display, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	/* Default culprit: the pipe being checked. */
	*pipe_to_reduce = pipe;

	drm_dbg_kms(display->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(display->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (display->platform.haswell || display->platform.broadwell) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(display->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(display) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A has dedicated lanes. */
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* B wants >2 lanes: only OK if pipe C is not using FDI. */
		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(display->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(display->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* C can only get lanes if pipe B uses at most 2. */
		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(display->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			/* Pipe B must give up BW, not pipe C. */
			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}
266
/*
 * Cache the FDI PLL frequency in display->fdi.pll_freq.
 *
 * ILK derives it from the BIOS-programmed feedback divider; SNB/IVB run at
 * a fixed 270000 (presumably kHz, matching the mode clock units used in
 * ilk_fdi_compute_config() -- confirm against Bspec). Other platforms are
 * left untouched.
 */
void intel_fdi_pll_freq_update(struct intel_display *display)
{
	if (display->platform.ironlake) {
		u32 fdi_pll_clk;

		fdi_pll_clk = intel_de_read(display, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		display->fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (display->platform.sandybridge || display->platform.ivybridge) {
		display->fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(display->drm, "FDI PLL freq=%d\n", display->fdi.pll_freq);
}
283
intel_fdi_link_freq(struct intel_display * display,const struct intel_crtc_state * pipe_config)284 int intel_fdi_link_freq(struct intel_display *display,
285 const struct intel_crtc_state *pipe_config)
286 {
287 if (HAS_DDI(display))
288 return pipe_config->port_clock; /* SPLL */
289 else
290 return display->fdi.pll_freq;
291 }
292
293 /**
294 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
295 * @crtc_state: the crtc state
296 *
297 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
298 * call this function during state computation in the simple case where the
299 * link bpp will always match the pipe bpp. This is the case for all non-DP
300 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
301 * of DSC compression.
302 *
303 * Returns %true in case of success, %false if pipe bpp would need to be
304 * reduced below its valid range.
305 */
intel_fdi_compute_pipe_bpp(struct intel_crtc_state * crtc_state)306 bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
307 {
308 int pipe_bpp = min(crtc_state->pipe_bpp,
309 fxp_q4_to_int(crtc_state->max_link_bpp_x16));
310
311 pipe_bpp = rounddown(pipe_bpp, 2 * 3);
312
313 if (pipe_bpp < 6 * 3)
314 return false;
315
316 crtc_state->pipe_bpp = pipe_bpp;
317
318 return true;
319 }
320
/*
 * Compute the FDI lane count and M/N link values for @pipe_config based on
 * the adjusted mode clock, pipe bpp and the FDI link frequency. Always
 * returns 0.
 */
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(display, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	/* Minimum lane count that can carry the dotclock at this bpp. */
	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	/* FDI carries no FEC, hence the false overhead argument. */
	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}
352
intel_fdi_atomic_check_bw(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_crtc_state * pipe_config,struct intel_link_bw_limits * limits)353 static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
354 struct intel_crtc *crtc,
355 struct intel_crtc_state *pipe_config,
356 struct intel_link_bw_limits *limits)
357 {
358 struct intel_display *display = to_intel_display(crtc);
359 enum pipe pipe_to_reduce;
360 int ret;
361
362 ret = ilk_check_fdi_lanes(display, crtc->pipe, pipe_config,
363 &pipe_to_reduce);
364 if (ret != -EINVAL)
365 return ret;
366
367 ret = intel_link_bw_reduce_bpp(state, limits,
368 BIT(pipe_to_reduce),
369 "FDI link BW");
370
371 return ret ? : -EAGAIN;
372 }
373
374 /**
375 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
376 * @state: intel atomic state
377 * @limits: link BW limits
378 *
379 * Check the link configuration for all modeset FDI outputs. If the
380 * configuration is invalid @limits will be updated if possible to
381 * reduce the total BW, after which the configuration for all CRTCs in
382 * @state must be recomputed with the updated @limits.
383 *
384 * Returns:
385 * - 0 if the configuration is valid
386 * - %-EAGAIN, if the configuration is invalid and @limits got updated
387 * with fallback values with which the configuration of all CRTCs
388 * in @state must be recomputed
389 * - Other negative error, if the configuration is invalid without a
390 * fallback possibility, or the check failed for another reason
391 */
intel_fdi_atomic_check_link(struct intel_atomic_state * state,struct intel_link_bw_limits * limits)392 int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
393 struct intel_link_bw_limits *limits)
394 {
395 struct intel_crtc *crtc;
396 struct intel_crtc_state *crtc_state;
397 int i;
398
399 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
400 int ret;
401
402 if (!crtc_state->has_pch_encoder ||
403 !intel_crtc_needs_modeset(crtc_state) ||
404 !crtc_state->hw.enable)
405 continue;
406
407 ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
408 if (ret)
409 return ret;
410 }
411
412 return 0;
413 }
414
/*
 * Set or clear the SOUTH_CHICKEN1 FDI B/C lane bifurcation bit. The bit may
 * only be flipped while both FDI RX B and C are disabled (WARNed below).
 * No-op if the bit already has the requested value.
 */
static void cpt_set_fdi_bc_bifurcation(struct intel_display *display, bool enable)
{
	u32 temp;

	temp = intel_de_read(display, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(display->drm,
		    intel_de_read(display, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(display->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(display, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(display, SOUTH_CHICKEN1);
}
439
ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state * crtc_state)440 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
441 {
442 struct intel_display *display = to_intel_display(crtc_state);
443 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
444
445 switch (crtc->pipe) {
446 case PIPE_A:
447 break;
448 case PIPE_B:
449 if (crtc_state->fdi_lanes > 2)
450 cpt_set_fdi_bc_bifurcation(display, false);
451 else
452 cpt_set_fdi_bc_bifurcation(display, true);
453
454 break;
455 case PIPE_C:
456 cpt_set_fdi_bc_bifurcation(display, true);
457
458 break;
459 default:
460 MISSING_CASE(crtc->pipe);
461 }
462 }
463
/*
 * Switch the FDI TX and RX of @crtc from training patterns to normal pixel
 * transmission (with enhanced framing), after link training has completed.
 */
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (display->platform.ivybridge) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(display, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses a separate pattern field on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(display, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(display, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (display->platform.ivybridge)
		intel_de_rmw(display, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
503
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(display, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);
	intel_de_read(display, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(display, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(display, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(display, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(display, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; sticky IIR bits are cleared by writing 1. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(display, reg);
		drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(display->drm, "FDI train 1 done.\n");
			intel_de_write(display, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(display->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(display, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(display, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(display, FDI_RX_CTL(pipe));
	udelay(150);

	/* Poll for symbol lock to conclude pattern 2 training. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(display, reg);
		drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(display, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(display->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(display->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(display->drm, "FDI train done\n");

}
597
/*
 * FDI TX voltage-swing / pre-emphasis settings tried in order during
 * SNB and IVB (manual) link training.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
604
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(display, reg, temp | FDI_TX_ENABLE);

	intel_de_write(display, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses a separate pattern field on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(display, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* Step through vswing/pre-emphasis levels until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(display, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Sticky IIR bit: clear by writing 1. */
				intel_de_write(display, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(display->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (display->platform.sandybridge) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(display, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* Again step through levels, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(display, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(display, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(display->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(display->drm, "FDI train done.\n");
}
740
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	drm_dbg_kms(display->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(display, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(display, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(display, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(display, reg, temp | FDI_TX_ENABLE);

		intel_de_write(display, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(display, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(display, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* Re-read in case lock arrived just after the read. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(display, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(display, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(display->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(display, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(display, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(display, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(display, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(display->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(display->drm, "FDI train done.\n");
}
868
/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	/* Only needed for the number of buffer translation entries. */
	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(display, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = display->fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(display, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(display, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(display, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(display, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(display->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(display->drm, "FDI link training failed!\n");
			break;
		}

		/* This attempt failed: tear everything down and retry. */
		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(display, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(display, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(display, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(display, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}
1001
/*
 * hsw_fdi_disable - tear down the FDI link on HSW
 * @encoder: DDI encoder feeding FDI (DDI E / port E on HSW)
 *
 * Disables the PCH FDI receiver and the DDI E buffer, drops the port
 * clock, powers down the RX lanes, switches the receiver back from
 * PCDclk to rawclk and finally stops the RX PLL.  The steps follow the
 * Bspec mode-set disable sequence; see the note below about the
 * FDI_RX_ENABLE ordering.
 */
void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	/* the DDI buffer must be fully idle before the clock can be removed */
	intel_wait_ddi_buf_idle(display, PORT_E);
	intel_ddi_disable_clock(encoder);
	/* power down both RX lanes (value 2 == powered down) */
	intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	/* back to rawclk, then stop the RX PLL - two separate writes */
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
1022
/*
 * ilk_fdi_pll_enable - enable the FDI RX and TX PLLs
 * @crtc_state: state of the crtc whose FDI link is being brought up
 *
 * Enables the PCH FDI receiver PLL with the lane count and BPC matching
 * the pipe's TRANSCONF, switches the receiver from rawclk to PCDclk and
 * then ensures the CPU FDI transmitter PLL is running.  Each PLL enable
 * is followed by a posting read plus a fixed warmup delay.
 */
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	/* 0x7 << 16 is the FDI RX BPC field; it mirrors TRANSCONF BPC below */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(display, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(display, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(display, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(display, reg);
		udelay(100);
	}
}
1057
ilk_fdi_pll_disable(struct intel_crtc * crtc)1058 void ilk_fdi_pll_disable(struct intel_crtc *crtc)
1059 {
1060 struct intel_display *display = to_intel_display(crtc);
1061 enum pipe pipe = crtc->pipe;
1062
1063 /* Switch from PCDclk to Rawclk */
1064 intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
1065
1066 /* Disable CPU FDI TX PLL */
1067 intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
1068 intel_de_posting_read(display, FDI_TX_CTL(pipe));
1069 udelay(100);
1070
1071 /* Wait for the clocks to turn off. */
1072 intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
1073 intel_de_posting_read(display, FDI_RX_CTL(pipe));
1074 udelay(100);
1075 }
1076
/*
 * ilk_fdi_disable - disable the FDI link
 * @crtc: the crtc whose FDI link is being disabled
 *
 * Disables the CPU FDI transmitter and the PCH FDI receiver, applies the
 * IBX clock-pointer workaround, and leaves both sides parked on training
 * pattern 1 (the CPT PCH uses its own pattern encoding) with the RX BPC
 * kept in sync with TRANSCONF.
 */
void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(display, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	/* 0x7 << 16 is the FDI RX BPC field, refreshed from TRANSCONF */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(display, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(display, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT encodes the RX training pattern differently */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(100);
}
1124
/* FDI link training hooks for Ironlake */
static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};
1128
/* FDI link training hooks for Sandybridge (gen6) */
static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};
1132
/* FDI link training hooks for Ivybridge (manual training) */
static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};
1136
1137 void
intel_fdi_init_hook(struct intel_display * display)1138 intel_fdi_init_hook(struct intel_display *display)
1139 {
1140 if (display->platform.ironlake) {
1141 display->funcs.fdi = &ilk_funcs;
1142 } else if (display->platform.sandybridge) {
1143 display->funcs.fdi = &gen6_funcs;
1144 } else if (display->platform.ivybridge) {
1145 /* FIXME: detect B0+ stepping and use auto training */
1146 display->funcs.fdi = &ivb_funcs;
1147 }
1148 }
1149