1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8
9 #include <drm/drm_print.h>
10
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_regs.h"
17 #include "intel_display_types.h"
18 #include "intel_dpio_phy.h"
19 #include "intel_dpll.h"
20 #include "intel_lvds.h"
21 #include "intel_lvds_regs.h"
22 #include "intel_panel.h"
23 #include "intel_pps.h"
24 #include "intel_snps_phy.h"
25 #include "vlv_dpio_phy_regs.h"
26 #include "vlv_sideband.h"
27
28 struct intel_dpll_global_funcs {
29 int (*crtc_compute_clock)(struct intel_atomic_state *state,
30 struct intel_crtc *crtc);
31 int (*crtc_get_dpll)(struct intel_atomic_state *state,
32 struct intel_crtc *crtc);
33 };
34
35 struct intel_limit {
36 struct {
37 int min, max;
38 } dot, vco, n, m, m1, m2, p, p1;
39
40 struct {
41 int dot_limit;
42 int p2_slow, p2_fast;
43 } p2;
44 };
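/*
 * Note: all dot/vco frequencies in the limit tables below are in kHz.
 * n/m1/m2/p1 bound the raw divider values, while m/vco/p/dot bound the
 * derived values computed by the *_calc_dpll_params() helpers further down.
 * p2.dot_limit is the dot clock threshold used by i9xx_select_p2_div():
 * for non-LVDS outputs a target below dot_limit selects p2_slow, otherwise
 * p2_fast; for LVDS the choice follows single vs. dual channel instead.
 */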
45 static const struct intel_limit intel_limits_i8xx_dac = {
46 .dot = { .min = 25000, .max = 350000 },
47 .vco = { .min = 908000, .max = 1512000 },
48 .n = { .min = 2, .max = 16 },
49 .m = { .min = 96, .max = 140 },
50 .m1 = { .min = 18, .max = 26 },
51 .m2 = { .min = 6, .max = 16 },
52 .p = { .min = 4, .max = 128 },
53 .p1 = { .min = 2, .max = 33 },
54 .p2 = { .dot_limit = 165000,
55 .p2_slow = 4, .p2_fast = 2 },
56 };
57
58 static const struct intel_limit intel_limits_i8xx_dvo = {
59 .dot = { .min = 25000, .max = 350000 },
60 .vco = { .min = 908000, .max = 1512000 },
61 .n = { .min = 2, .max = 16 },
62 .m = { .min = 96, .max = 140 },
63 .m1 = { .min = 18, .max = 26 },
64 .m2 = { .min = 6, .max = 16 },
65 .p = { .min = 4, .max = 128 },
66 .p1 = { .min = 2, .max = 33 },
67 .p2 = { .dot_limit = 165000,
68 .p2_slow = 4, .p2_fast = 4 },
69 };
70
71 static const struct intel_limit intel_limits_i8xx_lvds = {
72 .dot = { .min = 25000, .max = 350000 },
73 .vco = { .min = 908000, .max = 1512000 },
74 .n = { .min = 2, .max = 16 },
75 .m = { .min = 96, .max = 140 },
76 .m1 = { .min = 18, .max = 26 },
77 .m2 = { .min = 6, .max = 16 },
78 .p = { .min = 4, .max = 128 },
79 .p1 = { .min = 1, .max = 6 },
80 .p2 = { .dot_limit = 165000,
81 .p2_slow = 14, .p2_fast = 7 },
82 };
83
84 static const struct intel_limit intel_limits_i9xx_sdvo = {
85 .dot = { .min = 20000, .max = 400000 },
86 .vco = { .min = 1400000, .max = 2800000 },
87 .n = { .min = 1, .max = 6 },
88 .m = { .min = 70, .max = 120 },
89 .m1 = { .min = 8, .max = 18 },
90 .m2 = { .min = 3, .max = 7 },
91 .p = { .min = 5, .max = 80 },
92 .p1 = { .min = 1, .max = 8 },
93 .p2 = { .dot_limit = 200000,
94 .p2_slow = 10, .p2_fast = 5 },
95 };
96
97 static const struct intel_limit intel_limits_i9xx_lvds = {
98 .dot = { .min = 20000, .max = 400000 },
99 .vco = { .min = 1400000, .max = 2800000 },
100 .n = { .min = 1, .max = 6 },
101 .m = { .min = 70, .max = 120 },
102 .m1 = { .min = 8, .max = 18 },
103 .m2 = { .min = 3, .max = 7 },
104 .p = { .min = 7, .max = 98 },
105 .p1 = { .min = 1, .max = 8 },
106 .p2 = { .dot_limit = 112000,
107 .p2_slow = 14, .p2_fast = 7 },
108 };
109
110
111 static const struct intel_limit intel_limits_g4x_sdvo = {
112 .dot = { .min = 25000, .max = 270000 },
113 .vco = { .min = 1750000, .max = 3500000},
114 .n = { .min = 1, .max = 4 },
115 .m = { .min = 104, .max = 138 },
116 .m1 = { .min = 17, .max = 23 },
117 .m2 = { .min = 5, .max = 11 },
118 .p = { .min = 10, .max = 30 },
119 .p1 = { .min = 1, .max = 3},
120 .p2 = { .dot_limit = 270000,
121 .p2_slow = 10,
122 .p2_fast = 10
123 },
124 };
125
126 static const struct intel_limit intel_limits_g4x_hdmi = {
127 .dot = { .min = 22000, .max = 400000 },
128 .vco = { .min = 1750000, .max = 3500000},
129 .n = { .min = 1, .max = 4 },
130 .m = { .min = 104, .max = 138 },
131 .m1 = { .min = 16, .max = 23 },
132 .m2 = { .min = 5, .max = 11 },
133 .p = { .min = 5, .max = 80 },
134 .p1 = { .min = 1, .max = 8},
135 .p2 = { .dot_limit = 165000,
136 .p2_slow = 10, .p2_fast = 5 },
137 };
138
139 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
140 .dot = { .min = 20000, .max = 115000 },
141 .vco = { .min = 1750000, .max = 3500000 },
142 .n = { .min = 1, .max = 3 },
143 .m = { .min = 104, .max = 138 },
144 .m1 = { .min = 17, .max = 23 },
145 .m2 = { .min = 5, .max = 11 },
146 .p = { .min = 28, .max = 112 },
147 .p1 = { .min = 2, .max = 8 },
148 .p2 = { .dot_limit = 0,
149 .p2_slow = 14, .p2_fast = 14
150 },
151 };
152
153 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
154 .dot = { .min = 80000, .max = 224000 },
155 .vco = { .min = 1750000, .max = 3500000 },
156 .n = { .min = 1, .max = 3 },
157 .m = { .min = 104, .max = 138 },
158 .m1 = { .min = 17, .max = 23 },
159 .m2 = { .min = 5, .max = 11 },
160 .p = { .min = 14, .max = 42 },
161 .p1 = { .min = 2, .max = 6 },
162 .p2 = { .dot_limit = 0,
163 .p2_slow = 7, .p2_fast = 7
164 },
165 };
166
167 static const struct intel_limit pnv_limits_sdvo = {
168 .dot = { .min = 20000, .max = 400000},
169 .vco = { .min = 1700000, .max = 3500000 },
170 /* Pineview's Ncounter is a ring counter */
171 .n = { .min = 3, .max = 6 },
172 .m = { .min = 2, .max = 256 },
173 /* Pineview only has one combined m divider, which we treat as m2. */
174 .m1 = { .min = 0, .max = 0 },
175 .m2 = { .min = 0, .max = 254 },
176 .p = { .min = 5, .max = 80 },
177 .p1 = { .min = 1, .max = 8 },
178 .p2 = { .dot_limit = 200000,
179 .p2_slow = 10, .p2_fast = 5 },
180 };
181
182 static const struct intel_limit pnv_limits_lvds = {
183 .dot = { .min = 20000, .max = 400000 },
184 .vco = { .min = 1700000, .max = 3500000 },
185 .n = { .min = 3, .max = 6 },
186 .m = { .min = 2, .max = 256 },
187 .m1 = { .min = 0, .max = 0 },
188 .m2 = { .min = 0, .max = 254 },
189 .p = { .min = 7, .max = 112 },
190 .p1 = { .min = 1, .max = 8 },
191 .p2 = { .dot_limit = 112000,
192 .p2_slow = 14, .p2_fast = 14 },
193 };
194
195 /* Ironlake / Sandybridge
196 *
197 * We calculate clock using (register_value + 2) for N/M1/M2, so here
198 * the range value for them is (actual_value - 2).
199 */
200 static const struct intel_limit ilk_limits_dac = {
201 .dot = { .min = 25000, .max = 350000 },
202 .vco = { .min = 1760000, .max = 3510000 },
203 .n = { .min = 1, .max = 5 },
204 .m = { .min = 79, .max = 127 },
205 .m1 = { .min = 12, .max = 22 },
206 .m2 = { .min = 5, .max = 9 },
207 .p = { .min = 5, .max = 80 },
208 .p1 = { .min = 1, .max = 8 },
209 .p2 = { .dot_limit = 225000,
210 .p2_slow = 10, .p2_fast = 5 },
211 };
212
213 static const struct intel_limit ilk_limits_single_lvds = {
214 .dot = { .min = 25000, .max = 350000 },
215 .vco = { .min = 1760000, .max = 3510000 },
216 .n = { .min = 1, .max = 3 },
217 .m = { .min = 79, .max = 118 },
218 .m1 = { .min = 12, .max = 22 },
219 .m2 = { .min = 5, .max = 9 },
220 .p = { .min = 28, .max = 112 },
221 .p1 = { .min = 2, .max = 8 },
222 .p2 = { .dot_limit = 225000,
223 .p2_slow = 14, .p2_fast = 14 },
224 };
225
226 static const struct intel_limit ilk_limits_dual_lvds = {
227 .dot = { .min = 25000, .max = 350000 },
228 .vco = { .min = 1760000, .max = 3510000 },
229 .n = { .min = 1, .max = 3 },
230 .m = { .min = 79, .max = 127 },
231 .m1 = { .min = 12, .max = 22 },
232 .m2 = { .min = 5, .max = 9 },
233 .p = { .min = 14, .max = 56 },
234 .p1 = { .min = 2, .max = 8 },
235 .p2 = { .dot_limit = 225000,
236 .p2_slow = 7, .p2_fast = 7 },
237 };
238
239 /* LVDS 100 MHz refclk limits. */
240 static const struct intel_limit ilk_limits_single_lvds_100m = {
241 .dot = { .min = 25000, .max = 350000 },
242 .vco = { .min = 1760000, .max = 3510000 },
243 .n = { .min = 1, .max = 2 },
244 .m = { .min = 79, .max = 126 },
245 .m1 = { .min = 12, .max = 22 },
246 .m2 = { .min = 5, .max = 9 },
247 .p = { .min = 28, .max = 112 },
248 .p1 = { .min = 2, .max = 8 },
249 .p2 = { .dot_limit = 225000,
250 .p2_slow = 14, .p2_fast = 14 },
251 };
252
253 static const struct intel_limit ilk_limits_dual_lvds_100m = {
254 .dot = { .min = 25000, .max = 350000 },
255 .vco = { .min = 1760000, .max = 3510000 },
256 .n = { .min = 1, .max = 3 },
257 .m = { .min = 79, .max = 126 },
258 .m1 = { .min = 12, .max = 22 },
259 .m2 = { .min = 5, .max = 9 },
260 .p = { .min = 14, .max = 42 },
261 .p1 = { .min = 2, .max = 6 },
262 .p2 = { .dot_limit = 225000,
263 .p2_slow = 7, .p2_fast = 7 },
264 };
265
266 static const struct intel_limit intel_limits_vlv = {
267 /*
268 * These are based on the data rate limits (measured in fast clocks)
269 * since those are the strictest limits we have. The fast
270 * clock and actual rate limits are more relaxed, so checking
271 * them would make no difference.
272 */
273 .dot = { .min = 25000, .max = 270000 },
274 .vco = { .min = 4000000, .max = 6000000 },
275 .n = { .min = 1, .max = 7 },
276 .m1 = { .min = 2, .max = 3 },
277 .m2 = { .min = 11, .max = 156 },
278 .p1 = { .min = 2, .max = 3 },
279 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
280 };
281
282 static const struct intel_limit intel_limits_chv = {
283 /*
284 * These are based on the data rate limits (measured in fast clocks)
285 * since those are the strictest limits we have. The fast
286 * clock and actual rate limits are more relaxed, so checking
287 * them would make no difference.
288 */
289 .dot = { .min = 25000, .max = 540000 },
290 .vco = { .min = 4800000, .max = 6480000 },
291 .n = { .min = 1, .max = 1 },
292 .m1 = { .min = 2, .max = 2 },
293 .m2 = { .min = 24 << 22, .max = 175 << 22 },
294 .p1 = { .min = 2, .max = 4 },
295 .p2 = { .p2_slow = 1, .p2_fast = 14 },
296 };
297
298 static const struct intel_limit intel_limits_bxt = {
299 .dot = { .min = 25000, .max = 594000 },
300 .vco = { .min = 4800000, .max = 6700000 },
301 .n = { .min = 1, .max = 1 },
302 .m1 = { .min = 2, .max = 2 },
303 /* FIXME: find real m2 limits */
304 .m2 = { .min = 2 << 22, .max = 255 << 22 },
305 .p1 = { .min = 2, .max = 4 },
306 .p2 = { .p2_slow = 1, .p2_fast = 20 },
307 };
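/*
 * These limits are consumed via bxt_find_best_dpll(), which reuses
 * chv_find_best_dpll(), so m2 is expressed in the same .22 binary
 * fixed-point format as the CHV limits above.
 */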
308
309 /*
310 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
311 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
312 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
313 * The helpers' return value is the rate of the clock that is fed to the
314 * display engine's pipe which can be the above fast dot clock rate or a
315 * divided-down version of it.
316 */
317 /* m1 is reserved as 0 in Pineview, n is a ring counter */
318 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
319 {
320 clock->m = clock->m2 + 2;
321 clock->p = clock->p1 * clock->p2;
322
323 clock->vco = clock->n == 0 ? 0 :
324 DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
325 clock->dot = clock->p == 0 ? 0 :
326 DIV_ROUND_CLOSEST(clock->vco, clock->p);
327
328 return clock->dot;
329 }
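/*
 * Worked example (illustrative values, not from any real mode): with
 * refclk = 96000 kHz, n = 4, m2 = 128, p1 = 3, p2 = 10 this computes
 * m = 128 + 2 = 130, p = 3 * 10 = 30,
 * vco = 96000 * 130 / 4 = 3120000 kHz and dot = 3120000 / 30 = 104000 kHz,
 * all within the pnv_limits_sdvo ranges.
 */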
330
331 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
332 {
333 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
334 }
335
336 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
337 {
338 clock->m = i9xx_dpll_compute_m(clock);
339 clock->p = clock->p1 * clock->p2;
340
341 clock->vco = clock->n + 2 == 0 ? 0 :
342 DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
343 clock->dot = clock->p == 0 ? 0 :
344 DIV_ROUND_CLOSEST(clock->vco, clock->p);
345
346 return clock->dot;
347 }
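/*
 * Worked example (illustrative values, not from any real mode): with
 * refclk = 96000 kHz, n = 2, m1 = 12, m2 = 5, p1 = 2, p2 = 10 this computes
 * m = 5 * (12 + 2) + (5 + 2) = 77, p = 2 * 10 = 20,
 * vco = 96000 * 77 / (2 + 2) = 1848000 kHz and dot = 1848000 / 20 = 92400 kHz,
 * all within the intel_limits_i9xx_sdvo ranges.
 */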
348
349 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
350 {
351 clock->m = clock->m1 * clock->m2;
352 clock->p = clock->p1 * clock->p2 * 5;
353
354 clock->vco = clock->n == 0 ? 0 :
355 DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
356 clock->dot = clock->p == 0 ? 0 :
357 DIV_ROUND_CLOSEST(clock->vco, clock->p);
358
359 return clock->dot;
360 }
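/*
 * Worked example (illustrative values, not from any real mode): with
 * refclk = 100000 kHz, n = 4, m1 = 3, m2 = 60, p1 = 3, p2 = 6 this computes
 * m = 3 * 60 = 180, p = 3 * 6 * 5 = 90,
 * vco = 100000 * 180 / 4 = 4500000 kHz and dot = 4500000 / 90 = 50000 kHz,
 * all within the intel_limits_vlv ranges. Note the extra factor of 5 in p
 * compared to the i9xx helpers.
 */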
361
362 int chv_calc_dpll_params(int refclk, struct dpll *clock)
363 {
364 clock->m = clock->m1 * clock->m2;
365 clock->p = clock->p1 * clock->p2 * 5;
366
367 clock->vco = clock->n == 0 ? 0 :
368 DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
369 clock->dot = clock->p == 0 ? 0 :
370 DIV_ROUND_CLOSEST(clock->vco, clock->p);
371
372 return clock->dot;
373 }
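/*
 * Worked example (illustrative values, not from any real mode): m2 is in
 * .22 binary fixed point on CHV, so with refclk = 100000 kHz, n = 1,
 * m1 = 2, m2 = 25 << 22, p1 = 2, p2 = 5 this computes m = 2 * (25 << 22),
 * p = 2 * 5 * 5 = 50, vco = 100000 * m / (1 << 22) = 5000000 kHz and
 * dot = 5000000 / 50 = 100000 kHz, all within the intel_limits_chv ranges.
 */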
374
375 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
376 {
377 struct intel_display *display = to_intel_display(crtc_state);
378 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
379
380 if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
381 return display->vbt.lvds_ssc_freq;
382 else if (HAS_PCH_SPLIT(display))
383 return 120000;
384 else if (DISPLAY_VER(display) != 2)
385 return 96000;
386 else
387 return 48000;
388 }
389
390 void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
391 struct intel_dpll_hw_state *dpll_hw_state)
392 {
393 struct intel_display *display = to_intel_display(crtc);
394 struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
395
396 if (DISPLAY_VER(display) >= 4) {
397 u32 tmp;
398
399 /* No way to read it out on pipes B and C */
400 if (display->platform.cherryview && crtc->pipe != PIPE_A)
401 tmp = display->state.chv_dpll_md[crtc->pipe];
402 else
403 tmp = intel_de_read(display,
404 DPLL_MD(display, crtc->pipe));
405
406 hw_state->dpll_md = tmp;
407 }
408
409 hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
410
411 if (!display->platform.valleyview && !display->platform.cherryview) {
412 hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
413 hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
414 } else {
415 /* Mask out read-only status bits. */
416 hw_state->dpll &= ~(DPLL_LOCK_VLV |
417 DPLL_PORTC_READY_MASK |
418 DPLL_PORTB_READY_MASK);
419 }
420 }
421
422 /* Returns the clock of the currently programmed mode of the given pipe. */
423 void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
424 {
425 struct intel_display *display = to_intel_display(crtc_state);
426 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
427 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
428 u32 dpll = hw_state->dpll;
429 u32 fp;
430 struct dpll clock;
431 int port_clock;
432 int refclk = i9xx_pll_refclk(crtc_state);
433
434 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
435 fp = hw_state->fp0;
436 else
437 fp = hw_state->fp1;
438
439 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
440 if (display->platform.pineview) {
441 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
442 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
443 } else {
444 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
445 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
446 }
447
448 if (DISPLAY_VER(display) != 2) {
449 if (display->platform.pineview)
450 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
451 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
452 else
453 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
454 DPLL_FPA01_P1_POST_DIV_SHIFT);
455
456 switch (dpll & DPLL_MODE_MASK) {
457 case DPLLB_MODE_DAC_SERIAL:
458 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
459 5 : 10;
460 break;
461 case DPLLB_MODE_LVDS:
462 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
463 7 : 14;
464 break;
465 default:
466 drm_dbg_kms(display->drm,
467 "Unknown DPLL mode %08x in programmed "
468 "mode\n", (int)(dpll & DPLL_MODE_MASK));
469 return;
470 }
471
472 if (display->platform.pineview)
473 port_clock = pnv_calc_dpll_params(refclk, &clock);
474 else
475 port_clock = i9xx_calc_dpll_params(refclk, &clock);
476 } else {
477 enum pipe lvds_pipe;
478
479 if (display->platform.i85x &&
480 intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
481 lvds_pipe == crtc->pipe) {
482 u32 lvds = intel_de_read(display, LVDS);
483
484 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
485 DPLL_FPA01_P1_POST_DIV_SHIFT);
486
487 if (lvds & LVDS_CLKB_POWER_UP)
488 clock.p2 = 7;
489 else
490 clock.p2 = 14;
491 } else {
492 if (dpll & PLL_P1_DIVIDE_BY_TWO)
493 clock.p1 = 2;
494 else {
495 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
496 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
497 }
498 if (dpll & PLL_P2_DIVIDE_BY_4)
499 clock.p2 = 4;
500 else
501 clock.p2 = 2;
502 }
503
504 port_clock = i9xx_calc_dpll_params(refclk, &clock);
505 }
506
507 /*
508 * This value includes pixel_multiplier. We will use
509 * port_clock to compute adjusted_mode.crtc_clock in the
510 * encoder's get_config() function.
511 */
512 crtc_state->port_clock = port_clock;
513 }
514
515 void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
516 {
517 struct intel_display *display = to_intel_display(crtc_state);
518 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
519 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
520 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
521 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
522 int refclk = 100000;
523 struct dpll clock;
524 u32 tmp;
525
526 /* In case of DSI, DPLL will not be used */
527 if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
528 return;
529
530 vlv_dpio_get(display->drm);
531 tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
532 vlv_dpio_put(display->drm);
533
534 clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
535 clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
536 clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
537 clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
538 clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
539
540 crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
541 }
542
543 void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
544 {
545 struct intel_display *display = to_intel_display(crtc_state);
546 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
547 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
548 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
549 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
550 struct dpll clock;
551 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
552 int refclk = 100000;
553
554 /* In case of DSI, DPLL will not be used */
555 if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
556 return;
557
558 vlv_dpio_get(display->drm);
559 cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
560 pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
561 pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
562 pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
563 pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
564 vlv_dpio_put(display->drm);
565
566 clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
567 clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
568 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
569 clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
570 clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
571 clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
572 clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
573
574 crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
575 }
576
577 /*
578 * Returns whether the given set of divisors are valid for a given refclk with
579 * the given connectors.
580 */
581 static bool intel_pll_is_valid(struct intel_display *display,
582 const struct intel_limit *limit,
583 const struct dpll *clock)
584 {
585 if (clock->n < limit->n.min || limit->n.max < clock->n)
586 return false;
587 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
588 return false;
589 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
590 return false;
591 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
592 return false;
593
594 if (!display->platform.pineview &&
595 !display->platform.valleyview && !display->platform.cherryview &&
596 !display->platform.broxton && !display->platform.geminilake)
597 if (clock->m1 <= clock->m2)
598 return false;
599
600 if (!display->platform.valleyview && !display->platform.cherryview &&
601 !display->platform.broxton && !display->platform.geminilake) {
602 if (clock->p < limit->p.min || limit->p.max < clock->p)
603 return false;
604 if (clock->m < limit->m.min || limit->m.max < clock->m)
605 return false;
606 }
607
608 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
609 return false;
610 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
611 * connector, etc., rather than just a single range.
612 */
613 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
614 return false;
615
616 return true;
617 }
618
619 static int
620 i9xx_select_p2_div(const struct intel_limit *limit,
621 const struct intel_crtc_state *crtc_state,
622 int target)
623 {
624 struct intel_display *display = to_intel_display(crtc_state);
625
626 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
627 /*
628 * For LVDS just rely on its current settings for dual-channel.
629 * We haven't figured out how to reliably set up different
630 * single/dual channel state, if we even can.
631 */
632 if (intel_is_dual_link_lvds(display))
633 return limit->p2.p2_fast;
634 else
635 return limit->p2.p2_slow;
636 } else {
637 if (target < limit->p2.dot_limit)
638 return limit->p2.p2_slow;
639 else
640 return limit->p2.p2_fast;
641 }
642 }
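/*
 * For example, with intel_limits_i9xx_sdvo (dot_limit = 200000) a non-LVDS
 * target of 148500 kHz selects p2_slow = 10, while a 270000 kHz target
 * selects p2_fast = 5.
 */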
643
644 /*
645 * Returns a set of divisors for the desired target clock with the given
646 * refclk, or FALSE.
647 *
648 * Target and reference clocks are specified in kHz.
649 *
650 * If match_clock is provided, then best_clock P divider must match the P
651 * divider from @match_clock used for LVDS downclocking.
652 */
653 static bool
654 i9xx_find_best_dpll(const struct intel_limit *limit,
655 struct intel_crtc_state *crtc_state,
656 int target, int refclk,
657 const struct dpll *match_clock,
658 struct dpll *best_clock)
659 {
660 struct intel_display *display = to_intel_display(crtc_state);
661 struct dpll clock;
662 int err = target;
663
664 memset(best_clock, 0, sizeof(*best_clock));
665
666 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
667
668 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
669 clock.m1++) {
670 for (clock.m2 = limit->m2.min;
671 clock.m2 <= limit->m2.max; clock.m2++) {
672 if (clock.m2 >= clock.m1)
673 break;
674 for (clock.n = limit->n.min;
675 clock.n <= limit->n.max; clock.n++) {
676 for (clock.p1 = limit->p1.min;
677 clock.p1 <= limit->p1.max; clock.p1++) {
678 int this_err;
679
680 i9xx_calc_dpll_params(refclk, &clock);
681 if (!intel_pll_is_valid(display,
682 limit,
683 &clock))
684 continue;
685 if (match_clock &&
686 clock.p != match_clock->p)
687 continue;
688
689 this_err = abs(clock.dot - target);
690 if (this_err < err) {
691 *best_clock = clock;
692 err = this_err;
693 }
694 }
695 }
696 }
697 }
698
699 return (err != target);
700 }
701
702 /*
703 * Returns a set of divisors for the desired target clock with the given
704 * refclk, or FALSE.
705 *
706 * Target and reference clocks are specified in kHz.
707 *
708 * If match_clock is provided, then best_clock P divider must match the P
709 * divider from @match_clock used for LVDS downclocking.
710 */
711 static bool
712 pnv_find_best_dpll(const struct intel_limit *limit,
713 struct intel_crtc_state *crtc_state,
714 int target, int refclk,
715 const struct dpll *match_clock,
716 struct dpll *best_clock)
717 {
718 struct intel_display *display = to_intel_display(crtc_state);
719 struct dpll clock;
720 int err = target;
721
722 memset(best_clock, 0, sizeof(*best_clock));
723
724 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
725
726 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
727 clock.m1++) {
728 for (clock.m2 = limit->m2.min;
729 clock.m2 <= limit->m2.max; clock.m2++) {
730 for (clock.n = limit->n.min;
731 clock.n <= limit->n.max; clock.n++) {
732 for (clock.p1 = limit->p1.min;
733 clock.p1 <= limit->p1.max; clock.p1++) {
734 int this_err;
735
736 pnv_calc_dpll_params(refclk, &clock);
737 if (!intel_pll_is_valid(display,
738 limit,
739 &clock))
740 continue;
741 if (match_clock &&
742 clock.p != match_clock->p)
743 continue;
744
745 this_err = abs(clock.dot - target);
746 if (this_err < err) {
747 *best_clock = clock;
748 err = this_err;
749 }
750 }
751 }
752 }
753 }
754
755 return (err != target);
756 }
757
758 /*
759 * Returns a set of divisors for the desired target clock with the given
760 * refclk, or FALSE.
761 *
762 * Target and reference clocks are specified in kHz.
763 *
764 * If match_clock is provided, then best_clock P divider must match the P
765 * divider from @match_clock used for LVDS downclocking.
766 */
767 static bool
768 g4x_find_best_dpll(const struct intel_limit *limit,
769 struct intel_crtc_state *crtc_state,
770 int target, int refclk,
771 const struct dpll *match_clock,
772 struct dpll *best_clock)
773 {
774 struct intel_display *display = to_intel_display(crtc_state);
775 struct dpll clock;
776 int max_n;
777 bool found = false;
778 /* approximately equals target * 0.00586 (target/256 + target/512 = 3*target/512) */
779 int err_most = (target >> 8) + (target >> 9);
780
781 memset(best_clock, 0, sizeof(*best_clock));
782
783 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
784
785 max_n = limit->n.max;
786 /* based on hardware requirement, prefer smaller n to precision */
787 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
788 /* based on hardware requirement, prefer larger m1,m2 */
789 for (clock.m1 = limit->m1.max;
790 clock.m1 >= limit->m1.min; clock.m1--) {
791 for (clock.m2 = limit->m2.max;
792 clock.m2 >= limit->m2.min; clock.m2--) {
793 for (clock.p1 = limit->p1.max;
794 clock.p1 >= limit->p1.min; clock.p1--) {
795 int this_err;
796
797 i9xx_calc_dpll_params(refclk, &clock);
798 if (!intel_pll_is_valid(display,
799 limit,
800 &clock))
801 continue;
802
803 this_err = abs(clock.dot - target);
804 if (this_err < err_most) {
805 *best_clock = clock;
806 err_most = this_err;
807 max_n = clock.n;
808 found = true;
809 }
810 }
811 }
812 }
813 }
814 return found;
815 }
816
817 /*
818 * Check if the calculated PLL configuration is more optimal compared to the
819 * best configuration and error found so far. Return the calculated error.
820 */
821 static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
822 const struct dpll *calculated_clock,
823 const struct dpll *best_clock,
824 unsigned int best_error_ppm,
825 unsigned int *error_ppm)
826 {
827 /*
828 * For CHV ignore the error and consider only the P value.
829 * Prefer a bigger P value based on HW requirements.
830 */
831 if (display->platform.cherryview) {
832 *error_ppm = 0;
833
834 return calculated_clock->p > best_clock->p;
835 }
836
837 if (drm_WARN_ON_ONCE(display->drm, !target_freq))
838 return false;
839
840 *error_ppm = div_u64(1000000ULL *
841 abs(target_freq - calculated_clock->dot),
842 target_freq);
843 /*
844 * Prefer a better P value over a better (smaller) error if the error
845 * is small. Ensure this preference for future configurations too by
846 * setting the error to 0.
847 */
848 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
849 *error_ppm = 0;
850
851 return true;
852 }
853
854 return *error_ppm + 10 < best_error_ppm;
855 }
856
857 /*
858 * Returns a set of divisors for the desired target clock with the given
859 * refclk, or FALSE.
860 */
861 static bool
862 vlv_find_best_dpll(const struct intel_limit *limit,
863 struct intel_crtc_state *crtc_state,
864 int target, int refclk,
865 const struct dpll *match_clock,
866 struct dpll *best_clock)
867 {
868 struct intel_display *display = to_intel_display(crtc_state);
869 struct dpll clock;
870 unsigned int bestppm = 1000000;
871 /* min update 19.2 MHz */
872 int max_n = min(limit->n.max, refclk / 19200);
873 bool found = false;
874
875 memset(best_clock, 0, sizeof(*best_clock));
876
877 /* based on hardware requirement, prefer smaller n to precision */
878 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
879 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
880 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
881 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
882 clock.p = clock.p1 * clock.p2 * 5;
883 /* based on hardware requirement, prefer bigger m1,m2 values */
884 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
885 unsigned int ppm;
886
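/*
 * Solve dot = refclk * m1 * m2 / (n * p) for m2, i.e. pick the
 * m2 that lands closest to the target for this n/p1/p2/m1 combo.
 */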
887 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
888 refclk * clock.m1);
889
890 vlv_calc_dpll_params(refclk, &clock);
891
892 if (!intel_pll_is_valid(display,
893 limit,
894 &clock))
895 continue;
896
897 if (!vlv_PLL_is_optimal(display, target,
898 &clock,
899 best_clock,
900 bestppm, &ppm))
901 continue;
902
903 *best_clock = clock;
904 bestppm = ppm;
905 found = true;
906 }
907 }
908 }
909 }
910
911 return found;
912 }
913
914 /*
915 * Returns a set of divisors for the desired target clock with the given
916 * refclk, or FALSE.
917 */
918 static bool
919 chv_find_best_dpll(const struct intel_limit *limit,
920 struct intel_crtc_state *crtc_state,
921 int target, int refclk,
922 const struct dpll *match_clock,
923 struct dpll *best_clock)
924 {
925 struct intel_display *display = to_intel_display(crtc_state);
926 unsigned int best_error_ppm;
927 struct dpll clock;
928 u64 m2;
929 int found = false;
930
931 memset(best_clock, 0, sizeof(*best_clock));
932 best_error_ppm = 1000000;
933
934 /*
935 * Based on the hardware doc, n is always set to 1 and m1 is always
936 * set to 2. If we ever need to support a 200 MHz refclk, this needs
937 * to be revisited because n may no longer be 1.
938 */
939 clock.n = 1;
940 clock.m1 = 2;
941
942 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
943 for (clock.p2 = limit->p2.p2_fast;
944 clock.p2 >= limit->p2.p2_slow;
945 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
946 unsigned int error_ppm;
947
948 clock.p = clock.p1 * clock.p2 * 5;
949
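/*
 * Same inversion as in vlv_find_best_dpll(), but yielding m2 in
 * .22 binary fixed point (hence the << 22) to retain precision.
 */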
950 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
951 refclk * clock.m1);
952
953 if (m2 > INT_MAX/clock.m1)
954 continue;
955
956 clock.m2 = m2;
957
958 chv_calc_dpll_params(refclk, &clock);
959
960 if (!intel_pll_is_valid(display, limit, &clock))
961 continue;
962
963 if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
964 best_error_ppm, &error_ppm))
965 continue;
966
967 *best_clock = clock;
968 best_error_ppm = error_ppm;
969 found = true;
970 }
971 }
972
973 return found;
974 }
975
976 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
977 struct dpll *best_clock)
978 {
979 const struct intel_limit *limit = &intel_limits_bxt;
980 int refclk = 100000;
981
982 return chv_find_best_dpll(limit, crtc_state,
983 crtc_state->port_clock, refclk,
984 NULL, best_clock);
985 }
986
987 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
988 {
989 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
990 }
991
992 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
993 {
994 return (1 << dpll->n) << 16 | dpll->m2;
995 }
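/*
 * Illustrative encodings: for n = 2, m1 = 12, m2 = 5 the i9xx layout above
 * yields 0x00020c05 (n << 16 | m1 << 8 | m2). On Pineview n is programmed
 * as a power of two (it's a ring counter), which matches the ffs() based
 * decode in i9xx_crtc_clock_get().
 */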
996
997 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
998 {
999 return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1000 }
1001
1002 static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
1003 const struct dpll *clock,
1004 const struct dpll *reduced_clock)
1005 {
1006 struct intel_display *display = to_intel_display(crtc_state);
1007 u32 dpll;
1008
1009 dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1010
1011 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1012 dpll |= DPLLB_MODE_LVDS;
1013 else
1014 dpll |= DPLLB_MODE_DAC_SERIAL;
1015
1016 if (display->platform.i945g || display->platform.i945gm ||
1017 display->platform.g33 || display->platform.pineview) {
1018 dpll |= (crtc_state->pixel_multiplier - 1)
1019 << SDVO_MULTIPLIER_SHIFT_HIRES;
1020 }
1021
1022 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1023 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1024 dpll |= DPLL_SDVO_HIGH_SPEED;
1025
1026 if (intel_crtc_has_dp_encoder(crtc_state))
1027 dpll |= DPLL_SDVO_HIGH_SPEED;
1028
1029 /* compute bitmask from p1 value */
1030 if (display->platform.g4x) {
1031 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1032 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1033 } else if (display->platform.pineview) {
1034 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
1035 WARN_ON(reduced_clock->p1 != clock->p1);
1036 } else {
1037 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1038 WARN_ON(reduced_clock->p1 != clock->p1);
1039 }
1040
1041 switch (clock->p2) {
1042 case 5:
1043 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1044 break;
1045 case 7:
1046 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1047 break;
1048 case 10:
1049 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1050 break;
1051 case 14:
1052 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1053 break;
1054 }
1055 WARN_ON(reduced_clock->p2 != clock->p2);
1056
1057 if (DISPLAY_VER(display) >= 4)
1058 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1059
1060 if (crtc_state->sdvo_tv_clock)
1061 dpll |= PLL_REF_INPUT_TVCLKINBC;
1062 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1063 intel_panel_use_ssc(display))
1064 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1065 else
1066 dpll |= PLL_REF_INPUT_DREFCLK;
1067
1068 return dpll;
1069 }
1070
1071 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1072 const struct dpll *clock,
1073 const struct dpll *reduced_clock)
1074 {
1075 struct intel_display *display = to_intel_display(crtc_state);
1076 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1077
1078 if (display->platform.pineview) {
1079 hw_state->fp0 = pnv_dpll_compute_fp(clock);
1080 hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1081 } else {
1082 hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1083 hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1084 }
1085
1086 hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1087
1088 if (DISPLAY_VER(display) >= 4)
1089 hw_state->dpll_md = i965_dpll_md(crtc_state);
1090 }
1091
1092 static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
1093 const struct dpll *clock,
1094 const struct dpll *reduced_clock)
1095 {
1096 struct intel_display *display = to_intel_display(crtc_state);
1097 u32 dpll;
1098
1099 dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1100
1101 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1102 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1103 } else {
1104 if (clock->p1 == 2)
1105 dpll |= PLL_P1_DIVIDE_BY_TWO;
1106 else
1107 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1108 if (clock->p2 == 4)
1109 dpll |= PLL_P2_DIVIDE_BY_4;
1110 }
1111 WARN_ON(reduced_clock->p1 != clock->p1);
1112 WARN_ON(reduced_clock->p2 != clock->p2);
1113
1114 /*
1115 * Bspec:
1116 * "[Almador Errata}: For the correct operation of the muxed DVO pins
1117 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
1118 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
1119 * Enable) must be set to “1” in both the DPLL A Control Register
1120 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
1121 *
1122 * For simplicity we keep both bits always enabled in
1123 * both DPLLs. The spec says we should disable the DVO 2X clock
1124 * when not needed, but this seems to work fine in practice.
1125 */
1126 if (display->platform.i830 ||
1127 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
1128 dpll |= DPLL_DVO_2X_MODE;
1129
1130 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1131 intel_panel_use_ssc(display))
1132 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1133 else
1134 dpll |= PLL_REF_INPUT_DREFCLK;
1135
1136 return dpll;
1137 }
1138
1139 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1140 const struct dpll *clock,
1141 const struct dpll *reduced_clock)
1142 {
1143 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1144
1145 hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1146 hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1147
1148 hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1149 }
1150
1151 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1152 struct intel_crtc *crtc)
1153 {
1154 struct intel_display *display = to_intel_display(state);
1155 struct intel_crtc_state *crtc_state =
1156 intel_atomic_get_new_crtc_state(state, crtc);
1157 struct intel_encoder *encoder =
1158 intel_get_crtc_new_encoder(state, crtc_state);
1159 int ret;
1160
1161 if (DISPLAY_VER(display) < 11 &&
1162 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1163 return 0;
1164
1165 ret = intel_dpll_compute(state, crtc, encoder);
1166 if (ret)
1167 return ret;
1168
1169 /* FIXME this is a mess */
1170 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1171 return 0;
1172
1173 /* CRT dotclock is determined via other means */
1174 if (!crtc_state->has_pch_encoder)
1175 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1176
1177 return 0;
1178 }
1179
1180 static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
1181 struct intel_crtc *crtc)
1182 {
1183 struct intel_display *display = to_intel_display(state);
1184 struct intel_crtc_state *crtc_state =
1185 intel_atomic_get_new_crtc_state(state, crtc);
1186 struct intel_encoder *encoder =
1187 intel_get_crtc_new_encoder(state, crtc_state);
1188
1189 if (DISPLAY_VER(display) < 11 &&
1190 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1191 return 0;
1192
1193 return intel_dpll_reserve(state, crtc, encoder);
1194 }
1195
1196 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1197 struct intel_crtc *crtc)
1198 {
1199 struct intel_crtc_state *crtc_state =
1200 intel_atomic_get_new_crtc_state(state, crtc);
1201 struct intel_encoder *encoder =
1202 intel_get_crtc_new_encoder(state, crtc_state);
1203 int ret;
1204
1205 ret = intel_mpllb_calc_state(crtc_state, encoder);
1206 if (ret)
1207 return ret;
1208
1209 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1210
1211 return 0;
1212 }
1213
1214 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1215 struct intel_crtc *crtc)
1216 {
1217 struct intel_crtc_state *crtc_state =
1218 intel_atomic_get_new_crtc_state(state, crtc);
1219 struct intel_encoder *encoder =
1220 intel_get_crtc_new_encoder(state, crtc_state);
1221 int ret;
1222
1223 ret = intel_cx0pll_calc_state(crtc_state, encoder);
1224 if (ret)
1225 return ret;
1226
1227 /* TODO: Do the readback via intel_dpll_compute() */
1228 crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
1229
1230 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1231
1232 return 0;
1233 }
1234
1235 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1236 {
1237 struct intel_display *display = to_intel_display(crtc_state);
1238
1239 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1240 ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
1241 (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
1242 return 25;
1243
1244 if (crtc_state->sdvo_tv_clock)
1245 return 20;
1246
1247 return 21;
1248 }
1249
1250 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1251 {
1252 return dpll->m < factor * dpll->n;
1253 }
1254
1255 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1256 {
1257 u32 fp;
1258
1259 fp = i9xx_dpll_compute_fp(clock);
1260 if (ilk_needs_fb_cb_tune(clock, factor))
1261 fp |= FP_CB_TUNE;
1262
1263 return fp;
1264 }
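/*
 * For example, with the default factor of 21 from ilk_fb_cb_factor(),
 * m = 79, n = 5 needs FP_CB_TUNE since 79 < 21 * 5 = 105, whereas
 * m = 120, n = 5 does not.
 */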
1265
1266 static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
1267 const struct dpll *clock,
1268 const struct dpll *reduced_clock)
1269 {
1270 struct intel_display *display = to_intel_display(crtc_state);
1271 u32 dpll;
1272
1273 dpll = DPLL_VCO_ENABLE;
1274
1275 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1276 dpll |= DPLLB_MODE_LVDS;
1277 else
1278 dpll |= DPLLB_MODE_DAC_SERIAL;
1279
1280 dpll |= (crtc_state->pixel_multiplier - 1)
1281 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1282
1283 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1284 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1285 dpll |= DPLL_SDVO_HIGH_SPEED;
1286
1287 if (intel_crtc_has_dp_encoder(crtc_state))
1288 dpll |= DPLL_SDVO_HIGH_SPEED;
1289
1290 /*
1291 * The high speed IO clock is only really required for
1292 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1293 * possible to share the DPLL between CRT and HDMI. Enabling
1294 * the clock needlessly does no real harm, except use up a
1295 * bit of power potentially.
1296 *
1297 * We'll limit this to IVB with 3 pipes, since it has only two
1298 * DPLLs and so DPLL sharing is the only way to get three pipes
1299 * driving PCH ports at the same time. On SNB we could do this,
1300 * and potentially avoid enabling the second DPLL, but it's not
1301 * clear if it's a win or loss power-wise. No point in doing
1302 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1303 */
1304 if (INTEL_NUM_PIPES(display) == 3 &&
1305 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1306 dpll |= DPLL_SDVO_HIGH_SPEED;
1307
1308 /* compute bitmask from p1 value */
1309 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1310 /* also FPA1 */
1311 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1312
1313 switch (clock->p2) {
1314 case 5:
1315 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1316 break;
1317 case 7:
1318 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1319 break;
1320 case 10:
1321 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1322 break;
1323 case 14:
1324 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1325 break;
1326 }
1327 WARN_ON(reduced_clock->p2 != clock->p2);
1328
1329 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1330 intel_panel_use_ssc(display))
1331 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1332 else
1333 dpll |= PLL_REF_INPUT_DREFCLK;
1334
1335 return dpll;
1336 }
1337
1338 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1339 const struct dpll *clock,
1340 const struct dpll *reduced_clock)
1341 {
1342 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1343 int factor = ilk_fb_cb_factor(crtc_state);
1344
1345 hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1346 hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1347
1348 hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1349 }
1350
1351 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1352 struct intel_crtc *crtc)
1353 {
1354 struct intel_display *display = to_intel_display(state);
1355 struct intel_crtc_state *crtc_state =
1356 intel_atomic_get_new_crtc_state(state, crtc);
1357 const struct intel_limit *limit;
1358 int refclk = 120000;
1359 int ret;
1360
1361 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1362 if (!crtc_state->has_pch_encoder)
1363 return 0;
1364
1365 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1366 if (intel_panel_use_ssc(display)) {
1367 drm_dbg_kms(display->drm,
1368 "using SSC reference clock of %d kHz\n",
1369 display->vbt.lvds_ssc_freq);
1370 refclk = display->vbt.lvds_ssc_freq;
1371 }
1372
1373 if (intel_is_dual_link_lvds(display)) {
1374 if (refclk == 100000)
1375 limit = &ilk_limits_dual_lvds_100m;
1376 else
1377 limit = &ilk_limits_dual_lvds;
1378 } else {
1379 if (refclk == 100000)
1380 limit = &ilk_limits_single_lvds_100m;
1381 else
1382 limit = &ilk_limits_single_lvds;
1383 }
1384 } else {
1385 limit = &ilk_limits_dac;
1386 }
1387
1388 if (!crtc_state->clock_set &&
1389 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1390 refclk, NULL, &crtc_state->dpll))
1391 return -EINVAL;
1392
1393 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1394
1395 ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1396 &crtc_state->dpll);
1397
1398 ret = intel_dpll_compute(state, crtc, NULL);
1399 if (ret)
1400 return ret;
1401
1402 crtc_state->port_clock = crtc_state->dpll.dot;
1403 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1404
1405 return ret;
1406 }
1407
1408 static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
1409 struct intel_crtc *crtc)
1410 {
1411 struct intel_crtc_state *crtc_state =
1412 intel_atomic_get_new_crtc_state(state, crtc);
1413
1414 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1415 if (!crtc_state->has_pch_encoder)
1416 return 0;
1417
1418 return intel_dpll_reserve(state, crtc, NULL);
1419 }
1420
1421 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1422 {
1423 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1424 u32 dpll;
1425
1426 dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1427 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1428
1429 if (crtc->pipe != PIPE_A)
1430 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1431
1432 /* DPLL not used with DSI, but still need the rest set up */
1433 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1434 dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1435
1436 return dpll;
1437 }
1438
1439 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1440 {
1441 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1442
1443 hw_state->dpll = vlv_dpll(crtc_state);
1444 hw_state->dpll_md = i965_dpll_md(crtc_state);
1445 }
1446
1447 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1448 {
1449 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1450 u32 dpll;
1451
1452 dpll = DPLL_SSC_REF_CLK_CHV |
1453 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1454
1455 if (crtc->pipe != PIPE_A)
1456 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1457
1458 /* DPLL not used with DSI, but still need the rest set up */
1459 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1460 dpll |= DPLL_VCO_ENABLE;
1461
1462 return dpll;
1463 }
1464
1465 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1466 {
1467 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1468
1469 hw_state->dpll = chv_dpll(crtc_state);
1470 hw_state->dpll_md = i965_dpll_md(crtc_state);
1471 }
1472
1473 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1474 struct intel_crtc *crtc)
1475 {
1476 struct intel_crtc_state *crtc_state =
1477 intel_atomic_get_new_crtc_state(state, crtc);
1478 const struct intel_limit *limit = &intel_limits_chv;
1479 int refclk = 100000;
1480
1481 if (!crtc_state->clock_set &&
1482 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1483 refclk, NULL, &crtc_state->dpll))
1484 return -EINVAL;
1485
1486 chv_calc_dpll_params(refclk, &crtc_state->dpll);
1487
1488 chv_compute_dpll(crtc_state);
1489
1490 /* FIXME this is a mess */
1491 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1492 return 0;
1493
1494 crtc_state->port_clock = crtc_state->dpll.dot;
1495 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1496
1497 return 0;
1498 }
1499
1500 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1501 struct intel_crtc *crtc)
1502 {
1503 struct intel_crtc_state *crtc_state =
1504 intel_atomic_get_new_crtc_state(state, crtc);
1505 const struct intel_limit *limit = &intel_limits_vlv;
1506 int refclk = 100000;
1507
1508 if (!crtc_state->clock_set &&
1509 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1510 refclk, NULL, &crtc_state->dpll))
1511 return -EINVAL;
1512
1513 vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1514
1515 vlv_compute_dpll(crtc_state);
1516
1517 /* FIXME this is a mess */
1518 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1519 return 0;
1520
1521 crtc_state->port_clock = crtc_state->dpll.dot;
1522 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1523
1524 return 0;
1525 }
1526
1527 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1528 struct intel_crtc *crtc)
1529 {
1530 struct intel_display *display = to_intel_display(state);
1531 struct intel_crtc_state *crtc_state =
1532 intel_atomic_get_new_crtc_state(state, crtc);
1533 const struct intel_limit *limit;
1534 int refclk = 96000;
1535
1536 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1537 if (intel_panel_use_ssc(display)) {
1538 refclk = display->vbt.lvds_ssc_freq;
1539 drm_dbg_kms(display->drm,
1540 "using SSC reference clock of %d kHz\n",
1541 refclk);
1542 }
1543
1544 if (intel_is_dual_link_lvds(display))
1545 limit = &intel_limits_g4x_dual_channel_lvds;
1546 else
1547 limit = &intel_limits_g4x_single_channel_lvds;
1548 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1549 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1550 limit = &intel_limits_g4x_hdmi;
1551 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1552 limit = &intel_limits_g4x_sdvo;
1553 } else {
1554 /* The option is for other outputs */
1555 limit = &intel_limits_i9xx_sdvo;
1556 }
1557
1558 if (!crtc_state->clock_set &&
1559 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1560 refclk, NULL, &crtc_state->dpll))
1561 return -EINVAL;
1562
1563 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1564
1565 i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1566 &crtc_state->dpll);
1567
1568 crtc_state->port_clock = crtc_state->dpll.dot;
1569 /* FIXME this is a mess */
1570 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1571 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1572
1573 return 0;
1574 }
1575
1576 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1577 struct intel_crtc *crtc)
1578 {
1579 struct intel_display *display = to_intel_display(state);
1580 struct intel_crtc_state *crtc_state =
1581 intel_atomic_get_new_crtc_state(state, crtc);
1582 const struct intel_limit *limit;
1583 int refclk = 96000;
1584
1585 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1586 if (intel_panel_use_ssc(display)) {
1587 refclk = display->vbt.lvds_ssc_freq;
1588 drm_dbg_kms(display->drm,
1589 "using SSC reference clock of %d kHz\n",
1590 refclk);
1591 }
1592
1593 limit = &pnv_limits_lvds;
1594 } else {
1595 limit = &pnv_limits_sdvo;
1596 }
1597
1598 if (!crtc_state->clock_set &&
1599 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1600 refclk, NULL, &crtc_state->dpll))
1601 return -EINVAL;
1602
1603 pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1604
1605 i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1606 &crtc_state->dpll);
1607
1608 crtc_state->port_clock = crtc_state->dpll.dot;
1609 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1610
1611 return 0;
1612 }
1613
1614 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1615 struct intel_crtc *crtc)
1616 {
1617 struct intel_display *display = to_intel_display(state);
1618 struct intel_crtc_state *crtc_state =
1619 intel_atomic_get_new_crtc_state(state, crtc);
1620 const struct intel_limit *limit;
1621 int refclk = 96000;
1622
1623 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1624 if (intel_panel_use_ssc(display)) {
1625 refclk = display->vbt.lvds_ssc_freq;
1626 drm_dbg_kms(display->drm,
1627 "using SSC reference clock of %d kHz\n",
1628 refclk);
1629 }
1630
1631 limit = &intel_limits_i9xx_lvds;
1632 } else {
1633 limit = &intel_limits_i9xx_sdvo;
1634 }
1635
1636 if (!crtc_state->clock_set &&
1637 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1638 refclk, NULL, &crtc_state->dpll))
1639 return -EINVAL;
1640
1641 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1642
1643 i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1644 &crtc_state->dpll);
1645
1646 crtc_state->port_clock = crtc_state->dpll.dot;
1647 /* FIXME this is a mess */
1648 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1649 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1650
1651 return 0;
1652 }
1653
1654 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1655 struct intel_crtc *crtc)
1656 {
1657 struct intel_display *display = to_intel_display(state);
1658 struct intel_crtc_state *crtc_state =
1659 intel_atomic_get_new_crtc_state(state, crtc);
1660 const struct intel_limit *limit;
1661 int refclk = 48000;
1662
1663 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1664 if (intel_panel_use_ssc(display)) {
1665 refclk = display->vbt.lvds_ssc_freq;
1666 drm_dbg_kms(display->drm,
1667 "using SSC reference clock of %d kHz\n",
1668 refclk);
1669 }
1670
1671 limit = &intel_limits_i8xx_lvds;
1672 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1673 limit = &intel_limits_i8xx_dvo;
1674 } else {
1675 limit = &intel_limits_i8xx_dac;
1676 }
1677
1678 if (!crtc_state->clock_set &&
1679 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1680 refclk, NULL, &crtc_state->dpll))
1681 return -EINVAL;
1682
1683 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1684
1685 i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1686 &crtc_state->dpll);
1687
1688 crtc_state->port_clock = crtc_state->dpll.dot;
1689 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1690
1691 return 0;
1692 }
1693
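/*
 * Per-platform clock computation vtables; the appropriate one is
 * selected in intel_dpll_init_clock_hook() below.
 */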
1694 static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
1695 .crtc_compute_clock = mtl_crtc_compute_clock,
1696 };
1697
1698 static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
1699 .crtc_compute_clock = dg2_crtc_compute_clock,
1700 };
1701
1702 static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
1703 .crtc_compute_clock = hsw_crtc_compute_clock,
1704 .crtc_get_dpll = hsw_crtc_get_dpll,
1705 };
1706
1707 static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
1708 .crtc_compute_clock = ilk_crtc_compute_clock,
1709 .crtc_get_dpll = ilk_crtc_get_dpll,
1710 };
1711
1712 static const struct intel_dpll_global_funcs chv_dpll_funcs = {
1713 .crtc_compute_clock = chv_crtc_compute_clock,
1714 };
1715
1716 static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
1717 .crtc_compute_clock = vlv_crtc_compute_clock,
1718 };
1719
1720 static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
1721 .crtc_compute_clock = g4x_crtc_compute_clock,
1722 };
1723
1724 static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
1725 .crtc_compute_clock = pnv_crtc_compute_clock,
1726 };
1727
1728 static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
1729 .crtc_compute_clock = i9xx_crtc_compute_clock,
1730 };
1731
1732 static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
1733 .crtc_compute_clock = i8xx_crtc_compute_clock,
1734 };
1735
1736 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1737 struct intel_crtc *crtc)
1738 {
1739 struct intel_display *display = to_intel_display(state);
1740 struct intel_crtc_state *crtc_state =
1741 intel_atomic_get_new_crtc_state(state, crtc);
1742 int ret;
1743
1744 drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1745
1746 memset(&crtc_state->dpll_hw_state, 0,
1747 sizeof(crtc_state->dpll_hw_state));
1748
1749 if (!crtc_state->hw.enable)
1750 return 0;
1751
1752 ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
1753 if (ret) {
1754 drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1755 crtc->base.base.id, crtc->base.name);
1756 return ret;
1757 }
1758
1759 return 0;
1760 }
1761
1762 int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
1763 struct intel_crtc *crtc)
1764 {
1765 struct intel_display *display = to_intel_display(state);
1766 struct intel_crtc_state *crtc_state =
1767 intel_atomic_get_new_crtc_state(state, crtc);
1768 int ret;
1769
1770 drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1771 drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
1772
1773 if (!crtc_state->hw.enable || crtc_state->intel_dpll)
1774 return 0;
1775
1776 if (!display->funcs.dpll->crtc_get_dpll)
1777 return 0;
1778
1779 ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
1780 if (ret) {
1781 drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1782 crtc->base.base.id, crtc->base.name);
1783 return ret;
1784 }
1785
1786 return 0;
1787 }
1788
1789 void
1790 intel_dpll_init_clock_hook(struct intel_display *display)
1791 {
1792 if (DISPLAY_VER(display) >= 14)
1793 display->funcs.dpll = &mtl_dpll_funcs;
1794 else if (display->platform.dg2)
1795 display->funcs.dpll = &dg2_dpll_funcs;
1796 else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
1797 display->funcs.dpll = &hsw_dpll_funcs;
1798 else if (HAS_PCH_SPLIT(display))
1799 display->funcs.dpll = &ilk_dpll_funcs;
1800 else if (display->platform.cherryview)
1801 display->funcs.dpll = &chv_dpll_funcs;
1802 else if (display->platform.valleyview)
1803 display->funcs.dpll = &vlv_dpll_funcs;
1804 else if (display->platform.g4x)
1805 display->funcs.dpll = &g4x_dpll_funcs;
1806 else if (display->platform.pineview)
1807 display->funcs.dpll = &pnv_dpll_funcs;
1808 else if (DISPLAY_VER(display) != 2)
1809 display->funcs.dpll = &i9xx_dpll_funcs;
1810 else
1811 display->funcs.dpll = &i8xx_dpll_funcs;
1812 }
1813
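/*
 * Platforms with a panel power sequencer (PPS) that can write-protect the
 * DPLL registers; on those we must check that the PPS is unlocked before
 * touching the PLL (see i9xx_enable_pll() below).
 */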
1814 static bool i9xx_has_pps(struct intel_display *display)
1815 {
1816 if (display->platform.i830)
1817 return false;
1818
1819 return display->platform.pineview || display->platform.mobile;
1820 }
1821
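/*
 * Enable the DPLL from the precomputed i9xx hw state: program the FP0/FP1
 * dividers and the DPLL control register, then wait for the clocks to
 * stabilize before writing the pixel multiplier.
 */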
1822 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1823 {
1824 struct intel_display *display = to_intel_display(crtc_state);
1825 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1826 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1827 enum pipe pipe = crtc->pipe;
1828 int i;
1829
1830 assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
1831
1832 /* PLL is protected by panel, make sure we can write it */
1833 if (i9xx_has_pps(display))
1834 assert_pps_unlocked(display, pipe);
1835
1836 intel_de_write(display, FP0(pipe), hw_state->fp0);
1837 intel_de_write(display, FP1(pipe), hw_state->fp1);
1838
1839 /*
1840 * Apparently we need to have VGA mode enabled prior to changing
1841 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1842 * dividers, even though the register value does change.
1843 */
1844 intel_de_write(display, DPLL(display, pipe),
1845 hw_state->dpll & ~DPLL_VGA_MODE_DIS);
1846 intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1847
1848 /* Wait for the clocks to stabilize. */
1849 intel_de_posting_read(display, DPLL(display, pipe));
1850 udelay(150);
1851
1852 if (DISPLAY_VER(display) >= 4) {
1853 intel_de_write(display, DPLL_MD(display, pipe),
1854 hw_state->dpll_md);
1855 } else {
1856 /* The pixel multiplier can only be updated once the
1857 * DPLL is enabled and the clocks are stable.
1858 *
1859 * So write it again.
1860 */
1861 intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1862 }
1863
1864 /* We do this three times for luck */
1865 for (i = 0; i < 3; i++) {
1866 intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1867 intel_de_posting_read(display, DPLL(display, pipe));
1868 udelay(150); /* wait for warmup */
1869 }
1870 }
1871
1872 static void vlv_pllb_recal_opamp(struct intel_display *display,
1873 enum dpio_phy phy, enum dpio_channel ch)
1874 {
1875 u32 tmp;
1876
1877 /*
1878 * PLLB opamp always calibrates to max value of 0x3f, force enable it
1879 * and set it to a reasonable value instead.
1880 */
1881 tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
1882 tmp &= 0xffffff00;
1883 tmp |= 0x00000030;
1884 vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
1885
1886 tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
1887 tmp &= 0x00ffffff;
1888 tmp |= 0x8c000000;
1889 vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
1890
1891 tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
1892 tmp &= 0xffffff00;
1893 vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
1894
1895 tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
1896 tmp &= 0x00ffffff;
1897 tmp |= 0xb0000000;
1898 vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
1899 }
1900
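/*
 * Program the VLV DPIO PHY for the PLL via sideband writes: feedback/post
 * dividers, LPF coefficients and the clock source selection.
 */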
1901 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1902 {
1903 struct intel_display *display = to_intel_display(crtc_state);
1904 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1905 const struct dpll *clock = &crtc_state->dpll;
1906 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
1907 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
1908 enum pipe pipe = crtc->pipe;
1909 u32 tmp, coreclk;
1910
1911 vlv_dpio_get(display->drm);
1912
1913 /* See eDP HDMI DPIO driver vbios notes doc */
1914
1915 /* PLL B needs special handling */
1916 if (pipe == PIPE_B)
1917 vlv_pllb_recal_opamp(display, phy, ch);
1918
1919 /* Set up Tx target for periodic Rcomp update */
1920 vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
1921
1922 /* Disable target IRef on PLL */
1923 tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
1924 tmp &= 0x00ffffff;
1925 vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);
1926
1927 /* Disable fast lock */
1928 vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);
1929
1930 /* Set idtafcrecal before PLL is enabled */
1931 tmp = DPIO_M1_DIV(clock->m1) |
1932 DPIO_M2_DIV(clock->m2) |
1933 DPIO_P1_DIV(clock->p1) |
1934 DPIO_P2_DIV(clock->p2) |
1935 DPIO_N_DIV(clock->n) |
1936 DPIO_K_DIV(1);
1937
1938 /*
1939 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1940 * but we don't support that).
1941 * Note: don't use the DAC post divider as it seems unstable.
1942 */
1943 tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
1944 vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
1945
1946 tmp |= DPIO_ENABLE_CALIBRATION;
1947 vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
1948
1949 /* Set HBR and RBR LPF coefficients */
1950 if (crtc_state->port_clock == 162000 ||
1951 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1952 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1953 vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
1954 else
1955 vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);
1956
1957 if (intel_crtc_has_dp_encoder(crtc_state)) {
1958 /* Use SSC source */
1959 if (pipe == PIPE_A)
1960 vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
1961 else
1962 vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
1963 } else { /* HDMI or VGA */
1964 /* Use bend source */
1965 if (pipe == PIPE_A)
1966 vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
1967 else
1968 vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
1969 }
1970
1971 coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
1972 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1973 if (intel_crtc_has_dp_encoder(crtc_state))
1974 coreclk |= 0x01000000;
1975 vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);
1976
1977 vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);
1978
1979 vlv_dpio_put(display->drm);
1980 }
1981
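/* Write the DPLL control register and wait for the PLL to report lock. */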
1982 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1983 {
1984 struct intel_display *display = to_intel_display(crtc_state);
1985 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1986 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1987 enum pipe pipe = crtc->pipe;
1988
1989 intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1990 intel_de_posting_read(display, DPLL(display, pipe));
1991 udelay(150);
1992
1993 if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
1994 drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
1995 }
1996
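/*
 * Enable the reference clock first; if the hw state asks for the VCO to be
 * enabled, program the PHY and enable the PLL, then finally write the
 * pixel multiplier (DPLL_MD).
 */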
1997 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1998 {
1999 struct intel_display *display = to_intel_display(crtc_state);
2000 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2001 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2002 enum pipe pipe = crtc->pipe;
2003
2004 assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2005
2006 /* PLL is protected by panel, make sure we can write it */
2007 assert_pps_unlocked(display, pipe);
2008
2009 /* Enable Refclk */
2010 intel_de_write(display, DPLL(display, pipe),
2011 hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
2012
2013 if (hw_state->dpll & DPLL_VCO_ENABLE) {
2014 vlv_prepare_pll(crtc_state);
2015 _vlv_enable_pll(crtc_state);
2016 }
2017
2018 intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
2019 intel_de_posting_read(display, DPLL_MD(display, pipe));
2020 }
2021
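/*
 * Program the CHV DPIO PHY for the PLL: dividers, M2 fractional divider,
 * digital lock detect threshold and loop filter, then kick off AFC recal.
 */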
2022 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
2023 {
2024 struct intel_display *display = to_intel_display(crtc_state);
2025 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2026 const struct dpll *clock = &crtc_state->dpll;
2027 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2028 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2029 u32 tmp, loopfilter, tribuf_calcntr;
2030 u32 m2_frac;
2031
2032 m2_frac = clock->m2 & 0x3fffff;
2033
2034 vlv_dpio_get(display->drm);
2035
2036 /* p1 and p2 divider */
2037 vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
2038 DPIO_CHV_S1_DIV(5) |
2039 DPIO_CHV_P1_DIV(clock->p1) |
2040 DPIO_CHV_P2_DIV(clock->p2) |
2041 DPIO_CHV_K_DIV(1));
2042
2043 /* Feedback post-divider - m2 */
2044 vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
2045 DPIO_CHV_M2_DIV(clock->m2 >> 22));
2046
2047 /* Feedback refclk divider - n and m1 */
2048 vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
2049 DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
2050 DPIO_CHV_N_DIV(1));
2051
2052 /* M2 fraction division */
2053 vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
2054 DPIO_CHV_M2_FRAC_DIV(m2_frac));
2055
2056 /* M2 fraction division enable */
2057 tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
2058 tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
2059 tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
2060 if (m2_frac)
2061 tmp |= DPIO_CHV_FRAC_DIV_EN;
2062 vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);
2063
2064 /* Program digital lock detect threshold */
2065 tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
2066 tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
2067 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
2068 tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
2069 if (!m2_frac)
2070 tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
2071 vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);
2072
2073 /* Loop filter */
2074 if (clock->vco == 5400000) {
2075 loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
2076 DPIO_CHV_INT_COEFF(0x8) |
2077 DPIO_CHV_GAIN_CTRL(0x1);
2078 tribuf_calcntr = 0x9;
2079 } else if (clock->vco <= 6200000) {
2080 loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
2081 DPIO_CHV_INT_COEFF(0xB) |
2082 DPIO_CHV_GAIN_CTRL(0x3);
2083 tribuf_calcntr = 0x9;
2084 } else if (clock->vco <= 6480000) {
2085 loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2086 DPIO_CHV_INT_COEFF(0x9) |
2087 DPIO_CHV_GAIN_CTRL(0x3);
2088 tribuf_calcntr = 0x8;
2089 } else {
2090 /* Not supported. Apply the same limits as in the max case */
2091 loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2092 DPIO_CHV_INT_COEFF(0x9) |
2093 DPIO_CHV_GAIN_CTRL(0x3);
2094 tribuf_calcntr = 0;
2095 }
2096 vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);
2097
2098 tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
2099 tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
2100 tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
2101 vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);
2102
2103 /* AFC Recal */
2104 vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
2105 vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
2106 DPIO_AFC_RECAL);
2107
2108 vlv_dpio_put(display->drm);
2109 }
2110
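/*
 * Enable the 10bit clock in the PHY, then enable the DPLL itself and wait
 * for it to report lock.
 */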
2111 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
2112 {
2113 struct intel_display *display = to_intel_display(crtc_state);
2114 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2115 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2116 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2117 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2118 enum pipe pipe = crtc->pipe;
2119 u32 tmp;
2120
2121 vlv_dpio_get(display->drm);
2122
2123 /* Re-enable the 10bit clock to the display controller */
2124 tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
2125 tmp |= DPIO_DCLKP_EN;
2126 vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);
2127
2128 vlv_dpio_put(display->drm);
2129
2130 /*
2131 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
2132 */
2133 udelay(1);
2134
2135 /* Enable PLL */
2136 intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
2137
2138 /* Check PLL is locked */
2139 if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2140 drm_err(display->drm, "PLL %d failed to lock\n", pipe);
2141 }
2142
2143 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
2144 {
2145 struct intel_display *display = to_intel_display(crtc_state);
2146 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2147 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2148 enum pipe pipe = crtc->pipe;
2149
2150 assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2151
2152 /* PLL is protected by panel, make sure we can write it */
2153 assert_pps_unlocked(display, pipe);
2154
2155 /* Enable Refclk and SSC */
2156 intel_de_write(display, DPLL(display, pipe),
2157 hw_state->dpll & ~DPLL_VCO_ENABLE);
2158
2159 if (hw_state->dpll & DPLL_VCO_ENABLE) {
2160 chv_prepare_pll(crtc_state);
2161 _chv_enable_pll(crtc_state);
2162 }
2163
2164 if (pipe != PIPE_A) {
2165 /*
2166 * WaPixelRepeatModeFixForC0:chv
2167 *
2168 * DPLLCMD is AWOL. Use chicken bits to propagate
2169 * the value from DPLLBMD to either pipe B or C.
2170 */
2171 intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
2172 intel_de_write(display, DPLL_MD(display, PIPE_B),
2173 hw_state->dpll_md);
2174 intel_de_write(display, CBR4_VLV, 0);
2175 display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
2176
2177 /*
2178 * DPLLB VGA mode also seems to cause problems.
2179 * We should always have it disabled.
2180 */
2181 drm_WARN_ON(display->drm,
2182 (intel_de_read(display, DPLL(display, PIPE_B)) &
2183 DPLL_VGA_MODE_DIS) == 0);
2184 } else {
2185 intel_de_write(display, DPLL_MD(display, pipe),
2186 hw_state->dpll_md);
2187 intel_de_posting_read(display, DPLL_MD(display, pipe));
2188 }
2189 }
2190
2191 /**
2192 * vlv_force_pll_on - forcibly enable just the PLL
2193 * @display: display device
2194 * @pipe: pipe PLL to enable
2195 * @dpll: PLL configuration
2196 *
2197 * Enable the PLL for @pipe using the supplied @dpll config. To be used
2198 * in cases where we need the PLL enabled even when @pipe is not going to
2199 * be enabled.
2200 */
2201 int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
2202 const struct dpll *dpll)
2203 {
2204 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2205 struct intel_crtc_state *crtc_state;
2206
2207 crtc_state = intel_crtc_state_alloc(crtc);
2208 if (!crtc_state)
2209 return -ENOMEM;
2210
2211 crtc_state->cpu_transcoder = (enum transcoder)pipe;
2212 crtc_state->pixel_multiplier = 1;
2213 crtc_state->dpll = *dpll;
2214 crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2215
2216 if (display->platform.cherryview) {
2217 chv_compute_dpll(crtc_state);
2218 chv_enable_pll(crtc_state);
2219 } else {
2220 vlv_compute_dpll(crtc_state);
2221 vlv_enable_pll(crtc_state);
2222 }
2223
2224 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2225
2226 return 0;
2227 }
2228
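/*
 * Disable the VCO while keeping the reference clock (and, for pipes B/C,
 * the CRI clock) running.
 */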
2229 void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
2230 {
2231 u32 val;
2232
2233 /* Make sure the pipe isn't still relying on us */
2234 assert_transcoder_disabled(display, (enum transcoder)pipe);
2235
2236 val = DPLL_INTEGRATED_REF_CLK_VLV |
2237 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2238 if (pipe != PIPE_A)
2239 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2240
2241 intel_de_write(display, DPLL(display, pipe), val);
2242 intel_de_posting_read(display, DPLL(display, pipe));
2243 }
2244
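/*
 * Disable the VCO while keeping the SSC reference clock running, and turn
 * off the 10bit clock to the display controller in the PHY.
 */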
2245 void chv_disable_pll(struct intel_display *display, enum pipe pipe)
2246 {
2247 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2248 enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2249 u32 val;
2250
2251 /* Make sure the pipe isn't still relying on us */
2252 assert_transcoder_disabled(display, (enum transcoder)pipe);
2253
2254 val = DPLL_SSC_REF_CLK_CHV |
2255 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2256 if (pipe != PIPE_A)
2257 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2258
2259 intel_de_write(display, DPLL(display, pipe), val);
2260 intel_de_posting_read(display, DPLL(display, pipe));
2261
2262 vlv_dpio_get(display->drm);
2263
2264 /* Disable 10bit clock to display controller */
2265 val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
2266 val &= ~DPIO_DCLKP_EN;
2267 vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);
2268
2269 vlv_dpio_put(display->drm);
2270 }
2271
2272 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2273 {
2274 struct intel_display *display = to_intel_display(crtc_state);
2275 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2276 enum pipe pipe = crtc->pipe;
2277
2278 /* i830 needs its pipes and pipe PLLs left enabled, so don't disable them here */
2279 if (display->platform.i830)
2280 return;
2281
2282 /* Make sure the pipe isn't still relying on us */
2283 assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2284
2285 intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
2286 intel_de_posting_read(display, DPLL(display, pipe));
2287 }
2288
2290 /**
2291 * vlv_force_pll_off - forcibly disable just the PLL
2292 * @display: display device
2293 * @pipe: pipe PLL to disable
2294 *
2295 * Disable the PLL for @pipe. To be used in cases where the PLL was
2296 * force-enabled even though @pipe was not going to be enabled.
2297 */
2298 void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
2299 {
2300 if (display->platform.cherryview)
2301 chv_disable_pll(display, pipe);
2302 else
2303 vlv_disable_pll(display, pipe);
2304 }
2305
2306 /* Only for pre-ILK configs */
2307 static void assert_pll(struct intel_display *display,
2308 enum pipe pipe, bool state)
2309 {
2310 bool cur_state;
2311
2312 cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2313 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2314 "PLL state assertion failure (expected %s, current %s)\n",
2315 str_on_off(state), str_on_off(cur_state));
2316 }
2317
2318 void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
2319 {
2320 assert_pll(display, pipe, true);
2321 }
2322
2323 void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
2324 {
2325 assert_pll(display, pipe, false);
2326 }
2327