1 /*
2 * Copyright © 2006-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26
27 #include "bxt_dpio_phy_regs.h"
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_cx0_phy.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dkl_phy.h"
34 #include "intel_dkl_phy_regs.h"
35 #include "intel_dpio_phy.h"
36 #include "intel_dpll.h"
37 #include "intel_dpll_mgr.h"
38 #include "intel_hti.h"
39 #include "intel_mg_phy_regs.h"
40 #include "intel_pch_refclk.h"
41 #include "intel_tc.h"
42
43 /**
44 * DOC: Display PLLs
45 *
46 * Display PLLs used for driving outputs vary by platform. While some have
47 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
48 * from a pool. In the latter scenario, it is possible that multiple pipes
49 * share a PLL if their configurations match.
50 *
51 * This file provides an abstraction over display PLLs. The function
52 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
53 * users of a PLL are tracked and that tracking is integrated with the atomic
54 * modeset interface. During an atomic operation, required PLLs can be reserved
55 * for a given CRTC and encoder configuration by calling
56 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
57 * with intel_release_shared_dplls().
58 * Changes to the users are first staged in the atomic state, and then made
59 * effective by calling intel_shared_dpll_swap_state() during the atomic
60 * commit phase.
61 */
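/*
 * As an illustrative sketch (mirroring ibx_get_dpll() further down in this
 * file, not a definitive recipe), a platform's .get_dplls() hook typically
 * reserves a PLL during the atomic check phase like this:
 *
 *	pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
 *				     BIT(DPLL_ID_PCH_PLL_B) | BIT(DPLL_ID_PCH_PLL_A));
 *	if (!pll)
 *		return -EINVAL;
 *
 *	intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state);
 *	crtc_state->shared_dpll = pll;
 *
 * The reservation only touches the atomic state; it becomes effective once
 * intel_shared_dpll_swap_state() runs during the commit phase.
 */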
62
63 /* platform specific hooks for managing DPLLs */
64 struct intel_shared_dpll_funcs {
65 /*
66 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
67 * the pll is not already enabled.
68 */
69 void (*enable)(struct intel_display *display,
70 struct intel_shared_dpll *pll,
71 const struct intel_dpll_hw_state *dpll_hw_state);
72
73 /*
74 * Hook for disabling the pll, called from intel_disable_shared_dpll()
75 * only when it is safe to disable the pll, i.e., there are no more
76 * tracked users for it.
77 */
78 void (*disable)(struct intel_display *display,
79 struct intel_shared_dpll *pll);
80
81 /*
82 * Hook for reading the values currently programmed to the DPLL
83 * registers. This is used for initial hw state readout and state
84 * verification after a mode set.
85 */
86 bool (*get_hw_state)(struct intel_display *display,
87 struct intel_shared_dpll *pll,
88 struct intel_dpll_hw_state *dpll_hw_state);
89
90 /*
91 * Hook for calculating the pll's output frequency based on its passed
92 * in state.
93 */
94 int (*get_freq)(struct intel_display *i915,
95 const struct intel_shared_dpll *pll,
96 const struct intel_dpll_hw_state *dpll_hw_state);
97 };
98
99 struct intel_dpll_mgr {
100 const struct dpll_info *dpll_info;
101
102 int (*compute_dplls)(struct intel_atomic_state *state,
103 struct intel_crtc *crtc,
104 struct intel_encoder *encoder);
105 int (*get_dplls)(struct intel_atomic_state *state,
106 struct intel_crtc *crtc,
107 struct intel_encoder *encoder);
108 void (*put_dplls)(struct intel_atomic_state *state,
109 struct intel_crtc *crtc);
110 void (*update_active_dpll)(struct intel_atomic_state *state,
111 struct intel_crtc *crtc,
112 struct intel_encoder *encoder);
113 void (*update_ref_clks)(struct intel_display *display);
114 void (*dump_hw_state)(struct drm_printer *p,
115 const struct intel_dpll_hw_state *dpll_hw_state);
116 bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
117 const struct intel_dpll_hw_state *b);
118 };
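/*
 * Each platform supplies one intel_dpll_mgr instance wiring up these hooks;
 * see for example pch_pll_mgr and hsw_pll_mgr later in this file.
 */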
119
120 static void
121 intel_atomic_duplicate_dpll_state(struct intel_display *display,
122 struct intel_shared_dpll_state *shared_dpll)
123 {
124 struct intel_shared_dpll *pll;
125 int i;
126
127 /* Copy shared dpll state */
128 for_each_shared_dpll(display, pll, i)
129 shared_dpll[pll->index] = pll->state;
130 }
131
132 static struct intel_shared_dpll_state *
133 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
134 {
135 struct intel_atomic_state *state = to_intel_atomic_state(s);
136 struct intel_display *display = to_intel_display(state);
137
138 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
139
140 if (!state->dpll_set) {
141 state->dpll_set = true;
142
143 intel_atomic_duplicate_dpll_state(display,
144 state->shared_dpll);
145 }
146
147 return state->shared_dpll;
148 }
149
150 /**
151 * intel_get_shared_dpll_by_id - get a DPLL given its id
152 * @display: intel_display device instance
153 * @id: pll id
154 *
155 * Returns:
156 * A pointer to the DPLL with @id
157 */
158 struct intel_shared_dpll *
159 intel_get_shared_dpll_by_id(struct intel_display *display,
160 enum intel_dpll_id id)
161 {
162 struct intel_shared_dpll *pll;
163 int i;
164
165 for_each_shared_dpll(display, pll, i) {
166 if (pll->info->id == id)
167 return pll;
168 }
169
170 MISSING_CASE(id);
171 return NULL;
172 }
173
174 /* For ILK+ */
175 void assert_shared_dpll(struct intel_display *display,
176 struct intel_shared_dpll *pll,
177 bool state)
178 {
179 bool cur_state;
180 struct intel_dpll_hw_state hw_state;
181
182 if (drm_WARN(display->drm, !pll,
183 "asserting DPLL %s with no DPLL\n", str_on_off(state)))
184 return;
185
186 cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
187 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
188 "%s assertion failure (expected %s, current %s)\n",
189 pll->info->name, str_on_off(state),
190 str_on_off(cur_state));
191 }
192
193 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
194 {
195 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
196 }
197
198 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
199 {
200 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
201 }
202
203 static i915_reg_t
204 intel_combo_pll_enable_reg(struct intel_display *display,
205 struct intel_shared_dpll *pll)
206 {
207 if (display->platform.dg1)
208 return DG1_DPLL_ENABLE(pll->info->id);
209 else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
210 (pll->info->id == DPLL_ID_EHL_DPLL4))
211 return MG_PLL_ENABLE(0);
212
213 return ICL_DPLL_ENABLE(pll->info->id);
214 }
215
216 static i915_reg_t
217 intel_tc_pll_enable_reg(struct intel_display *display,
218 struct intel_shared_dpll *pll)
219 {
220 const enum intel_dpll_id id = pll->info->id;
221 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
222
223 if (display->platform.alderlake_p)
224 return ADLP_PORTTC_PLL_ENABLE(tc_port);
225
226 return MG_PLL_ENABLE(tc_port);
227 }
228
229 static void _intel_enable_shared_dpll(struct intel_display *display,
230 struct intel_shared_dpll *pll)
231 {
232 if (pll->info->power_domain)
233 pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
234
235 pll->info->funcs->enable(display, pll, &pll->state.hw_state);
236 pll->on = true;
237 }
238
239 static void _intel_disable_shared_dpll(struct intel_display *display,
240 struct intel_shared_dpll *pll)
241 {
242 pll->info->funcs->disable(display, pll);
243 pll->on = false;
244
245 if (pll->info->power_domain)
246 intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
247 }
248
249 /**
250 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
251 * @crtc_state: CRTC, and its state, which has a shared DPLL
252 *
253 * Enable the shared DPLL used by the CRTC of @crtc_state.
254 */
255 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
256 {
257 struct intel_display *display = to_intel_display(crtc_state);
258 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
259 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
260 unsigned int pipe_mask = BIT(crtc->pipe);
261 unsigned int old_mask;
262
263 if (drm_WARN_ON(display->drm, !pll))
264 return;
265
266 mutex_lock(&display->dpll.lock);
267 old_mask = pll->active_mask;
268
269 if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
270 drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
271 goto out;
272
273 pll->active_mask |= pipe_mask;
274
275 drm_dbg_kms(display->drm,
276 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
277 pll->info->name, pll->active_mask, pll->on,
278 crtc->base.base.id, crtc->base.name);
279
280 if (old_mask) {
281 drm_WARN_ON(display->drm, !pll->on);
282 assert_shared_dpll_enabled(display, pll);
283 goto out;
284 }
285 drm_WARN_ON(display->drm, pll->on);
286
287 drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);
288
289 _intel_enable_shared_dpll(display, pll);
290
291 out:
292 mutex_unlock(&display->dpll.lock);
293 }
294
295 /**
296 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
297 * @crtc_state: CRTC, and its state, which has a shared DPLL
298 *
299 * Disable the shared DPLL used by the CRTC of @crtc_state.
300 */
301 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
302 {
303 struct intel_display *display = to_intel_display(crtc_state);
304 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
305 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
306 unsigned int pipe_mask = BIT(crtc->pipe);
307
308 /* PCH only available on ILK+ */
309 if (DISPLAY_VER(display) < 5)
310 return;
311
312 if (pll == NULL)
313 return;
314
315 mutex_lock(&display->dpll.lock);
316 if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
317 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
318 crtc->base.base.id, crtc->base.name))
319 goto out;
320
321 drm_dbg_kms(display->drm,
322 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
323 pll->info->name, pll->active_mask, pll->on,
324 crtc->base.base.id, crtc->base.name);
325
326 assert_shared_dpll_enabled(display, pll);
327 drm_WARN_ON(display->drm, !pll->on);
328
329 pll->active_mask &= ~pipe_mask;
330 if (pll->active_mask)
331 goto out;
332
333 drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);
334
335 _intel_disable_shared_dpll(display, pll);
336
337 out:
338 mutex_unlock(&display->dpll.lock);
339 }
340
341 static unsigned long
342 intel_dpll_mask_all(struct intel_display *display)
343 {
344 struct intel_shared_dpll *pll;
345 unsigned long dpll_mask = 0;
346 int i;
347
348 for_each_shared_dpll(display, pll, i) {
349 drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
350
351 dpll_mask |= BIT(pll->info->id);
352 }
353
354 return dpll_mask;
355 }
356
357 static struct intel_shared_dpll *
358 intel_find_shared_dpll(struct intel_atomic_state *state,
359 const struct intel_crtc *crtc,
360 const struct intel_dpll_hw_state *dpll_hw_state,
361 unsigned long dpll_mask)
362 {
363 struct intel_display *display = to_intel_display(crtc);
364 unsigned long dpll_mask_all = intel_dpll_mask_all(display);
365 struct intel_shared_dpll_state *shared_dpll;
366 struct intel_shared_dpll *unused_pll = NULL;
367 enum intel_dpll_id id;
368
369 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
370
371 drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);
372
373 for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
374 struct intel_shared_dpll *pll;
375
376 pll = intel_get_shared_dpll_by_id(display, id);
377 if (!pll)
378 continue;
379
380 /* Only want to check enabled timings first */
381 if (shared_dpll[pll->index].pipe_mask == 0) {
382 if (!unused_pll)
383 unused_pll = pll;
384 continue;
385 }
386
387 if (memcmp(dpll_hw_state,
388 &shared_dpll[pll->index].hw_state,
389 sizeof(*dpll_hw_state)) == 0) {
390 drm_dbg_kms(display->drm,
391 "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
392 crtc->base.base.id, crtc->base.name,
393 pll->info->name,
394 shared_dpll[pll->index].pipe_mask,
395 pll->active_mask);
396 return pll;
397 }
398 }
399
400 /* Ok no matching timings, maybe there's a free one? */
401 if (unused_pll) {
402 drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
403 crtc->base.base.id, crtc->base.name,
404 unused_pll->info->name);
405 return unused_pll;
406 }
407
408 return NULL;
409 }
410
411 /**
412 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
413 * @crtc: CRTC on whose behalf the reference is taken
414 * @pll: DPLL for which the reference is taken
415 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
416 *
417 * Take a reference for @pll, tracking its use by @crtc.
418 */
419 static void
420 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
421 const struct intel_shared_dpll *pll,
422 struct intel_shared_dpll_state *shared_dpll_state)
423 {
424 struct intel_display *display = to_intel_display(crtc);
425
426 drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
427
428 shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
429
430 drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
431 crtc->base.base.id, crtc->base.name, pll->info->name);
432 }
433
434 static void
435 intel_reference_shared_dpll(struct intel_atomic_state *state,
436 const struct intel_crtc *crtc,
437 const struct intel_shared_dpll *pll,
438 const struct intel_dpll_hw_state *dpll_hw_state)
439 {
440 struct intel_shared_dpll_state *shared_dpll;
441
442 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
443
444 if (shared_dpll[pll->index].pipe_mask == 0)
445 shared_dpll[pll->index].hw_state = *dpll_hw_state;
446
447 intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
448 }
449
450 /**
451 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
452 * @crtc: CRTC on whose behalf the reference is dropped
453 * @pll: DPLL for which the reference is dropped
454 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
455 *
456 * Drop a reference for @pll, tracking the end of its use by @crtc.
457 */
458 void
459 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
460 const struct intel_shared_dpll *pll,
461 struct intel_shared_dpll_state *shared_dpll_state)
462 {
463 struct intel_display *display = to_intel_display(crtc);
464
465 drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
466
467 shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
468
469 drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
470 crtc->base.base.id, crtc->base.name, pll->info->name);
471 }
472
473 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
474 const struct intel_crtc *crtc,
475 const struct intel_shared_dpll *pll)
476 {
477 struct intel_shared_dpll_state *shared_dpll;
478
479 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
480
481 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
482 }
483
484 static void intel_put_dpll(struct intel_atomic_state *state,
485 struct intel_crtc *crtc)
486 {
487 const struct intel_crtc_state *old_crtc_state =
488 intel_atomic_get_old_crtc_state(state, crtc);
489 struct intel_crtc_state *new_crtc_state =
490 intel_atomic_get_new_crtc_state(state, crtc);
491
492 new_crtc_state->shared_dpll = NULL;
493
494 if (!old_crtc_state->shared_dpll)
495 return;
496
497 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
498 }
499
500 /**
501 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
502 * @state: atomic state
503 *
504 * This is the dpll version of drm_atomic_helper_swap_state() since the
505 * helper does not handle driver-specific global state.
506 *
507 * For consistency with atomic helpers this function does a complete swap,
508 * i.e. it also puts the current state into @state, even though there is no
509 * need for that at this moment.
510 */
511 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
512 {
513 struct intel_display *display = to_intel_display(state);
514 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
515 struct intel_shared_dpll *pll;
516 int i;
517
518 if (!state->dpll_set)
519 return;
520
521 for_each_shared_dpll(display, pll, i)
522 swap(pll->state, shared_dpll[pll->index]);
523 }
524
525 static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
526 struct intel_shared_dpll *pll,
527 struct intel_dpll_hw_state *dpll_hw_state)
528 {
529 struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
530 const enum intel_dpll_id id = pll->info->id;
531 intel_wakeref_t wakeref;
532 u32 val;
533
534 wakeref = intel_display_power_get_if_enabled(display,
535 POWER_DOMAIN_DISPLAY_CORE);
536 if (!wakeref)
537 return false;
538
539 val = intel_de_read(display, PCH_DPLL(id));
540 hw_state->dpll = val;
541 hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
542 hw_state->fp1 = intel_de_read(display, PCH_FP1(id));
543
544 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
545
546 return val & DPLL_VCO_ENABLE;
547 }
548
549 static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
550 {
551 u32 val;
552 bool enabled;
553
554 val = intel_de_read(display, PCH_DREF_CONTROL);
555 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
556 DREF_SUPERSPREAD_SOURCE_MASK));
557 INTEL_DISPLAY_STATE_WARN(display, !enabled,
558 "PCH refclk assertion failure, should be active but is disabled\n");
559 }
560
561 static void ibx_pch_dpll_enable(struct intel_display *display,
562 struct intel_shared_dpll *pll,
563 const struct intel_dpll_hw_state *dpll_hw_state)
564 {
565 const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
566 const enum intel_dpll_id id = pll->info->id;
567
568 /* PCH refclock must be enabled first */
569 ibx_assert_pch_refclk_enabled(display);
570
571 intel_de_write(display, PCH_FP0(id), hw_state->fp0);
572 intel_de_write(display, PCH_FP1(id), hw_state->fp1);
573
574 intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
575
576 /* Wait for the clocks to stabilize. */
577 intel_de_posting_read(display, PCH_DPLL(id));
578 udelay(150);
579
580 /* The pixel multiplier can only be updated once the
581 * DPLL is enabled and the clocks are stable.
582 *
583 * So write it again.
584 */
585 intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
586 intel_de_posting_read(display, PCH_DPLL(id));
587 udelay(200);
588 }
589
590 static void ibx_pch_dpll_disable(struct intel_display *display,
591 struct intel_shared_dpll *pll)
592 {
593 const enum intel_dpll_id id = pll->info->id;
594
595 intel_de_write(display, PCH_DPLL(id), 0);
596 intel_de_posting_read(display, PCH_DPLL(id));
597 udelay(200);
598 }
599
600 static int ibx_compute_dpll(struct intel_atomic_state *state,
601 struct intel_crtc *crtc,
602 struct intel_encoder *encoder)
603 {
604 return 0;
605 }
606
607 static int ibx_get_dpll(struct intel_atomic_state *state,
608 struct intel_crtc *crtc,
609 struct intel_encoder *encoder)
610 {
611 struct intel_display *display = to_intel_display(state);
612 struct drm_i915_private *i915 = to_i915(display->drm);
613 struct intel_crtc_state *crtc_state =
614 intel_atomic_get_new_crtc_state(state, crtc);
615 struct intel_shared_dpll *pll;
616 enum intel_dpll_id id;
617
618 if (HAS_PCH_IBX(i915)) {
619 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
620 id = (enum intel_dpll_id) crtc->pipe;
621 pll = intel_get_shared_dpll_by_id(display, id);
622
623 drm_dbg_kms(display->drm,
624 "[CRTC:%d:%s] using pre-allocated %s\n",
625 crtc->base.base.id, crtc->base.name,
626 pll->info->name);
627 } else {
628 pll = intel_find_shared_dpll(state, crtc,
629 &crtc_state->dpll_hw_state,
630 BIT(DPLL_ID_PCH_PLL_B) |
631 BIT(DPLL_ID_PCH_PLL_A));
632 }
633
634 if (!pll)
635 return -EINVAL;
636
637 /* reference the pll */
638 intel_reference_shared_dpll(state, crtc,
639 pll, &crtc_state->dpll_hw_state);
640
641 crtc_state->shared_dpll = pll;
642
643 return 0;
644 }
645
646 static void ibx_dump_hw_state(struct drm_printer *p,
647 const struct intel_dpll_hw_state *dpll_hw_state)
648 {
649 const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
650
651 drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
652 "fp0: 0x%x, fp1: 0x%x\n",
653 hw_state->dpll,
654 hw_state->dpll_md,
655 hw_state->fp0,
656 hw_state->fp1);
657 }
658
659 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
660 const struct intel_dpll_hw_state *_b)
661 {
662 const struct i9xx_dpll_hw_state *a = &_a->i9xx;
663 const struct i9xx_dpll_hw_state *b = &_b->i9xx;
664
665 return a->dpll == b->dpll &&
666 a->dpll_md == b->dpll_md &&
667 a->fp0 == b->fp0 &&
668 a->fp1 == b->fp1;
669 }
670
671 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
672 .enable = ibx_pch_dpll_enable,
673 .disable = ibx_pch_dpll_disable,
674 .get_hw_state = ibx_pch_dpll_get_hw_state,
675 };
676
677 static const struct dpll_info pch_plls[] = {
678 { .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
679 { .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
680 {}
681 };
682
683 static const struct intel_dpll_mgr pch_pll_mgr = {
684 .dpll_info = pch_plls,
685 .compute_dplls = ibx_compute_dpll,
686 .get_dplls = ibx_get_dpll,
687 .put_dplls = intel_put_dpll,
688 .dump_hw_state = ibx_dump_hw_state,
689 .compare_hw_state = ibx_compare_hw_state,
690 };
691
692 static void hsw_ddi_wrpll_enable(struct intel_display *display,
693 struct intel_shared_dpll *pll,
694 const struct intel_dpll_hw_state *dpll_hw_state)
695 {
696 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
697 const enum intel_dpll_id id = pll->info->id;
698
699 intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
700 intel_de_posting_read(display, WRPLL_CTL(id));
701 udelay(20);
702 }
703
704 static void hsw_ddi_spll_enable(struct intel_display *display,
705 struct intel_shared_dpll *pll,
706 const struct intel_dpll_hw_state *dpll_hw_state)
707 {
708 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
709
710 intel_de_write(display, SPLL_CTL, hw_state->spll);
711 intel_de_posting_read(display, SPLL_CTL);
712 udelay(20);
713 }
714
715 static void hsw_ddi_wrpll_disable(struct intel_display *display,
716 struct intel_shared_dpll *pll)
717 {
718 struct drm_i915_private *i915 = to_i915(display->drm);
719 const enum intel_dpll_id id = pll->info->id;
720
721 intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
722 intel_de_posting_read(display, WRPLL_CTL(id));
723
724 /*
725 * Try to set up the PCH reference clock once all DPLLs
726 * that depend on it have been shut down.
727 */
728 if (display->dpll.pch_ssc_use & BIT(id))
729 intel_init_pch_refclk(i915);
730 }
731
732 static void hsw_ddi_spll_disable(struct intel_display *display,
733 struct intel_shared_dpll *pll)
734 {
735 struct drm_i915_private *i915 = to_i915(display->drm);
736 enum intel_dpll_id id = pll->info->id;
737
738 intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
739 intel_de_posting_read(display, SPLL_CTL);
740
741 /*
742 * Try to set up the PCH reference clock once all DPLLs
743 * that depend on it have been shut down.
744 */
745 if (display->dpll.pch_ssc_use & BIT(id))
746 intel_init_pch_refclk(i915);
747 }
748
749 static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
750 struct intel_shared_dpll *pll,
751 struct intel_dpll_hw_state *dpll_hw_state)
752 {
753 struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
754 const enum intel_dpll_id id = pll->info->id;
755 intel_wakeref_t wakeref;
756 u32 val;
757
758 wakeref = intel_display_power_get_if_enabled(display,
759 POWER_DOMAIN_DISPLAY_CORE);
760 if (!wakeref)
761 return false;
762
763 val = intel_de_read(display, WRPLL_CTL(id));
764 hw_state->wrpll = val;
765
766 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
767
768 return val & WRPLL_PLL_ENABLE;
769 }
770
771 static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
772 struct intel_shared_dpll *pll,
773 struct intel_dpll_hw_state *dpll_hw_state)
774 {
775 struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
776 intel_wakeref_t wakeref;
777 u32 val;
778
779 wakeref = intel_display_power_get_if_enabled(display,
780 POWER_DOMAIN_DISPLAY_CORE);
781 if (!wakeref)
782 return false;
783
784 val = intel_de_read(display, SPLL_CTL);
785 hw_state->spll = val;
786
787 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
788
789 return val & SPLL_PLL_ENABLE;
790 }
791
792 #define LC_FREQ 2700
793 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
794
795 #define P_MIN 2
796 #define P_MAX 64
797 #define P_INC 2
798
799 /* Constraints for PLL good behavior */
800 #define REF_MIN 48
801 #define REF_MAX 400
802 #define VCO_MIN 2400
803 #define VCO_MAX 4800
804
805 struct hsw_wrpll_rnp {
806 unsigned p, n2, r2;
807 };
808
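/*
 * The budget returned below is a tolerance in PPM on the output clock
 * (see the delta/budget comparison in hsw_wrpll_update_rnp()); a budget of
 * 0 effectively requires an exact frequency match.
 */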
809 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
810 {
811 switch (clock) {
812 case 25175000:
813 case 25200000:
814 case 27000000:
815 case 27027000:
816 case 37762500:
817 case 37800000:
818 case 40500000:
819 case 40541000:
820 case 54000000:
821 case 54054000:
822 case 59341000:
823 case 59400000:
824 case 72000000:
825 case 74176000:
826 case 74250000:
827 case 81000000:
828 case 81081000:
829 case 89012000:
830 case 89100000:
831 case 108000000:
832 case 108108000:
833 case 111264000:
834 case 111375000:
835 case 148352000:
836 case 148500000:
837 case 162000000:
838 case 162162000:
839 case 222525000:
840 case 222750000:
841 case 296703000:
842 case 297000000:
843 return 0;
844 case 233500000:
845 case 245250000:
846 case 247750000:
847 case 253250000:
848 case 298000000:
849 return 1500;
850 case 169128000:
851 case 169500000:
852 case 179500000:
853 case 202000000:
854 return 2000;
855 case 256250000:
856 case 262500000:
857 case 270000000:
858 case 272500000:
859 case 273750000:
860 case 280750000:
861 case 281250000:
862 case 286000000:
863 case 291750000:
864 return 4000;
865 case 267250000:
866 case 268500000:
867 return 5000;
868 default:
869 return 1000;
870 }
871 }
872
873 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
874 unsigned int r2, unsigned int n2,
875 unsigned int p,
876 struct hsw_wrpll_rnp *best)
877 {
878 u64 a, b, c, d, diff, diff_best;
879
880 /* No best (r,n,p) yet */
881 if (best->p == 0) {
882 best->p = p;
883 best->n2 = n2;
884 best->r2 = r2;
885 return;
886 }
887
888 /*
889 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
890 * freq2k.
891 *
892 * delta = 1e6 *
893 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
894 * freq2k;
895 *
896 * and we would like delta <= budget.
897 *
898 * If the discrepancy is above the PPM-based budget, always prefer to
899 * improve upon the previous solution. However, if you're within the
900 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
901 */
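/*
 * A sketch of the algebra behind the integer comparisons below: since
 * everything here is a non-negative integer,
 *
 *	delta <= budget
 *	<=> 1e6 * abs(freq2k * p * r2 - LC_FREQ_2K * n2) <= budget * freq2k * p * r2
 *	<=> c <= a	(and likewise d <= b for the current best),
 *
 * so "a < c" means the candidate is over budget.
 */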
902 a = freq2k * budget * p * r2;
903 b = freq2k * budget * best->p * best->r2;
904 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
905 diff_best = abs_diff(freq2k * best->p * best->r2,
906 LC_FREQ_2K * best->n2);
907 c = 1000000 * diff;
908 d = 1000000 * diff_best;
909
910 if (a < c && b < d) {
911 /* If both are above the budget, pick the closer */
912 if (best->p * best->r2 * diff < p * r2 * diff_best) {
913 best->p = p;
914 best->n2 = n2;
915 best->r2 = r2;
916 }
917 } else if (a >= c && b < d) {
918 /* Candidate is within the budget but the current best is not: update. */
919 best->p = p;
920 best->n2 = n2;
921 best->r2 = r2;
922 } else if (a >= c && b >= d) {
923 /* Both are below the limit, so pick the higher n2/(r2*r2) */
924 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
925 best->p = p;
926 best->n2 = n2;
927 best->r2 = r2;
928 }
929 }
930 /* Otherwise a < c && b >= d, do nothing */
931 }
932
933 static void
934 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
935 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
936 {
937 u64 freq2k;
938 unsigned p, n2, r2;
939 struct hsw_wrpll_rnp best = {};
940 unsigned budget;
941
942 freq2k = clock / 100;
943
944 budget = hsw_wrpll_get_budget_for_freq(clock);
945
946 /* Special case handling for 540 MHz pixel clock: bypass WR PLL entirely
947 * and directly pass the LC PLL to it. */
948 if (freq2k == 5400000) {
949 *n2_out = 2;
950 *p_out = 1;
951 *r2_out = 2;
952 return;
953 }
954
955 /*
956 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
957 * the WR PLL.
958 *
959 * We want R so that REF_MIN <= Ref <= REF_MAX.
960 * Injecting R2 = 2 * R gives:
961 * REF_MAX * r2 > LC_FREQ * 2 and
962 * REF_MIN * r2 < LC_FREQ * 2
963 *
964 * Which means the desired boundaries for r2 are:
965 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
966 *
967 */
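/*
 * As a worked example with LC_FREQ = 2700, REF_MIN = 48 and REF_MAX = 400,
 * the outer loop below scans r2 = 14..112; for a given r2, the inner loop
 * scans n2 between VCO_MIN * r2 / LC_FREQ + 1 and VCO_MAX * r2 / LC_FREQ
 * (e.g. n2 = 13..24 for r2 = 14).
 */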
968 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
969 r2 <= LC_FREQ * 2 / REF_MIN;
970 r2++) {
971
972 /*
973 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
974 *
975 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
976 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
977 * VCO_MAX * r2 > n2 * LC_FREQ and
978 * VCO_MIN * r2 < n2 * LC_FREQ)
979 *
980 * Which means the desired boundaries for n2 are:
981 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
982 */
983 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
984 n2 <= VCO_MAX * r2 / LC_FREQ;
985 n2++) {
986
987 for (p = P_MIN; p <= P_MAX; p += P_INC)
988 hsw_wrpll_update_rnp(freq2k, budget,
989 r2, n2, p, &best);
990 }
991 }
992
993 *n2_out = best.n2;
994 *p_out = best.p;
995 *r2_out = best.r2;
996 }
997
998 static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
999 const struct intel_shared_dpll *pll,
1000 const struct intel_dpll_hw_state *dpll_hw_state)
1001 {
1002 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1003 int refclk;
1004 int n, p, r;
1005 u32 wrpll = hw_state->wrpll;
1006
1007 switch (wrpll & WRPLL_REF_MASK) {
1008 case WRPLL_REF_SPECIAL_HSW:
1009 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1010 if (display->platform.haswell && !display->platform.haswell_ult) {
1011 refclk = display->dpll.ref_clks.nssc;
1012 break;
1013 }
1014 fallthrough;
1015 case WRPLL_REF_PCH_SSC:
1016 /*
1017 * We could calculate spread here, but our checking
1018 * code only cares about 5% accuracy, and spread is a max of
1019 * 0.5% downspread.
1020 */
1021 refclk = display->dpll.ref_clks.ssc;
1022 break;
1023 case WRPLL_REF_LCPLL:
1024 refclk = 2700000;
1025 break;
1026 default:
1027 MISSING_CASE(wrpll);
1028 return 0;
1029 }
1030
1031 r = wrpll & WRPLL_DIVIDER_REF_MASK;
1032 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1033 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1034
1035 /* Convert to KHz, p & r have a fixed point portion */
1036 return (refclk * n / 10) / (p * r) * 2;
1037 }
1038
1039 static int
1040 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1041 struct intel_crtc *crtc)
1042 {
1043 struct intel_display *display = to_intel_display(state);
1044 struct intel_crtc_state *crtc_state =
1045 intel_atomic_get_new_crtc_state(state, crtc);
1046 struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1047 unsigned int p, n2, r2;
1048
1049 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1050
1051 hw_state->wrpll =
1052 WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1053 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1054 WRPLL_DIVIDER_POST(p);
1055
1056 crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
1057 &crtc_state->dpll_hw_state);
1058
1059 return 0;
1060 }
1061
1062 static struct intel_shared_dpll *
1063 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1064 struct intel_crtc *crtc)
1065 {
1066 struct intel_crtc_state *crtc_state =
1067 intel_atomic_get_new_crtc_state(state, crtc);
1068
1069 return intel_find_shared_dpll(state, crtc,
1070 &crtc_state->dpll_hw_state,
1071 BIT(DPLL_ID_WRPLL2) |
1072 BIT(DPLL_ID_WRPLL1));
1073 }
1074
1075 static int
1076 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1077 {
1078 struct intel_display *display = to_intel_display(crtc_state);
1079 int clock = crtc_state->port_clock;
1080
1081 switch (clock / 2) {
1082 case 81000:
1083 case 135000:
1084 case 270000:
1085 return 0;
1086 default:
1087 drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
1088 clock);
1089 return -EINVAL;
1090 }
1091 }
1092
1093 static struct intel_shared_dpll *
1094 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1095 {
1096 struct intel_display *display = to_intel_display(crtc_state);
1097 struct intel_shared_dpll *pll;
1098 enum intel_dpll_id pll_id;
1099 int clock = crtc_state->port_clock;
1100
1101 switch (clock / 2) {
1102 case 81000:
1103 pll_id = DPLL_ID_LCPLL_810;
1104 break;
1105 case 135000:
1106 pll_id = DPLL_ID_LCPLL_1350;
1107 break;
1108 case 270000:
1109 pll_id = DPLL_ID_LCPLL_2700;
1110 break;
1111 default:
1112 MISSING_CASE(clock / 2);
1113 return NULL;
1114 }
1115
1116 pll = intel_get_shared_dpll_by_id(display, pll_id);
1117
1118 if (!pll)
1119 return NULL;
1120
1121 return pll;
1122 }
1123
1124 static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
1125 const struct intel_shared_dpll *pll,
1126 const struct intel_dpll_hw_state *dpll_hw_state)
1127 {
1128 int link_clock = 0;
1129
1130 switch (pll->info->id) {
1131 case DPLL_ID_LCPLL_810:
1132 link_clock = 81000;
1133 break;
1134 case DPLL_ID_LCPLL_1350:
1135 link_clock = 135000;
1136 break;
1137 case DPLL_ID_LCPLL_2700:
1138 link_clock = 270000;
1139 break;
1140 default:
1141 drm_WARN(display->drm, 1, "bad port clock sel\n");
1142 break;
1143 }
1144
1145 return link_clock * 2;
1146 }
1147
1148 static int
1149 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1150 struct intel_crtc *crtc)
1151 {
1152 struct intel_crtc_state *crtc_state =
1153 intel_atomic_get_new_crtc_state(state, crtc);
1154 struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1155
1156 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1157 return -EINVAL;
1158
1159 hw_state->spll =
1160 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1161
1162 return 0;
1163 }
1164
1165 static struct intel_shared_dpll *
1166 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1167 struct intel_crtc *crtc)
1168 {
1169 struct intel_crtc_state *crtc_state =
1170 intel_atomic_get_new_crtc_state(state, crtc);
1171
1172 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1173 BIT(DPLL_ID_SPLL));
1174 }
1175
1176 static int hsw_ddi_spll_get_freq(struct intel_display *display,
1177 const struct intel_shared_dpll *pll,
1178 const struct intel_dpll_hw_state *dpll_hw_state)
1179 {
1180 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1181 int link_clock = 0;
1182
1183 switch (hw_state->spll & SPLL_FREQ_MASK) {
1184 case SPLL_FREQ_810MHz:
1185 link_clock = 81000;
1186 break;
1187 case SPLL_FREQ_1350MHz:
1188 link_clock = 135000;
1189 break;
1190 case SPLL_FREQ_2700MHz:
1191 link_clock = 270000;
1192 break;
1193 default:
1194 drm_WARN(display->drm, 1, "bad spll freq\n");
1195 break;
1196 }
1197
1198 return link_clock * 2;
1199 }
1200
1201 static int hsw_compute_dpll(struct intel_atomic_state *state,
1202 struct intel_crtc *crtc,
1203 struct intel_encoder *encoder)
1204 {
1205 struct intel_crtc_state *crtc_state =
1206 intel_atomic_get_new_crtc_state(state, crtc);
1207
1208 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1209 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1210 else if (intel_crtc_has_dp_encoder(crtc_state))
1211 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1212 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1213 return hsw_ddi_spll_compute_dpll(state, crtc);
1214 else
1215 return -EINVAL;
1216 }
1217
1218 static int hsw_get_dpll(struct intel_atomic_state *state,
1219 struct intel_crtc *crtc,
1220 struct intel_encoder *encoder)
1221 {
1222 struct intel_crtc_state *crtc_state =
1223 intel_atomic_get_new_crtc_state(state, crtc);
1224 struct intel_shared_dpll *pll = NULL;
1225
1226 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1227 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1228 else if (intel_crtc_has_dp_encoder(crtc_state))
1229 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1230 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1231 pll = hsw_ddi_spll_get_dpll(state, crtc);
1232
1233 if (!pll)
1234 return -EINVAL;
1235
1236 intel_reference_shared_dpll(state, crtc,
1237 pll, &crtc_state->dpll_hw_state);
1238
1239 crtc_state->shared_dpll = pll;
1240
1241 return 0;
1242 }
1243
1244 static void hsw_update_dpll_ref_clks(struct intel_display *display)
1245 {
1246 display->dpll.ref_clks.ssc = 135000;
1247 /* Non-SSC is only used on non-ULT HSW. */
1248 if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1249 display->dpll.ref_clks.nssc = 24000;
1250 else
1251 display->dpll.ref_clks.nssc = 135000;
1252 }
1253
1254 static void hsw_dump_hw_state(struct drm_printer *p,
1255 const struct intel_dpll_hw_state *dpll_hw_state)
1256 {
1257 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1258
1259 drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1260 hw_state->wrpll, hw_state->spll);
1261 }
1262
1263 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1264 const struct intel_dpll_hw_state *_b)
1265 {
1266 const struct hsw_dpll_hw_state *a = &_a->hsw;
1267 const struct hsw_dpll_hw_state *b = &_b->hsw;
1268
1269 return a->wrpll == b->wrpll &&
1270 a->spll == b->spll;
1271 }
1272
1273 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1274 .enable = hsw_ddi_wrpll_enable,
1275 .disable = hsw_ddi_wrpll_disable,
1276 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1277 .get_freq = hsw_ddi_wrpll_get_freq,
1278 };
1279
1280 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1281 .enable = hsw_ddi_spll_enable,
1282 .disable = hsw_ddi_spll_disable,
1283 .get_hw_state = hsw_ddi_spll_get_hw_state,
1284 .get_freq = hsw_ddi_spll_get_freq,
1285 };
1286
1287 static void hsw_ddi_lcpll_enable(struct intel_display *display,
1288 struct intel_shared_dpll *pll,
1289 const struct intel_dpll_hw_state *hw_state)
1290 {
1291 }
1292
1293 static void hsw_ddi_lcpll_disable(struct intel_display *display,
1294 struct intel_shared_dpll *pll)
1295 {
1296 }
1297
1298 static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
1299 struct intel_shared_dpll *pll,
1300 struct intel_dpll_hw_state *dpll_hw_state)
1301 {
1302 return true;
1303 }
1304
1305 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1306 .enable = hsw_ddi_lcpll_enable,
1307 .disable = hsw_ddi_lcpll_disable,
1308 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1309 .get_freq = hsw_ddi_lcpll_get_freq,
1310 };
1311
1312 static const struct dpll_info hsw_plls[] = {
1313 { .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1314 { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1315 { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1316 { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1317 .always_on = true, },
1318 { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1319 .always_on = true, },
1320 { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1321 .always_on = true, },
1322 {}
1323 };
1324
1325 static const struct intel_dpll_mgr hsw_pll_mgr = {
1326 .dpll_info = hsw_plls,
1327 .compute_dplls = hsw_compute_dpll,
1328 .get_dplls = hsw_get_dpll,
1329 .put_dplls = intel_put_dpll,
1330 .update_ref_clks = hsw_update_dpll_ref_clks,
1331 .dump_hw_state = hsw_dump_hw_state,
1332 .compare_hw_state = hsw_compare_hw_state,
1333 };
1334
1335 struct skl_dpll_regs {
1336 i915_reg_t ctl, cfgcr1, cfgcr2;
1337 };
1338
1339 /* this array is indexed by the *shared* pll id */
1340 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1341 {
1342 /* DPLL 0 */
1343 .ctl = LCPLL1_CTL,
1344 /* DPLL 0 doesn't support HDMI mode */
1345 },
1346 {
1347 /* DPLL 1 */
1348 .ctl = LCPLL2_CTL,
1349 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1350 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1351 },
1352 {
1353 /* DPLL 2 */
1354 .ctl = WRPLL_CTL(0),
1355 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1356 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1357 },
1358 {
1359 /* DPLL 3 */
1360 .ctl = WRPLL_CTL(1),
1361 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1362 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1363 },
1364 };
1365
1366 static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
1367 struct intel_shared_dpll *pll,
1368 const struct skl_dpll_hw_state *hw_state)
1369 {
1370 const enum intel_dpll_id id = pll->info->id;
1371
1372 intel_de_rmw(display, DPLL_CTRL1,
1373 DPLL_CTRL1_HDMI_MODE(id) |
1374 DPLL_CTRL1_SSC(id) |
1375 DPLL_CTRL1_LINK_RATE_MASK(id),
1376 hw_state->ctrl1 << (id * 6));
1377 intel_de_posting_read(display, DPLL_CTRL1);
1378 }
1379
1380 static void skl_ddi_pll_enable(struct intel_display *display,
1381 struct intel_shared_dpll *pll,
1382 const struct intel_dpll_hw_state *dpll_hw_state)
1383 {
1384 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1385 const struct skl_dpll_regs *regs = skl_dpll_regs;
1386 const enum intel_dpll_id id = pll->info->id;
1387
1388 skl_ddi_pll_write_ctrl1(display, pll, hw_state);
1389
1390 intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
1391 intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
1392 intel_de_posting_read(display, regs[id].cfgcr1);
1393 intel_de_posting_read(display, regs[id].cfgcr2);
1394
1395 /* the enable bit is always bit 31 */
1396 intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1397
1398 if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
1399 drm_err(display->drm, "DPLL %d not locked\n", id);
1400 }
1401
1402 static void skl_ddi_dpll0_enable(struct intel_display *display,
1403 struct intel_shared_dpll *pll,
1404 const struct intel_dpll_hw_state *dpll_hw_state)
1405 {
1406 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1407
1408 skl_ddi_pll_write_ctrl1(display, pll, hw_state);
1409 }
1410
1411 static void skl_ddi_pll_disable(struct intel_display *display,
1412 struct intel_shared_dpll *pll)
1413 {
1414 const struct skl_dpll_regs *regs = skl_dpll_regs;
1415 const enum intel_dpll_id id = pll->info->id;
1416
1417 /* the enable bit is always bit 31 */
1418 intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1419 intel_de_posting_read(display, regs[id].ctl);
1420 }
1421
1422 static void skl_ddi_dpll0_disable(struct intel_display *display,
1423 struct intel_shared_dpll *pll)
1424 {
1425 }
1426
1427 static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
1428 struct intel_shared_dpll *pll,
1429 struct intel_dpll_hw_state *dpll_hw_state)
1430 {
1431 struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1432 const struct skl_dpll_regs *regs = skl_dpll_regs;
1433 const enum intel_dpll_id id = pll->info->id;
1434 intel_wakeref_t wakeref;
1435 bool ret;
1436 u32 val;
1437
1438 wakeref = intel_display_power_get_if_enabled(display,
1439 POWER_DOMAIN_DISPLAY_CORE);
1440 if (!wakeref)
1441 return false;
1442
1443 ret = false;
1444
1445 val = intel_de_read(display, regs[id].ctl);
1446 if (!(val & LCPLL_PLL_ENABLE))
1447 goto out;
1448
1449 val = intel_de_read(display, DPLL_CTRL1);
1450 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1451
1452 /* avoid reading back stale values if HDMI mode is not enabled */
1453 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1454 hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
1455 hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
1456 }
1457 ret = true;
1458
1459 out:
1460 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1461
1462 return ret;
1463 }
1464
1465 static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
1466 struct intel_shared_dpll *pll,
1467 struct intel_dpll_hw_state *dpll_hw_state)
1468 {
1469 struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1470 const struct skl_dpll_regs *regs = skl_dpll_regs;
1471 const enum intel_dpll_id id = pll->info->id;
1472 intel_wakeref_t wakeref;
1473 u32 val;
1474 bool ret;
1475
1476 wakeref = intel_display_power_get_if_enabled(display,
1477 POWER_DOMAIN_DISPLAY_CORE);
1478 if (!wakeref)
1479 return false;
1480
1481 ret = false;
1482
1483 /* DPLL0 is always enabled since it drives CDCLK */
1484 val = intel_de_read(display, regs[id].ctl);
1485 if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
1486 goto out;
1487
1488 val = intel_de_read(display, DPLL_CTRL1);
1489 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1490
1491 ret = true;
1492
1493 out:
1494 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1495
1496 return ret;
1497 }
1498
1499 struct skl_wrpll_context {
1500 u64 min_deviation; /* current minimal deviation */
1501 u64 central_freq; /* chosen central freq */
1502 u64 dco_freq; /* chosen dco freq */
1503 unsigned int p; /* chosen divider */
1504 };
1505
1506 /* DCO freq must be within +1%/-6% of the DCO central freq */
1507 #define SKL_DCO_MAX_PDEVIATION 100
1508 #define SKL_DCO_MAX_NDEVIATION 600
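/*
 * skl_wrpll_try_divider() computes the deviation as
 * 10000 * abs_diff(dco_freq, central_freq) / central_freq, i.e. in units
 * of 0.01%, so 100 corresponds to +1% and 600 to -6%.
 */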
1509
1510 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1511 u64 central_freq,
1512 u64 dco_freq,
1513 unsigned int divider)
1514 {
1515 u64 deviation;
1516
1517 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1518 central_freq);
1519
1520 /* positive deviation */
1521 if (dco_freq >= central_freq) {
1522 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1523 deviation < ctx->min_deviation) {
1524 ctx->min_deviation = deviation;
1525 ctx->central_freq = central_freq;
1526 ctx->dco_freq = dco_freq;
1527 ctx->p = divider;
1528 }
1529 /* negative deviation */
1530 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1531 deviation < ctx->min_deviation) {
1532 ctx->min_deviation = deviation;
1533 ctx->central_freq = central_freq;
1534 ctx->dco_freq = dco_freq;
1535 ctx->p = divider;
1536 }
1537 }
1538
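/*
 * Decompose the overall divider p into the P0/P1/P2 stages. As a worked
 * example, p = 20 is even, half = 10 and 10 % 2 == 0, so this yields
 * p0 = 2, p1 = 5, p2 = 2 (and indeed 2 * 5 * 2 = 20).
 */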
1539 static void skl_wrpll_get_multipliers(unsigned int p,
1540 unsigned int *p0 /* out */,
1541 unsigned int *p1 /* out */,
1542 unsigned int *p2 /* out */)
1543 {
1544 /* even dividers */
1545 if (p % 2 == 0) {
1546 unsigned int half = p / 2;
1547
1548 if (half == 1 || half == 2 || half == 3 || half == 5) {
1549 *p0 = 2;
1550 *p1 = 1;
1551 *p2 = half;
1552 } else if (half % 2 == 0) {
1553 *p0 = 2;
1554 *p1 = half / 2;
1555 *p2 = 2;
1556 } else if (half % 3 == 0) {
1557 *p0 = 3;
1558 *p1 = half / 3;
1559 *p2 = 2;
1560 } else if (half % 7 == 0) {
1561 *p0 = 7;
1562 *p1 = half / 7;
1563 *p2 = 2;
1564 }
1565 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1566 *p0 = 3;
1567 *p1 = 1;
1568 *p2 = p / 3;
1569 } else if (p == 5 || p == 7) {
1570 *p0 = p;
1571 *p1 = 1;
1572 *p2 = 1;
1573 } else if (p == 15) {
1574 *p0 = 3;
1575 *p1 = 1;
1576 *p2 = 5;
1577 } else if (p == 21) {
1578 *p0 = 7;
1579 *p1 = 1;
1580 *p2 = 3;
1581 } else if (p == 35) {
1582 *p0 = 7;
1583 *p1 = 1;
1584 *p2 = 5;
1585 }
1586 }
1587
1588 struct skl_wrpll_params {
1589 u32 dco_fraction;
1590 u32 dco_integer;
1591 u32 qdiv_ratio;
1592 u32 qdiv_mode;
1593 u32 kdiv;
1594 u32 pdiv;
1595 u32 central_freq;
1596 };
1597
1598 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1599 u64 afe_clock,
1600 int ref_clock,
1601 u64 central_freq,
1602 u32 p0, u32 p1, u32 p2)
1603 {
1604 u64 dco_freq;
1605
1606 switch (central_freq) {
1607 case 9600000000ULL:
1608 params->central_freq = 0;
1609 break;
1610 case 9000000000ULL:
1611 params->central_freq = 1;
1612 break;
1613 case 8400000000ULL:
1614 params->central_freq = 3;
1615 }
1616
1617 switch (p0) {
1618 case 1:
1619 params->pdiv = 0;
1620 break;
1621 case 2:
1622 params->pdiv = 1;
1623 break;
1624 case 3:
1625 params->pdiv = 2;
1626 break;
1627 case 7:
1628 params->pdiv = 4;
1629 break;
1630 default:
1631 WARN(1, "Incorrect PDiv\n");
1632 }
1633
1634 switch (p2) {
1635 case 5:
1636 params->kdiv = 0;
1637 break;
1638 case 2:
1639 params->kdiv = 1;
1640 break;
1641 case 3:
1642 params->kdiv = 2;
1643 break;
1644 case 1:
1645 params->kdiv = 3;
1646 break;
1647 default:
1648 WARN(1, "Incorrect KDiv\n");
1649 }
1650
1651 params->qdiv_ratio = p1;
1652 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1653
1654 dco_freq = p0 * p1 * p2 * afe_clock;
1655
1656 /*
1657 * Intermediate values are in Hz.
1658 * Divide by MHz to match bspec
1659 */
1660 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1661 params->dco_fraction =
1662 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1663 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1664 }
1665
1666 static int
1667 skl_ddi_calculate_wrpll(int clock,
1668 int ref_clock,
1669 struct skl_wrpll_params *wrpll_params)
1670 {
1671 static const u64 dco_central_freq[3] = { 8400000000ULL,
1672 9000000000ULL,
1673 9600000000ULL };
1674 static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1675 24, 28, 30, 32, 36, 40, 42, 44,
1676 48, 52, 54, 56, 60, 64, 66, 68,
1677 70, 72, 76, 78, 80, 84, 88, 90,
1678 92, 96, 98 };
1679 static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1680 static const struct {
1681 const u8 *list;
1682 int n_dividers;
1683 } dividers[] = {
1684 { even_dividers, ARRAY_SIZE(even_dividers) },
1685 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1686 };
1687 struct skl_wrpll_context ctx = {
1688 .min_deviation = U64_MAX,
1689 };
1690 unsigned int dco, d, i;
1691 unsigned int p0, p1, p2;
1692 u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
1693
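/*
 * Worked example (a sketch, not taken from bspec): for a 148500 kHz port
 * clock the AFE clock is 742.5 MHz; the even divider p = 12 gives a DCO of
 * 8.91 GHz, within -1% of the 9 GHz central frequency, and ends up as the
 * lowest-deviation choice of the scan below.
 */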
1694 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1695 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1696 for (i = 0; i < dividers[d].n_dividers; i++) {
1697 unsigned int p = dividers[d].list[i];
1698 u64 dco_freq = p * afe_clock;
1699
1700 skl_wrpll_try_divider(&ctx,
1701 dco_central_freq[dco],
1702 dco_freq,
1703 p);
1704 /*
1705 * Skip the remaining dividers if we're sure to
1706 				 * have found the definitive divider; we can't
1707 				 * improve on a 0 deviation.
1708 */
1709 if (ctx.min_deviation == 0)
1710 goto skip_remaining_dividers;
1711 }
1712 }
1713
1714 skip_remaining_dividers:
1715 /*
1716 * If a solution is found with an even divider, prefer
1717 * this one.
1718 */
1719 if (d == 0 && ctx.p)
1720 break;
1721 }
1722
1723 if (!ctx.p)
1724 return -EINVAL;
1725
1726 /*
1727 * gcc incorrectly analyses that these can be used without being
1728 * initialized. To be fair, it's hard to guess.
1729 */
1730 p0 = p1 = p2 = 0;
1731 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1732 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1733 ctx.central_freq, p0, p1, p2);
1734
1735 return 0;
1736 }
1737
1738 static int skl_ddi_wrpll_get_freq(struct intel_display *display,
1739 const struct intel_shared_dpll *pll,
1740 const struct intel_dpll_hw_state *dpll_hw_state)
1741 {
1742 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1743 int ref_clock = display->dpll.ref_clks.nssc;
1744 u32 p0, p1, p2, dco_freq;
1745
1746 p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1747 p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1748
1749 if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1750 p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1751 else
1752 p1 = 1;
1753
1754
1755 switch (p0) {
1756 case DPLL_CFGCR2_PDIV_1:
1757 p0 = 1;
1758 break;
1759 case DPLL_CFGCR2_PDIV_2:
1760 p0 = 2;
1761 break;
1762 case DPLL_CFGCR2_PDIV_3:
1763 p0 = 3;
1764 break;
1765 case DPLL_CFGCR2_PDIV_7_INVALID:
1766 /*
1767 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1768 		 * so handle it the same way as PDIV_7.
1769 */
1770 drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1771 fallthrough;
1772 case DPLL_CFGCR2_PDIV_7:
1773 p0 = 7;
1774 break;
1775 default:
1776 MISSING_CASE(p0);
1777 return 0;
1778 }
1779
1780 switch (p2) {
1781 case DPLL_CFGCR2_KDIV_5:
1782 p2 = 5;
1783 break;
1784 case DPLL_CFGCR2_KDIV_2:
1785 p2 = 2;
1786 break;
1787 case DPLL_CFGCR2_KDIV_3:
1788 p2 = 3;
1789 break;
1790 case DPLL_CFGCR2_KDIV_1:
1791 p2 = 1;
1792 break;
1793 default:
1794 MISSING_CASE(p2);
1795 return 0;
1796 }
1797
1798 dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1799 ref_clock;
1800
1801 dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1802 ref_clock / 0x8000;
1803
1804 if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
1805 return 0;
1806
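	/*
	 * dco_freq is in kHz here (ref_clock is in kHz); dividing by
	 * p0 * p1 * p2 gives the AFE clock and the final / 5 converts
	 * that back to the port clock.
	 */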
1807 return dco_freq / (p0 * p1 * p2 * 5);
1808 }
1809
1810 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1811 {
1812 struct intel_display *display = to_intel_display(crtc_state);
1813 struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1814 struct skl_wrpll_params wrpll_params = {};
1815 int ret;
1816
1817 ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1818 display->dpll.ref_clks.nssc, &wrpll_params);
1819 if (ret)
1820 return ret;
1821
1822 /*
1823 * See comment in intel_dpll_hw_state to understand why we always use 0
1824 * as the DPLL id in this function.
1825 */
1826 hw_state->ctrl1 =
1827 DPLL_CTRL1_OVERRIDE(0) |
1828 DPLL_CTRL1_HDMI_MODE(0);
1829
1830 hw_state->cfgcr1 =
1831 DPLL_CFGCR1_FREQ_ENABLE |
1832 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1833 wrpll_params.dco_integer;
1834
1835 hw_state->cfgcr2 =
1836 DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1837 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1838 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1839 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1840 wrpll_params.central_freq;
1841
1842 crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
1843 &crtc_state->dpll_hw_state);
1844
1845 return 0;
1846 }
1847
1848 static int
1849 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1850 {
1851 struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1852 u32 ctrl1;
1853
1854 /*
1855 * See comment in intel_dpll_hw_state to understand why we always use 0
1856 * as the DPLL id in this function.
1857 */
1858 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1859 switch (crtc_state->port_clock / 2) {
1860 case 81000:
1861 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1862 break;
1863 case 135000:
1864 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1865 break;
1866 case 270000:
1867 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1868 break;
1869 /* eDP 1.4 rates */
1870 case 162000:
1871 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1872 break;
1873 case 108000:
1874 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1875 break;
1876 case 216000:
1877 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1878 break;
1879 }
1880
1881 hw_state->ctrl1 = ctrl1;
1882
1883 return 0;
1884 }
1885
1886 static int skl_ddi_lcpll_get_freq(struct intel_display *display,
1887 const struct intel_shared_dpll *pll,
1888 const struct intel_dpll_hw_state *dpll_hw_state)
1889 {
1890 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1891 int link_clock = 0;
1892
1893 switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1894 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1895 case DPLL_CTRL1_LINK_RATE_810:
1896 link_clock = 81000;
1897 break;
1898 case DPLL_CTRL1_LINK_RATE_1080:
1899 link_clock = 108000;
1900 break;
1901 case DPLL_CTRL1_LINK_RATE_1350:
1902 link_clock = 135000;
1903 break;
1904 case DPLL_CTRL1_LINK_RATE_1620:
1905 link_clock = 162000;
1906 break;
1907 case DPLL_CTRL1_LINK_RATE_2160:
1908 link_clock = 216000;
1909 break;
1910 case DPLL_CTRL1_LINK_RATE_2700:
1911 link_clock = 270000;
1912 break;
1913 default:
1914 drm_WARN(display->drm, 1, "Unsupported link rate\n");
1915 break;
1916 }
1917
1918 return link_clock * 2;
1919 }
1920
1921 static int skl_compute_dpll(struct intel_atomic_state *state,
1922 struct intel_crtc *crtc,
1923 struct intel_encoder *encoder)
1924 {
1925 struct intel_crtc_state *crtc_state =
1926 intel_atomic_get_new_crtc_state(state, crtc);
1927
1928 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1929 return skl_ddi_hdmi_pll_dividers(crtc_state);
1930 else if (intel_crtc_has_dp_encoder(crtc_state))
1931 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1932 else
1933 return -EINVAL;
1934 }
1935
1936 static int skl_get_dpll(struct intel_atomic_state *state,
1937 struct intel_crtc *crtc,
1938 struct intel_encoder *encoder)
1939 {
1940 struct intel_crtc_state *crtc_state =
1941 intel_atomic_get_new_crtc_state(state, crtc);
1942 struct intel_shared_dpll *pll;
1943
1944 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1945 pll = intel_find_shared_dpll(state, crtc,
1946 &crtc_state->dpll_hw_state,
1947 BIT(DPLL_ID_SKL_DPLL0));
1948 else
1949 pll = intel_find_shared_dpll(state, crtc,
1950 &crtc_state->dpll_hw_state,
1951 BIT(DPLL_ID_SKL_DPLL3) |
1952 BIT(DPLL_ID_SKL_DPLL2) |
1953 BIT(DPLL_ID_SKL_DPLL1));
1954 if (!pll)
1955 return -EINVAL;
1956
1957 intel_reference_shared_dpll(state, crtc,
1958 pll, &crtc_state->dpll_hw_state);
1959
1960 crtc_state->shared_dpll = pll;
1961
1962 return 0;
1963 }
1964
1965 static int skl_ddi_pll_get_freq(struct intel_display *display,
1966 const struct intel_shared_dpll *pll,
1967 const struct intel_dpll_hw_state *dpll_hw_state)
1968 {
1969 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1970
1971 /*
1972 	 * The ctrl1 register is already shifted for each pll; just use 0 to get
1973 	 * the internal shift for each field.
1974 */
1975 if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1976 return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
1977 else
1978 return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
1979 }
1980
1981 static void skl_update_dpll_ref_clks(struct intel_display *display)
1982 {
1983 /* No SSC ref */
1984 display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
1985 }
1986
1987 static void skl_dump_hw_state(struct drm_printer *p,
1988 const struct intel_dpll_hw_state *dpll_hw_state)
1989 {
1990 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1991
1992 drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1993 hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1994 }
1995
1996 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1997 const struct intel_dpll_hw_state *_b)
1998 {
1999 const struct skl_dpll_hw_state *a = &_a->skl;
2000 const struct skl_dpll_hw_state *b = &_b->skl;
2001
2002 return a->ctrl1 == b->ctrl1 &&
2003 a->cfgcr1 == b->cfgcr1 &&
2004 a->cfgcr2 == b->cfgcr2;
2005 }
2006
2007 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2008 .enable = skl_ddi_pll_enable,
2009 .disable = skl_ddi_pll_disable,
2010 .get_hw_state = skl_ddi_pll_get_hw_state,
2011 .get_freq = skl_ddi_pll_get_freq,
2012 };
2013
2014 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2015 .enable = skl_ddi_dpll0_enable,
2016 .disable = skl_ddi_dpll0_disable,
2017 .get_hw_state = skl_ddi_dpll0_get_hw_state,
2018 .get_freq = skl_ddi_pll_get_freq,
2019 };
2020
2021 static const struct dpll_info skl_plls[] = {
2022 { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2023 .always_on = true, },
2024 { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2025 { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2026 { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2027 {}
2028 };
2029
2030 static const struct intel_dpll_mgr skl_pll_mgr = {
2031 .dpll_info = skl_plls,
2032 .compute_dplls = skl_compute_dpll,
2033 .get_dplls = skl_get_dpll,
2034 .put_dplls = intel_put_dpll,
2035 .update_ref_clks = skl_update_dpll_ref_clks,
2036 .dump_hw_state = skl_dump_hw_state,
2037 .compare_hw_state = skl_compare_hw_state,
2038 };
2039
2040 static void bxt_ddi_pll_enable(struct intel_display *display,
2041 struct intel_shared_dpll *pll,
2042 const struct intel_dpll_hw_state *dpll_hw_state)
2043 {
2044 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2045 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2046 enum dpio_phy phy = DPIO_PHY0;
2047 enum dpio_channel ch = DPIO_CH0;
2048 u32 temp;
2049
2050 bxt_port_to_phy_channel(display, port, &phy, &ch);
2051
2052 /* Non-SSC reference */
2053 intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2054
2055 if (display->platform.geminilake) {
2056 intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
2057 0, PORT_PLL_POWER_ENABLE);
2058
2059 if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
2060 PORT_PLL_POWER_STATE), 200))
2061 drm_err(display->drm,
2062 "Power state not set for PLL:%d\n", port);
2063 }
2064
2065 /* Disable 10 bit clock */
2066 intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
2067 PORT_PLL_10BIT_CLK_ENABLE, 0);
2068
2069 /* Write P1 & P2 */
2070 intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
2071 PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2072
2073 /* Write M2 integer */
2074 intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
2075 PORT_PLL_M2_INT_MASK, hw_state->pll0);
2076
2077 /* Write N */
2078 intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
2079 PORT_PLL_N_MASK, hw_state->pll1);
2080
2081 /* Write M2 fraction */
2082 intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
2083 PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2084
2085 /* Write M2 fraction enable */
2086 intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
2087 PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2088
2089 /* Write coeff */
2090 temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
2091 temp &= ~PORT_PLL_PROP_COEFF_MASK;
2092 temp &= ~PORT_PLL_INT_COEFF_MASK;
2093 temp &= ~PORT_PLL_GAIN_CTL_MASK;
2094 temp |= hw_state->pll6;
2095 intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);
2096
2097 /* Write calibration val */
2098 intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
2099 PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2100
2101 intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
2102 PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2103
2104 temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
2105 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2106 temp &= ~PORT_PLL_DCO_AMP_MASK;
2107 temp |= hw_state->pll10;
2108 intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);
2109
2110 /* Recalibrate with new settings */
2111 temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
2112 temp |= PORT_PLL_RECALIBRATE;
2113 intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2114 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2115 temp |= hw_state->ebb4;
2116 intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2117
2118 /* Enable PLL */
2119 intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2120 intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
2121
2122 if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2123 200))
2124 drm_err(display->drm, "PLL %d not locked\n", port);
2125
2126 if (display->platform.geminilake) {
2127 temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2128 temp |= DCC_DELAY_RANGE_2;
2129 intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2130 }
2131
2132 /*
2133 	 * While we write to the group register to program all lanes at once, we
2134 	 * can only read back individual lane registers; we pick lanes 0/1 for that.
2135 */
2136 temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
2137 temp &= ~LANE_STAGGER_MASK;
2138 temp &= ~LANESTAGGER_STRAP_OVRD;
2139 temp |= hw_state->pcsdw12;
2140 intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2141 }
2142
2143 static void bxt_ddi_pll_disable(struct intel_display *display,
2144 struct intel_shared_dpll *pll)
2145 {
2146 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2147
2148 intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2149 intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
2150
2151 if (display->platform.geminilake) {
2152 intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
2153 PORT_PLL_POWER_ENABLE, 0);
2154
2155 if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
2156 PORT_PLL_POWER_STATE), 200))
2157 drm_err(display->drm,
2158 "Power state not reset for PLL:%d\n", port);
2159 }
2160 }
2161
2162 static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
2163 struct intel_shared_dpll *pll,
2164 struct intel_dpll_hw_state *dpll_hw_state)
2165 {
2166 struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2167 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2168 intel_wakeref_t wakeref;
2169 enum dpio_phy phy;
2170 enum dpio_channel ch;
2171 u32 val;
2172 bool ret;
2173
2174 bxt_port_to_phy_channel(display, port, &phy, &ch);
2175
2176 wakeref = intel_display_power_get_if_enabled(display,
2177 POWER_DOMAIN_DISPLAY_CORE);
2178 if (!wakeref)
2179 return false;
2180
2181 ret = false;
2182
2183 val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
2184 if (!(val & PORT_PLL_ENABLE))
2185 goto out;
2186
2187 hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
2188 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2189
2190 hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
2191 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2192
2193 hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
2194 hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2195
2196 hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
2197 hw_state->pll1 &= PORT_PLL_N_MASK;
2198
2199 hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
2200 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2201
2202 hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
2203 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2204
2205 hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
2206 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2207 PORT_PLL_INT_COEFF_MASK |
2208 PORT_PLL_GAIN_CTL_MASK;
2209
2210 hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
2211 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2212
2213 hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
2214 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2215
2216 hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
2217 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2218 PORT_PLL_DCO_AMP_MASK;
2219
2220 /*
2221 	 * While we write to the group register to program all lanes at once, we
2222 	 * can only read back individual lane registers. We configure all lanes the same way, so
2223 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2224 */
2225 hw_state->pcsdw12 = intel_de_read(display,
2226 BXT_PORT_PCS_DW12_LN01(phy, ch));
2227 if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2228 drm_dbg(display->drm,
2229 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2230 hw_state->pcsdw12,
2231 intel_de_read(display,
2232 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2233 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2234
2235 ret = true;
2236
2237 out:
2238 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2239
2240 return ret;
2241 }
2242
2243 /* pre-calculated values for DP linkrates */
2244 static const struct dpll bxt_dp_clk_val[] = {
2245 /* m2 is .22 binary fixed point */
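	/* e.g. (illustrative) 0x819999a = 32 + 0x19999a / 0x400000 ~ 32.4 */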
2246 { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2247 { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2248 { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2249 { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2250 { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2251 { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2252 { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2253 };
2254
2255 static int
2256 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2257 struct dpll *clk_div)
2258 {
2259 struct intel_display *display = to_intel_display(crtc_state);
2260
2261 /* Calculate HDMI div */
2262 /*
2263 * FIXME: tie the following calculation into
2264 * i9xx_crtc_compute_clock
2265 */
2266 if (!bxt_find_best_dpll(crtc_state, clk_div))
2267 return -EINVAL;
2268
2269 drm_WARN_ON(display->drm, clk_div->m1 != 2);
2270
2271 return 0;
2272 }
2273
2274 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2275 struct dpll *clk_div)
2276 {
2277 struct intel_display *display = to_intel_display(crtc_state);
2278 int i;
2279
2280 *clk_div = bxt_dp_clk_val[0];
2281 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2282 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2283 *clk_div = bxt_dp_clk_val[i];
2284 break;
2285 }
2286 }
2287
2288 chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
2289
2290 drm_WARN_ON(display->drm, clk_div->vco == 0 ||
2291 clk_div->dot != crtc_state->port_clock);
2292 }
2293
2294 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2295 const struct dpll *clk_div)
2296 {
2297 struct intel_display *display = to_intel_display(crtc_state);
2298 struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2299 int clock = crtc_state->port_clock;
2300 int vco = clk_div->vco;
2301 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2302 u32 lanestagger;
2303
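	/* vco is in kHz here (e.g. 6200000 == 6.2 GHz), as computed by chv_calc_dpll_params() */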
2304 if (vco >= 6200000 && vco <= 6700000) {
2305 prop_coef = 4;
2306 int_coef = 9;
2307 gain_ctl = 3;
2308 targ_cnt = 8;
2309 } else if ((vco > 5400000 && vco < 6200000) ||
2310 (vco >= 4800000 && vco < 5400000)) {
2311 prop_coef = 5;
2312 int_coef = 11;
2313 gain_ctl = 3;
2314 targ_cnt = 9;
2315 } else if (vco == 5400000) {
2316 prop_coef = 3;
2317 int_coef = 8;
2318 gain_ctl = 1;
2319 targ_cnt = 9;
2320 } else {
2321 drm_err(display->drm, "Invalid VCO\n");
2322 return -EINVAL;
2323 }
2324
2325 if (clock > 270000)
2326 lanestagger = 0x18;
2327 else if (clock > 135000)
2328 lanestagger = 0x0d;
2329 else if (clock > 67000)
2330 lanestagger = 0x07;
2331 else if (clock > 33000)
2332 lanestagger = 0x04;
2333 else
2334 lanestagger = 0x02;
2335
2336 hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2337 hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2338 hw_state->pll1 = PORT_PLL_N(clk_div->n);
2339 hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2340
2341 if (clk_div->m2 & 0x3fffff)
2342 hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2343
2344 hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2345 PORT_PLL_INT_COEFF(int_coef) |
2346 PORT_PLL_GAIN_CTL(gain_ctl);
2347
2348 hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2349
2350 hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2351
2352 hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2353 PORT_PLL_DCO_AMP_OVR_EN_H;
2354
2355 hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2356
2357 hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2358
2359 return 0;
2360 }
2361
2362 static int bxt_ddi_pll_get_freq(struct intel_display *display,
2363 const struct intel_shared_dpll *pll,
2364 const struct intel_dpll_hw_state *dpll_hw_state)
2365 {
2366 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2367 struct dpll clock;
2368
2369 clock.m1 = 2;
2370 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2371 if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2372 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2373 hw_state->pll2);
2374 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2375 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2376 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2377
2378 return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
2379 }
2380
2381 static int
2382 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2383 {
2384 struct dpll clk_div = {};
2385
2386 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2387
2388 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2389 }
2390
2391 static int
2392 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2393 {
2394 struct intel_display *display = to_intel_display(crtc_state);
2395 struct dpll clk_div = {};
2396 int ret;
2397
2398 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2399
2400 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2401 if (ret)
2402 return ret;
2403
2404 crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
2405 &crtc_state->dpll_hw_state);
2406
2407 return 0;
2408 }
2409
2410 static int bxt_compute_dpll(struct intel_atomic_state *state,
2411 struct intel_crtc *crtc,
2412 struct intel_encoder *encoder)
2413 {
2414 struct intel_crtc_state *crtc_state =
2415 intel_atomic_get_new_crtc_state(state, crtc);
2416
2417 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2418 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2419 else if (intel_crtc_has_dp_encoder(crtc_state))
2420 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2421 else
2422 return -EINVAL;
2423 }
2424
2425 static int bxt_get_dpll(struct intel_atomic_state *state,
2426 struct intel_crtc *crtc,
2427 struct intel_encoder *encoder)
2428 {
2429 struct intel_display *display = to_intel_display(state);
2430 struct intel_crtc_state *crtc_state =
2431 intel_atomic_get_new_crtc_state(state, crtc);
2432 struct intel_shared_dpll *pll;
2433 enum intel_dpll_id id;
2434
2435 /* 1:1 mapping between ports and PLLs */
2436 id = (enum intel_dpll_id) encoder->port;
2437 pll = intel_get_shared_dpll_by_id(display, id);
2438
2439 drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2440 crtc->base.base.id, crtc->base.name, pll->info->name);
2441
2442 intel_reference_shared_dpll(state, crtc,
2443 pll, &crtc_state->dpll_hw_state);
2444
2445 crtc_state->shared_dpll = pll;
2446
2447 return 0;
2448 }
2449
2450 static void bxt_update_dpll_ref_clks(struct intel_display *display)
2451 {
2452 display->dpll.ref_clks.ssc = 100000;
2453 display->dpll.ref_clks.nssc = 100000;
2454 /* DSI non-SSC ref 19.2MHz */
2455 }
2456
2457 static void bxt_dump_hw_state(struct drm_printer *p,
2458 const struct intel_dpll_hw_state *dpll_hw_state)
2459 {
2460 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2461
2462 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2463 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2464 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2465 hw_state->ebb0, hw_state->ebb4,
2466 hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2467 hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2468 hw_state->pcsdw12);
2469 }
2470
2471 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2472 const struct intel_dpll_hw_state *_b)
2473 {
2474 const struct bxt_dpll_hw_state *a = &_a->bxt;
2475 const struct bxt_dpll_hw_state *b = &_b->bxt;
2476
2477 return a->ebb0 == b->ebb0 &&
2478 a->ebb4 == b->ebb4 &&
2479 a->pll0 == b->pll0 &&
2480 a->pll1 == b->pll1 &&
2481 a->pll2 == b->pll2 &&
2482 a->pll3 == b->pll3 &&
2483 a->pll6 == b->pll6 &&
2484 a->pll8 == b->pll8 &&
2485 		a->pll9 == b->pll9 &&
		a->pll10 == b->pll10 &&
2486 a->pcsdw12 == b->pcsdw12;
2487 }
2488
2489 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2490 .enable = bxt_ddi_pll_enable,
2491 .disable = bxt_ddi_pll_disable,
2492 .get_hw_state = bxt_ddi_pll_get_hw_state,
2493 .get_freq = bxt_ddi_pll_get_freq,
2494 };
2495
2496 static const struct dpll_info bxt_plls[] = {
2497 { .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2498 { .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2499 { .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2500 {}
2501 };
2502
2503 static const struct intel_dpll_mgr bxt_pll_mgr = {
2504 .dpll_info = bxt_plls,
2505 .compute_dplls = bxt_compute_dpll,
2506 .get_dplls = bxt_get_dpll,
2507 .put_dplls = intel_put_dpll,
2508 .update_ref_clks = bxt_update_dpll_ref_clks,
2509 .dump_hw_state = bxt_dump_hw_state,
2510 .compare_hw_state = bxt_compare_hw_state,
2511 };
2512
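/*
 * Split the total divider into pdiv/qdiv/kdiv such that
 * pdiv * qdiv * kdiv == bestdiv, e.g. (illustrative) 20 -> 2 * 5 * 2
 * and 15 -> 5 * 1 * 3.
 */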
2513 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2514 int *qdiv, int *kdiv)
2515 {
2516 /* even dividers */
2517 if (bestdiv % 2 == 0) {
2518 if (bestdiv == 2) {
2519 *pdiv = 2;
2520 *qdiv = 1;
2521 *kdiv = 1;
2522 } else if (bestdiv % 4 == 0) {
2523 *pdiv = 2;
2524 *qdiv = bestdiv / 4;
2525 *kdiv = 2;
2526 } else if (bestdiv % 6 == 0) {
2527 *pdiv = 3;
2528 *qdiv = bestdiv / 6;
2529 *kdiv = 2;
2530 } else if (bestdiv % 5 == 0) {
2531 *pdiv = 5;
2532 *qdiv = bestdiv / 10;
2533 *kdiv = 2;
2534 } else if (bestdiv % 14 == 0) {
2535 *pdiv = 7;
2536 *qdiv = bestdiv / 14;
2537 *kdiv = 2;
2538 }
2539 } else {
2540 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2541 *pdiv = bestdiv;
2542 *qdiv = 1;
2543 *kdiv = 1;
2544 } else { /* 9, 15, 21 */
2545 *pdiv = bestdiv / 3;
2546 *qdiv = 1;
2547 *kdiv = 3;
2548 }
2549 }
2550 }
2551
2552 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2553 u32 dco_freq, u32 ref_freq,
2554 int pdiv, int qdiv, int kdiv)
2555 {
2556 u32 dco;
2557
2558 switch (kdiv) {
2559 case 1:
2560 params->kdiv = 1;
2561 break;
2562 case 2:
2563 params->kdiv = 2;
2564 break;
2565 case 3:
2566 params->kdiv = 4;
2567 break;
2568 default:
2569 WARN(1, "Incorrect KDiv\n");
2570 }
2571
2572 switch (pdiv) {
2573 case 2:
2574 params->pdiv = 1;
2575 break;
2576 case 3:
2577 params->pdiv = 2;
2578 break;
2579 case 5:
2580 params->pdiv = 4;
2581 break;
2582 case 7:
2583 params->pdiv = 8;
2584 break;
2585 default:
2586 WARN(1, "Incorrect PDiv\n");
2587 }
2588
2589 WARN_ON(kdiv != 2 && qdiv != 1);
2590
2591 params->qdiv_ratio = qdiv;
2592 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2593
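	/*
	 * Illustrative example: dco_freq = 8100000 kHz with a 19.2 MHz
	 * reference gives dco = 8100000 / 19200 = 421.875 in .15 fixed point,
	 * i.e. dco_integer = 0x1A5 and dco_fraction = 0x7000.
	 */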
2594 dco = div_u64((u64)dco_freq << 15, ref_freq);
2595
2596 params->dco_integer = dco >> 15;
2597 params->dco_fraction = dco & 0x7fff;
2598 }
2599
2600 /*
2601 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2602 * Program half of the nominal DCO divider fraction value.
2603 */
2604 static bool
2605 ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
2606 {
2607 return ((display->platform.elkhartlake &&
2608 IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
2609 display->platform.tigerlake ||
2610 display->platform.alderlake_s ||
2611 display->platform.alderlake_p) &&
2612 display->dpll.ref_clks.nssc == 38400;
2613 }
2614
2615 struct icl_combo_pll_params {
2616 int clock;
2617 struct skl_wrpll_params wrpll;
2618 };
2619
2620 /*
2621  * These values are already adjusted: they're the bits we write to the
2622 * registers, not the logical values.
2623 */
2624 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2625 { 540000,
2626 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2627 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2628 { 270000,
2629 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2630 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2631 { 162000,
2632 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2633 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2634 { 324000,
2635 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2636 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2637 { 216000,
2638 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2639 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2640 { 432000,
2641 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2642 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2643 { 648000,
2644 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2645 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2646 { 810000,
2647 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2648 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2649 };
2650
2651
2652 /* Also used for 38.4 MHz values. */
2653 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2654 { 540000,
2655 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2656 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2657 { 270000,
2658 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2659 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2660 { 162000,
2661 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2662 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2663 { 324000,
2664 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2665 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2666 { 216000,
2667 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2668 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2669 { 432000,
2670 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2671 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2672 { 648000,
2673 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2674 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2675 { 810000,
2676 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2677 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2678 };
2679
2680 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2681 .dco_integer = 0x151, .dco_fraction = 0x4000,
2682 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2683 };
2684
2685 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2686 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2687 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2688 };
2689
2690 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2691 .dco_integer = 0x54, .dco_fraction = 0x3000,
2692 /* the following params are unused */
2693 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2694 };
2695
2696 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2697 .dco_integer = 0x43, .dco_fraction = 0x4000,
2698 /* the following params are unused */
2699 };
2700
2701 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2702 struct skl_wrpll_params *pll_params)
2703 {
2704 struct intel_display *display = to_intel_display(crtc_state);
2705 const struct icl_combo_pll_params *params =
2706 display->dpll.ref_clks.nssc == 24000 ?
2707 icl_dp_combo_pll_24MHz_values :
2708 icl_dp_combo_pll_19_2MHz_values;
2709 int clock = crtc_state->port_clock;
2710 int i;
2711
2712 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2713 if (clock == params[i].clock) {
2714 *pll_params = params[i].wrpll;
2715 return 0;
2716 }
2717 }
2718
2719 MISSING_CASE(clock);
2720 return -EINVAL;
2721 }
2722
2723 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2724 struct skl_wrpll_params *pll_params)
2725 {
2726 struct intel_display *display = to_intel_display(crtc_state);
2727
2728 if (DISPLAY_VER(display) >= 12) {
2729 switch (display->dpll.ref_clks.nssc) {
2730 default:
2731 MISSING_CASE(display->dpll.ref_clks.nssc);
2732 fallthrough;
2733 case 19200:
2734 case 38400:
2735 *pll_params = tgl_tbt_pll_19_2MHz_values;
2736 break;
2737 case 24000:
2738 *pll_params = tgl_tbt_pll_24MHz_values;
2739 break;
2740 }
2741 } else {
2742 switch (display->dpll.ref_clks.nssc) {
2743 default:
2744 MISSING_CASE(display->dpll.ref_clks.nssc);
2745 fallthrough;
2746 case 19200:
2747 case 38400:
2748 *pll_params = icl_tbt_pll_19_2MHz_values;
2749 break;
2750 case 24000:
2751 *pll_params = icl_tbt_pll_24MHz_values;
2752 break;
2753 }
2754 }
2755
2756 return 0;
2757 }
2758
2759 static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
2760 const struct intel_shared_dpll *pll,
2761 const struct intel_dpll_hw_state *dpll_hw_state)
2762 {
2763 /*
2764 	 * The PLL outputs multiple frequencies at the same time; the selection is
2765 	 * made at the DDI clock mux level.
2766 */
2767 drm_WARN_ON(display->drm, 1);
2768
2769 return 0;
2770 }
2771
2772 static int icl_wrpll_ref_clock(struct intel_display *display)
2773 {
2774 int ref_clock = display->dpll.ref_clks.nssc;
2775
2776 /*
2777 * For ICL+, the spec states: if reference frequency is 38.4,
2778 * use 19.2 because the DPLL automatically divides that by 2.
2779 */
2780 if (ref_clock == 38400)
2781 ref_clock = 19200;
2782
2783 return ref_clock;
2784 }
2785
2786 static int
2787 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2788 struct skl_wrpll_params *wrpll_params)
2789 {
2790 struct intel_display *display = to_intel_display(crtc_state);
2791 int ref_clock = icl_wrpll_ref_clock(display);
2792 u32 afe_clock = crtc_state->port_clock * 5;
2793 u32 dco_min = 7998000;
2794 u32 dco_max = 10000000;
2795 u32 dco_mid = (dco_min + dco_max) / 2;
2796 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2797 18, 20, 24, 28, 30, 32, 36, 40,
2798 42, 44, 48, 50, 52, 54, 56, 60,
2799 64, 66, 68, 70, 72, 76, 78, 80,
2800 84, 88, 90, 92, 96, 98, 100, 102,
2801 3, 5, 7, 9, 15, 21 };
2802 u32 dco, best_dco = 0, dco_centrality = 0;
2803 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2804 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2805
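	/*
	 * Illustrative example: for a 594 MHz HDMI port clock the AFE clock is
	 * 2970000 kHz and only divider 3 lands in the DCO range, giving a DCO
	 * of 8910000 kHz.
	 */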
2806 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2807 dco = afe_clock * dividers[d];
2808
2809 if (dco <= dco_max && dco >= dco_min) {
2810 dco_centrality = abs(dco - dco_mid);
2811
2812 if (dco_centrality < best_dco_centrality) {
2813 best_dco_centrality = dco_centrality;
2814 best_div = dividers[d];
2815 best_dco = dco;
2816 }
2817 }
2818 }
2819
2820 if (best_div == 0)
2821 return -EINVAL;
2822
2823 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2824 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2825 pdiv, qdiv, kdiv);
2826
2827 return 0;
2828 }
2829
2830 static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
2831 const struct intel_shared_dpll *pll,
2832 const struct intel_dpll_hw_state *dpll_hw_state)
2833 {
2834 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2835 int ref_clock = icl_wrpll_ref_clock(display);
2836 u32 dco_fraction;
2837 u32 p0, p1, p2, dco_freq;
2838
2839 p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2840 p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2841
2842 if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2843 p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2844 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2845 else
2846 p1 = 1;
2847
2848 switch (p0) {
2849 case DPLL_CFGCR1_PDIV_2:
2850 p0 = 2;
2851 break;
2852 case DPLL_CFGCR1_PDIV_3:
2853 p0 = 3;
2854 break;
2855 case DPLL_CFGCR1_PDIV_5:
2856 p0 = 5;
2857 break;
2858 case DPLL_CFGCR1_PDIV_7:
2859 p0 = 7;
2860 break;
2861 }
2862
2863 switch (p2) {
2864 case DPLL_CFGCR1_KDIV_1:
2865 p2 = 1;
2866 break;
2867 case DPLL_CFGCR1_KDIV_2:
2868 p2 = 2;
2869 break;
2870 case DPLL_CFGCR1_KDIV_3:
2871 p2 = 3;
2872 break;
2873 }
2874
2875 dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2876 ref_clock;
2877
2878 dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2879 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2880
2881 if (ehl_combo_pll_div_frac_wa_needed(display))
2882 dco_fraction *= 2;
2883
2884 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2885
2886 if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
2887 return 0;
2888
2889 return dco_freq / (p0 * p1 * p2 * 5);
2890 }
2891
2892 static void icl_calc_dpll_state(struct intel_display *display,
2893 const struct skl_wrpll_params *pll_params,
2894 struct intel_dpll_hw_state *dpll_hw_state)
2895 {
2896 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2897 u32 dco_fraction = pll_params->dco_fraction;
2898
2899 if (ehl_combo_pll_div_frac_wa_needed(display))
2900 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
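	/* e.g. (illustrative) a nominal dco_fraction of 0x7000 is programmed as 0x3800 */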
2901
2902 hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2903 pll_params->dco_integer;
2904
2905 hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2906 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2907 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2908 DPLL_CFGCR1_PDIV(pll_params->pdiv);
2909
2910 if (DISPLAY_VER(display) >= 12)
2911 hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2912 else
2913 hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2914
2915 if (display->vbt.override_afc_startup)
2916 hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
2917 }
2918
2919 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2920 u32 *target_dco_khz,
2921 struct icl_dpll_hw_state *hw_state,
2922 bool is_dkl)
2923 {
2924 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2925 u32 dco_min_freq, dco_max_freq;
2926 unsigned int i;
2927 int div2;
2928
2929 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2930 dco_max_freq = is_dp ? 8100000 : 10000000;
2931
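	/*
	 * Illustrative example: for DP at 2.7 GHz (clock_khz = 270000) the
	 * target DCO is exactly 8100000 kHz, which the loops below hit with
	 * div1 = 3 and div2 = 2 (3 * 2 * 270000 * 5 = 8100000).
	 */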
2932 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2933 int div1 = div1_vals[i];
2934
2935 for (div2 = 10; div2 > 0; div2--) {
2936 int dco = div1 * div2 * clock_khz * 5;
2937 int a_divratio, tlinedrv, inputsel;
2938 u32 hsdiv;
2939
2940 if (dco < dco_min_freq || dco > dco_max_freq)
2941 continue;
2942
2943 if (div2 >= 2) {
2944 /*
2945 				 * Note: a_divratio does not match the TGL BSpec
2946 				 * algorithm, but it matches the hardcoded values and
2947 				 * works on HW, at least for DP alt-mode
2948 */
2949 a_divratio = is_dp ? 10 : 5;
2950 tlinedrv = is_dkl ? 1 : 2;
2951 } else {
2952 a_divratio = 5;
2953 tlinedrv = 0;
2954 }
2955 inputsel = is_dp ? 0 : 1;
2956
2957 switch (div1) {
2958 default:
2959 MISSING_CASE(div1);
2960 fallthrough;
2961 case 2:
2962 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2963 break;
2964 case 3:
2965 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2966 break;
2967 case 5:
2968 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2969 break;
2970 case 7:
2971 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2972 break;
2973 }
2974
2975 *target_dco_khz = dco;
2976
2977 hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2978
2979 hw_state->mg_clktop2_coreclkctl1 =
2980 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2981
2982 hw_state->mg_clktop2_hsclkctl =
2983 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2984 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2985 hsdiv |
2986 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2987
2988 return 0;
2989 }
2990 }
2991
2992 return -EINVAL;
2993 }
2994
2995 /*
2996 * The specification for this function uses real numbers, so the math had to be
2997  * adapted to integer-only calculation, which is why it looks so different.
2998 */
2999 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3000 struct intel_dpll_hw_state *dpll_hw_state)
3001 {
3002 struct intel_display *display = to_intel_display(crtc_state);
3003 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3004 int refclk_khz = display->dpll.ref_clks.nssc;
3005 int clock = crtc_state->port_clock;
3006 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3007 u32 iref_ndiv, iref_trim, iref_pulse_w;
3008 u32 prop_coeff, int_coeff;
3009 u32 tdc_targetcnt, feedfwgain;
3010 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3011 u64 tmp;
3012 bool use_ssc = false;
3013 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3014 bool is_dkl = DISPLAY_VER(display) >= 12;
3015 int ret;
3016
3017 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3018 hw_state, is_dkl);
3019 if (ret)
3020 return ret;
3021
3022 m1div = 2;
3023 m2div_int = dco_khz / (refclk_khz * m1div);
3024 if (m2div_int > 255) {
3025 if (!is_dkl) {
3026 m1div = 4;
3027 m2div_int = dco_khz / (refclk_khz * m1div);
3028 }
3029
3030 if (m2div_int > 255)
3031 return -EINVAL;
3032 }
3033 m2div_rem = dco_khz % (refclk_khz * m1div);
3034
3035 tmp = (u64)m2div_rem * (1 << 22);
3036 do_div(tmp, refclk_khz * m1div);
3037 m2div_frac = tmp;
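	/*
	 * Illustrative example: dco_khz = 8100000 with refclk_khz = 38400 and
	 * m1div = 2 gives m2div_int = 105, m2div_rem = 36000 and
	 * m2div_frac = 36000 * 2^22 / 76800 = 1966080.
	 */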
3038
3039 switch (refclk_khz) {
3040 case 19200:
3041 iref_ndiv = 1;
3042 iref_trim = 28;
3043 iref_pulse_w = 1;
3044 break;
3045 case 24000:
3046 iref_ndiv = 1;
3047 iref_trim = 25;
3048 iref_pulse_w = 2;
3049 break;
3050 case 38400:
3051 iref_ndiv = 2;
3052 iref_trim = 28;
3053 iref_pulse_w = 1;
3054 break;
3055 default:
3056 MISSING_CASE(refclk_khz);
3057 return -EINVAL;
3058 }
3059
3060 /*
3061 * tdc_res = 0.000003
3062 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3063 *
3064 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3065 * was supposed to be a division, but we rearranged the operations of
3066 	 * the formula to avoid early divisions so that the
3067 	 * rounding errors don't get multiplied.
3068 *
3069 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3070 * we also rearrange to work with integers.
3071 *
3072 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3073 * last division by 10.
3074 */
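	/*
	 * For illustration, the integer formula below yields tdc_targetcnt = 79
	 * for a 19.2 MHz refclk, 63 for 24 MHz and 39 for 38.4 MHz.
	 */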
3075 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3076
3077 /*
3078 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3079 * 32 bits. That's not a problem since we round the division down
3080 * anyway.
3081 */
3082 feedfwgain = (use_ssc || m2div_rem > 0) ?
3083 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3084
3085 if (dco_khz >= 9000000) {
3086 prop_coeff = 5;
3087 int_coeff = 10;
3088 } else {
3089 prop_coeff = 4;
3090 int_coeff = 8;
3091 }
3092
3093 if (use_ssc) {
3094 tmp = mul_u32_u32(dco_khz, 47 * 32);
3095 do_div(tmp, refclk_khz * m1div * 10000);
3096 ssc_stepsize = tmp;
3097
3098 tmp = mul_u32_u32(dco_khz, 1000);
3099 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3100 } else {
3101 ssc_stepsize = 0;
3102 ssc_steplen = 0;
3103 }
3104 ssc_steplog = 4;
3105
3106 /* write pll_state calculations */
3107 if (is_dkl) {
3108 hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3109 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3110 DKL_PLL_DIV0_FBPREDIV(m1div) |
3111 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3112 if (display->vbt.override_afc_startup) {
3113 u8 val = display->vbt.override_afc_startup_val;
3114
3115 hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3116 }
3117
3118 hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3119 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3120
3121 hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3122 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3123 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3124 (use_ssc ? DKL_PLL_SSC_EN : 0);
3125
3126 hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3127 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3128
3129 hw_state->mg_pll_tdc_coldst_bias =
3130 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3131 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3132
3133 } else {
3134 hw_state->mg_pll_div0 =
3135 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3136 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3137 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3138
3139 hw_state->mg_pll_div1 =
3140 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3141 MG_PLL_DIV1_DITHER_DIV_2 |
3142 MG_PLL_DIV1_NDIVRATIO(1) |
3143 MG_PLL_DIV1_FBPREDIV(m1div);
3144
3145 hw_state->mg_pll_lf =
3146 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3147 MG_PLL_LF_AFCCNTSEL_512 |
3148 MG_PLL_LF_GAINCTRL(1) |
3149 MG_PLL_LF_INT_COEFF(int_coeff) |
3150 MG_PLL_LF_PROP_COEFF(prop_coeff);
3151
3152 hw_state->mg_pll_frac_lock =
3153 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3154 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3155 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3156 MG_PLL_FRAC_LOCK_DCODITHEREN |
3157 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3158 if (use_ssc || m2div_rem > 0)
3159 hw_state->mg_pll_frac_lock |=
3160 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3161
3162 hw_state->mg_pll_ssc =
3163 (use_ssc ? MG_PLL_SSC_EN : 0) |
3164 MG_PLL_SSC_TYPE(2) |
3165 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3166 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3167 MG_PLL_SSC_FLLEN |
3168 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3169
3170 hw_state->mg_pll_tdc_coldst_bias =
3171 MG_PLL_TDC_COLDST_COLDSTART |
3172 MG_PLL_TDC_COLDST_IREFINT_EN |
3173 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3174 MG_PLL_TDC_TDCOVCCORR_EN |
3175 MG_PLL_TDC_TDCSEL(3);
3176
3177 hw_state->mg_pll_bias =
3178 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3179 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3180 MG_PLL_BIAS_BIAS_BONUS(10) |
3181 MG_PLL_BIAS_BIASCAL_EN |
3182 MG_PLL_BIAS_CTRIM(12) |
3183 MG_PLL_BIAS_VREF_RDAC(4) |
3184 MG_PLL_BIAS_IREFTRIM(iref_trim);
3185
3186 if (refclk_khz == 38400) {
3187 hw_state->mg_pll_tdc_coldst_bias_mask =
3188 MG_PLL_TDC_COLDST_COLDSTART;
3189 hw_state->mg_pll_bias_mask = 0;
3190 } else {
3191 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3192 hw_state->mg_pll_bias_mask = -1U;
3193 }
3194
3195 hw_state->mg_pll_tdc_coldst_bias &=
3196 hw_state->mg_pll_tdc_coldst_bias_mask;
3197 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3198 }
3199
3200 return 0;
3201 }
3202
3203 static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
3204 const struct intel_shared_dpll *pll,
3205 const struct intel_dpll_hw_state *dpll_hw_state)
3206 {
3207 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3208 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3209 u64 tmp;
3210
3211 ref_clock = display->dpll.ref_clks.nssc;
3212
3213 if (DISPLAY_VER(display) >= 12) {
3214 m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3215 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3216 m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3217
3218 if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3219 m2_frac = hw_state->mg_pll_bias &
3220 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3221 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3222 } else {
3223 m2_frac = 0;
3224 }
3225 } else {
3226 m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3227 m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3228
3229 if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3230 m2_frac = hw_state->mg_pll_div0 &
3231 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3232 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3233 } else {
3234 m2_frac = 0;
3235 }
3236 }
3237
3238 switch (hw_state->mg_clktop2_hsclkctl &
3239 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3240 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3241 div1 = 2;
3242 break;
3243 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3244 div1 = 3;
3245 break;
3246 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3247 div1 = 5;
3248 break;
3249 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3250 div1 = 7;
3251 break;
3252 default:
3253 MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3254 return 0;
3255 }
3256
3257 div2 = (hw_state->mg_clktop2_hsclkctl &
3258 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3259 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3260
3261 	/* a div2 value of 0 is the same as 1, i.e. no division */
3262 if (div2 == 0)
3263 div2 = 1;
3264
3265 /*
3266 * Adjust the original formula to delay the division by 2^22 in order to
3267 * minimize possible rounding errors.
3268 */
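	/*
	 * Illustrative example (reversing icl_calc_mg_pll_state() above):
	 * m1 = 2, m2_int = 105, m2_frac = 1966080, ref_clock = 38400,
	 * div1 = 3, div2 = 2 yields (8064000 + 36000) / 30 = 270000 kHz.
	 */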
3269 tmp = (u64)m1 * m2_int * ref_clock +
3270 (((u64)m1 * m2_frac * ref_clock) >> 22);
3271 tmp = div_u64(tmp, 5 * div1 * div2);
3272
3273 return tmp;
3274 }
3275
3276 /**
3277 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3278 * @crtc_state: state for the CRTC to select the DPLL for
3279 * @port_dpll_id: the active @port_dpll_id to select
3280 *
3281 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3282 * CRTC.
3283 */
3284 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3285 enum icl_port_dpll_id port_dpll_id)
3286 {
3287 struct icl_port_dpll *port_dpll =
3288 &crtc_state->icl_port_dplls[port_dpll_id];
3289
3290 crtc_state->shared_dpll = port_dpll->pll;
3291 crtc_state->dpll_hw_state = port_dpll->hw_state;
3292 }
3293
3294 static void icl_update_active_dpll(struct intel_atomic_state *state,
3295 struct intel_crtc *crtc,
3296 struct intel_encoder *encoder)
3297 {
3298 struct intel_crtc_state *crtc_state =
3299 intel_atomic_get_new_crtc_state(state, crtc);
3300 struct intel_digital_port *primary_port;
3301 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3302
3303 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3304 enc_to_mst(encoder)->primary :
3305 enc_to_dig_port(encoder);
3306
3307 if (primary_port &&
3308 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3309 intel_tc_port_in_legacy_mode(primary_port)))
3310 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3311
3312 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3313 }
3314
3315 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3316 struct intel_crtc *crtc)
3317 {
3318 struct intel_display *display = to_intel_display(state);
3319 struct intel_crtc_state *crtc_state =
3320 intel_atomic_get_new_crtc_state(state, crtc);
3321 struct icl_port_dpll *port_dpll =
3322 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3323 struct skl_wrpll_params pll_params = {};
3324 int ret;
3325
3326 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3327 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3328 ret = icl_calc_wrpll(crtc_state, &pll_params);
3329 else
3330 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3331
3332 if (ret)
3333 return ret;
3334
3335 icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3336
3337 /* this is mainly for the fastset check */
3338 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3339
3340 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
3341 &port_dpll->hw_state);
3342
3343 return 0;
3344 }
3345
3346 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3347 struct intel_crtc *crtc,
3348 struct intel_encoder *encoder)
3349 {
3350 struct intel_display *display = to_intel_display(crtc);
3351 struct intel_crtc_state *crtc_state =
3352 intel_atomic_get_new_crtc_state(state, crtc);
3353 struct icl_port_dpll *port_dpll =
3354 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3355 enum port port = encoder->port;
3356 unsigned long dpll_mask;
3357
3358 if (display->platform.alderlake_s) {
3359 dpll_mask =
3360 BIT(DPLL_ID_DG1_DPLL3) |
3361 BIT(DPLL_ID_DG1_DPLL2) |
3362 BIT(DPLL_ID_ICL_DPLL1) |
3363 BIT(DPLL_ID_ICL_DPLL0);
3364 } else if (display->platform.dg1) {
3365 if (port == PORT_D || port == PORT_E) {
3366 dpll_mask =
3367 BIT(DPLL_ID_DG1_DPLL2) |
3368 BIT(DPLL_ID_DG1_DPLL3);
3369 } else {
3370 dpll_mask =
3371 BIT(DPLL_ID_DG1_DPLL0) |
3372 BIT(DPLL_ID_DG1_DPLL1);
3373 }
3374 } else if (display->platform.rocketlake) {
3375 dpll_mask =
3376 BIT(DPLL_ID_EHL_DPLL4) |
3377 BIT(DPLL_ID_ICL_DPLL1) |
3378 BIT(DPLL_ID_ICL_DPLL0);
3379 } else if ((display->platform.jasperlake ||
3380 display->platform.elkhartlake) &&
3381 port != PORT_A) {
3382 dpll_mask =
3383 BIT(DPLL_ID_EHL_DPLL4) |
3384 BIT(DPLL_ID_ICL_DPLL1) |
3385 BIT(DPLL_ID_ICL_DPLL0);
3386 } else {
3387 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3388 }
3389
3390 /* Eliminate DPLLs from consideration if reserved by HTI */
3391 dpll_mask &= ~intel_hti_dpll_mask(display);
3392
3393 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3394 &port_dpll->hw_state,
3395 dpll_mask);
3396 if (!port_dpll->pll)
3397 return -EINVAL;
3398
3399 intel_reference_shared_dpll(state, crtc,
3400 port_dpll->pll, &port_dpll->hw_state);
3401
3402 icl_update_active_dpll(state, crtc, encoder);
3403
3404 return 0;
3405 }

static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct skl_wrpll_params pll_params = {};
	int ret;

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
	if (ret)
		return ret;

	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
	if (ret)
		return ret;

	/* this is mainly for the fastset check */
	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
	else
		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);

	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
							 &port_dpll->hw_state);

	return 0;
}

static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc,
				struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum intel_dpll_id dpll_id;
	int ret;

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll)
		return -EINVAL;
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		ret = -EINVAL;
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;

err_unreference_tbt_pll:
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return ret;
}

static int icl_compute_dplls(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder)
{
	if (intel_encoder_is_combo(encoder))
		return icl_compute_combo_phy_dpll(state, crtc);
	else if (intel_encoder_is_tc(encoder))
		return icl_compute_tc_phy_dplls(state, crtc);

	MISSING_CASE(encoder->port);

	return 0;
}

static int icl_get_dplls(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	if (intel_encoder_is_combo(encoder))
		return icl_get_combo_phy_dpll(state, crtc, encoder);
	else if (intel_encoder_is_tc(encoder))
		return icl_get_tc_phy_dplls(state, crtc, encoder);

	MISSING_CASE(encoder->port);

	return -EINVAL;
}

static void icl_put_dplls(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum icl_port_dpll_id id;

	new_crtc_state->shared_dpll = NULL;

	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
		const struct icl_port_dpll *old_port_dpll =
			&old_crtc_state->icl_port_dplls[id];
		struct icl_port_dpll *new_port_dpll =
			&new_crtc_state->icl_port_dplls[id];

		new_port_dpll->pll = NULL;

		if (!old_port_dpll->pll)
			continue;

		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
	}
}

static bool mg_pll_get_hw_state(struct intel_display *display,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(display,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(display,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));

	if (display->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

static bool dkl_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

static bool icl_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state,
				 i915_reg_t enable_reg)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (display->platform.alderlake_s) {
		hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
	} else if (display->platform.dg1) {
		hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
	} else if (display->platform.rocketlake) {
		hw_state->cfgcr0 = intel_de_read(display,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(display) >= 12) {
		hw_state->cfgcr0 = intel_de_read(display,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 TGL_DPLL_CFGCR1(id));
		if (display->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

static bool combo_pll_get_hw_state(struct intel_display *display,
				   struct intel_shared_dpll *pll,
				   struct intel_dpll_hw_state *dpll_hw_state)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);

	return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
}

static bool tbt_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
}

static void icl_dpll_write(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   const struct icl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (display->platform.alderlake_s) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (display->platform.dg1) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (display->platform.rocketlake) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(display) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
	drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (display->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(display, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(display, cfgcr1_reg);
}

static void icl_mg_pll_write(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	intel_de_rmw(display, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
}

static void dkl_pll_write(struct intel_display *display,
			  struct intel_shared_dpll *pll,
			  const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though they are on different building blocks
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}

static void icl_pll_power_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}

static void icl_pll_enable(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}

static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(display, TRANS_CMTG_CHICKEN);
	val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}

static void combo_pll_enable(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(display, pll);

	/* DVFS post sequence would be here. See the comment above. */
}

static void tbt_pll_enable(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;

	icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);

	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}

static void mg_pll_enable(struct intel_display *display,
			  struct intel_shared_dpll *pll,
			  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	if (DISPLAY_VER(display) >= 12)
		dkl_pll_write(display, pll, hw_state);
	else
		icl_mg_pll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}

static void icl_pll_disable(struct intel_display *display,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}

static void combo_pll_disable(struct intel_display *display,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);

	icl_pll_disable(display, pll, enable_reg);
}

static void tbt_pll_disable(struct intel_display *display,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}

static void mg_pll_disable(struct intel_display *display,
			   struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	icl_pll_disable(display, pll, enable_reg);
}

static void icl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}

static void icl_dump_hw_state(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;

	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
		   "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
		   "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
		   hw_state->mg_refclkin_ctl,
		   hw_state->mg_clktop2_coreclkctl1,
		   hw_state->mg_clktop2_hsclkctl,
		   hw_state->mg_pll_div0,
		   hw_state->mg_pll_div1,
		   hw_state->mg_pll_lf,
		   hw_state->mg_pll_frac_lock,
		   hw_state->mg_pll_ssc,
		   hw_state->mg_pll_bias,
		   hw_state->mg_pll_tdc_coldst_bias);
}

static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
				 const struct intel_dpll_hw_state *_b)
{
	const struct icl_dpll_hw_state *a = &_a->icl;
	const struct icl_dpll_hw_state *b = &_b->icl;

	/* FIXME split combo vs. mg more thoroughly */
	return a->cfgcr0 == b->cfgcr0 &&
		a->cfgcr1 == b->cfgcr1 &&
		a->div0 == b->div0 &&
		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
		a->mg_pll_div0 == b->mg_pll_div0 &&
		a->mg_pll_div1 == b->mg_pll_div1 &&
		a->mg_pll_lf == b->mg_pll_lf &&
		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
		a->mg_pll_ssc == b->mg_pll_ssc &&
		a->mg_pll_bias == b->mg_pll_bias &&
		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
}

static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

static const struct dpll_info icl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info ehl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

static const struct dpll_info tgl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
	{}
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info rkl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
	{}
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @display: intel_display device
 *
 * Initialize shared DPLLs for @display.
 */
void intel_shared_dpll_init(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	mutex_init(&display->dpll.lock);

	if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (display->platform.alderlake_p)
		dpll_mgr = &adlp_pll_mgr;
	else if (display->platform.alderlake_s)
		dpll_mgr = &adls_pll_mgr;
	else if (display->platform.dg1)
		dpll_mgr = &dg1_pll_mgr;
	else if (display->platform.rocketlake)
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(display) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (display->platform.jasperlake || display->platform.elkhartlake)
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(display) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (display->platform.geminilake || display->platform.broxton)
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(display) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(display))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr)
		return;

	dpll_info = dpll_mgr->dpll_info;

	for (i = 0; dpll_info[i].name; i++) {
		if (drm_WARN_ON(display->drm,
				i >= ARRAY_SIZE(display->dpll.shared_dplls)))
			break;

		/* must fit into unsigned long bitmask on 32bit */
		if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
			break;

		display->dpll.shared_dplls[i].info = &dpll_info[i];
		display->dpll.shared_dplls[i].index = i;
	}

	display->dpll.mgr = dpll_mgr;
	display->dpll.num_shared_dpll = i;
}
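
/*
 * Illustrative sketch (editor's addition, hypothetical caller): display probe
 * code would typically initialize the PLL tracking once and can then inspect
 * how many shared DPLLs were registered for the platform, e.g.:
 *
 *	intel_shared_dpll_init(display);
 *	drm_dbg_kms(display->drm, "%d shared DPLLs registered\n",
 *		    display->dpll.num_shared_dpll);
 */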

/**
 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to compute DPLLs for
 * @encoder: encoder
 *
 * This function computes the DPLL state for the given CRTC and encoder.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int intel_compute_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;

	if (drm_WARN_ON(display->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->compute_dplls(state, crtc, encoder);
}
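
/*
 * Illustrative sketch (editor's addition, hypothetical atomic check code):
 *
 *	ret = intel_compute_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 * The computed PLL hw state is only staged in the atomic state here; nothing
 * is written to the hardware until the commit phase.
 */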

/**
 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to reserve DPLLs for
 * @encoder: encoder
 *
 * This function reserves all required DPLLs for the given CRTC and encoder
 * combination in the current atomic commit @state and the new @crtc atomic
 * state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * The reserved DPLLs should be released by calling
 * intel_release_shared_dplls().
 *
 * Returns:
 * 0 if all required DPLLs were successfully reserved,
 * negative error code otherwise.
 */
int intel_reserve_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;

	if (drm_WARN_ON(display->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->get_dplls(state, crtc, encoder);
}
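
/*
 * Illustrative sketch (editor's addition, hypothetical caller): reservation
 * failures are reported as a negative error code, so a caller would do
 * something along the lines of:
 *
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret) {
 *		drm_dbg_kms(display->drm,
 *			    "failed to reserve DPLLs for pipe %c\n",
 *			    pipe_name(crtc->pipe));
 *		return ret;
 *	}
 */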

/**
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
 * @state: atomic state
 * @crtc: crtc from which the DPLLs are to be released
 *
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}
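
/*
 * Illustrative sketch (editor's addition, hypothetical caller): a CRTC being
 * disabled or moved to a different PLL drops its references with
 *
 *	intel_release_shared_dplls(state, crtc);
 *
 * and, like the reservation, the release only takes effect once the atomic
 * state is swapped during the commit phase.
 */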

/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;

	if (drm_WARN_ON(display->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
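
/*
 * Illustrative sketch (editor's addition, hypothetical Type-C caller): once
 * the port mode is known (e.g. TBT vs. DP-alt/legacy), encoder code can
 * switch the CRTC to the matching port DPLL reserved earlier:
 *
 *	intel_update_active_dpll(state, crtc, encoder);
 */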

/**
 * intel_dpll_get_freq - calculate the DPLL's output frequency
 * @display: intel_display device
 * @pll: DPLL for which to calculate the output frequency
 * @dpll_hw_state: DPLL state from which to calculate the output frequency
 *
 * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
 */
int intel_dpll_get_freq(struct intel_display *display,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *dpll_hw_state)
{
	if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
}
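
/*
 * Illustrative sketch (editor's addition): readout/verification code can
 * derive the port clock from a PLL's tracked hw state, e.g.:
 *
 *	int clock = intel_dpll_get_freq(display, pll, &pll->state.hw_state);
 *
 * A return value of 0 indicates the PLL type has no ->get_freq() hook.
 */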

/**
 * intel_dpll_get_hw_state - readout the DPLL's hardware state
 * @display: intel_display device instance
 * @pll: DPLL for which to read out the hardware state
 * @dpll_hw_state: DPLL's hardware state
 *
 * Read out @pll's hardware state into @dpll_hw_state.
 */
bool intel_dpll_get_hw_state(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *dpll_hw_state)
{
	return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
}
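
/*
 * Illustrative sketch (editor's addition): this is the building block used by
 * readout_dpll_hw_state() and verify_single_dpll_state() below, e.g.:
 *
 *	struct intel_dpll_hw_state tmp = {};
 *	bool on = intel_dpll_get_hw_state(display, pll, &tmp);
 *
 * where "on" reflects whether the PLL is currently enabled in hardware.
 */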

static void readout_dpll_hw_state(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);

	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(display->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}

void intel_dpll_update_ref_clks(struct intel_display *display)
{
	if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
		display->dpll.mgr->update_ref_clks(display);
}

void intel_dpll_readout_hw_state(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i)
		readout_dpll_hw_state(display, pll);
}

static void sanitize_dpll_state(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(display, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(display->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(display, pll);
}

void intel_dpll_sanitize_state(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int i;

	intel_cx0_pll_power_save_wa(display);

	for_each_shared_dpll(display, pll, i)
		sanitize_dpll_state(display, pll);
}

/**
 * intel_dpll_dump_hw_state - dump hw_state
 * @display: intel_display structure
 * @p: where to print the state to
 * @dpll_hw_state: hw state to be dumped
 *
 * Dump out the relevant values in @dpll_hw_state.
 */
void intel_dpll_dump_hw_state(struct intel_display *display,
			      struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	if (display->dpll.mgr) {
		display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		ibx_dump_hw_state(p, dpll_hw_state);
	}
}

/**
 * intel_dpll_compare_hw_state - compare the two states
 * @display: intel_display structure
 * @a: first DPLL hw state
 * @b: second DPLL hw state
 *
 * Compare DPLL hw states @a and @b.
 *
 * Returns: true if the states are equal, false if they differ
 */
bool intel_dpll_compare_hw_state(struct intel_display *display,
				 const struct intel_dpll_hw_state *a,
				 const struct intel_dpll_hw_state *b)
{
	if (display->dpll.mgr) {
		return display->dpll.mgr->compare_hw_state(a, b);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		return ibx_compare_hw_state(a, b);
	}
}

static void
verify_single_dpll_state(struct intel_display *display,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state = {};
	u8 pipe_mask;
	bool active;

	active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);

	if (!pll->info->always_on) {
		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
					 "%s: pll in active use but not on in sw tracking\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
					 "%s: pll is on but not used by any active pipe\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
					 "%s: pll on state mismatch (expected %i, found %i)\n",
					 pll->info->name, pll->on, active);
	}

	if (!crtc) {
		INTEL_DISPLAY_STATE_WARN(display,
					 pll->active_mask & ~pll->state.pipe_mask,
					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
					 pll->info->name, pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
	else
		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
				 pll->info->name, pipe_mask, pll->state.pipe_mask);

	INTEL_DISPLAY_STATE_WARN(display,
				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
						   sizeof(dpll_hw_state)),
				 "%s: pll hw state mismatch\n",
				 pll->info->name);
}

static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
			      const struct intel_shared_dpll *new_pll)
{
	return old_pll && new_pll && old_pll != new_pll &&
		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}

void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(display, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
								     new_crtc_state->shared_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}

void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i)
		verify_single_dpll_state(display, pll, NULL, NULL);
}