// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm.xml.h"

/*
 * DSI PLL 28nm - clock diagram (e.g. DSI0):
 *
 *                        dsi0analog_postdiv_clk
 *                             |         dsi0indirect_path_div2_clk
 *                             |          |
 *                   +------+  |  +----+  |  |\   dsi0byte_mux
 *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *                |  +------+     +----+     | m|  |  +----+
 *                |                          | u|--o--| /4 |-- dsi0pllbyte
 *                |                          | x|     +----+
 *                o--------------------------| /
 *                |                          |/
 *                |          +------+
 *                o----------| DIV3 |------------------------- dsi0pll
 *                           +------+
 */
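
/*
 * For example, with the VCO running at 750 MHz, DIV1 (POSTDIV1) left at
 * divide-by-1 and the byte mux taking the indirect path, dsi0pllbyte comes
 * out at 750 MHz / 1 / 2 / 4 = 93.75 MHz, while dsi0pll is simply the VCO
 * rate divided by DIV3 (POSTDIV3).
 */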

#define POLL_MAX_READS			10
#define POLL_TIMEOUT_US			50
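
/*
 * pll_28nm_poll_for_ready() spaces its status reads timeout_us apart, so the
 * defaults above give roughly a 10 * 50 us = 500 us window to detect lock.
 */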

#define VCO_REF_CLK_RATE		19200000
#define VCO_MIN_RATE			350000000
#define VCO_MAX_RATE			750000000

/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP	BIT(0)
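/* Uses the 8226-specific PLL power-up sequence (dsi_pll_28nm_vco_prepare_8226) */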
#define DSI_PHY_28NM_QUIRK_PHY_8226	BIT(1)

#define LPFR_LUT_SIZE			10
struct lpfr_cfg {
        unsigned long vco_rate;
        u32 resistance;
};

/* Loop filter resistance: */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
        { 479500000, 8 },
        { 480000000, 11 },
        { 575500000, 8 },
        { 576000000, 12 },
        { 610500000, 8 },
        { 659500000, 9 },
        { 671500000, 10 },
        { 672000000, 14 },
        { 708500000, 10 },
        { 750000000, 11 },
};

struct pll_28nm_cached_state {
        unsigned long vco_rate;
        u8 postdiv3;
        u8 postdiv1;
        u8 byte_mux;
};

struct dsi_pll_28nm {
        struct clk_hw clk_hw;

        struct msm_dsi_phy *phy;

        struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
                                    u32 nb_tries, u32 timeout_us)
{
        bool pll_locked = false;
        u32 val;

        while (nb_tries--) {
                val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
                pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

                if (pll_locked)
                        break;

                udelay(timeout_us);
        }
        DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

        return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
        void __iomem *base = pll_28nm->phy->pll_base;

        /*
         * Add HW recommended delays after asserting and then
         * de-asserting the software reset bit.
         */
        writel(DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
        udelay(1);
        writel(0, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
        udelay(1);
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                                     unsigned long parent_rate)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
        struct device *dev = &pll_28nm->phy->pdev->dev;
        void __iomem *base = pll_28nm->phy->pll_base;
        unsigned long div_fbx1000, gen_vco_clk;
        u32 refclk_cfg, frac_n_mode, frac_n_value;
        u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
        u32 cal_cfg10, cal_cfg11;
        u32 rem;
        int i;

        VERB("rate=%lu, parent's=%lu", rate, parent_rate);

        /* Force postdiv2 to be div-4 */
        writel(3, base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG);

        /* Configure the Loop filter resistance */
        for (i = 0; i < LPFR_LUT_SIZE; i++)
                if (rate <= lpfr_lut[i].vco_rate)
                        break;
        if (i == LPFR_LUT_SIZE) {
                DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
                              rate);
                return -EINVAL;
        }
        writel(lpfr_lut[i].resistance, base + REG_DSI_28nm_PHY_PLL_LPFR_CFG);

        /* Loop filter capacitance values: c1 and c2 */
        writel(0x70, base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG);
        writel(0x15, base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG);

        rem = rate % VCO_REF_CLK_RATE;
        if (rem) {
                refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
                frac_n_mode = 1;
                div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
                gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
        } else {
                refclk_cfg = 0x0;
                frac_n_mode = 0;
                div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
                gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
        }

        DBG("refclk_cfg = %d", refclk_cfg);

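        /*
         * Worked example: a 500 MHz target is not a multiple of 19.2 MHz, so
         * frac-N mode is used with the ref clock doubler enabled.  Then
         * div_fbx1000 = 500000000 / 38400 = 13020, gen_vco_clk = 499968000 Hz,
         * rem = 13020 % 1000 = 20, frac_n_value = (20 << 16) / 1000 = 1310 and
         * the SDM DC_OFFSET below becomes 13020 / 1000 - 1 = 12.
         */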
        rem = div_fbx1000 % 1000;
        frac_n_value = (rem << 16) / 1000;

        DBG("div_fb = %lu", div_fbx1000);
        DBG("frac_n_value = %d", frac_n_value);

        DBG("Generated VCO Clock: %lu", gen_vco_clk);
        rem = 0;
        sdm_cfg1 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
        sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
        if (frac_n_mode) {
                sdm_cfg0 = 0x0;
                sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
                sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
                                (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
                sdm_cfg3 = frac_n_value >> 8;
                sdm_cfg2 = frac_n_value & 0xff;
        } else {
                sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
                sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
                                (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
                sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
                sdm_cfg2 = 0;
                sdm_cfg3 = 0;
        }

        DBG("sdm_cfg0=%d", sdm_cfg0);
        DBG("sdm_cfg1=%d", sdm_cfg1);
        DBG("sdm_cfg2=%d", sdm_cfg2);
        DBG("sdm_cfg3=%d", sdm_cfg3);

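        /*
         * CAL_CFG10/11 together encode the generated VCO rate in MHz:
         * vco_MHz = cal_cfg11 * 256 + cal_cfg10.  For the 500 MHz example
         * above this gives cal_cfg11 = 1 and cal_cfg10 = 243.
         */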
        cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
        cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
        DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

        writel(0x02, base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG);
        writel(0x2b, base + REG_DSI_28nm_PHY_PLL_CAL_CFG3);
        writel(0x06, base + REG_DSI_28nm_PHY_PLL_CAL_CFG4);
        writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);

        writel(sdm_cfg1, base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
        writel(DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2),
               base + REG_DSI_28nm_PHY_PLL_SDM_CFG2);
        writel(DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3),
               base + REG_DSI_28nm_PHY_PLL_SDM_CFG3);
        writel(0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG4);

        /* Add hardware recommended delay for correct PLL configuration */
        if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
                udelay(1000);
        else
                udelay(1);

        writel(refclk_cfg, base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG);
        writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
        writel(0x31, base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG);
        writel(sdm_cfg0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
        writel(0x12, base + REG_DSI_28nm_PHY_PLL_CAL_CFG0);
        writel(0x30, base + REG_DSI_28nm_PHY_PLL_CAL_CFG6);
        writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG7);
        writel(0x60, base + REG_DSI_28nm_PHY_PLL_CAL_CFG8);
        writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG9);
        writel(cal_cfg10 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG10);
        writel(cal_cfg11 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG11);
        writel(0x20, base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG);

        return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

        return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
                                       POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
        void __iomem *base = pll_28nm->phy->pll_base;
        u32 sdm0, doubler, sdm_byp_div;
        u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
        u32 ref_clk = VCO_REF_CLK_RATE;
        unsigned long vco_rate;

        VERB("parent_rate=%lu", parent_rate);

        /* Check to see if the ref clk doubler is enabled */
        doubler = readl(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
                        DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
        ref_clk += (doubler * VCO_REF_CLK_RATE);

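        /*
         * In integer (bypass) mode the VCO runs at ref_clk * (BYP_DIV + 1);
         * in sigma-delta mode it runs at
         * ref_clk * (DC_OFFSET + 1) + ref_clk * FREQ_SEED / 2^16,
         * mirroring what dsi_pll_28nm_clk_set_rate() programmed.
         */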
        /* see if it is integer mode or sdm mode */
        sdm0 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
        if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
                /* integer mode */
                sdm_byp_div = FIELD(
                                readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
                                DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
                vco_rate = ref_clk * sdm_byp_div;
        } else {
                /* sdm mode */
                sdm_dc_off = FIELD(
                                readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
                                DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
                DBG("sdm_dc_off = %d", sdm_dc_off);
                sdm2 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
                                DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
                sdm3 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
                                DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
                sdm_freq_seed = (sdm3 << 8) | sdm2;
                DBG("sdm_freq_seed = %d", sdm_freq_seed);

                vco_rate = (ref_clk * (sdm_dc_off + 1)) +
                           mult_frac(ref_clk, sdm_freq_seed, BIT(16));
                DBG("vco rate = %lu", vco_rate);
        }

        DBG("returning vco rate = %lu", vco_rate);

        return vco_rate;
}

static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
        struct device *dev = &pll_28nm->phy->pdev->dev;
        void __iomem *base = pll_28nm->phy->pll_base;
        u32 max_reads = 5, timeout_us = 100;
        bool locked;
        u32 val;
        int i;

        DBG("id=%d", pll_28nm->phy->id);

        pll_28nm_software_reset(pll_28nm);

        /*
         * PLL power up sequence.
         * Add necessary delays recommended by hardware.
         */
        val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(1);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(200);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(500);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(600);

        for (i = 0; i < 2; i++) {
                /* DSI Uniphy lock detect setting */
                writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
                udelay(100);
                writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);

                /* poll for PLL ready status */
                locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
                                                 timeout_us);
                if (locked)
                        break;

                pll_28nm_software_reset(pll_28nm);

                /*
                 * PLL power up sequence.
                 * Add necessary delays recommended by hardware.
                 */
                val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(1);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(200);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(250);

                val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(200);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(500);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(600);
        }

        if (unlikely(!locked))
                DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL Lock success");

        return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
        int i, ret;

        if (unlikely(pll_28nm->phy->pll_on))
                return 0;

        for (i = 0; i < 3; i++) {
                ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
                if (!ret) {
                        pll_28nm->phy->pll_on = true;
                        return 0;
                }
        }

        return ret;
}

static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
        struct device *dev = &pll_28nm->phy->pdev->dev;
        void __iomem *base = pll_28nm->phy->pll_base;
        u32 max_reads = 5, timeout_us = 100;
        bool locked;
        u32 val;
        int i;

        DBG("id=%d", pll_28nm->phy->id);

        pll_28nm_software_reset(pll_28nm);

        /*
         * PLL power up sequence.
         * Add necessary delays recommended by hardware.
         */
        writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);

        val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(200);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(200);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        udelay(600);

        for (i = 0; i < 7; i++) {
                /* DSI Uniphy lock detect setting */
                writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
                writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
                udelay(100);
                writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);

                /* poll for PLL ready status */
                locked = pll_28nm_poll_for_ready(pll_28nm,
                                                 max_reads, timeout_us);
                if (locked)
                        break;

                pll_28nm_software_reset(pll_28nm);

                /*
                 * PLL power up sequence.
                 * Add necessary delays recommended by hardware.
                 */
                writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
                udelay(50);

                val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(100);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
                writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
                udelay(600);
        }

        if (unlikely(!locked))
                DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL Lock success");

        return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
        struct device *dev = &pll_28nm->phy->pdev->dev;
        void __iomem *base = pll_28nm->phy->pll_base;
        bool locked;
        u32 max_reads = 10, timeout_us = 50;
        u32 val;

        DBG("id=%d", pll_28nm->phy->id);

        if (unlikely(pll_28nm->phy->pll_on))
                return 0;

        pll_28nm_software_reset(pll_28nm);

        /*
         * PLL power up sequence.
         * Add necessary delays recommended by hardware.
         */
        writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);
        ndelay(500);

        val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        ndelay(500);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        ndelay(500);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
               DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
        writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
        ndelay(500);

        /* DSI PLL toggle lock detect setting */
        writel(0x04, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
        ndelay(500);
        writel(0x05, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
        udelay(512);

        locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

        if (unlikely(!locked)) {
                DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
                return -EINVAL;
        }

        DBG("DSI PLL lock success");
        pll_28nm->phy->pll_on = true;

        return 0;
}

static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

        DBG("id=%d", pll_28nm->phy->id);

        if (unlikely(!pll_28nm->phy->pll_on))
                return;

        writel(0, pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG);

        pll_28nm->phy->pll_on = false;
}

static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
                unsigned long rate, unsigned long *parent_rate)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

        if (rate < pll_28nm->phy->cfg->min_pll_rate)
                return pll_28nm->phy->cfg->min_pll_rate;
        else if (rate > pll_28nm->phy->cfg->max_pll_rate)
                return pll_28nm->phy->cfg->max_pll_rate;
        else
                return rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
        .round_rate = dsi_pll_28nm_clk_round_rate,
        .set_rate = dsi_pll_28nm_clk_set_rate,
        .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
        .prepare = dsi_pll_28nm_vco_prepare_hpm,
        .unprepare = dsi_pll_28nm_vco_unprepare,
        .is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
        .round_rate = dsi_pll_28nm_clk_round_rate,
        .set_rate = dsi_pll_28nm_clk_set_rate,
        .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
        .prepare = dsi_pll_28nm_vco_prepare_lp,
        .unprepare = dsi_pll_28nm_vco_unprepare,
        .is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
        .round_rate = dsi_pll_28nm_clk_round_rate,
        .set_rate = dsi_pll_28nm_clk_set_rate,
        .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
        .prepare = dsi_pll_28nm_vco_prepare_8226,
        .unprepare = dsi_pll_28nm_vco_unprepare,
        .is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */

static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
        struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
        void __iomem *base = pll_28nm->phy->pll_base;

        cached_state->postdiv3 =
                readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
        cached_state->postdiv1 =
                readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
        cached_state->byte_mux = readl(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
        if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
                cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
        else
                cached_state->vco_rate = 0;
}

static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
        struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
        void __iomem *base = pll_28nm->phy->pll_base;
        int ret;

        ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
                                        cached_state->vco_rate, 0);
        if (ret) {
                DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
                              "restore vco rate failed. ret=%d\n", ret);
                return ret;
        }

        writel(cached_state->postdiv3, base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
        writel(cached_state->postdiv1, base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
        writel(cached_state->byte_mux, base + REG_DSI_28nm_PHY_PLL_VREG_CFG);

        return 0;
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
        char clk_name[32];
        struct clk_init_data vco_init = {
                .parent_data = &(const struct clk_parent_data) {
                        .fw_name = "ref", .name = "xo",
                },
                .num_parents = 1,
                .name = clk_name,
                .flags = CLK_IGNORE_UNUSED,
        };
        struct device *dev = &pll_28nm->phy->pdev->dev;
        struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
        int ret;

        DBG("%d", pll_28nm->phy->id);

        if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
                vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
        else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226)
                vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226;
        else
                vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;

        snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
        pll_28nm->clk_hw.init = &vco_init;
        ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
        if (ret)
                return ret;

        snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
        analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
                        &pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
                        pll_28nm->phy->pll_base +
                                REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
                        0, 4, 0, NULL);
        if (IS_ERR(analog_postdiv))
                return PTR_ERR(analog_postdiv);

        snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
        indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
                        clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
        if (IS_ERR(indirect_path_div2))
                return PTR_ERR(indirect_path_div2);

        snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
        hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
                        &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
                                REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
                        0, 8, 0, NULL);
        if (IS_ERR(hw))
                return PTR_ERR(hw);
        provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

        snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
        byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
                        ((const struct clk_hw *[]){
                                &pll_28nm->clk_hw,
                                indirect_path_div2,
                        }), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
                        REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
        if (IS_ERR(byte_mux))
                return PTR_ERR(byte_mux);

        snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
        hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
                        byte_mux, CLK_SET_RATE_PARENT, 1, 4);
        if (IS_ERR(hw))
                return PTR_ERR(hw);
        provided_clocks[DSI_BYTE_PLL_CLK] = hw;

        return 0;
}

static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
{
        struct platform_device *pdev = phy->pdev;
        struct dsi_pll_28nm *pll_28nm;
        int ret;

        if (!pdev)
                return -ENODEV;

        pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
        if (!pll_28nm)
                return -ENOMEM;

        pll_28nm->phy = phy;

        ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
        if (ret) {
                DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ret;
        }

        phy->vco_hw = &pll_28nm->clk_hw;

        return 0;
}

static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
                                     struct msm_dsi_dphy_timing *timing)
{
        void __iomem *base = phy->base;

        writel(DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_0);
        writel(DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_1);
        writel(DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_2);
        if (timing->clk_zero & BIT(8))
                writel(DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8,
                       base + REG_DSI_28nm_PHY_TIMING_CTRL_3);
        writel(DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_4);
        writel(DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_5);
        writel(DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_6);
        writel(DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_7);
        writel(DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_8);
        writel(DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
               DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_9);
        writel(DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_10);
        writel(DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0),
               base + REG_DSI_28nm_PHY_TIMING_CTRL_11);
}

static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
        void __iomem *base = phy->reg_base;

        writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
        writel(1, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
        writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
        writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
        writel(0x3, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
        writel(0x9, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
        writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
        writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);
        writel(0x00, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
}

static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
        void __iomem *base = phy->reg_base;

        writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
        writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
        writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
        writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
        writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
        writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
        writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);

        if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
                writel(0x05, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
        else
                writel(0x0d, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
}

static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
        if (!enable) {
                writel(0, phy->reg_base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
                return;
        }

        if (phy->regulator_ldo_mode)
                dsi_28nm_phy_regulator_enable_ldo(phy);
        else
                dsi_28nm_phy_regulator_enable_dcdc(phy);
}

static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
                               struct msm_dsi_phy_clk_request *clk_req)
{
        struct msm_dsi_dphy_timing *timing = &phy->timing;
        int i;
        void __iomem *base = phy->base;
        u32 val;

        DBG("");

        if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
                DRM_DEV_ERROR(&phy->pdev->dev,
                              "%s: D-PHY timing calculation failed\n",
                              __func__);
                return -EINVAL;
        }

        writel(0xff, base + REG_DSI_28nm_PHY_STRENGTH_0);

        dsi_28nm_phy_regulator_ctrl(phy, true);

        dsi_28nm_dphy_set_timing(phy, timing);

        writel(0x00, base + REG_DSI_28nm_PHY_CTRL_1);
        writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);

        writel(0x6, base + REG_DSI_28nm_PHY_STRENGTH_1);

        for (i = 0; i < 4; i++) {
                writel(0, base + REG_DSI_28nm_PHY_LN_CFG_0(i));
                writel(0, base + REG_DSI_28nm_PHY_LN_CFG_1(i));
                writel(0, base + REG_DSI_28nm_PHY_LN_CFG_2(i));
                writel(0, base + REG_DSI_28nm_PHY_LN_CFG_3(i));
                writel(0, base + REG_DSI_28nm_PHY_LN_CFG_4(i));
                writel(0, base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i));
                writel(0, base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i));
                writel(0x1, base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i));
                writel(0x97, base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i));
        }

        writel(0, base + REG_DSI_28nm_PHY_LNCK_CFG_4);
        writel(0xc0, base + REG_DSI_28nm_PHY_LNCK_CFG_1);
        writel(0x1, base + REG_DSI_28nm_PHY_LNCK_TEST_STR0);
        writel(0xbb, base + REG_DSI_28nm_PHY_LNCK_TEST_STR1);

        writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);

        val = readl(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
        if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
                val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
        else
                val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
        writel(val, base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);

        return 0;
}

static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
        writel(0, phy->base + REG_DSI_28nm_PHY_CTRL_0);
        dsi_28nm_phy_regulator_ctrl(phy, false);

        /*
         * Wait for the register writes to complete in order to
         * ensure that the PHY is completely disabled.
         */
        wmb();
}

static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
        { .supply = "vddio", .init_load_uA = 100000 },
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
        .has_phy_regulator = true,
        .regulator_data = dsi_phy_28nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
                .pll_init = dsi_pll_28nm_init,
                .save_pll_state = dsi_28nm_pll_save_state,
                .restore_pll_state = dsi_28nm_pll_restore_state,
        },
        .min_pll_rate = VCO_MIN_RATE,
        .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0xfd922b00, 0xfd923100 },
        .num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
        .has_phy_regulator = true,
        .regulator_data = dsi_phy_28nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
                .pll_init = dsi_pll_28nm_init,
                .save_pll_state = dsi_28nm_pll_save_state,
                .restore_pll_state = dsi_28nm_pll_restore_state,
        },
        .min_pll_rate = VCO_MIN_RATE,
        .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x1a94400, 0x1a96400 },
        .num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
        .has_phy_regulator = true,
        .regulator_data = dsi_phy_28nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
                .pll_init = dsi_pll_28nm_init,
                .save_pll_state = dsi_28nm_pll_save_state,
                .restore_pll_state = dsi_28nm_pll_restore_state,
        },
        .min_pll_rate = VCO_MIN_RATE,
        .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x1a98500 },
        .num_dsi_phy = 1,
        .quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
        .has_phy_regulator = true,
        .regulator_data = dsi_phy_28nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
                .pll_init = dsi_pll_28nm_init,
                .save_pll_state = dsi_28nm_pll_save_state,
                .restore_pll_state = dsi_28nm_pll_restore_state,
        },
        .min_pll_rate = VCO_MIN_RATE,
        .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0xfd922b00 },
        .num_dsi_phy = 1,
        .quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs = {
        .has_phy_regulator = true,
        .regulator_data = dsi_phy_28nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
                .pll_init = dsi_pll_28nm_init,
                .save_pll_state = dsi_28nm_pll_save_state,
                .restore_pll_state = dsi_28nm_pll_restore_state,
        },
        .min_pll_rate = VCO_MIN_RATE,
        .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x1a94400, 0x1a96400 },
        .num_dsi_phy = 2,
        .quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};