1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Purna Chandra Mandal,<purna.mandal@microchip.com>
4 * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
5 */
6 #include <linux/clk-provider.h>
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_data/pic32.h>
13
14 #include "clk-core.h"
15
16 /* OSCCON Reg fields */
17 #define OSC_CUR_MASK 0x07
18 #define OSC_CUR_SHIFT 12
19 #define OSC_NEW_MASK 0x07
20 #define OSC_NEW_SHIFT 8
21 #define OSC_SWEN BIT(0)
22
23 /* SPLLCON Reg fields */
24 #define PLL_RANGE_MASK 0x07
25 #define PLL_RANGE_SHIFT 0
26 #define PLL_ICLK_MASK 0x01
27 #define PLL_ICLK_SHIFT 7
28 #define PLL_IDIV_MASK 0x07
29 #define PLL_IDIV_SHIFT 8
30 #define PLL_ODIV_MASK 0x07
31 #define PLL_ODIV_SHIFT 24
32 #define PLL_MULT_MASK 0x7F
33 #define PLL_MULT_SHIFT 16
34 #define PLL_MULT_MAX 128
35 #define PLL_ODIV_MIN 1
36 #define PLL_ODIV_MAX 5
37
38 /* Peripheral Bus Clock Reg Fields */
39 #define PB_DIV_MASK 0x7f
40 #define PB_DIV_SHIFT 0
41 #define PB_DIV_READY BIT(11)
42 #define PB_DIV_ENABLE BIT(15)
43 #define PB_DIV_MAX 128
44 #define PB_DIV_MIN 0
45
46 /* Reference Oscillator Control Reg fields */
47 #define REFO_SEL_MASK 0x0f
48 #define REFO_SEL_SHIFT 0
49 #define REFO_ACTIVE BIT(8)
50 #define REFO_DIVSW_EN BIT(9)
51 #define REFO_OE BIT(12)
52 #define REFO_ON BIT(15)
53 #define REFO_DIV_SHIFT 16
54 #define REFO_DIV_MASK 0x7fff
55
56 /* Reference Oscillator Trim Register Fields */
57 #define REFO_TRIM_REG 0x10
58 #define REFO_TRIM_MASK 0x1ff
59 #define REFO_TRIM_SHIFT 23
60 #define REFO_TRIM_MAX 511
61
62 /* Mux Slew Control Register fields */
63 #define SLEW_BUSY BIT(0)
64 #define SLEW_DOWNEN BIT(1)
65 #define SLEW_UPEN BIT(2)
66 #define SLEW_DIV 0x07
67 #define SLEW_DIV_SHIFT 8
68 #define SLEW_SYSDIV 0x0f
69 #define SLEW_SYSDIV_SHIFT 20
70
71 /* Clock Poll Timeout */
72 #define LOCK_TIMEOUT_US USEC_PER_MSEC
73
/* SoC specific clock needed during SPLL clock rate switch; cached by sclk_init() */
static struct clk_hw *pic32_sclk_hw;

/* add instruction pipeline delay while CPU clock is in-transition. */
#define cpu_nop5()			\
do {					\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
} while (0)
86
/* Peripheral bus clocks */
/**
 * struct pic32_periph_clk - peripheral bus clock (PBCLK) with gate + divider
 * @hw: handle for the common clock framework
 * @ctrl_reg: PBxDIV control register (enable, ready and divider fields)
 * @core: shared driver context (register lock, iobase, device)
 */
struct pic32_periph_clk {
	struct clk_hw hw;
	void __iomem *ctrl_reg;
	struct pic32_clk_common *core;
};

#define clkhw_to_pbclk(_hw) container_of(_hw, struct pic32_periph_clk, hw)
95
pbclk_is_enabled(struct clk_hw * hw)96 static int pbclk_is_enabled(struct clk_hw *hw)
97 {
98 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
99
100 return readl(pb->ctrl_reg) & PB_DIV_ENABLE;
101 }
102
pbclk_enable(struct clk_hw * hw)103 static int pbclk_enable(struct clk_hw *hw)
104 {
105 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
106
107 writel(PB_DIV_ENABLE, PIC32_SET(pb->ctrl_reg));
108 return 0;
109 }
110
pbclk_disable(struct clk_hw * hw)111 static void pbclk_disable(struct clk_hw *hw)
112 {
113 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
114
115 writel(PB_DIV_ENABLE, PIC32_CLR(pb->ctrl_reg));
116 }
117
/*
 * calc_best_divided_rate() - closest achievable rate for an integer divider.
 * @rate: target rate
 * @parent_rate: input rate to be divided
 * @divider_max: largest usable divider
 * @divider_min: smallest usable divider
 *
 * eq. clk_rate = parent_rate / divider.
 *
 * Tries the truncated divider and the next divider up and returns
 * whichever divided rate lands closest to the target.
 */
static unsigned long calc_best_divided_rate(unsigned long rate,
					    unsigned long parent_rate,
					    u32 divider_max,
					    u32 divider_min)
{
	unsigned long divided_rate, divided_rate_down, best_rate;
	unsigned long div, div_up;

	/*
	 * Guard against a zero divider: @rate may exceed @parent_rate
	 * (truncated div == 0) and @divider_min may itself be 0 (PB_DIV_MIN),
	 * either of which would make "parent_rate / div" divide by zero.
	 */
	div = rate ? parent_rate / rate : 1;
	div = clamp_val(div, max_t(u32, divider_min, 1), divider_max);
	div_up = clamp_val(div + 1, divider_min, divider_max);

	divided_rate = parent_rate / div;
	divided_rate_down = parent_rate / div_up;
	if (abs(rate - divided_rate_down) < abs(rate - divided_rate))
		best_rate = divided_rate_down;
	else
		best_rate = divided_rate;

	return best_rate;
}
143
pbclk_read_pbdiv(struct pic32_periph_clk * pb)144 static inline u32 pbclk_read_pbdiv(struct pic32_periph_clk *pb)
145 {
146 return ((readl(pb->ctrl_reg) >> PB_DIV_SHIFT) & PB_DIV_MASK) + 1;
147 }
148
pbclk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)149 static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
150 unsigned long parent_rate)
151 {
152 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
153
154 return parent_rate / pbclk_read_pbdiv(pb);
155 }
156
pbclk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)157 static int pbclk_determine_rate(struct clk_hw *hw,
158 struct clk_rate_request *req)
159 {
160 req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
161 PB_DIV_MAX, PB_DIV_MIN);
162
163 return 0;
164 }
165
pbclk_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)166 static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
167 unsigned long parent_rate)
168 {
169 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
170 unsigned long flags;
171 u32 v, div;
172 int err;
173
174 /* check & wait for DIV_READY */
175 err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
176 1, LOCK_TIMEOUT_US);
177 if (err)
178 return err;
179
180 /* calculate clkdiv and best rate */
181 div = DIV_ROUND_CLOSEST(parent_rate, rate);
182
183 spin_lock_irqsave(&pb->core->reg_lock, flags);
184
185 /* apply new div */
186 v = readl(pb->ctrl_reg);
187 v &= ~PB_DIV_MASK;
188 v |= (div - 1);
189
190 pic32_syskey_unlock();
191
192 writel(v, pb->ctrl_reg);
193
194 spin_unlock_irqrestore(&pb->core->reg_lock, flags);
195
196 /* wait again for DIV_READY */
197 err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
198 1, LOCK_TIMEOUT_US);
199 if (err)
200 return err;
201
202 /* confirm that new div is applied correctly */
203 return (pbclk_read_pbdiv(pb) == div) ? 0 : -EBUSY;
204 }
205
/* Peripheral bus clock operations: gate plus integer divider. */
const struct clk_ops pic32_pbclk_ops = {
	.enable = pbclk_enable,
	.disable = pbclk_disable,
	.is_enabled = pbclk_is_enabled,
	.recalc_rate = pbclk_recalc_rate,
	.determine_rate = pbclk_determine_rate,
	.set_rate = pbclk_set_rate,
};
214
pic32_periph_clk_register(const struct pic32_periph_clk_data * desc,struct pic32_clk_common * core)215 struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *desc,
216 struct pic32_clk_common *core)
217 {
218 struct pic32_periph_clk *pbclk;
219 struct clk *clk;
220
221 pbclk = devm_kzalloc(core->dev, sizeof(*pbclk), GFP_KERNEL);
222 if (!pbclk)
223 return ERR_PTR(-ENOMEM);
224
225 pbclk->hw.init = &desc->init_data;
226 pbclk->core = core;
227 pbclk->ctrl_reg = desc->ctrl_reg + core->iobase;
228
229 clk = devm_clk_register(core->dev, &pbclk->hw);
230 if (IS_ERR(clk)) {
231 dev_err(core->dev, "%s: clk_register() failed\n", __func__);
232 devm_kfree(core->dev, pbclk);
233 }
234
235 return clk;
236 }
237
238 /* Reference oscillator operations */
/**
 * struct pic32_ref_osc - reference oscillator (REFOCLK): mux + fractional div
 * @hw: handle for the common clock framework
 * @ctrl_reg: REFOxCON control register; trim register follows at +REFO_TRIM_REG
 * @parent_map: optional table translating parent index to hardware selector
 * @core: shared driver context (register lock, iobase, device)
 */
struct pic32_ref_osc {
	struct clk_hw hw;
	void __iomem *ctrl_reg;
	const u32 *parent_map;
	struct pic32_clk_common *core;
};

#define clkhw_to_refosc(_hw) container_of(_hw, struct pic32_ref_osc, hw)
247
roclk_is_enabled(struct clk_hw * hw)248 static int roclk_is_enabled(struct clk_hw *hw)
249 {
250 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
251
252 return readl(refo->ctrl_reg) & REFO_ON;
253 }
254
roclk_enable(struct clk_hw * hw)255 static int roclk_enable(struct clk_hw *hw)
256 {
257 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
258
259 writel(REFO_ON | REFO_OE, PIC32_SET(refo->ctrl_reg));
260 return 0;
261 }
262
roclk_disable(struct clk_hw * hw)263 static void roclk_disable(struct clk_hw *hw)
264 {
265 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
266
267 writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
268 }
269
/* Start gated; the clock framework enables the oscillator on demand. */
static int roclk_init(struct clk_hw *hw)
{
	roclk_disable(hw);

	return 0;
}
277
roclk_get_parent(struct clk_hw * hw)278 static u8 roclk_get_parent(struct clk_hw *hw)
279 {
280 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
281 u32 v, i;
282
283 v = (readl(refo->ctrl_reg) >> REFO_SEL_SHIFT) & REFO_SEL_MASK;
284
285 if (refo->parent_map) {
286 for (i = 0; i < clk_hw_get_num_parents(hw); i++)
287 if (refo->parent_map[i] == v)
288 return i;
289 }
290
291 return v;
292 }
293
/* Compute output rate from divider + trim.
 *
 * fout = fin / [2 * {div + (trim / 512)}]
 *      = fin * 512 / [1024 * div + 2 * trim]
 *      = fin * 256 / (512 * div + trim)
 *      = (fin << 8) / ((div << 9) + trim)
 */
static unsigned long roclk_calc_rate(unsigned long parent_rate,
				     u32 rodiv, u32 rotrim)
{
	u64 rate64;

	if (rotrim) {
		/* fractional path: scale up, then divide by combined field */
		rodiv = (rodiv << 9) + rotrim;
		rate64 = (u64)parent_rate << 8;
		do_div(rate64, rodiv);
		return rate64;
	}

	/* integer-only path: divider of N means divide-by-2N */
	if (rodiv)
		return parent_rate / (rodiv << 1);

	/* no division at all */
	return parent_rate;
}
316
/* Find integer approximation of floating-point arithmetic.
 *
 * fout = fin / [2 * {rodiv + (rotrim / 512)}]          ... (1)
 * ie. fout = (fin * 256) / [(512 * rodiv) + rotrim]    ... from (1)
 * ie. rotrim = ((fin * 256) / fout) - (512 * rodiv)
 *
 * A parent no faster than the target needs no division at all, in which
 * case both fields are zero.
 */
static void roclk_calc_div_trim(unsigned long rate,
				unsigned long parent_rate,
				u32 *rodiv_p, u32 *rotrim_p)
{
	u32 rodiv = 0, rotrim = 0;

	if (parent_rate > rate) {
		u32 div = parent_rate / (rate << 1);
		u64 frac = (u64)parent_rate << 8;

		do_div(frac, rate);
		frac -= (u64)(div << 9);

		/* saturate both values at their hardware field width */
		rodiv = (div > REFO_DIV_MASK) ? REFO_DIV_MASK : div;
		rotrim = (frac >= REFO_TRIM_MAX) ? REFO_TRIM_MAX : frac;
	}

	if (rodiv_p)
		*rodiv_p = rodiv;

	if (rotrim_p)
		*rotrim_p = rotrim;
}
357
roclk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)358 static unsigned long roclk_recalc_rate(struct clk_hw *hw,
359 unsigned long parent_rate)
360 {
361 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
362 u32 v, rodiv, rotrim;
363
364 /* get rodiv */
365 v = readl(refo->ctrl_reg);
366 rodiv = (v >> REFO_DIV_SHIFT) & REFO_DIV_MASK;
367
368 /* get trim */
369 v = readl(refo->ctrl_reg + REFO_TRIM_REG);
370 rotrim = (v >> REFO_TRIM_SHIFT) & REFO_TRIM_MASK;
371
372 return roclk_calc_rate(parent_rate, rodiv, rotrim);
373 }
374
roclk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)375 static int roclk_determine_rate(struct clk_hw *hw,
376 struct clk_rate_request *req)
377 {
378 struct clk_hw *parent_clk, *best_parent_clk = NULL;
379 unsigned int i, delta, best_delta = -1;
380 unsigned long parent_rate, best_parent_rate = 0;
381 unsigned long best = 0, nearest_rate;
382
383 /* find a parent which can generate nearest clkrate >= rate */
384 for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
385 u32 rotrim, rodiv;
386
387 /* get parent */
388 parent_clk = clk_hw_get_parent_by_index(hw, i);
389 if (!parent_clk)
390 continue;
391
392 /* skip if parent runs slower than target rate */
393 parent_rate = clk_hw_get_rate(parent_clk);
394 if (req->rate > parent_rate)
395 continue;
396
397 /* calculate dividers for new rate */
398 roclk_calc_div_trim(req->rate, req->best_parent_rate, &rodiv, &rotrim);
399
400 /* caclulate new rate (rounding) based on new rodiv & rotrim */
401 nearest_rate = roclk_calc_rate(req->best_parent_rate, rodiv, rotrim);
402
403 delta = abs(nearest_rate - req->rate);
404 if ((nearest_rate >= req->rate) && (delta < best_delta)) {
405 best_parent_clk = parent_clk;
406 best_parent_rate = parent_rate;
407 best = nearest_rate;
408 best_delta = delta;
409
410 if (delta == 0)
411 break;
412 }
413 }
414
415 /* if no match found, retain old rate */
416 if (!best_parent_clk) {
417 pr_err("%s:%s, no parent found for rate %lu.\n",
418 __func__, clk_hw_get_name(hw), req->rate);
419 return clk_hw_get_rate(hw);
420 }
421
422 pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
423 clk_hw_get_name(hw), req->rate,
424 clk_hw_get_name(best_parent_clk), best_parent_rate,
425 best, best_delta);
426
427 if (req->best_parent_rate)
428 req->best_parent_rate = best_parent_rate;
429
430 if (req->best_parent_hw)
431 req->best_parent_hw = best_parent_clk;
432
433 return best;
434 }
435
/*
 * Switch the reference oscillator's input source. The hardware only
 * accepts a new selector while the oscillator is idle, so wait for
 * REFO_ACTIVE to clear first.
 */
static int roclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 sel;
	int err;

	/* translate framework index into the hardware mux selector */
	if (refo->parent_map)
		index = refo->parent_map[index];

	/* wait until ACTIVE bit is zero or timeout */
	err = readl_poll_timeout(refo->ctrl_reg, sel, !(sel & REFO_ACTIVE),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw));
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);

	/* system registers are write-protected: unlock first */
	pic32_syskey_unlock();

	/* program the new source selector */
	sel = readl(refo->ctrl_reg);
	sel &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	sel |= index << REFO_SEL_SHIFT;
	writel(sel, refo->ctrl_reg);

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return 0;
}
469
/*
 * roclk_set_rate_and_parent() - program source, divider and trim together.
 *
 * Computes rodiv/rotrim for @rate from @parent_rate, waits for the
 * oscillator to go idle, then programs selector + divider and the trim
 * register under the register lock. Returns 0 on success or a negative
 * poll-timeout error.
 */
static int roclk_set_rate_and_parent(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate,
				     u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 trim, rodiv, v;
	int err;

	/* calculate new rodiv & rotrim for new rate */
	roclk_calc_div_trim(rate, parent_rate, &rodiv, &trim);

	pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
		 parent_rate, rate, rodiv, trim);

	/* wait until neither a source nor a divider switch is in progress */
	err = readl_poll_timeout(refo->ctrl_reg, v,
				 !(v & (REFO_ACTIVE | REFO_DIVSW_EN)),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll timedout, clock is still active\n", __func__);
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);
	v = readl(refo->ctrl_reg);

	/* system registers are write-protected; unlock before writing */
	pic32_syskey_unlock();

	/* apply parent, if required */
	if (refo->parent_map)
		index = refo->parent_map[index];

	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	/* apply RODIV (integer part of the divider) */
	v &= ~(REFO_DIV_MASK << REFO_DIV_SHIFT);
	v |= rodiv << REFO_DIV_SHIFT;
	writel(v, refo->ctrl_reg);

	/* apply ROTRIM (fractional part lives in a separate register) */
	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
	v &= ~(REFO_TRIM_MASK << REFO_TRIM_SHIFT);
	v |= trim << REFO_TRIM_SHIFT;
	writel(v, refo->ctrl_reg + REFO_TRIM_REG);

	/* enable & activate divider switching */
	writel(REFO_ON | REFO_DIVSW_EN, PIC32_SET(refo->ctrl_reg));

	/* wait till divider switch completes */
	err = readl_poll_timeout_atomic(refo->ctrl_reg, v, !(v & REFO_DIVSW_EN),
					1, LOCK_TIMEOUT_US);
	/* leave the clk gated as it was */
	writel(REFO_ON, PIC32_CLR(refo->ctrl_reg));

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return err;
}
531
/* Rate-only change: reuse the combined path with the current parent. */
static int roclk_set_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long parent_rate)
{
	return roclk_set_rate_and_parent(hw, rate, parent_rate,
					 roclk_get_parent(hw));
}
539
/* Reference oscillator (REFOCLK) operations: gate, mux and fractional divider. */
const struct clk_ops pic32_roclk_ops = {
	.enable = roclk_enable,
	.disable = roclk_disable,
	.is_enabled = roclk_is_enabled,
	.get_parent = roclk_get_parent,
	.set_parent = roclk_set_parent,
	.determine_rate = roclk_determine_rate,
	.recalc_rate = roclk_recalc_rate,
	.set_rate_and_parent = roclk_set_rate_and_parent,
	.set_rate = roclk_set_rate,
	.init = roclk_init,
};
552
pic32_refo_clk_register(const struct pic32_ref_osc_data * data,struct pic32_clk_common * core)553 struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
554 struct pic32_clk_common *core)
555 {
556 struct pic32_ref_osc *refo;
557 struct clk *clk;
558
559 refo = devm_kzalloc(core->dev, sizeof(*refo), GFP_KERNEL);
560 if (!refo)
561 return ERR_PTR(-ENOMEM);
562
563 refo->core = core;
564 refo->hw.init = &data->init_data;
565 refo->ctrl_reg = data->ctrl_reg + core->iobase;
566 refo->parent_map = data->parent_map;
567
568 clk = devm_clk_register(core->dev, &refo->hw);
569 if (IS_ERR(clk))
570 dev_err(core->dev, "%s: clk_register() failed\n", __func__);
571
572 return clk;
573 }
574
/**
 * struct pic32_sys_pll - system PLL (SPLL)
 * @hw: handle for the common clock framework
 * @ctrl_reg: SPLLCON control register (multiplier, output divider)
 * @status_reg: register holding the PLL lock status
 * @lock_mask: bit within @status_reg signalling PLL lock
 * @idiv: PLL input divider, cached at registration time
 * @core: shared driver context (register lock, iobase, device)
 */
struct pic32_sys_pll {
	struct clk_hw hw;
	void __iomem *ctrl_reg;
	void __iomem *status_reg;
	u32 lock_mask;
	u32 idiv; /* PLL iclk divider, treated fixed */
	struct pic32_clk_common *core;
};

#define clkhw_to_spll(_hw) container_of(_hw, struct pic32_sys_pll, hw)
585
/* The output-divider field encodes a power-of-two divider (1 << odiv). */
static inline u32 spll_odiv_to_divider(u32 odiv)
{
	return 1 << clamp_val(odiv, PLL_ODIV_MIN, PLL_ODIV_MAX);
}
592
/*
 * Exhaustively search multiplier x output-divider for the nearest
 * achievable rate >= @rate. On success stores the register-encoded
 * multiplier (mult - 1) and the odiv field via the optional out-pointers
 * and returns the achievable rate; returns 0 when nothing fits.
 */
static unsigned long spll_calc_mult_div(struct pic32_sys_pll *pll,
					unsigned long rate,
					unsigned long parent_rate,
					u32 *mult_p, u32 *odiv_p)
{
	unsigned int best_delta = -1, match_found = 0;
	u32 m, d, best_mul = 1, best_div = 1;
	unsigned long best_rate = rate;

	/* the PLL input divider is fixed for the life of the driver */
	parent_rate /= pll->idiv;

	for (m = 1; m <= PLL_MULT_MAX; m++) {
		for (d = PLL_ODIV_MIN; d <= PLL_ODIV_MAX; d++) {
			unsigned long new_rate;
			unsigned int delta;
			u64 rate64 = (u64)parent_rate * m;

			do_div(rate64, 1 << d);
			new_rate = rate64;

			delta = abs(rate - new_rate);
			if ((new_rate >= rate) && (delta < best_delta)) {
				best_delta = delta;
				best_rate = new_rate;
				best_mul = m;
				best_div = d;
				match_found = 1;
			}
		}
	}

	if (!match_found) {
		pr_warn("spll: no match found\n");
		return 0;
	}

	pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
		 rate, parent_rate, best_mul, best_div, best_rate);

	/* the register encodes the multiplier as (mult - 1) */
	if (mult_p)
		*mult_p = best_mul - 1;

	if (odiv_p)
		*odiv_p = best_div;

	return best_rate;
}
638
spll_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)639 static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
640 unsigned long parent_rate)
641 {
642 struct pic32_sys_pll *pll = clkhw_to_spll(hw);
643 unsigned long pll_in_rate;
644 u32 mult, odiv, div, v;
645 u64 rate64;
646
647 v = readl(pll->ctrl_reg);
648 odiv = ((v >> PLL_ODIV_SHIFT) & PLL_ODIV_MASK);
649 mult = ((v >> PLL_MULT_SHIFT) & PLL_MULT_MASK) + 1;
650 div = spll_odiv_to_divider(odiv);
651
652 /* pll_in_rate = parent_rate / idiv
653 * pll_out_rate = pll_in_rate * mult / div;
654 */
655 pll_in_rate = parent_rate / pll->idiv;
656 rate64 = pll_in_rate;
657 rate64 *= mult;
658 do_div(rate64, div);
659
660 return rate64;
661 }
662
spll_clk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)663 static int spll_clk_determine_rate(struct clk_hw *hw,
664 struct clk_rate_request *req)
665 {
666 struct pic32_sys_pll *pll = clkhw_to_spll(hw);
667
668 req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate,
669 NULL, NULL);
670
671 return 0;
672 }
673
/*
 * spll_clk_set_rate() - reprogram the system PLL.
 *
 * Refuses to touch the PLL while it is feeding SYSCLK (the CPU would
 * lose its clock mid-switch). Programs multiplier/divider under the
 * register lock and waits for the PLL to re-lock. Returns 0, -EINVAL
 * (unreachable rate), -EBUSY (in use) or a poll-timeout error.
 */
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
	unsigned long ret, flags;
	u32 mult, odiv, v;
	int err;

	/* ret == 0 means no multiplier/divider pair reaches the rate */
	ret = spll_calc_mult_div(pll, rate, parent_rate, &mult, &odiv);
	if (!ret)
		return -EINVAL;

	/*
	 * We can't change SPLL counters when it is in-active use
	 * by SYSCLK. So check before applying new counters/rate.
	 */

	/* Is spll_clk active parent of sys_clk ? */
	if (unlikely(clk_hw_get_parent(pic32_sclk_hw) == hw)) {
		pr_err("%s: failed, clk in-use\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&pll->core->reg_lock, flags);

	/* apply new multiplier & divisor */
	v = readl(pll->ctrl_reg);
	v &= ~(PLL_MULT_MASK << PLL_MULT_SHIFT);
	v &= ~(PLL_ODIV_MASK << PLL_ODIV_SHIFT);
	v |= (mult << PLL_MULT_SHIFT) | (odiv << PLL_ODIV_SHIFT);

	/* sys unlock before write */
	pic32_syskey_unlock();

	writel(v, pll->ctrl_reg);
	cpu_relax();

	/* insert few nops (5-stage) to ensure CPU does not hang */
	cpu_nop5();
	cpu_nop5();

	/* Wait until PLL is locked (maximum 100 usecs). */
	err = readl_poll_timeout_atomic(pll->status_reg, v,
					v & pll->lock_mask, 1, 100);
	spin_unlock_irqrestore(&pll->core->reg_lock, flags);

	return err;
}
722
/* SPLL (system PLL) clock operations; rate-only, no gate or mux. */
const struct clk_ops pic32_spll_ops = {
	.recalc_rate = spll_clk_recalc_rate,
	.determine_rate = spll_clk_determine_rate,
	.set_rate = spll_clk_set_rate,
};
729
pic32_spll_clk_register(const struct pic32_sys_pll_data * data,struct pic32_clk_common * core)730 struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
731 struct pic32_clk_common *core)
732 {
733 struct pic32_sys_pll *spll;
734 struct clk *clk;
735
736 spll = devm_kzalloc(core->dev, sizeof(*spll), GFP_KERNEL);
737 if (!spll)
738 return ERR_PTR(-ENOMEM);
739
740 spll->core = core;
741 spll->hw.init = &data->init_data;
742 spll->ctrl_reg = data->ctrl_reg + core->iobase;
743 spll->status_reg = data->status_reg + core->iobase;
744 spll->lock_mask = data->lock_mask;
745
746 /* cache PLL idiv; PLL driver uses it as constant.*/
747 spll->idiv = (readl(spll->ctrl_reg) >> PLL_IDIV_SHIFT) & PLL_IDIV_MASK;
748 spll->idiv += 1;
749
750 clk = devm_clk_register(core->dev, &spll->hw);
751 if (IS_ERR(clk))
752 dev_err(core->dev, "sys_pll: clk_register() failed\n");
753
754 return clk;
755 }
756
/* System mux clock (aka SCLK) */
758
/**
 * struct pic32_sys_clk - system clock (SCLK): mux with slewed post-divider
 * @hw: handle for the common clock framework
 * @mux_reg: oscillator source-select register (OSCCON fields)
 * @slew_reg: slew control register (post-divider, slew enables, busy flag)
 * @slew_div: slew divider applied during transitions, 0 disables slewing
 * @parent_map: optional table translating parent index to hardware selector
 * @core: shared driver context (register lock, iobase, device)
 */
struct pic32_sys_clk {
	struct clk_hw hw;
	void __iomem *mux_reg;
	void __iomem *slew_reg;
	u32 slew_div;
	const u32 *parent_map;
	struct pic32_clk_common *core;
};

#define clkhw_to_sys_clk(_hw) container_of(_hw, struct pic32_sys_clk, hw)
769
sclk_get_rate(struct clk_hw * hw,unsigned long parent_rate)770 static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
771 {
772 struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
773 u32 div;
774
775 div = (readl(sclk->slew_reg) >> SLEW_SYSDIV_SHIFT) & SLEW_SYSDIV;
776 div += 1; /* sys-div to divider */
777
778 return parent_rate / div;
779 }
780
sclk_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)781 static int sclk_set_rate(struct clk_hw *hw,
782 unsigned long rate, unsigned long parent_rate)
783 {
784 struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
785 unsigned long flags;
786 u32 v, div;
787 int err;
788
789 div = parent_rate / rate;
790
791 spin_lock_irqsave(&sclk->core->reg_lock, flags);
792
793 /* apply new div */
794 v = readl(sclk->slew_reg);
795 v &= ~(SLEW_SYSDIV << SLEW_SYSDIV_SHIFT);
796 v |= (div - 1) << SLEW_SYSDIV_SHIFT;
797
798 pic32_syskey_unlock();
799
800 writel(v, sclk->slew_reg);
801
802 /* wait until BUSY is cleared */
803 err = readl_poll_timeout_atomic(sclk->slew_reg, v,
804 !(v & SLEW_BUSY), 1, LOCK_TIMEOUT_US);
805
806 spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
807
808 return err;
809 }
810
sclk_get_parent(struct clk_hw * hw)811 static u8 sclk_get_parent(struct clk_hw *hw)
812 {
813 struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
814 u32 i, v;
815
816 v = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
817
818 if (sclk->parent_map) {
819 for (i = 0; i < clk_hw_get_num_parents(hw); i++)
820 if (sclk->parent_map[i] == v)
821 return i;
822 }
823
824 return v;
825 }
826
/*
 * sclk_set_parent() - switch the system clock source.
 *
 * Programs the "new oscillator" field, triggers the switch, then verifies
 * the "current oscillator" field afterwards since hardware may reject the
 * request. Returns 0, a poll-timeout error, or -EBUSY when the switch was
 * rejected.
 */
static int sclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 nosc, cosc, v;
	int err;

	spin_lock_irqsave(&sclk->core->reg_lock, flags);

	/* find new_osc */
	nosc = sclk->parent_map ? sclk->parent_map[index] : index;

	/* set new parent */
	v = readl(sclk->mux_reg);
	v &= ~(OSC_NEW_MASK << OSC_NEW_SHIFT);
	v |= nosc << OSC_NEW_SHIFT;

	pic32_syskey_unlock();

	writel(v, sclk->mux_reg);

	/* initiate switch */
	writel(OSC_SWEN, PIC32_SET(sclk->mux_reg));
	cpu_relax();

	/* add nop to flush pipeline (as cpu_clk is in-flux) */
	cpu_nop5();

	/*
	 * wait for SWEN bit to clear.
	 * NOTE(review): OSC_SWEN is defined as a mux_reg bit but is polled
	 * here via slew_reg; bit 0 there is SLEW_BUSY. Confirm against the
	 * datasheet whether mux_reg was intended.
	 */
	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
					!(v & OSC_SWEN), 1, LOCK_TIMEOUT_US);

	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);

	/*
	 * SCLK clock-switching logic might reject a clock switching request
	 * if pre-requisites (like new clk_src not present or unstable) are
	 * not met.
	 * So confirm before claiming success.
	 */
	cosc = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
	if (cosc != nosc) {
		pr_err("%s: err, failed to set_parent() to %d, current %d\n",
		       clk_hw_get_name(hw), nosc, cosc);
		err = -EBUSY;
	}

	return err;
}
876
sclk_init(struct clk_hw * hw)877 static int sclk_init(struct clk_hw *hw)
878 {
879 struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
880 unsigned long flags;
881 u32 v;
882
883 /* Maintain reference to this clk, required in spll_clk_set_rate() */
884 pic32_sclk_hw = hw;
885
886 /* apply slew divider on both up and down scaling */
887 if (sclk->slew_div) {
888 spin_lock_irqsave(&sclk->core->reg_lock, flags);
889 v = readl(sclk->slew_reg);
890 v &= ~(SLEW_DIV << SLEW_DIV_SHIFT);
891 v |= sclk->slew_div << SLEW_DIV_SHIFT;
892 v |= SLEW_DOWNEN | SLEW_UPEN;
893 writel(v, sclk->slew_reg);
894 spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
895 }
896
897 return 0;
898 }
899
/*
 * sclk with post-divider: full mux + divider operations. sclk_init()
 * also caches this clk_hw in pic32_sclk_hw for spll_clk_set_rate().
 */
const struct clk_ops pic32_sclk_ops = {
	.get_parent = sclk_get_parent,
	.set_parent = sclk_set_parent,
	.set_rate = sclk_set_rate,
	.recalc_rate = sclk_get_rate,
	.init = sclk_init,
	.determine_rate = __clk_mux_determine_rate,
};
909
/* sclk with no slew and no post-divider: mux-only operations. */
const struct clk_ops pic32_sclk_no_div_ops = {
	.get_parent = sclk_get_parent,
	.set_parent = sclk_set_parent,
	.init = sclk_init,
	.determine_rate = __clk_mux_determine_rate,
};
917
pic32_sys_clk_register(const struct pic32_sys_clk_data * data,struct pic32_clk_common * core)918 struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
919 struct pic32_clk_common *core)
920 {
921 struct pic32_sys_clk *sclk;
922 struct clk *clk;
923
924 sclk = devm_kzalloc(core->dev, sizeof(*sclk), GFP_KERNEL);
925 if (!sclk)
926 return ERR_PTR(-ENOMEM);
927
928 sclk->core = core;
929 sclk->hw.init = &data->init_data;
930 sclk->mux_reg = data->mux_reg + core->iobase;
931 sclk->slew_reg = data->slew_reg + core->iobase;
932 sclk->slew_div = data->slew_div;
933 sclk->parent_map = data->parent_map;
934
935 clk = devm_clk_register(core->dev, &sclk->hw);
936 if (IS_ERR(clk))
937 dev_err(core->dev, "%s: clk register failed\n", __func__);
938
939 return clk;
940 }
941
942 /* secondary oscillator */
/**
 * struct pic32_sec_osc - fixed-rate secondary oscillator (SOSC)
 * @hw: handle for the common clock framework
 * @enable_reg: register containing the oscillator enable bit
 * @status_reg: register containing the ready/status bit
 * @enable_mask: enable bit within @enable_reg
 * @status_mask: ready bit within @status_reg
 * @fixed_rate: nominal crystal rate reported by recalc_rate
 * @core: shared driver context (register lock, iobase, device)
 */
struct pic32_sec_osc {
	struct clk_hw hw;
	void __iomem *enable_reg;
	void __iomem *status_reg;
	u32 enable_mask;
	u32 status_mask;
	unsigned long fixed_rate;
	struct pic32_clk_common *core;
};

#define clkhw_to_sosc(_hw) container_of(_hw, struct pic32_sec_osc, hw)
sosc_clk_enable(struct clk_hw * hw)954 static int sosc_clk_enable(struct clk_hw *hw)
955 {
956 struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
957 u32 v;
958
959 /* enable SOSC */
960 pic32_syskey_unlock();
961 writel(sosc->enable_mask, PIC32_SET(sosc->enable_reg));
962
963 /* wait till warm-up period expires or ready-status is updated */
964 return readl_poll_timeout_atomic(sosc->status_reg, v,
965 v & sosc->status_mask, 1, 100);
966 }
967
sosc_clk_disable(struct clk_hw * hw)968 static void sosc_clk_disable(struct clk_hw *hw)
969 {
970 struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
971
972 pic32_syskey_unlock();
973 writel(sosc->enable_mask, PIC32_CLR(sosc->enable_reg));
974 }
975
sosc_clk_is_enabled(struct clk_hw * hw)976 static int sosc_clk_is_enabled(struct clk_hw *hw)
977 {
978 struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
979 u32 enabled, ready;
980
981 /* check enabled and ready status */
982 enabled = readl(sosc->enable_reg) & sosc->enable_mask;
983 ready = readl(sosc->status_reg) & sosc->status_mask;
984
985 return enabled && ready;
986 }
987
sosc_clk_calc_rate(struct clk_hw * hw,unsigned long parent_rate)988 static unsigned long sosc_clk_calc_rate(struct clk_hw *hw,
989 unsigned long parent_rate)
990 {
991 return clkhw_to_sosc(hw)->fixed_rate;
992 }
993
/* Secondary oscillator operations: gate plus fixed rate. */
const struct clk_ops pic32_sosc_ops = {
	.enable = sosc_clk_enable,
	.disable = sosc_clk_disable,
	.is_enabled = sosc_clk_is_enabled,
	.recalc_rate = sosc_clk_calc_rate,
};
1000
pic32_sosc_clk_register(const struct pic32_sec_osc_data * data,struct pic32_clk_common * core)1001 struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
1002 struct pic32_clk_common *core)
1003 {
1004 struct pic32_sec_osc *sosc;
1005
1006 sosc = devm_kzalloc(core->dev, sizeof(*sosc), GFP_KERNEL);
1007 if (!sosc)
1008 return ERR_PTR(-ENOMEM);
1009
1010 sosc->core = core;
1011 sosc->hw.init = &data->init_data;
1012 sosc->fixed_rate = data->fixed_rate;
1013 sosc->enable_mask = data->enable_mask;
1014 sosc->status_mask = data->status_mask;
1015 sosc->enable_reg = data->enable_reg + core->iobase;
1016 sosc->status_reg = data->status_reg + core->iobase;
1017
1018 return devm_clk_register(core->dev, &sosc->hw);
1019 }
1020