1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * RZ/G2L Clock Pulse Generator
4 *
5 * Copyright (C) 2021 Renesas Electronics Corp.
6 *
7 * Based on renesas-cpg-mssr.c
8 *
9 * Copyright (C) 2015 Glider bvba
10 * Copyright (C) 2013 Ideas On Board SPRL
11 * Copyright (C) 2015 Renesas Electronics Corp.
12 */
13
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
31
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33
34 #include "rzg2l-cpg.h"
35
#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/*
 * Clock configuration words pack register offset, bit shift and field width:
 * bits [31:20] register offset, bits [19:12] shift, bits [11:8] width.
 * All macro arguments are fully parenthesized so compound expressions such
 * as GET_SHIFT(a | b) evaluate correctly.
 */
#define GET_SHIFT(val)		(((val) >> 12) & 0xff)
#define GET_WIDTH(val)		(((val) >> 8) & 0xf)

/* SAM PLL divider fields packed in the CLK1/CLK2 registers */
#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))

/* RZ/G3S PLL divider fields */
#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)
#define RZG3S_SEL_PLL		BIT(0)

/* Module clock on/reset control registers and their monitor counterparts */
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		(((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	(((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	(((val) >> 12) & 0xfff)
#define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)

/* Write-enable bit that must accompany values in the CPG set registers */
#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)
69
/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table (maps parent index to register field value)
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider (0 when no restriction applies)
 * @max_rate: maximum rate for divider (0 when no cap applies)
 * @width: divider register field width, in bits
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

/**
 * struct rzg2l_pll5_param - PLL5 configuration parameters
 * @pl5_fracin: fractional part of the feedback multiplier (24-bit fixed point)
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the feedback multiplier (in MHz steps)
 * @pl5_postdiv1: post divider 1
 * @pl5_postdiv2: post divider 2
 * @pl5_spread: spread-spectrum modulation setting
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 mux and DSI divider parameters
 * @clksrc: mux clock source selector; non-zero selects the half-rate
 *          FOUT1PH0 input (see rzg2l_cpg_get_vclk_rate())
 * @dsi_div_a: DIV_DSI_A setting; divides by 2^dsi_div_a
 * @dsi_div_b: DIV_DSI_B setting; divides by (dsi_div_b + 1)
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
162
/* devm action callback: remove the OF clock provider on driver teardown. */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	struct device_node *np = data;

	of_clk_del_provider(np);
}
167
/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	/*
	 * Poll the status register field described by @conf until it clears
	 * (10 us poll period, 200 us timeout); a cleared field indicates the
	 * hardware has latched the last written value.
	 */
	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}
177
/*
 * Clock rate-change notifier for the SD clock mux: before any rate change
 * that does not target 266 MHz, park the mux on the 266 MHz input, because
 * direct 533 MHz <-> 400 MHz transitions are prohibited by the hardware.
 */
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	/* Nothing to do if the new rate is already the safe 266 MHz source. */
	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}
220
/*
 * Clock rate-change notifier for RZ/G3S dividers with an invalid
 * parent-rate/divider combination: if the hardware divider is currently 1
 * (register value 0) when the parent is about to switch to the rate for
 * which div = 1 is prohibited, bump the divider to 2 (register value 1).
 */
int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	/* Act only when an invalid rate is configured and new_rate divides it. */
	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/* Read the currently programmed divider field. */
	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * There are different constraints for the user of this notifiers as follows:
	 * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
	 * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
	 * As SD can have only one parent having 800MHz and OCTA div can have
	 * only one parent having 400MHz we took into account the parent rate
	 * at the beginning of function (by checking invalid_rate % new_rate).
	 * Now it is time to check the hardware divider and update it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update done. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}
267
rzg2l_register_notifier(struct clk_hw * hw,const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)268 static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
269 struct rzg2l_cpg_priv *priv)
270 {
271 struct notifier_block *nb;
272
273 if (!core->notifier)
274 return 0;
275
276 nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
277 if (!nb)
278 return -ENOMEM;
279
280 nb->notifier_call = core->notifier;
281
282 return clk_notifier_register(hw->clk, nb);
283 }
284
rzg3s_div_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)285 static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
286 unsigned long parent_rate)
287 {
288 struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
289 struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
290 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
291 u32 val;
292
293 val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
294 val >>= GET_SHIFT(clk_hw_data->conf);
295 val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
296
297 return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
298 CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
299 }
300
rzg3s_div_clk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)301 static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
302 {
303 struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
304 struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
305
306 if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
307 req->rate = div_hw_data->max_rate;
308
309 return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
310 CLK_DIVIDER_ROUND_CLOSEST);
311 }
312
/*
 * Program the divider field for @rate under the RMW lock and wait for the
 * hardware to acknowledge the update via the status register.
 */
static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	/* The write-enable bit accompanies the value, as everywhere in this driver. */
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};
342
/*
 * Register an RZ/G3S divider clock using rzg3s_div_clk_ops, deriving the
 * register field width from the divider table and hooking up the optional
 * rate-change notifier.
 */
static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	/* fls(max) - 1 == floor(log2(max)): bits needed for the widest divider. */
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}
401
/*
 * Register a generic divider core clock: table-based when a divider table
 * is supplied, plain (power-of-two style handled by the clk core) otherwise.
 */
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}
440
441 static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)442 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
443 struct rzg2l_cpg_priv *priv)
444 {
445 const struct clk_hw *clk_hw;
446
447 clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
448 core->parent_names, core->num_parents,
449 core->flag,
450 priv->base + GET_REG_OFFSET(core->conf),
451 GET_SHIFT(core->conf),
452 GET_WIDTH(core->conf),
453 core->mux_flags, &priv->rmw_lock);
454 if (IS_ERR(clk_hw))
455 return ERR_CAST(clk_hw);
456
457 return clk_hw->clk;
458 }
459
/*
 * Switch the SD mux to the parent at @index; mtable translates the index
 * into the register field value. The write is performed under the RMW lock
 * and the hardware acknowledgement is awaited via the status register.
 */
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/* Value and write-enable bit go out in a single write. */
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}
487
/* Read back the currently selected SD mux parent index. */
static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	/* Extract the selector field from the control register. */
	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};
507
508 static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)509 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
510 struct rzg2l_cpg_priv *priv)
511 {
512 struct sd_mux_hw_data *sd_mux_hw_data;
513 struct clk_init_data init;
514 struct clk_hw *clk_hw;
515 int ret;
516
517 sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
518 if (!sd_mux_hw_data)
519 return ERR_PTR(-ENOMEM);
520
521 sd_mux_hw_data->hw_data.priv = priv;
522 sd_mux_hw_data->hw_data.conf = core->conf;
523 sd_mux_hw_data->hw_data.sconf = core->sconf;
524 sd_mux_hw_data->mtable = core->mtable;
525
526 init.name = core->name;
527 init.ops = &rzg2l_cpg_sd_clk_mux_ops;
528 init.flags = core->flag;
529 init.num_parents = core->num_parents;
530 init.parent_names = core->parent_names;
531
532 clk_hw = &sd_mux_hw_data->hw_data.hw;
533 clk_hw->init = &init;
534
535 ret = devm_clk_hw_register(priv->dev, clk_hw);
536 if (ret)
537 return ERR_PTR(ret);
538
539 ret = rzg2l_register_notifier(clk_hw, core, priv);
540 if (ret) {
541 dev_err(priv->dev, "Failed to register notifier for %s\n",
542 core->name);
543 return ERR_PTR(ret);
544 }
545
546 return clk_hw->clk;
547 }
548
/*
 * Compute the PLL5 FOUTPOSTDIV rate closest to @rate and fill @params with
 * the PLL settings that realize it. The multiplier is split into an integer
 * part in MHz (pl5_intin) and a 24-bit fractional part (pl5_fracin);
 * refdiv/postdiv1/postdiv2 are fixed here (2/1/1).
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	/* FOUTVCO = EXTAL * (intin + fracin / 2^24) / refdiv */
	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	/* FOUTPOSTDIV = FOUTVCO / (postdiv1 * postdiv2) */
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 * @hw: clock hw
 * @conf: clock configuration
 * @rate: cached clock rate (0 until set_rate() has run)
 * @priv: CPG private data structure
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
579
rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)580 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
581 unsigned long parent_rate)
582 {
583 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
584 unsigned long rate = dsi_div->rate;
585
586 if (!rate)
587 rate = parent_rate;
588
589 return rate;
590 }
591
rzg2l_cpg_get_vclk_parent_rate(struct clk_hw * hw,unsigned long rate)592 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
593 unsigned long rate)
594 {
595 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
596 struct rzg2l_cpg_priv *priv = dsi_div->priv;
597 struct rzg2l_pll5_param params;
598 unsigned long parent_rate;
599
600 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(¶ms, rate);
601
602 if (priv->mux_dsi_div_params.clksrc)
603 parent_rate /= 2;
604
605 return parent_rate;
606 }
607
rzg2l_cpg_dsi_div_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)608 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
609 struct clk_rate_request *req)
610 {
611 if (req->rate > MAX_VCLK_FREQ)
612 req->rate = MAX_VCLK_FREQ;
613
614 req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
615
616 return 0;
617 }
618
/*
 * Cache the requested VCLK rate and program the DSI A/B divider fields
 * chosen earlier; PLL reconfiguration propagates to the parents via
 * CLK_SET_RATE_PARENT.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	/* Each divider field carries its own write-enable bit. */
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
651
652 static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)653 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
654 struct rzg2l_cpg_priv *priv)
655 {
656 struct dsi_div_hw_data *clk_hw_data;
657 const struct clk *parent;
658 const char *parent_name;
659 struct clk_init_data init;
660 struct clk_hw *clk_hw;
661 int ret;
662
663 parent = priv->clks[core->parent];
664 if (IS_ERR(parent))
665 return ERR_CAST(parent);
666
667 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
668 if (!clk_hw_data)
669 return ERR_PTR(-ENOMEM);
670
671 clk_hw_data->priv = priv;
672
673 parent_name = __clk_get_name(parent);
674 init.name = core->name;
675 init.ops = &rzg2l_cpg_dsi_div_ops;
676 init.flags = CLK_SET_RATE_PARENT;
677 init.parent_names = &parent_name;
678 init.num_parents = 1;
679
680 clk_hw = &clk_hw_data->hw;
681 clk_hw->init = &init;
682
683 ret = devm_clk_hw_register(priv->dev, clk_hw);
684 if (ret)
685 return ERR_PTR(ret);
686
687 return clk_hw->clk;
688 }
689
/**
 * struct pll5_mux_hw_data - PLL5_4 mux clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset)
 * @rate: cached rate (not referenced by the mux ops below)
 * @priv: CPG private data structure
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
698
rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)699 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
700 struct clk_rate_request *req)
701 {
702 struct clk_hw *parent;
703 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
704 struct rzg2l_cpg_priv *priv = hwdata->priv;
705
706 parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
707 req->best_parent_hw = parent;
708 req->best_parent_rate = req->rate;
709
710 return 0;
711 }
712
/* Select the PLL5_4 mux input (FOUTPOSTDIV or FOUT1PH0). */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 * |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * NOTE(review): the raw register value is returned as the parent
	 * index; this assumes only the selector bit(s) of this register
	 * read back as set - confirm against the hardware manual.
	 */
	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
747
748 static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)749 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
750 struct rzg2l_cpg_priv *priv)
751 {
752 struct pll5_mux_hw_data *clk_hw_data;
753 struct clk_init_data init;
754 struct clk_hw *clk_hw;
755 int ret;
756
757 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
758 if (!clk_hw_data)
759 return ERR_PTR(-ENOMEM);
760
761 clk_hw_data->priv = priv;
762 clk_hw_data->conf = core->conf;
763
764 init.name = core->name;
765 init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
766 init.flags = CLK_SET_RATE_PARENT;
767 init.num_parents = core->num_parents;
768 init.parent_names = core->parent_names;
769
770 clk_hw = &clk_hw_data->hw;
771 clk_hw->init = &init;
772
773 ret = devm_clk_hw_register(priv->dev, clk_hw);
774 if (ret)
775 return ERR_PTR(ret);
776
777 return clk_hw->clk;
778 }
779
/**
 * struct sipll5 - SIPLL5 clock hardware data
 * @hw: clock hw
 * @conf: clock configuration
 * @foutpostdiv_rate: cached FOUTPOSTDIV rate (0 until set_rate() has run)
 * @priv: CPG private data structure
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
788
rzg2l_cpg_get_vclk_rate(struct clk_hw * hw,unsigned long rate)789 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
790 unsigned long rate)
791 {
792 struct sipll5 *sipll5 = to_sipll5(hw);
793 struct rzg2l_cpg_priv *priv = sipll5->priv;
794 unsigned long vclk;
795
796 vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
797 (priv->mux_dsi_div_params.dsi_div_b + 1));
798
799 if (priv->mux_dsi_div_params.clksrc)
800 vclk /= 2;
801
802 return vclk;
803 }
804
rzg2l_cpg_sipll5_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)805 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
806 unsigned long parent_rate)
807 {
808 struct sipll5 *sipll5 = to_sipll5(hw);
809 unsigned long pll5_rate = sipll5->foutpostdiv_rate;
810
811 if (!pll5_rate)
812 pll5_rate = parent_rate;
813
814 return pll5_rate;
815 }
816
/*
 * Accept the requested rate unchanged: the fractional PLL5 can synthesize
 * a close match for any request, and set_rate() computes the parameters.
 */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
823
rzg2l_cpg_sipll5_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)824 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
825 unsigned long rate,
826 unsigned long parent_rate)
827 {
828 struct sipll5 *sipll5 = to_sipll5(hw);
829 struct rzg2l_cpg_priv *priv = sipll5->priv;
830 struct rzg2l_pll5_param params;
831 unsigned long vclk_rate;
832 int ret;
833 u32 val;
834
835 /*
836 * OSC --> PLL5 --> FOUTPOSTDIV-->|
837 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
838 * |--FOUT1PH0-->|
839 *
840 * Based on the dot clock, the DSI divider clock calculates the parent
841 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
842 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
843 *
844 * OSC --> PLL5 --> FOUTPOSTDIV
845 */
846
847 if (!rate)
848 return -EINVAL;
849
850 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
851 sipll5->foutpostdiv_rate =
852 rzg2l_cpg_get_foutpostdiv_rate(¶ms, vclk_rate);
853
854 /* Put PLL5 into standby mode */
855 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
856 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
857 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
858 if (ret) {
859 dev_err(priv->dev, "failed to release pll5 lock");
860 return ret;
861 }
862
863 /* Output clock setting 1 */
864 writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
865 (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
866
867 /* Output clock setting, SSCG modulation value setting 3 */
868 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
869
870 /* Output clock setting 4 */
871 writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
872 priv->base + CPG_SIPLL5_CLK4);
873
874 /* Output clock setting 5 */
875 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
876
877 /* PLL normal mode setting */
878 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
879 CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
880 priv->base + CPG_SIPLL5_STBY);
881
882 /* PLL normal mode transition, output clock stability check */
883 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
884 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
885 if (ret) {
886 dev_err(priv->dev, "failed to lock pll5");
887 return ret;
888 }
889
890 return 0;
891 }
892
/* PLL5 ops: rate control only; source selection is done by the PLL5_4 mux. */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};
898
899 static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)900 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
901 struct rzg2l_cpg_priv *priv)
902 {
903 const struct clk *parent;
904 struct clk_init_data init;
905 const char *parent_name;
906 struct sipll5 *sipll5;
907 struct clk_hw *clk_hw;
908 int ret;
909
910 parent = priv->clks[core->parent];
911 if (IS_ERR(parent))
912 return ERR_CAST(parent);
913
914 sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
915 if (!sipll5)
916 return ERR_PTR(-ENOMEM);
917
918 init.name = core->name;
919 parent_name = __clk_get_name(parent);
920 init.ops = &rzg2l_cpg_sipll5_ops;
921 init.flags = 0;
922 init.parent_names = &parent_name;
923 init.num_parents = 1;
924
925 sipll5->hw.init = &init;
926 sipll5->conf = core->conf;
927 sipll5->priv = priv;
928
929 writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
930 CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
931
932 clk_hw = &sipll5->hw;
933 clk_hw->init = &init;
934
935 ret = devm_clk_hw_register(priv->dev, clk_hw);
936 if (ret)
937 return ERR_PTR(ret);
938
939 priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
940 priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
941 priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
942
943 return clk_hw->clk;
944 }
945
/**
 * struct pll_clk - SAM/G3S PLL clock hardware data
 * @hw: clock hw
 * @default_rate: rate reported when the RZG3S_SEL_PLL setting bit is set
 *                (see rzg3s_cpg_pll_clk_recalc_rate())
 * @conf: clock configuration (packed register offsets)
 * @type: clock type (e.g. CLK_TYPE_SAM_PLL, CLK_TYPE_G3S_PLL)
 * @base: CPG register block base address (same value as priv->base)
 * @priv: CPG private data structure
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned long default_rate;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
956
/*
 * SAM PLL rate: parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV), where
 * KDIV is a signed 16-bit correction term read from the CLK1 register.
 */
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	/* Other clock types are treated as pass-through here. */
	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	/* (MDIV << 16) + KDIV forms the multiplier in 16.16 fixed point. */
	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
980
/*
 * RZ/G3S PLL rate: parent * (NI + NF / 4096) / (M * P), with the fractional
 * part held in 12 bits. When the RZG3S_SEL_PLL bit of the setting register
 * reads set, the precomputed default rate is reported instead.
 */
static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val, setting;
	u64 rate;

	/* Other clock types are treated as pass-through here. */
	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
	if (setting) {
		val = readl(priv->base + setting);
		if (val & RZG3S_SEL_PLL)
			return pll_clk->default_rate;
	}

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	/* 4096 * nir + nfr is the feedback multiplier in 20.12 fixed point. */
	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};
1018
1019 static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv,const struct clk_ops * ops)1020 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
1021 struct rzg2l_cpg_priv *priv,
1022 const struct clk_ops *ops)
1023 {
1024 struct device *dev = priv->dev;
1025 const struct clk *parent;
1026 struct clk_init_data init;
1027 const char *parent_name;
1028 struct pll_clk *pll_clk;
1029 int ret;
1030
1031 parent = priv->clks[core->parent];
1032 if (IS_ERR(parent))
1033 return ERR_CAST(parent);
1034
1035 pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
1036 if (!pll_clk)
1037 return ERR_PTR(-ENOMEM);
1038
1039 parent_name = __clk_get_name(parent);
1040 init.name = core->name;
1041 init.ops = ops;
1042 init.flags = 0;
1043 init.parent_names = &parent_name;
1044 init.num_parents = 1;
1045
1046 pll_clk->hw.init = &init;
1047 pll_clk->conf = core->conf;
1048 pll_clk->base = priv->base;
1049 pll_clk->priv = priv;
1050 pll_clk->type = core->type;
1051 pll_clk->default_rate = core->default_rate;
1052
1053 ret = devm_clk_hw_register(dev, &pll_clk->hw);
1054 if (ret)
1055 return ERR_PTR(ret);
1056
1057 return pll_clk->hw.clk;
1058 }
1059
1060 static struct clk
rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args * clkspec,void * data)1061 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
1062 void *data)
1063 {
1064 unsigned int clkidx = clkspec->args[1];
1065 struct rzg2l_cpg_priv *priv = data;
1066 struct device *dev = priv->dev;
1067 const char *type;
1068 struct clk *clk;
1069
1070 switch (clkspec->args[0]) {
1071 case CPG_CORE:
1072 type = "core";
1073 if (clkidx > priv->last_dt_core_clk) {
1074 dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
1075 return ERR_PTR(-EINVAL);
1076 }
1077 clk = priv->clks[clkidx];
1078 break;
1079
1080 case CPG_MOD:
1081 type = "module";
1082 if (clkidx >= priv->num_mod_clks) {
1083 dev_err(dev, "Invalid %s clock index %u\n", type,
1084 clkidx);
1085 return ERR_PTR(-EINVAL);
1086 }
1087 clk = priv->clks[priv->num_core_clks + clkidx];
1088 break;
1089
1090 default:
1091 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
1092 return ERR_PTR(-EINVAL);
1093 }
1094
1095 if (IS_ERR(clk))
1096 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
1097 PTR_ERR(clk));
1098 else
1099 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1100 clkspec->args[0], clkspec->args[1], clk,
1101 clk_get_rate(clk));
1102 return clk;
1103 }
1104
/*
 * Register one core clock described by @core.  On success the resulting
 * struct clk is stored in priv->clks[core->id]; failures are only logged
 * (the slot keeps its -ENOENT placeholder) and probing continues.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input, looked up by name in the DT "clocks" property. */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		/* Fixed-factor clock derived from an already-registered parent. */
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		/* Unknown type: report the initial -EOPNOTSUPP via the fail path. */
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
1182
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset (0 means the clock has no ON/OFF control)
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
1203
/*
 * Gate or ungate a module clock.  The upper halfword of the CLK_ON register
 * is the write-enable mask for the corresponding bits in the lower halfword.
 * When enabling, poll the monitor register (if the SoC has one) until the
 * clock is reported as running.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int off = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 val;
	int ret;

	if (!off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(off), hw->clk,
		enable ? "ON" : "OFF");

	val = enable ? (bitmask << 16) | bitmask : bitmask << 16;
	writel(val, priv->base + CLK_ON_R(off));

	if (!enable || !priv->info->has_clk_mon_regs)
		return 0;

	ret = readl_poll_timeout_atomic(priv->base + CLK_MON_R(off), val,
					val & bitmask, 0, 10);
	if (ret)
		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
			CLK_ON_R(off), hw->clk);

	return ret;
}
1242
rzg2l_mod_clock_enable(struct clk_hw * hw)1243 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
1244 {
1245 struct mstp_clock *clock = to_mod_clock(hw);
1246
1247 if (clock->sibling) {
1248 struct rzg2l_cpg_priv *priv = clock->priv;
1249 unsigned long flags;
1250 bool enabled;
1251
1252 spin_lock_irqsave(&priv->rmw_lock, flags);
1253 enabled = clock->sibling->enabled;
1254 clock->enabled = true;
1255 spin_unlock_irqrestore(&priv->rmw_lock, flags);
1256 if (enabled)
1257 return 0;
1258 }
1259
1260 return rzg2l_mod_clock_endisable(hw, true);
1261 }
1262
rzg2l_mod_clock_disable(struct clk_hw * hw)1263 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
1264 {
1265 struct mstp_clock *clock = to_mod_clock(hw);
1266
1267 if (clock->sibling) {
1268 struct rzg2l_cpg_priv *priv = clock->priv;
1269 unsigned long flags;
1270 bool enabled;
1271
1272 spin_lock_irqsave(&priv->rmw_lock, flags);
1273 enabled = clock->sibling->enabled;
1274 clock->enabled = false;
1275 spin_unlock_irqrestore(&priv->rmw_lock, flags);
1276 if (enabled)
1277 return;
1278 }
1279
1280 rzg2l_mod_clock_endisable(hw, false);
1281 }
1282
rzg2l_mod_clock_is_enabled(struct clk_hw * hw)1283 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
1284 {
1285 struct mstp_clock *clock = to_mod_clock(hw);
1286 struct rzg2l_cpg_priv *priv = clock->priv;
1287 u32 bitmask = BIT(clock->bit);
1288 u32 value;
1289
1290 if (!clock->off) {
1291 dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
1292 return 1;
1293 }
1294
1295 if (clock->sibling)
1296 return clock->enabled;
1297
1298 if (priv->info->has_clk_mon_regs)
1299 value = readl(priv->base + CLK_MON_R(clock->off));
1300 else
1301 value = readl(priv->base + clock->off);
1302
1303 return value & bitmask;
1304 }
1305
1306 static const struct clk_ops rzg2l_mod_clock_ops = {
1307 .enable = rzg2l_mod_clock_enable,
1308 .disable = rzg2l_mod_clock_disable,
1309 .is_enabled = rzg2l_mod_clock_is_enabled,
1310 };
1311
1312 static struct mstp_clock
rzg2l_mod_clock_get_sibling(struct mstp_clock * clock,struct rzg2l_cpg_priv * priv)1313 *rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
1314 struct rzg2l_cpg_priv *priv)
1315 {
1316 struct clk_hw *hw;
1317 unsigned int i;
1318
1319 for (i = 0; i < priv->num_mod_clks; i++) {
1320 struct mstp_clock *clk;
1321
1322 if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1323 continue;
1324
1325 hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1326 clk = to_mod_clock(hw);
1327 if (clock->off == clk->off && clock->bit == clk->bit)
1328 return clk;
1329 }
1330
1331 return NULL;
1332 }
1333
1334 static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk * mod,const struct rzg2l_cpg_info * info,struct rzg2l_cpg_priv * priv)1335 rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
1336 const struct rzg2l_cpg_info *info,
1337 struct rzg2l_cpg_priv *priv)
1338 {
1339 struct mstp_clock *clock = NULL;
1340 struct device *dev = priv->dev;
1341 unsigned int id = mod->id;
1342 struct clk_init_data init;
1343 struct clk *parent, *clk;
1344 const char *parent_name;
1345 unsigned int i;
1346 int ret;
1347
1348 WARN_DEBUG(id < priv->num_core_clks);
1349 WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
1350 WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
1351 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1352
1353 parent = priv->clks[mod->parent];
1354 if (IS_ERR(parent)) {
1355 clk = parent;
1356 goto fail;
1357 }
1358
1359 clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
1360 if (!clock) {
1361 clk = ERR_PTR(-ENOMEM);
1362 goto fail;
1363 }
1364
1365 init.name = mod->name;
1366 init.ops = &rzg2l_mod_clock_ops;
1367 init.flags = CLK_SET_RATE_PARENT;
1368 for (i = 0; i < info->num_crit_mod_clks; i++)
1369 if (id == info->crit_mod_clks[i]) {
1370 dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
1371 mod->name);
1372 init.flags |= CLK_IS_CRITICAL;
1373 break;
1374 }
1375
1376 parent_name = __clk_get_name(parent);
1377 init.parent_names = &parent_name;
1378 init.num_parents = 1;
1379
1380 clock->off = mod->off;
1381 clock->bit = mod->bit;
1382 clock->priv = priv;
1383 clock->hw.init = &init;
1384
1385 ret = devm_clk_hw_register(dev, &clock->hw);
1386 if (ret) {
1387 clk = ERR_PTR(ret);
1388 goto fail;
1389 }
1390
1391 clk = clock->hw.clk;
1392 dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1393 priv->clks[id] = clk;
1394
1395 if (mod->is_coupled) {
1396 struct mstp_clock *sibling;
1397
1398 clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
1399 sibling = rzg2l_mod_clock_get_sibling(clock, priv);
1400 if (sibling) {
1401 clock->sibling = sibling;
1402 sibling->sibling = clock;
1403 }
1404 }
1405
1406 return;
1407
1408 fail:
1409 dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
1410 mod->name, PTR_ERR(clk));
1411 }
1412
1413 #define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
1414
rzg2l_cpg_assert(struct reset_controller_dev * rcdev,unsigned long id)1415 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1416 unsigned long id)
1417 {
1418 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1419 const struct rzg2l_cpg_info *info = priv->info;
1420 unsigned int reg = info->resets[id].off;
1421 u32 mask = BIT(info->resets[id].bit);
1422 s8 monbit = info->resets[id].monbit;
1423 u32 value = mask << 16;
1424
1425 dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1426
1427 writel(value, priv->base + CLK_RST_R(reg));
1428
1429 if (info->has_clk_mon_regs) {
1430 reg = CLK_MRST_R(reg);
1431 } else if (monbit >= 0) {
1432 reg = CPG_RST_MON;
1433 mask = BIT(monbit);
1434 } else {
1435 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1436 udelay(35);
1437 return 0;
1438 }
1439
1440 return readl_poll_timeout_atomic(priv->base + reg, value,
1441 value & mask, 10, 200);
1442 }
1443
rzg2l_cpg_deassert(struct reset_controller_dev * rcdev,unsigned long id)1444 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1445 unsigned long id)
1446 {
1447 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1448 const struct rzg2l_cpg_info *info = priv->info;
1449 unsigned int reg = info->resets[id].off;
1450 u32 mask = BIT(info->resets[id].bit);
1451 s8 monbit = info->resets[id].monbit;
1452 u32 value = (mask << 16) | mask;
1453
1454 dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1455 CLK_RST_R(reg));
1456
1457 writel(value, priv->base + CLK_RST_R(reg));
1458
1459 if (info->has_clk_mon_regs) {
1460 reg = CLK_MRST_R(reg);
1461 } else if (monbit >= 0) {
1462 reg = CPG_RST_MON;
1463 mask = BIT(monbit);
1464 } else {
1465 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1466 udelay(35);
1467 return 0;
1468 }
1469
1470 return readl_poll_timeout_atomic(priv->base + reg, value,
1471 !(value & mask), 10, 200);
1472 }
1473
/* Pulse a reset line: assert, then deassert once the assert succeeded. */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret = rzg2l_cpg_assert(rcdev, id);

	return ret ? ret : rzg2l_cpg_deassert(rcdev, id);
}
1485
rzg2l_cpg_status(struct reset_controller_dev * rcdev,unsigned long id)1486 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1487 unsigned long id)
1488 {
1489 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1490 const struct rzg2l_cpg_info *info = priv->info;
1491 s8 monbit = info->resets[id].monbit;
1492 unsigned int reg;
1493 u32 bitmask;
1494
1495 if (info->has_clk_mon_regs) {
1496 reg = CLK_MRST_R(info->resets[id].off);
1497 bitmask = BIT(info->resets[id].bit);
1498 } else if (monbit >= 0) {
1499 reg = CPG_RST_MON;
1500 bitmask = BIT(monbit);
1501 } else {
1502 return -ENOTSUPP;
1503 }
1504
1505 return !!(readl(priv->base + reg) & bitmask);
1506 }
1507
1508 static const struct reset_control_ops rzg2l_cpg_reset_ops = {
1509 .reset = rzg2l_cpg_reset,
1510 .assert = rzg2l_cpg_assert,
1511 .deassert = rzg2l_cpg_deassert,
1512 .status = rzg2l_cpg_status,
1513 };
1514
rzg2l_cpg_reset_xlate(struct reset_controller_dev * rcdev,const struct of_phandle_args * reset_spec)1515 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1516 const struct of_phandle_args *reset_spec)
1517 {
1518 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1519 const struct rzg2l_cpg_info *info = priv->info;
1520 unsigned int id = reset_spec->args[0];
1521
1522 if (id >= rcdev->nr_resets || !info->resets[id].off) {
1523 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1524 return -EINVAL;
1525 }
1526
1527 return id;
1528 }
1529
rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv * priv)1530 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1531 {
1532 priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1533 priv->rcdev.of_node = priv->dev->of_node;
1534 priv->rcdev.dev = priv->dev;
1535 priv->rcdev.of_reset_n_cells = 1;
1536 priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1537 priv->rcdev.nr_resets = priv->num_resets;
1538
1539 return devm_reset_controller_register(priv->dev, &priv->rcdev);
1540 }
1541
/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains (flexible array, sized at allocation to
 *	     @onecell_data.num_domains entries)
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[];
};

/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID (matched against the DT specifier in
 *	rzg2l_cpg_pm_domain_xlate())
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};
1565
rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd * pd,const struct of_phandle_args * clkspec)1566 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
1567 const struct of_phandle_args *clkspec)
1568 {
1569 if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
1570 return false;
1571
1572 switch (clkspec->args[0]) {
1573 case CPG_MOD: {
1574 struct rzg2l_cpg_priv *priv = pd->priv;
1575 const struct rzg2l_cpg_info *info = priv->info;
1576 unsigned int id = clkspec->args[1];
1577
1578 if (id >= priv->num_mod_clks)
1579 return false;
1580
1581 id += info->num_total_core_clks;
1582
1583 for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
1584 if (info->no_pm_mod_clks[i] == id)
1585 return false;
1586 }
1587
1588 return true;
1589 }
1590
1591 case CPG_CORE:
1592 default:
1593 return false;
1594 }
1595 }
1596
/*
 * genpd attach_dev hook: walk the device's DT "clocks" property and add
 * every clock accepted by rzg2l_cpg_is_pm_clk() to its pm_clk list.
 * pm_clk_create() is deferred until the first eligible clock is found, so
 * devices without PM clocks never get a (useless) pm_clk list.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	/* of_parse_phandle_with_args() returns 0 while entries remain. */
	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
			/* Each successful parse took a reference on clkspec.np. */
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	/* pm_clk_add_clk() failed, so we still own this clk reference. */
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
1645
/* genpd detach_dev hook: drop the pm_clk list only if attach created one. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (pm_clk_no_clocks(dev))
		return;

	pm_clk_destroy(dev);
}
1651
rzg2l_cpg_genpd_remove(void * data)1652 static void rzg2l_cpg_genpd_remove(void *data)
1653 {
1654 struct genpd_onecell_data *celldata = data;
1655
1656 for (unsigned int i = 0; i < celldata->num_domains; i++)
1657 pm_genpd_remove(celldata->domains[i]);
1658 }
1659
rzg2l_cpg_genpd_remove_simple(void * data)1660 static void rzg2l_cpg_genpd_remove_simple(void *data)
1661 {
1662 pm_genpd_remove(data);
1663 }
1664
rzg2l_cpg_power_on(struct generic_pm_domain * domain)1665 static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
1666 {
1667 struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
1668 struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
1669 struct rzg2l_cpg_priv *priv = pd->priv;
1670
1671 /* Set MSTOP. */
1672 if (mstop.mask)
1673 writel(mstop.mask << 16, priv->base + mstop.off);
1674
1675 return 0;
1676 }
1677
rzg2l_cpg_power_off(struct generic_pm_domain * domain)1678 static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
1679 {
1680 struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
1681 struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
1682 struct rzg2l_cpg_priv *priv = pd->priv;
1683
1684 /* Set MSTOP. */
1685 if (mstop.mask)
1686 writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);
1687
1688 return 0;
1689 }
1690
rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd * pd)1691 static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
1692 {
1693 bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
1694 struct dev_power_governor *governor;
1695 int ret;
1696
1697 if (always_on)
1698 governor = &pm_domain_always_on_gov;
1699 else
1700 governor = &simple_qos_governor;
1701
1702 pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
1703 pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
1704 pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
1705 pd->genpd.power_on = rzg2l_cpg_power_on;
1706 pd->genpd.power_off = rzg2l_cpg_power_off;
1707
1708 ret = pm_genpd_init(&pd->genpd, governor, !always_on);
1709 if (ret)
1710 return ret;
1711
1712 if (always_on)
1713 ret = rzg2l_cpg_power_on(&pd->genpd);
1714
1715 return ret;
1716 }
1717
rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv * priv)1718 static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
1719 {
1720 struct device *dev = priv->dev;
1721 struct device_node *np = dev->of_node;
1722 struct rzg2l_cpg_pd *pd;
1723 int ret;
1724
1725 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1726 if (!pd)
1727 return -ENOMEM;
1728
1729 pd->genpd.name = np->name;
1730 pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
1731 pd->priv = priv;
1732 ret = rzg2l_cpg_pd_setup(pd);
1733 if (ret)
1734 return ret;
1735
1736 ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
1737 if (ret)
1738 return ret;
1739
1740 return of_genpd_add_provider_simple(np, &pd->genpd);
1741 }
1742
1743 static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args * spec,void * data)1744 rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
1745 {
1746 struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
1747 struct genpd_onecell_data *genpd = data;
1748
1749 if (spec->args_count != 1)
1750 return ERR_PTR(-EINVAL);
1751
1752 for (unsigned int i = 0; i < genpd->num_domains; i++) {
1753 struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
1754 genpd);
1755
1756 if (pd->id == spec->args[0]) {
1757 domain = &pd->genpd;
1758 break;
1759 }
1760 }
1761
1762 return domain;
1763 }
1764
rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv * priv)1765 static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
1766 {
1767 const struct rzg2l_cpg_info *info = priv->info;
1768 struct device *dev = priv->dev;
1769 struct device_node *np = dev->of_node;
1770 struct rzg2l_cpg_pm_domains *domains;
1771 struct generic_pm_domain *parent;
1772 u32 ncells;
1773 int ret;
1774
1775 ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
1776 if (ret)
1777 return ret;
1778
1779 /* For backward compatibility. */
1780 if (!ncells)
1781 return rzg2l_cpg_add_clk_domain(priv);
1782
1783 domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
1784 GFP_KERNEL);
1785 if (!domains)
1786 return -ENOMEM;
1787
1788 domains->onecell_data.domains = domains->domains;
1789 domains->onecell_data.num_domains = info->num_pm_domains;
1790 domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;
1791
1792 ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
1793 if (ret)
1794 return ret;
1795
1796 for (unsigned int i = 0; i < info->num_pm_domains; i++) {
1797 struct rzg2l_cpg_pd *pd;
1798
1799 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1800 if (!pd)
1801 return -ENOMEM;
1802
1803 pd->genpd.name = info->pm_domains[i].name;
1804 pd->genpd.flags = info->pm_domains[i].genpd_flags;
1805 pd->conf = info->pm_domains[i].conf;
1806 pd->id = info->pm_domains[i].id;
1807 pd->priv = priv;
1808
1809 ret = rzg2l_cpg_pd_setup(pd);
1810 if (ret)
1811 return ret;
1812
1813 domains->domains[i] = &pd->genpd;
1814 /* Parent should be on the very first entry of info->pm_domains[]. */
1815 if (!i) {
1816 parent = &pd->genpd;
1817 continue;
1818 }
1819
1820 ret = pm_genpd_add_subdomain(parent, &pd->genpd);
1821 if (ret)
1822 return ret;
1823 }
1824
1825 ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
1826 if (ret)
1827 return ret;
1828
1829 return 0;
1830 }
1831
/*
 * Probe: map the CPG registers, register all core and module clocks, then
 * hook up the clock provider, PM domains and the reset controller.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* One slot per possible core clock plus per hardware module clock. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Mark every slot empty; registration below fills in the real ones. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	/* Undo of_clk_add_provider() automatically on any later failure. */
	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
1895
/* Each SoC entry is compiled in only when its Kconfig option is enabled. */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1929
/*
 * No .probe/.remove here: the driver is bound exactly once at boot via
 * platform_driver_probe() in rzg2l_cpg_init() below.
 */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1936
/* Registered early (subsys_initcall) via the probe-once helper. */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
1945