// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

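/*
 * A core clock's "conf" word packs the register offset in bits [31:20]
 * (see GET_REG_OFFSET() below), the field shift in bits [19:12] and the
 * field width in bits [11:8].
 */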
#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

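/*
 * SAM PLL CLK1/CLK2 register fields. KDIV is a signed 16-bit fractional
 * part, hence the s16 cast to preserve its sign.
 */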
#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)
#define RZG3S_SEL_PLL		BIT(0)

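/* Monitor (MON/MRST) registers mirror the ON/RST registers at offset 0x180. */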
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

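/*
 * For SAM PLLs, "conf" packs the offsets of the CLK1 and CLK2 registers,
 * plus (on RZ/G3S) an optional PLL setting register offset in bits [11:0].
 */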
#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
#define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)

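/*
 * CPG clock control registers use the upper 16 bits as write-enable bits
 * for the corresponding lower 16 bits, hence the "(mask << 16) | value"
 * write pattern used throughout this driver.
 */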
#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)

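/*
 * MSTOP "conf" packs the register offset in bits [31:16] and the
 * bit mask in bits [15:0].
 */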
#define MSTOP_OFF(conf)		FIELD_GET(GENMASK(31, 16), (conf))
#define MSTOP_MASK(conf)	FIELD_GET(GENMASK(15, 0), (conf))

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @genpd: PM domain
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct generic_pm_domain genpd;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}

int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Writing a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz),
	 * and the index-to-value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}

int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * There are different constraints for the users of this notifier:
	 * 1/ the SD div cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ the OCTA/SPI div cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD has only one parent running at 800 MHz and the OCTA div has
	 * only one parent running at 400 MHz, the parent rate was already
	 * taken into account at the beginning of the function (by checking
	 * invalid_rate % new_rate). Now check the hardware divider and update
	 * it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update done. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  priv->base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

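/*
 * FOUTPOSTDIV = EXTAL * (intin + fracin / 2^24) / (refdiv * postdiv1 * postdiv2),
 * with refdiv, postdiv1/2 and the spread setting fixed below.
 */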
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock\n");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5\n");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned long default_rate;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

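	/* Fout = Fin * (M + K / 65536) / (P * 2^S), where K is signed */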
	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val, setting;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
	if (setting) {
		val = readl(priv->base + setting);
		if (val & RZG3S_SEL_PLL)
			return pll_clk->default_rate;
	}

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr  = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

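	/* Fout = Fin * (NI + NF / 4096) / (M * P) */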
	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = priv->base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;
	pll_clk->default_rate = core->default_rate;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstop - MSTOP specific data structure
 * @usecnt: Usage counter for MSTOP settings (when zero, the settings
 *          are applied to the register)
 * @conf: MSTOP configuration (register offset, setup bits)
 */
struct mstop {
	atomic_t usecnt;
	u32 conf;
};

/**
 * struct mod_clock - Module clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 * @mstop: MSTOP configuration
 * @shared_mstop_clks: clocks sharing the MSTOP with this clock
 * @off: register offset
 * @bit: ON/MON bit
 * @num_shared_mstop_clks: number of clocks sharing the MSTOP with this clock
 * @enabled: soft state of the clock, if it is coupled with another clock
 */
struct mod_clock {
	struct clk_hw hw;
	struct rzg2l_cpg_priv *priv;
	struct mod_clock *sibling;
	struct mstop *mstop;
	struct mod_clock **shared_mstop_clks;
	u16 off;
	u8 bit;
	u8 num_shared_mstop_clks;
	bool enabled;
};

#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)

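/*
 * Iterate over all registered module clocks, skipping clks[] entries that
 * were never populated (still ERR_PTR(-ENOENT)).
 */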
#define for_each_mod_clock(mod_clock, hw, priv) \
	for (unsigned int i = 0; (priv) && i < (priv)->num_mod_clks; i++) \
		if ((priv)->clks[(priv)->num_core_clks + i] == ERR_PTR(-ENOENT)) \
			continue; \
		else if (((hw) = __clk_get_hw((priv)->clks[(priv)->num_core_clks + i])) && \
			 ((mod_clock) = to_mod_clock(hw)))

/* Must be called with a lock held to avoid concurrent access to mstop->usecnt. */
static void rzg2l_mod_clock_module_set_state(struct mod_clock *clock,
					     bool standby)
{
	struct rzg2l_cpg_priv *priv = clock->priv;
	struct mstop *mstop = clock->mstop;
	bool update = false;
	u32 value;

	if (!mstop)
		return;

	value = MSTOP_MASK(mstop->conf) << 16;

	if (standby) {
		unsigned int criticals = 0;

		for (unsigned int i = 0; i < clock->num_shared_mstop_clks; i++) {
			struct mod_clock *clk = clock->shared_mstop_clks[i];

			if (clk_hw_get_flags(&clk->hw) & CLK_IS_CRITICAL)
				criticals++;
		}

		if (!clock->num_shared_mstop_clks &&
		    clk_hw_get_flags(&clock->hw) & CLK_IS_CRITICAL)
			criticals++;

1267 		 * If this is a shared MSTOP and it is shared with critical clocks,
1268 		 * and the system boots up with this clock enabled but no driver
1269 		 * uses it the CCF will disable it (as it is unused). As we don't
1270 		 * increment reference counter for it at registration (to avoid
1271 		 * messing with clocks enabled at probe but later used by drivers)
1272 		 * do not set the MSTOP here too if it is shared with critical
1273 		 * clocks and ref counted only by those critical clocks.
1274 		 */
1275 		if (criticals && criticals == atomic_read(&mstop->usecnt))
1276 			return;
1277 
1278 		value |= MSTOP_MASK(mstop->conf);
1279 
1280 		/* Allow updates on probe when usecnt = 0. */
1281 		if (!atomic_read(&mstop->usecnt))
1282 			update = true;
1283 		else
1284 			update = atomic_dec_and_test(&mstop->usecnt);
1285 	} else {
1286 		if (!atomic_read(&mstop->usecnt))
1287 			update = true;
1288 		atomic_inc(&mstop->usecnt);
1289 	}
1290 
1291 	if (update)
1292 		writel(value, priv->base + MSTOP_OFF(mstop->conf));
1293 }
1294 
rzg2l_mod_clock_mstop_show(struct seq_file * s,void * what)1295 static int rzg2l_mod_clock_mstop_show(struct seq_file *s, void *what)
1296 {
1297 	struct rzg2l_cpg_priv *priv = s->private;
1298 	struct mod_clock *clk;
1299 	struct clk_hw *hw;
1300 
1301 	seq_printf(s, "%-20s %-5s %-10s\n", "", "", "MSTOP");
1302 	seq_printf(s, "%-20s %-5s %-10s\n", "", "clk", "-------------------------");
1303 	seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
1304 		   "clk_name", "cnt", "cnt", "off", "val", "shared");
1305 	seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
1306 		   "--------", "-----", "-----", "-----", "------", "------");
1307 
1308 	for_each_mod_clock(clk, hw, priv) {
1309 		u32 val;
1310 
1311 		if (!clk->mstop)
1312 			continue;
1313 
1314 		val = readl(priv->base + MSTOP_OFF(clk->mstop->conf)) &
1315 		      MSTOP_MASK(clk->mstop->conf);
1316 
1317 		seq_printf(s, "%-20s %-5d %-5d 0x%-3lx 0x%-4x", clk_hw_get_name(hw),
1318 			   __clk_get_enable_count(hw->clk), atomic_read(&clk->mstop->usecnt),
1319 			   MSTOP_OFF(clk->mstop->conf), val);
1320 
1321 		for (unsigned int i = 0; i < clk->num_shared_mstop_clks; i++)
1322 			seq_printf(s, " %pC", clk->shared_mstop_clks[i]->hw.clk);
1323 
1324 		seq_puts(s, "\n");
1325 	}
1326 
1327 	return 0;
1328 }
1329 DEFINE_SHOW_ATTRIBUTE(rzg2l_mod_clock_mstop);
1330 
rzg2l_mod_clock_endisable(struct clk_hw * hw,bool enable)1331 static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
1332 {
1333 	struct mod_clock *clock = to_mod_clock(hw);
1334 	struct rzg2l_cpg_priv *priv = clock->priv;
1335 	unsigned int reg = clock->off;
1336 	struct device *dev = priv->dev;
1337 	u32 bitmask = BIT(clock->bit);
1338 	u32 value;
1339 	int error;
1340 
1341 	if (!clock->off) {
1342 		dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
1343 		return 0;
1344 	}
1345 
1346 	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
1347 		str_on_off(enable));
1348 
1349 	value = bitmask << 16;
1350 	if (enable)
1351 		value |= bitmask;
1352 
1353 	scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
1354 		if (enable) {
1355 			writel(value, priv->base + CLK_ON_R(reg));
1356 			rzg2l_mod_clock_module_set_state(clock, false);
1357 		} else {
1358 			rzg2l_mod_clock_module_set_state(clock, true);
1359 			writel(value, priv->base + CLK_ON_R(reg));
1360 		}
1361 	}
1362 
1363 	if (!enable)
1364 		return 0;
1365 
1366 	if (!priv->info->has_clk_mon_regs)
1367 		return 0;
1368 
1369 	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
1370 					  value & bitmask, 0, 10);
1371 	if (error)
1372 		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
1373 			CLK_ON_R(reg), hw->clk);
1374 
1375 	return error;
1376 }
1377 
rzg2l_mod_clock_enable(struct clk_hw * hw)1378 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
1379 {
1380 	struct mod_clock *clock = to_mod_clock(hw);
1381 
1382 	if (clock->sibling) {
1383 		struct rzg2l_cpg_priv *priv = clock->priv;
1384 		unsigned long flags;
1385 		bool enabled;
1386 
1387 		spin_lock_irqsave(&priv->rmw_lock, flags);
1388 		enabled = clock->sibling->enabled;
1389 		clock->enabled = true;
1390 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
1391 		if (enabled)
1392 			return 0;
1393 	}
1394 
1395 	return rzg2l_mod_clock_endisable(hw, true);
1396 }
1397 
rzg2l_mod_clock_disable(struct clk_hw * hw)1398 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
1399 {
1400 	struct mod_clock *clock = to_mod_clock(hw);
1401 
1402 	if (clock->sibling) {
1403 		struct rzg2l_cpg_priv *priv = clock->priv;
1404 		unsigned long flags;
1405 		bool enabled;
1406 
1407 		spin_lock_irqsave(&priv->rmw_lock, flags);
1408 		enabled = clock->sibling->enabled;
1409 		clock->enabled = false;
1410 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
1411 		if (enabled)
1412 			return;
1413 	}
1414 
1415 	rzg2l_mod_clock_endisable(hw, false);
1416 }
1417 
rzg2l_mod_clock_is_enabled(struct clk_hw * hw)1418 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
1419 {
1420 	struct mod_clock *clock = to_mod_clock(hw);
1421 	struct rzg2l_cpg_priv *priv = clock->priv;
1422 	u32 bitmask = BIT(clock->bit);
1423 	u32 value;
1424 
1425 	if (!clock->off) {
1426 		dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
1427 		return 1;
1428 	}
1429 
1430 	if (clock->sibling)
1431 		return clock->enabled;
1432 
1433 	if (priv->info->has_clk_mon_regs)
1434 		value = readl(priv->base + CLK_MON_R(clock->off));
1435 	else
1436 		value = readl(priv->base + clock->off);
1437 
1438 	return value & bitmask;
1439 }
1440 
1441 static const struct clk_ops rzg2l_mod_clock_ops = {
1442 	.enable = rzg2l_mod_clock_enable,
1443 	.disable = rzg2l_mod_clock_disable,
1444 	.is_enabled = rzg2l_mod_clock_is_enabled,
1445 };
1446 
1447 static struct mod_clock
rzg2l_mod_clock_get_sibling(struct mod_clock * clock,struct rzg2l_cpg_priv * priv)1448 *rzg2l_mod_clock_get_sibling(struct mod_clock *clock,
1449 			     struct rzg2l_cpg_priv *priv)
1450 {
1451 	struct mod_clock *clk;
1452 	struct clk_hw *hw;
1453 
1454 	for_each_mod_clock(clk, hw, priv) {
1455 		if (clock->off == clk->off && clock->bit == clk->bit)
1456 			return clk;
1457 	}
1458 
1459 	return NULL;
1460 }
1461 
rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv * priv,u32 conf)1462 static struct mstop *rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv *priv, u32 conf)
1463 {
1464 	struct mod_clock *clk;
1465 	struct clk_hw *hw;
1466 
1467 	for_each_mod_clock(clk, hw, priv) {
1468 		if (!clk->mstop)
1469 			continue;
1470 
1471 		if (clk->mstop->conf == conf)
1472 			return clk->mstop;
1473 	}
1474 
1475 	return NULL;
1476 }
1477 
rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv * priv)1478 static void rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv *priv)
1479 {
1480 	struct mod_clock *clk;
1481 	struct clk_hw *hw;
1482 
1483 	for_each_mod_clock(clk, hw, priv) {
1484 		if (!clk->mstop)
1485 			continue;
1486 
		/*
		 * Out of reset, all modules are enabled. Set the module state
		 * in case the associated clocks are disabled at probe;
		 * otherwise the module is left in an invalid HW state.
		 */
1492 		scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
1493 			if (!rzg2l_mod_clock_is_enabled(&clk->hw))
1494 				rzg2l_mod_clock_module_set_state(clk, true);
1495 		}
1496 	}
1497 }
1498 
rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv * priv,struct mod_clock * clock)1499 static int rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv *priv,
1500 						    struct mod_clock *clock)
1501 {
1502 	struct mod_clock *clk;
1503 	struct clk_hw *hw;
1504 
1505 	if (!clock->mstop)
1506 		return 0;
1507 
1508 	for_each_mod_clock(clk, hw, priv) {
1509 		int num_shared_mstop_clks, incr = 1;
1510 		struct mod_clock **new_clks;
1511 
1512 		if (clk->mstop != clock->mstop)
1513 			continue;
1514 
1515 		num_shared_mstop_clks = clk->num_shared_mstop_clks;
1516 		if (!num_shared_mstop_clks)
1517 			incr++;
1518 
1519 		new_clks = devm_krealloc(priv->dev, clk->shared_mstop_clks,
1520 					 (num_shared_mstop_clks + incr) * sizeof(*new_clks),
1521 					 GFP_KERNEL);
1522 		if (!new_clks)
1523 			return -ENOMEM;
1524 
1525 		if (!num_shared_mstop_clks)
1526 			new_clks[num_shared_mstop_clks++] = clk;
1527 		new_clks[num_shared_mstop_clks++] = clock;
1528 
1529 		for (unsigned int i = 0; i < num_shared_mstop_clks; i++) {
1530 			new_clks[i]->shared_mstop_clks = new_clks;
1531 			new_clks[i]->num_shared_mstop_clks = num_shared_mstop_clks;
1532 		}
1533 		break;
1534 	}
1535 
1536 	return 0;
1537 }
1538 
1539 static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk * mod,const struct rzg2l_cpg_info * info,struct rzg2l_cpg_priv * priv)1540 rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
1541 			   const struct rzg2l_cpg_info *info,
1542 			   struct rzg2l_cpg_priv *priv)
1543 {
1544 	struct mod_clock *clock = NULL;
1545 	struct device *dev = priv->dev;
1546 	unsigned int id = mod->id;
1547 	struct clk_init_data init;
1548 	struct clk *parent, *clk;
1549 	const char *parent_name;
1550 	unsigned int i;
1551 	int ret;
1552 
1553 	WARN_DEBUG(id < priv->num_core_clks);
1554 	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
1555 	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
1556 	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1557 
1558 	parent = priv->clks[mod->parent];
1559 	if (IS_ERR(parent)) {
1560 		clk = parent;
1561 		goto fail;
1562 	}
1563 
1564 	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
1565 	if (!clock) {
1566 		clk = ERR_PTR(-ENOMEM);
1567 		goto fail;
1568 	}
1569 
1570 	init.name = mod->name;
1571 	init.ops = &rzg2l_mod_clock_ops;
1572 	init.flags = CLK_SET_RATE_PARENT;
1573 	for (i = 0; i < info->num_crit_mod_clks; i++)
1574 		if (id == info->crit_mod_clks[i]) {
1575 			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
1576 				mod->name);
1577 			init.flags |= CLK_IS_CRITICAL;
1578 			break;
1579 		}
1580 
1581 	parent_name = __clk_get_name(parent);
1582 	init.parent_names = &parent_name;
1583 	init.num_parents = 1;
1584 
1585 	clock->off = mod->off;
1586 	clock->bit = mod->bit;
1587 	clock->priv = priv;
1588 	clock->hw.init = &init;
1589 
1590 	if (mod->mstop_conf) {
1591 		struct mstop *mstop = rzg2l_mod_clock_get_mstop(priv, mod->mstop_conf);
1592 
1593 		if (!mstop) {
1594 			mstop = devm_kzalloc(dev, sizeof(*mstop), GFP_KERNEL);
1595 			if (!mstop) {
1596 				clk = ERR_PTR(-ENOMEM);
1597 				goto fail;
1598 			}
1599 			mstop->conf = mod->mstop_conf;
1600 			atomic_set(&mstop->usecnt, 0);
1601 		}
1602 		clock->mstop = mstop;
1603 	}
1604 
1605 	ret = devm_clk_hw_register(dev, &clock->hw);
1606 	if (ret) {
1607 		clk = ERR_PTR(ret);
1608 		goto fail;
1609 	}
1610 
1611 	if (mod->is_coupled) {
1612 		struct mod_clock *sibling;
1613 
1614 		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
1615 		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
1616 		if (sibling) {
1617 			clock->sibling = sibling;
1618 			sibling->sibling = clock;
1619 		}
1620 	}
1621 
1622 	/* Keep this before priv->clks[id] is updated. */
1623 	ret = rzg2l_mod_clock_update_shared_mstop_clks(priv, clock);
1624 	if (ret) {
1625 		clk = ERR_PTR(ret);
1626 		goto fail;
1627 	}
1628 
1629 	clk = clock->hw.clk;
1630 	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1631 	priv->clks[id] = clk;
1632 
1633 	return;
1634 
1635 fail:
1636 	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
1637 		mod->name, PTR_ERR(clk));
1638 }
1639 
1640 #define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
1641 
rzg2l_cpg_assert(struct reset_controller_dev * rcdev,unsigned long id)1642 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1643 			    unsigned long id)
1644 {
1645 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1646 	const struct rzg2l_cpg_info *info = priv->info;
1647 	unsigned int reg = info->resets[id].off;
1648 	u32 mask = BIT(info->resets[id].bit);
1649 	s8 monbit = info->resets[id].monbit;
1650 	u32 value = mask << 16;
1651 
1652 	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1653 
1654 	writel(value, priv->base + CLK_RST_R(reg));
1655 
1656 	if (info->has_clk_mon_regs) {
1657 		reg = CLK_MRST_R(reg);
1658 	} else if (monbit >= 0) {
1659 		reg = CPG_RST_MON;
1660 		mask = BIT(monbit);
1661 	} else {
1662 		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1663 		udelay(35);
1664 		return 0;
1665 	}
1666 
1667 	return readl_poll_timeout_atomic(priv->base + reg, value,
1668 					 value & mask, 10, 200);
1669 }
1670 
rzg2l_cpg_deassert(struct reset_controller_dev * rcdev,unsigned long id)1671 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1672 			      unsigned long id)
1673 {
1674 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1675 	const struct rzg2l_cpg_info *info = priv->info;
1676 	unsigned int reg = info->resets[id].off;
1677 	u32 mask = BIT(info->resets[id].bit);
1678 	s8 monbit = info->resets[id].monbit;
1679 	u32 value = (mask << 16) | mask;
1680 
1681 	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1682 		CLK_RST_R(reg));
1683 
1684 	writel(value, priv->base + CLK_RST_R(reg));
1685 
1686 	if (info->has_clk_mon_regs) {
1687 		reg = CLK_MRST_R(reg);
1688 	} else if (monbit >= 0) {
1689 		reg = CPG_RST_MON;
1690 		mask = BIT(monbit);
1691 	} else {
1692 		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1693 		udelay(35);
1694 		return 0;
1695 	}
1696 
1697 	return readl_poll_timeout_atomic(priv->base + reg, value,
1698 					 !(value & mask), 10, 200);
1699 }
1700 
rzg2l_cpg_reset(struct reset_controller_dev * rcdev,unsigned long id)1701 static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
1702 			   unsigned long id)
1703 {
1704 	int ret;
1705 
1706 	ret = rzg2l_cpg_assert(rcdev, id);
1707 	if (ret)
1708 		return ret;
1709 
1710 	return rzg2l_cpg_deassert(rcdev, id);
1711 }
1712 
rzg2l_cpg_status(struct reset_controller_dev * rcdev,unsigned long id)1713 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1714 			    unsigned long id)
1715 {
1716 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1717 	const struct rzg2l_cpg_info *info = priv->info;
1718 	s8 monbit = info->resets[id].monbit;
1719 	unsigned int reg;
1720 	u32 bitmask;
1721 
1722 	if (info->has_clk_mon_regs) {
1723 		reg = CLK_MRST_R(info->resets[id].off);
1724 		bitmask = BIT(info->resets[id].bit);
1725 	} else if (monbit >= 0) {
1726 		reg = CPG_RST_MON;
1727 		bitmask = BIT(monbit);
1728 	} else {
1729 		return -ENOTSUPP;
1730 	}
1731 
1732 	return !!(readl(priv->base + reg) & bitmask);
1733 }
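
/*
 * Return convention of .status above: 1 while the reset is asserted, 0
 * once released, and -ENOTSUPP when the SoC provides neither CLK_MRST
 * monitor registers nor a CPG_RST_MON bit for this line.
 */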
1734 
1735 static const struct reset_control_ops rzg2l_cpg_reset_ops = {
1736 	.reset = rzg2l_cpg_reset,
1737 	.assert = rzg2l_cpg_assert,
1738 	.deassert = rzg2l_cpg_deassert,
1739 	.status = rzg2l_cpg_status,
1740 };
1741 
1742 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1743 				 const struct of_phandle_args *reset_spec)
1744 {
1745 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1746 	const struct rzg2l_cpg_info *info = priv->info;
1747 	unsigned int id = reset_spec->args[0];
1748 
1749 	if (id >= rcdev->nr_resets || !info->resets[id].off) {
1750 		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1751 		return -EINVAL;
1752 	}
1753 
1754 	return id;
1755 }
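
/*
 * With of_reset_n_cells = 1, a consumer node references a reset with a
 * single index cell, e.g. (hypothetical node; the constant comes from the
 * SoC's dt-bindings header):
 *
 *	scif0: serial {
 *		resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>;
 *	};
 *
 * rzg2l_cpg_reset_xlate() rejects indices beyond nr_resets as well as
 * table entries with a zero register offset.
 */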
1756 
1757 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1758 {
1759 	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1760 	priv->rcdev.of_node = priv->dev->of_node;
1761 	priv->rcdev.dev = priv->dev;
1762 	priv->rcdev.of_reset_n_cells = 1;
1763 	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1764 	priv->rcdev.nr_resets = priv->num_resets;
1765 
1766 	return devm_reset_controller_register(priv->dev, &priv->rcdev);
1767 }
1768 
1769 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1770 				const struct of_phandle_args *clkspec)
1771 {
1772 	if (clkspec->np != priv->genpd.dev.of_node || clkspec->args_count != 2)
1773 		return false;
1774 
1775 	switch (clkspec->args[0]) {
1776 	case CPG_MOD: {
1777 		const struct rzg2l_cpg_info *info = priv->info;
1778 		unsigned int id = clkspec->args[1];
1779 
1780 		if (id >= priv->num_mod_clks)
1781 			return false;
1782 
1783 		id += info->num_total_core_clks;
1784 
1785 		for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
1786 			if (info->no_pm_mod_clks[i] == id)
1787 				return false;
1788 		}
1789 
1790 		return true;
1791 	}
1792 
1793 	case CPG_CORE:
1794 	default:
1795 		return false;
1796 	}
1797 }
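
/*
 * PM-eligible clocks are two-cell specifiers of type CPG_MOD pointing at
 * this provider, e.g. (hypothetical consumer property; the module-clock
 * constant comes from the SoC's dt-bindings header):
 *
 *	clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
 *
 * CPG_CORE clocks and anything on info->no_pm_mod_clks are deliberately
 * excluded from runtime PM management.
 */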
1798 
1799 static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
1800 {
1801 	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
1802 	struct device_node *np = dev->of_node;
1803 	struct of_phandle_args clkspec;
1804 	bool once = true;
1805 	struct clk *clk;
1806 	unsigned int i;
1807 	int error;
1808 
1809 	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
1810 		if (!rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
1811 			of_node_put(clkspec.np);
1812 			continue;
1813 		}
1814 
1815 		if (once) {
1816 			once = false;
1817 			error = pm_clk_create(dev);
1818 			if (error) {
1819 				of_node_put(clkspec.np);
1820 				goto err;
1821 			}
1822 		}
1823 		clk = of_clk_get_from_provider(&clkspec);
1824 		of_node_put(clkspec.np);
1825 		if (IS_ERR(clk)) {
1826 			error = PTR_ERR(clk);
1827 			goto fail_destroy;
1828 		}
1829 
1830 		error = pm_clk_add_clk(dev, clk);
1831 		if (error) {
1832 			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
1833 			goto fail_put;
1834 		}
1835 	}
1836 
1837 	return 0;
1838 
1839 fail_put:
1840 	clk_put(clk);
1841 
1842 fail_destroy:
1843 	pm_clk_destroy(dev);
1844 err:
1845 	return error;
1846 }
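
/*
 * rzg2l_cpg_attach_dev() walks the consumer's "clocks" phandles and adds
 * every PM-eligible module clock to the device's pm_clk list; the "once"
 * flag defers pm_clk_create() until the first qualifying clock, so devices
 * without PM clocks never get an (empty) pm_clk list.
 */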
1847 
1848 static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
1849 {
1850 	if (!pm_clk_no_clocks(dev))
1851 		pm_clk_destroy(dev);
1852 }
1853 
1854 static void rzg2l_cpg_genpd_remove(void *data)
1855 {
1856 	pm_genpd_remove(data);
1857 }
1858 
1859 static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
1860 {
1861 	struct device *dev = priv->dev;
1862 	struct device_node *np = dev->of_node;
1863 	struct generic_pm_domain *genpd = &priv->genpd;
1864 	int ret;
1865 
1866 	genpd->name = np->name;
1867 	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1868 		       GENPD_FLAG_ACTIVE_WAKEUP;
1869 	genpd->attach_dev = rzg2l_cpg_attach_dev;
1870 	genpd->detach_dev = rzg2l_cpg_detach_dev;
1871 	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1872 	if (ret)
1873 		return ret;
1874 
1875 	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
1876 	if (ret)
1877 		return ret;
1878 
1879 	return of_genpd_add_provider_simple(np, genpd);
1880 }
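
/*
 * GENPD_FLAG_PM_CLK lets the genpd core gate/ungate the pm_clk lists built
 * in rzg2l_cpg_attach_dev() on runtime PM transitions, while
 * GENPD_FLAG_ALWAYS_ON keeps the domain itself from ever powering off:
 * this domain manages module clocks, not a power rail.
 */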
1881 
1882 static int __init rzg2l_cpg_probe(struct platform_device *pdev)
1883 {
1884 	struct device *dev = &pdev->dev;
1885 	struct device_node *np = dev->of_node;
1886 	const struct rzg2l_cpg_info *info;
1887 	struct rzg2l_cpg_priv *priv;
1888 	unsigned int nclks, i;
1889 	struct clk **clks;
1890 	int error;
1891 
1892 	info = of_device_get_match_data(dev);
1893 
1894 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1895 	if (!priv)
1896 		return -ENOMEM;
1897 
1898 	priv->dev = dev;
1899 	priv->info = info;
1900 	spin_lock_init(&priv->rmw_lock);
1901 
1902 	priv->base = devm_platform_ioremap_resource(pdev, 0);
1903 	if (IS_ERR(priv->base))
1904 		return PTR_ERR(priv->base);
1905 
1906 	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1907 	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
1908 	if (!clks)
1909 		return -ENOMEM;
1910 
1911 	dev_set_drvdata(dev, priv);
1912 	priv->clks = clks;
1913 	priv->num_core_clks = info->num_total_core_clks;
1914 	priv->num_mod_clks = info->num_hw_mod_clks;
1915 	priv->num_resets = info->num_resets;
1916 	priv->last_dt_core_clk = info->last_dt_core_clk;
1917 
1918 	for (i = 0; i < nclks; i++)
1919 		clks[i] = ERR_PTR(-ENOENT);
1920 
1921 	for (i = 0; i < info->num_core_clks; i++)
1922 		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1923 
1924 	for (i = 0; i < info->num_mod_clks; i++)
1925 		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1926 
1927 	/*
1928 	 * Initialize MSTOP after all the clocks were registered to avoid
1929 	 * invalid reference counting when multiple clocks (critical,
1930 	 * non-critical) share the same MSTOP.
1931 	 */
1932 	rzg2l_mod_clock_init_mstop(priv);
1933 
1934 	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
1935 	if (error)
1936 		return error;
1937 
1938 	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
1939 	if (error)
1940 		return error;
1941 
1942 	error = rzg2l_cpg_add_clk_domain(priv);
1943 	if (error)
1944 		return error;
1945 
1946 	error = rzg2l_cpg_reset_controller_register(priv);
1947 	if (error)
1948 		return error;
1949 
1950 	debugfs_create_file("mstop", 0444, NULL, priv, &rzg2l_mod_clock_mstop_fops);
1951 	return 0;
1952 }
1953 
1954 static int rzg2l_cpg_resume(struct device *dev)
1955 {
1956 	struct rzg2l_cpg_priv *priv = dev_get_drvdata(dev);
1957 
1958 	rzg2l_mod_clock_init_mstop(priv);
1959 
1960 	return 0;
1961 }
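
/*
 * Presumably the MSTOP state does not survive the deepest system sleep
 * states, so it is re-established in the noirq resume phase, before
 * consumer devices resume and start toggling their module clocks.
 */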
1962 
1963 static const struct dev_pm_ops rzg2l_cpg_pm_ops = {
1964 	NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, rzg2l_cpg_resume)
1965 };
1966 
1967 static const struct of_device_id rzg2l_cpg_match[] = {
1968 #ifdef CONFIG_CLK_R9A07G043
1969 	{
1970 		.compatible = "renesas,r9a07g043-cpg",
1971 		.data = &r9a07g043_cpg_info,
1972 	},
1973 #endif
1974 #ifdef CONFIG_CLK_R9A07G044
1975 	{
1976 		.compatible = "renesas,r9a07g044-cpg",
1977 		.data = &r9a07g044_cpg_info,
1978 	},
1979 #endif
1980 #ifdef CONFIG_CLK_R9A07G054
1981 	{
1982 		.compatible = "renesas,r9a07g054-cpg",
1983 		.data = &r9a07g054_cpg_info,
1984 	},
1985 #endif
1986 #ifdef CONFIG_CLK_R9A08G045
1987 	{
1988 		.compatible = "renesas,r9a08g045-cpg",
1989 		.data = &r9a08g045_cpg_info,
1990 	},
1991 #endif
1992 #ifdef CONFIG_CLK_R9A09G011
1993 	{
1994 		.compatible = "renesas,r9a09g011-cpg",
1995 		.data = &r9a09g011_cpg_info,
1996 	},
1997 #endif
1998 	{ /* sentinel */ }
1999 };
2000 
2001 static struct platform_driver rzg2l_cpg_driver = {
2002 	.driver		= {
2003 		.name	= "rzg2l-cpg",
2004 		.of_match_table = rzg2l_cpg_match,
2005 		.pm	= pm_sleep_ptr(&rzg2l_cpg_pm_ops),
2006 	},
2007 };
2008 
2009 static int __init rzg2l_cpg_init(void)
2010 {
2011 	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
2012 }
2013 
2014 subsys_initcall(rzg2l_cpg_init);
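
/*
 * subsys_initcall() brings the CPG up early so clocks and resets exist
 * before consumer drivers probe; platform_driver_probe() with an __init
 * probe function makes the driver built-in only, letting the probe code
 * be discarded once boot completes.
 */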
2015 
2016 MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
2017