// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset
 *
 * Copyright (C) 2015 Glider bvba
 *
 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
 *
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

static const u16 mstpsr_for_gen4[] = {
	0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
	0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
	0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
	0x2E60, 0x2E64, 0x2E68, 0x2E6C, 0x2E70, 0x2E74,
};

/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

static const u16 mstpcr_for_gen4[] = {
	0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
	0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
	0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
	0x2D60, 0x2D64, 0x2D68, 0x2D6C, 0x2D70, 0x2D74,
};

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

static const u16 srcr_for_gen4[] = {
	0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
	0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
	0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
	0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
};

/*
 * Software Reset Clearing Register offsets
 */

static const u16 srstclr[] = {
	0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
	0x960, 0x964, 0x968, 0x96C,
};

static const u16 srstclr_for_gen4[] = {
	0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
	0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
	0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
	0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC, 0x2CF0, 0x2CF4,
};

/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
 *                        and Software Reset Private Data
 *
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @base: CPG/MSSR register block base address
 * @reg_layout: CPG/MSSR register layout
 * @rmw_lock: protects RMW register accesses
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @status_regs: Pointer to status registers array
 * @control_regs: Pointer to control registers array
 * @reset_regs: Pointer to reset registers array
 * @reset_clear_regs: Pointer to reset clearing registers array
 * @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
 *                 [].val: Saved values of SMSTPCR[]
 * @reserved_ids: Temporary list of reserved module clock IDs
 * @num_reserved_ids: Number of entries in reserved_ids[]
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	void __iomem *base;
	enum clk_reg_layout reg_layout;
	spinlock_t rmw_lock;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;

	struct raw_notifier_head notifiers;
	const u16 *status_regs;
	const u16 *control_regs;
	const u16 *reset_regs;
	const u16 *reset_clear_regs;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];

	unsigned int *reserved_ids;
	unsigned int num_reserved_ids;

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)

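/*
 * Enable or disable a single MSTP module clock: clear (enable) or set
 * (disable) its bit in the control register under the RMW lock.  On
 * MSTPSR-based SoCs, enabling also polls the status register until the
 * module reports it is no longer stopped.
 */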
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	u32 value;
	int error;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		value = readb(priv->base + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->base + priv->control_regs[reg]);

		/* dummy read to ensure write has completed */
		readb(priv->base + priv->control_regs[reg]);
		barrier_data(priv->base + priv->control_regs[reg]);
	} else {
		value = readl(priv->base + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->base + priv->control_regs[reg]);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
					  value, !(value & bitmask), 0, 10);
	if (error)
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + priv->control_regs[reg], bit);

	return error;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

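/*
 * Report whether an MSTP clock is currently running.  RZ/A parts only
 * provide the 8-bit STBCR control registers, so the control register is
 * read back there; all other layouts read the MSTPSR status register.
 */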
static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		value = readb(priv->base + priv->control_regs[clock->index / 32]);
	else
		value = readl(priv->base + priv->status_regs[clock->index / 32]);

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};

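/*
 * Translate a two-cell clock specifier (<CPG_CORE idx> or <CPG_MOD idx>)
 * from DT into the matching struct clk.  Module clock indices use the
 * "register * 100 + bit" numbering and are packed with MOD_CLK_PACK() or
 * MOD_CLK_PACK_10() before being range-checked; e.g. on R-Car Gen2/Gen3,
 * <&cpg CPG_MOD 202> refers to the module gated by SMSTPCR2 bit 2.
 */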
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

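/*
 * Register one Core Clock described in the SoC-specific info table.
 * Depending on the type this is an external input, a fixed-factor or
 * fixed-rate clock, a DIV6 clock, or a clock handled by the SoC's own
 * cpg_clk_register() callback.
 */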
static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->base + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->base + core->offset,
						&priv->notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     priv->clks, priv->base,
						     &priv->notifiers);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

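/*
 * Register one Module Clock as an MSTP gate clock.  Critical module
 * clocks that are already running are marked CLK_IS_CRITICAL, and clocks
 * belonging to reserved (non-Linux) devices get CLK_IGNORE_UNUSED so
 * clk_disable_unused() leaves them alone.
 */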
static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init = {};
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i] &&
		    cpg_mstp_clock_is_enabled(&clock->hw)) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	/*
	 * Ignore clocks of reserved devices,
	 * see cpg_mssr_reserved_init().
	 */
	for (i = 0; i < priv->num_reserved_ids; i++) {
		if (id == priv->reserved_ids[i]) {
			dev_info(dev, "Ignore Linux non-assigned mod (%s)\n", mod->name);
			init.flags |= CLK_IGNORE_UNUSED;
			break;
		}
	}

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}

struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

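/*
 * Check whether a clock specifier refers to a clock suitable for Runtime
 * PM: any CPG_MOD clock qualifies, while CPG_CORE clocks must be listed
 * in the domain's core_pm_clks[] table.
 */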
static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

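/*
 * genpd attach callback: scan the consumer's "clocks" property for the
 * first clock provided by this CPG/MSSR instance that is suitable for
 * Runtime PM, and register it with the PM clock framework so it gets
 * gated together with the device.
 */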
int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void cpg_mssr_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, cpg_mssr_genpd_remove, genpd);
	if (ret)
		return ret;

	cpg_mssr_clk_domain = pd;

	return of_genpd_add_provider_simple(np, genpd);
}

#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

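/*
 * Pulse a module reset: write the SRCR bit, wait at least one RCLK cycle
 * (~32 kHz, hence the 35 us delay), then release it again via SRSTCLR.
 */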
static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->base + priv->reset_regs[reg]);

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->base + priv->reset_clear_regs[reg]);

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + priv->reset_regs[reg]);
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

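/*
 * Translate a DT reset specifier (same "register * 100 + bit" numbering
 * as the module clocks) into a packed reset index for the framework.
 */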
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif /* !CONFIG_RESET_CONTROLLER */

static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7742
	{
		.compatible = "renesas,r8a7742-cpg-mssr",
		.data = &r8a7742_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774B1
	{
		.compatible = "renesas,r8a774b1-cpg-mssr",
		.data = &r8a774b1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774E1
	{
		.compatible = "renesas,r8a774e1-cpg-mssr",
		.data = &r8a774e1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77960
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77961
	{
		.compatible = "renesas,r8a77961-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779A0
	{
		.compatible = "renesas,r8a779a0-cpg-mssr",
		.data = &r8a779a0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779F0
	{
		.compatible = "renesas,r8a779f0-cpg-mssr",
		.data = &r8a779f0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779G0
	{
		.compatible = "renesas,r8a779g0-cpg-mssr",
		.data = &r8a779g0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779H0
	{
		.compatible = "renesas,r8a779h0-cpg-mssr",
		.data = &r8a779h0_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
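/*
 * On PSCI-based systems the CPG/MSSR state may be lost across system
 * suspend.  Save the control register bits owned by this driver and let
 * the core clock notifiers save their own state.
 */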
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
				readb(priv->base + priv->control_regs[reg]) :
				readl(priv->base + priv->control_regs[reg]);
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

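/*
 * Counterpart of cpg_mssr_suspend_noirq(): restore the saved core clock
 * state and the control register bits owned by this driver, then poll the
 * status registers until re-enabled module clocks are running again.
 */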
static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;
	u32 mask, oldval, newval;
	int error;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
			oldval = readb(priv->base + priv->control_regs[reg]);
		else
			oldval = readl(priv->base + priv->control_regs[reg]);
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			writeb(newval, priv->base + priv->control_regs[reg]);
			/* dummy read to ensure write has completed */
			readb(priv->base + priv->control_regs[reg]);
			barrier_data(priv->base + priv->control_regs[reg]);
			continue;
		} else
			writel(newval, priv->base + priv->control_regs[reg]);

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
						  oldval, !(oldval & mask), 0, 10);
		if (error)
			dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
				 oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */

static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
{
	kfree(priv->reserved_ids);
}

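/*
 * Collect the CPG_MOD clock IDs of all devices below /soc that carry
 * status = "reserved", i.e. devices assigned to a non-Linux system.
 */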
static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
					 const struct cpg_mssr_info *info)
{
	struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
	struct device_node *node;
	uint32_t args[MAX_PHANDLE_ARGS];
	unsigned int *ids = NULL;
	unsigned int num = 0;

	/*
	 * Because clk_disable_unused() will disable all unused clocks, a device
	 * which is assigned to a non-Linux system would have its clocks disabled
	 * when Linux boots.
	 *
	 * To avoid such a situation, renesas-cpg-mssr assumes that a device with
	 * status = "reserved" is assigned to a non-Linux system, and adds the
	 * CLK_IGNORE_UNUSED flag to its CPG_MOD clocks.
	 * see also
	 *	cpg_mssr_register_mod_clk()
	 *
	 *	scif5: serial@e6f30000 {
	 *		...
	 * =>		clocks = <&cpg CPG_MOD 202>,
	 *			 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
	 *			 <&scif_clk>;
	 *			 ...
	 *		 status = "reserved";
	 *	};
	 */
	for_each_reserved_child_of_node(soc, node) {
		struct of_phandle_iterator it;
		int rc;

		of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
			int idx;

			if (it.node != priv->np)
				continue;

			if (of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS) != 2)
				continue;

			if (args[0] != CPG_MOD)
				continue;

			ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
			if (!ids) {
				of_node_put(it.node);
				return -ENOMEM;
			}

			if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
				idx = MOD_CLK_PACK_10(args[1]);	/* for DEF_MOD_STB() */
			else
				idx = MOD_CLK_PACK(args[1]);	/* for DEF_MOD() */

			ids[num] = info->num_total_core_clks + idx;

			num++;
		}
	}

	priv->num_reserved_ids	= num;
	priv->reserved_ids	= ids;

	return 0;
}

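/*
 * Common setup shared by early init and platform probe: map the register
 * block, select the register layout, pre-fill clks[] with -ENOENT, record
 * reserved module clocks, and register the OF clock provider.
 */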
static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->rmw_lock);

	priv->base = of_iomap(np, 0);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_err;
	}

	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
	priv->reg_layout = info->reg_layout;
	if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
		priv->status_regs = mstpsr;
		priv->control_regs = smstpcr;
		priv->reset_regs = srcr;
		priv->reset_clear_regs = srstclr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		priv->control_regs = stbcr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
		priv->status_regs = mstpsr_for_gen4;
		priv->control_regs = mstpcr_for_gen4;
		priv->reset_regs = srcr_for_gen4;
		priv->reset_clear_regs = srstclr_for_gen4;
	} else {
		error = -EINVAL;
		goto out_err;
	}

	for (i = 0; i < nclks; i++)
		priv->clks[i] = ERR_PTR(-ENOENT);

	error = cpg_mssr_reserved_init(priv, info);
	if (error)
		goto out_err;

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto reserve_err;

	cpg_mssr_priv = priv;

	return 0;

reserve_err:
	cpg_mssr_reserved_exit(priv);
out_err:
	if (priv->base)
		iounmap(priv->base);
	kfree(priv);

	return error;
}

void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);
}

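/*
 * Platform probe: reuse the state set up by cpg_mssr_early_init() if it
 * already ran, register all core and module clocks, add the clock PM
 * domain and, except on RZ/A (Standby Control) SoCs, the reset controller.
 */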
static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		goto reserve_exit;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		goto reserve_exit;

	/* Reset Controller not supported for Standby Control SoCs */
	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		goto reserve_exit;

	error = cpg_mssr_reset_controller_register(priv);

reserve_exit:
	cpg_mssr_reserved_exit(priv);

	return error;
}

static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name	= "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

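/*
 * NULLify the names of the given module clocks in a SoC's mod_clks[]
 * table so cpg_mssr_register_mod_clk() skips them; the IDs in clks[]
 * must appear in the same order as in mod_clks[].
 */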
void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");