/*
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <mach/clk.h>

#include "board.h"
#include "clock.h"

/*
 * Locking:
 *
 * Each struct clk has a spinlock.
 *
 * To avoid AB-BA locking problems, locks must always be traversed from child
 * clock to parent clock.  For example, when enabling a clock, the clock's lock
 * is taken, and then clk_enable is called on the parent, which takes the
 * parent clock's lock.  There is one exception to this ordering: when dumping
 * the clock tree through debugfs.  In this case, clk_lock_all is called,
 * which attempts to iterate through the entire list of clocks and take every
 * clock lock.  If any call to spin_trylock fails, all locked clocks are
 * unlocked, and the process is retried.  When all the locks are held,
 * the only clock operation that can be called is clk_get_rate_all_locked.
 *
 * Within a single clock, no clock operation can call another clock operation
 * on itself, except for clk_get_rate_locked and clk_set_rate_locked.  Any
 * clock operation can call any other clock operation on any of its possible
 * parents.
 *
 * An additional mutex, clock_list_lock, is used to protect the list of all
 * clocks.
 *
 * The clock operations must lock internally to protect against
 * read-modify-write on registers that are shared by multiple clocks.
 */
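/*
 * Lock ordering sketch, as exercised by clk_enable (child lock is
 * always taken before the parent lock):
 *
 *	clk_enable(child)
 *	  spin_lock_irqsave(&child->spinlock)
 *	    clk_enable(parent)
 *	      spin_lock_irqsave(&parent->spinlock)
 */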
static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);

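/*
 * Looks up a clock by name on the global list; returns NULL if no
 * clock with that name has been registered.
 */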
struct clk *tegra_get_clock_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;
	mutex_lock(&clock_list_lock);
	list_for_each_entry(c, &clocks, node) {
		if (strcmp(c->name, name) == 0) {
			ret = c;
			break;
		}
	}
	mutex_unlock(&clock_list_lock);
	return ret;
}

/*
 * Returns the rate c would run at if it were clocked by parent p:
 * p's rate scaled by c->mul / c->div, rounded up.
 * Must be called with c->spinlock held.
 */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
	u64 rate;

	rate = clk_get_rate(p);

	if (c->mul != 0 && c->div != 0) {
		rate *= c->mul;
		rate += c->div - 1; /* round up */
		do_div(rate, c->div);
	}

	return rate;
}

/* Must be called with c->spinlock held */
unsigned long clk_get_rate_locked(struct clk *c)
{
	unsigned long rate;

	if (c->parent)
		rate = clk_predict_rate_from_parent(c, c->parent);
	else
		rate = c->rate;

	return rate;
}

unsigned long clk_get_rate(struct clk *c)
{
	unsigned long flags;
	unsigned long rate;

	spin_lock_irqsave(&c->spinlock, flags);

	rate = clk_get_rate_locked(c);

	spin_unlock_irqrestore(&c->spinlock, flags);

	return rate;
}
EXPORT_SYMBOL(clk_get_rate);

int clk_reparent(struct clk *c, struct clk *parent)
{
	c->parent = parent;
	return 0;
}

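/*
 * Registers a clock: initializes its spinlock, runs its init op, and
 * adds it to the global clock list.  Clocks with no enable op are
 * treated as always enabled and inherit their parent's state.
 */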
void clk_init(struct clk *c)
{
	spin_lock_init(&c->spinlock);

	if (c->ops && c->ops->init)
		c->ops->init(c);

	if (!c->ops || !c->ops->enable) {
		c->refcnt++;
		c->set = true;
		if (c->parent)
			c->state = c->parent->state;
		else
			c->state = ON;
	}

	mutex_lock(&clock_list_lock);
	list_add(&c->node, &clocks);
	mutex_unlock(&clock_list_lock);
}

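/*
 * Takes a reference on the clock, enabling the parent and then the
 * clock itself on the 0 -> 1 refcount transition.  If the clock's own
 * enable op fails, the reference on the parent is dropped again.
 */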
int clk_enable(struct clk *c)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	if (c->refcnt == 0) {
		if (c->parent) {
			ret = clk_enable(c->parent);
			if (ret)
				goto out;
		}

		if (c->ops && c->ops->enable) {
			ret = c->ops->enable(c);
			if (ret) {
				if (c->parent)
					clk_disable(c->parent);
				goto out;
			}
			c->state = ON;
			c->set = true;
		}
	}
	c->refcnt++;
out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

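/*
 * Drops a reference on the clock, disabling it and then its parent on
 * the 1 -> 0 refcount transition.  Unbalanced calls trigger a WARN.
 */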
void clk_disable(struct clk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	if (c->refcnt == 0) {
		WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
		spin_unlock_irqrestore(&c->spinlock, flags);
		return;
	}
	if (c->refcnt == 1) {
		if (c->ops && c->ops->disable)
			c->ops->disable(c);

		if (c->parent)
			clk_disable(c->parent);

		c->state = OFF;
	}
	c->refcnt--;

	spin_unlock_irqrestore(&c->spinlock, flags);
}
EXPORT_SYMBOL(clk_disable);

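/*
 * Reparents the clock through its set_parent op; fails with -ENOSYS
 * when the op is not provided.
 */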
int clk_set_parent(struct clk *c, struct clk *parent)
{
	int ret;
	unsigned long flags;
	unsigned long new_rate;
	unsigned long old_rate;

	spin_lock_irqsave(&c->spinlock, flags);

	if (!c->ops || !c->ops->set_parent) {
		ret = -ENOSYS;
		goto out;
	}

	new_rate = clk_predict_rate_from_parent(c, parent);
	old_rate = clk_get_rate_locked(c);

	ret = c->ops->set_parent(c, parent);
	if (ret)
		goto out;

out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *c)
{
	return c->parent;
}
EXPORT_SYMBOL(clk_get_parent);

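/*
 * Caller must hold c->spinlock.  Clamps the requested rate to
 * c->max_rate, rounds it through the round_rate op when one exists,
 * then programs it via the set_rate op.
 */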
int clk_set_rate_locked(struct clk *c, unsigned long rate)
{
	long new_rate;

	if (!c->ops || !c->ops->set_rate)
		return -ENOSYS;

	if (rate > c->max_rate)
		rate = c->max_rate;

	if (c->ops && c->ops->round_rate) {
		new_rate = c->ops->round_rate(c, rate);

		if (new_rate < 0)
			return new_rate;

		rate = new_rate;
	}

	return c->ops->set_rate(c, rate);
}

int clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	ret = clk_set_rate_locked(c, rate);

	spin_unlock_irqrestore(&c->spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);


/* Must be called with clock_list_lock and all individual clock locks held */
unsigned long clk_get_rate_all_locked(struct clk *c)
{
	u64 rate;
	int mul = 1;
	int div = 1;
	struct clk *p = c;

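	/* Walk up to the root clock, accumulating the combined mul/div ratio */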
	while (p) {
		c = p;
		if (c->mul != 0 && c->div != 0) {
			mul *= c->mul;
			div *= c->div;
		}
		p = c->parent;
	}

	rate = c->rate;
	rate *= mul;
	do_div(rate, div);

	return rate;
}

long clk_round_rate(struct clk *c, unsigned long rate)
{
	unsigned long flags;
	long ret;

	spin_lock_irqsave(&c->spinlock, flags);

	if (!c->ops || !c->ops->round_rate) {
		ret = -ENOSYS;
		goto out;
	}

	if (rate > c->max_rate)
		rate = c->max_rate;

	ret = c->ops->round_rate(c, rate);

out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

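/*
 * Applies a single table entry: looks up the clock by name, then
 * optionally reparents it, sets its rate, and enables it, in that
 * order.
 */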
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
	struct clk *c;
	struct clk *p;

	int ret = 0;

	c = tegra_get_clock_by_name(table->name);

	if (!c) {
		pr_warning("Unable to initialize clock %s\n",
			table->name);
		return -ENODEV;
	}

	if (table->parent) {
		p = tegra_get_clock_by_name(table->parent);
		if (!p) {
			pr_warning("Unable to find parent %s of clock %s\n",
				table->parent, table->name);
			return -ENODEV;
		}

		if (c->parent != p) {
			ret = clk_set_parent(c, p);
			if (ret) {
				pr_warning("Unable to set parent %s of clock %s: %d\n",
					table->parent, table->name, ret);
				return -EINVAL;
			}
		}
	}

	if (table->rate && table->rate != clk_get_rate(c)) {
		ret = clk_set_rate(c, table->rate);
		if (ret) {
			pr_warning("Unable to set clock %s to rate %lu: %d\n",
				table->name, table->rate, ret);
			return -EINVAL;
		}
	}

	if (table->enabled) {
		ret = clk_enable(c);
		if (ret) {
			pr_warning("Unable to enable clock %s: %d\n",
				table->name, ret);
			return -EINVAL;
		}
	}

	return 0;
}

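/*
 * Walks a table of {name, parent, rate, enabled} entries, terminated
 * by a NULL name, and applies each one.  A minimal sketch of a board
 * init table (the clock names below are illustrative, not taken from
 * any particular board file):
 *
 *	static __initdata struct tegra_clk_init_table board_clk_table[] = {
 *		 * name		parent		rate		enabled *
 *		{ "uarta",	"pll_p",	216000000,	true },
 *		{ NULL,		NULL,		0,		0 },
 *	};
 *
 *	tegra_clk_init_from_table(board_clk_table);
 */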
void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
{
	for (; table->name; table++)
		tegra_clk_init_one_from_table(table);
}
EXPORT_SYMBOL(tegra_clk_init_from_table);

void tegra_periph_reset_deassert(struct clk *c)
{
	BUG_ON(!c->ops->reset);
	c->ops->reset(c, false);
}
EXPORT_SYMBOL(tegra_periph_reset_deassert);

void tegra_periph_reset_assert(struct clk *c)
{
	BUG_ON(!c->ops->reset);
	c->ops->reset(c, true);
}
EXPORT_SYMBOL(tegra_periph_reset_assert);

#ifdef CONFIG_DEBUG_FS

static int __clk_lock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry(c, &clocks, node)
		if (!spin_trylock(&c->spinlock))
			goto unlock_spinlocks;

	return 0;

unlock_spinlocks:
	list_for_each_entry_continue_reverse(c, &clocks, node)
		spin_unlock(&c->spinlock);

	return -EAGAIN;
}

static void __clk_unlock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry_reverse(c, &clocks, node)
		spin_unlock(&c->spinlock);
}

/*
 * This function retries until it can take all locks, and may take
 * an arbitrarily long time to complete.
 * Must be called with irqs enabled, returns with irqs disabled
 * Must be called with clock_list_lock held
 */
static void clk_lock_all(void)
{
	int ret;
retry:
	local_irq_disable();

	ret = __clk_lock_all_spinlocks();
	if (ret)
		goto failed_spinlocks;

	/* All locks taken successfully, return */
	return;

failed_spinlocks:
	local_irq_enable();
	yield();
	goto retry;
}

/*
 * Unlocks all clocks after a clk_lock_all
 * Must be called with irqs disabled, returns with irqs enabled
 * Must be called with clock_list_lock held
 */
static void clk_unlock_all(void)
{
	__clk_unlock_all_spinlocks();

	local_irq_enable();
}

static struct dentry *clk_debugfs_root;


static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;
	const char *state = "uninit";
	char div[8] = {0};

	if (c->state == ON)
		state = "on";
	else if (c->state == OFF)
		state = "off";

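	/*
	 * Format the mul/div ratio for display: "xN", "xN.N" or
	 * "xN.N.." when the clock multiplies its parent, otherwise the
	 * divider value ("N" or "N.5" for half-step dividers).
	 */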
	if (c->mul != 0 && c->div != 0) {
		if (c->mul > c->div) {
			int mul = c->mul / c->div;
			int mul2 = (c->mul * 10 / c->div) % 10;
			int mul3 = (c->mul * 10) % c->div;
			if (mul2 == 0 && mul3 == 0)
				snprintf(div, sizeof(div), "x%d", mul);
			else if (mul3 == 0)
				snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
			else
				snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
		} else {
			snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
				(c->div % c->mul) ? ".5" : "");
		}
	}

	seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
		level * 3 + 1, "",
		c->rate > c->max_rate ? '!' : ' ',
		!c->set ? '*' : ' ',
		30 - level * 3, c->name,
		state, c->refcnt, div, clk_get_rate_all_locked(c));

	list_for_each_entry(child, &clocks, node) {
		if (child->parent != c)
			continue;

		clock_tree_show_one(s, child, level + 1);
	}
}

static int clock_tree_show(struct seq_file *s, void *data)
{
	struct clk *c;
	seq_printf(s, "   clock                          state  ref div      rate\n");
	seq_printf(s, "--------------------------------------------------------------\n");

	mutex_lock(&clock_list_lock);

	clk_lock_all();

	list_for_each_entry(c, &clocks, node)
		if (c->parent == NULL)
			clock_tree_show_one(s, c, 0);

	clk_unlock_all();

	mutex_unlock(&clock_list_lock);
	return 0;
}

static int clock_tree_open(struct inode *inode, struct file *file)
{
	return single_open(file, clock_tree_show, inode->i_private);
}

static const struct file_operations clock_tree_fops = {
	.open		= clock_tree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

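/* Prints the clock's possible parents as a space-separated list */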
static int possible_parents_show(struct seq_file *s, void *data)
{
	struct clk *c = s->private;
	int i;

	for (i = 0; c->inputs[i].input; i++) {
		char *first = (i == 0) ? "" : " ";
		seq_printf(s, "%s%s", first, c->inputs[i].input->name);
	}
	seq_printf(s, "\n");
	return 0;
}

static int possible_parents_open(struct inode *inode, struct file *file)
{
	return single_open(file, possible_parents_show, inode->i_private);
}

static const struct file_operations possible_parents_fops = {
	.open		= possible_parents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

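/*
 * Creates one debugfs directory for the clock, exposing its refcnt,
 * rate, flags, and (when it has selectable inputs) possible parents.
 */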
static int clk_debugfs_register_one(struct clk *c)
{
	struct dentry *d;

	d = debugfs_create_dir(c->name, clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("refcnt", S_IRUGO, c->dent, (u8 *)&c->refcnt);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d)
		goto err_out;

	if (c->inputs) {
		d = debugfs_create_file("possible_parents", S_IRUGO, c->dent,
			c, &possible_parents_fops);
		if (!d)
			goto err_out;
	}

	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return -ENOMEM;
}

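/*
 * Registers a clock in debugfs, first making sure all of its
 * ancestors have been registered.
 */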
static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err = -ENOMEM;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
		&clock_tree_fops);
	if (!d)
		goto err_out;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}

late_initcall(clk_debugfs_init);
#endif