#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *   - Two channels combine to create a free-running 32 bit counter
 *     with a base rate of 5+ MHz, packaged as a clocksource (with
 *     resolution better than 200 nsec).
 *
 *   - The third channel may be used to provide a 16-bit clockevent
 *     source, used in either periodic or oneshot mode.  This runs
 *     at 32 KiHz, and can handle delays of up to two seconds.
 *
 * A boot clocksource and clockevent source are also currently needed,
 * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
 * this code can be used when init_timers() is called, well before most
 * devices are set up.  (Some low end AT91 parts, which can run uClinux,
 * have only the timers in one TC block... they currently don't support
 * the tclib code, because of that initialization issue.)
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power.  Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */
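
/*
 * Back-of-envelope numbers for the scheme above (illustrative only;
 * the actual rates depend on the master clock of the part in use):
 *
 *   - clocksource: at the minimum 5 MHz count rate, one tick is
 *     1 / 5 MHz = 200 nsec, and the chained 32-bit counter wraps
 *     only after 2^32 / 5 MHz ~= 859 seconds (~14 minutes).
 *
 *   - clockevent: at 32768 Hz, one tick is 1 / 32768 ~= 30.5 usec,
 *     and the 16-bit channel can count 65536 / 32768 = 2 seconds
 *     before overflowing, which is where the "up to two seconds"
 *     limit comes from.
 */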

static void __iomem *tcaddr;

static cycle_t tc_get_cycles(struct clocksource *cs)
{
	unsigned long	flags;
	u32		lower, upper;

	raw_local_irq_save(flags);
	do {
		/*
		 * Read the high half (channel 1), then the low half
		 * (channel 0), and retry if the high half changed in
		 * between: that means the low counter wrapped while we
		 * were reading, and splicing the two values would give
		 * a torn result.
		 */
		upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
		lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static struct clocksource clksrc = {
	.name           = "tcb_clksrc",
	.rating         = 200,
	.read           = tc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device	clkevt;
	struct clk			*clk;
	void __iomem			*regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
 * because using one of the divided clocks would usually mean the
 * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
 *
 * A divided clock could be good for high resolution timers, since
 * 30.5 usec resolution can seem "low".
 */
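/*
 * To illustrate that trade-off (assuming, purely as an example, a
 * 100 MHz master clock -- the real MCK varies by SoC and board):
 * mck/32 would count at ~3.1 MHz, i.e. ~0.32 usec resolution, but the
 * 16-bit channel then overflows after 65536 / 3.1 MHz ~= 21 ms, so
 * periodic ticks could never be slower than roughly 48 Hz.  The
 * 32 KiHz slow clock is coarse (~30.5 usec per tick), but it lets a
 * single programmed event stretch out to the full 2 seconds, which is
 * what NO_HZ wants.
 */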
static u32 timer_clock;

static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem		*regs = tcd->regs;

	if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
			|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
		clk_disable(tcd->clk);
	}

	switch (m) {

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	case CLOCK_EVT_MODE_PERIODIC:
		clk_enable(tcd->clk);

		/* slow clock, count up to RC, then irq and restart */
		__raw_writel(timer_clock
				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
				regs + ATMEL_TC_REG(2, CMR));
		/* e.g. HZ=100: (32768 + 50) / 100 = 328 counts, ~10.01 ms */
		__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

		/* Enable interrupt on RC compare */
		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

		/* go go gadget! */
		__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
				regs + ATMEL_TC_REG(2, CCR));
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		clk_enable(tcd->clk);

		/* slow clock, count up to RC, then irq and stop */
		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
				regs + ATMEL_TC_REG(2, CMR));
		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

		/* set_next_event() configures and starts the timer */
		break;

	default:
		break;
	}
}

static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	__raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
			tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt	= {
		.name		= "tc_clkevt",
		.features	= CLOCK_EVT_FEAT_PERIODIC
					| CLOCK_EVT_FEAT_ONESHOT,
		.shift		= 32,
		/* Should be lower than at91rm9200's system timer */
		.rating		= 125,
		.set_next_event	= tc_next_event,
		.set_mode	= tc_mode,
	},
};

static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device	*dev = handle;
	unsigned int		sr;

	sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static struct irqaction tc_irqaction = {
	.name		= "tc_clkevt",
	.flags		= IRQF_TIMER | IRQF_DISABLED,
	.handler	= ch2_irq,
};

static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;
	tc_irqaction.dev_id = &clkevt;

	timer_clock = clk32k_divisor_idx;

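	/*
	 * Rough numbers for the conversion factors computed below: with
	 * shift = 32, div_sc(32768, NSEC_PER_SEC, 32) is about 140737
	 * (roughly 2^47 / 10^9), so clockevent_delta2ns(0xffff, ...) comes
	 * out near 2 * 10^9 ns -- the two second ceiling noted at the top
	 * of this file -- and the one-tick minimum is about 30.5 usec.
	 */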
	clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
	clkevt.clkevt.max_delta_ns
		= clockevent_delta2ns(0xffff, &clkevt.clkevt);
	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
	clkevt.clkevt.cpumask = cpumask_of(0);

	clockevents_register_device(&clkevt.clkevt);

	setup_irq(irq, &tc_irqaction);
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	/* NOTHING */
}

#endif

static int __init tcb_clksrc_init(void)
{
	static char bootinfo[] __initdata
		= KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";

	struct platform_device *pdev;
	struct atmel_tc *tc;
	struct clk *t0_clk;
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int clk32k_divisor_idx = -1;
	int i;

	tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
	if (!tc) {
		pr_debug("can't alloc TC for clocksource\n");
		return -ENODEV;
	}
	tcaddr = tc->regs;
	pdev = tc->pdev;

	t0_clk = tc->clk[0];
	clk_enable(t0_clk);

	/* How fast will we be counting?  Pick something over 5 MHz.  */
	rate = (u32) clk_get_rate(t0_clk);
	for (i = 0; i < 5; i++) {
		unsigned divisor = atmel_tc_divisors[i];
		unsigned tmp;

		/* remember 32 KiHz clock for later */
		if (!divisor) {
			clk32k_divisor_idx = i;
			continue;
		}

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if (best_divisor_idx > 0) {
			if (tmp < 5 * 1000 * 1000)
				continue;
		}
		divided_rate = tmp;
		best_divisor_idx = i;
	}
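
	/*
	 * Illustration of the loop above (the exact outcome depends on the
	 * master clock rate and the divisor table tclib exposes): with a
	 * hypothetical 100 MHz t0_clk, dividing by 2, 8, 32 and 128 gives
	 * 50 MHz, 12.5 MHz, ~3.1 MHz and ~0.8 MHz, so the loop would settle
	 * on divide-by-8 (12.5 MHz), the largest divisor here that still
	 * keeps the count rate over 5 MHz.  The zero "divisor" entry marks
	 * the 32 KiHz slow clock reserved for the clockevent channel.
	 */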

	printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
			divided_rate / 1000000,
			((divided_rate + 500000) % 1000000) / 1000);

	/* tclib will give us three clocks no matter what the
	 * underlying platform supports.
	 */
	clk_enable(tc->clk[1]);

	/* channel 0:  waveform mode, input from the divided clock chosen
	 * above, clock TIOA0 on overflow
	 */
	__raw_writel(best_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP		/* free-run */
			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));
	__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1:  waveform mode, input TIOA0 */
	__raw_writel(ATMEL_TC_XC1			/* input: TIOA0 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(1, CMR));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
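
	/*
	 * With RA = 0 and RC = 0x8000, TIOA0 rises once per 2^16 counts of
	 * channel 0, so channel 1 (clocked by TIOA0 via XC1) advances by one
	 * each time channel 0 wraps; together the two channels form the
	 * free-running 32-bit counter that tc_get_cycles() reads.
	 */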

	/* chain channel 0 to channel 1, then reset all the timers */
	__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);

	/* and away we go! */
	clocksource_register_hz(&clksrc, divided_rate);

	/* channel 2:  periodic and oneshot timer support */
	setup_clkevents(tc, clk32k_divisor_idx);

	return 0;
}
arch_initcall(tcb_clksrc_init);