// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (WARN_ON(!evt->mult))
		evt->mult = 1;
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion is
	 * not equal to latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}
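
/*
 * Worked example (illustrative, not part of the original source): assume
 * a 10MHz device configured with shift = 32, giving a ns-to-ticks factor
 * of mult = (10^7 << 32) / 10^9 ~= 42949673. Converting latch = 10000
 * device ticks back to nanoseconds yields
 * clc = (10000 << 32) / 42949673 ~= 1000000 ns, i.e. 1ms, which matches
 * 10000 ticks / 10MHz. The (mult - 1) rounding term above only matters
 * when the division would otherwise truncate.
 */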

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (WARN_ON(!dev->mult))
				dev->mult = 1;
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
}

/**
 * clockevents_tick_resume -	Resume the tick device before using it again
 * @dev:			device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->tick_resume)
		ret = dev->tick_resume(dev);

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:       device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}
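
/*
 * Illustrative note (not part of the original source): the escalation in
 * clockevents_increase_min_delta() grows min_delta_ns by 50% per step
 * (5000, 7500, 11250, ... ns), so with HZ=1000 it takes roughly 14 failed
 * escalation rounds to reach the one-jiffy (1000000 ns) limit, after
 * which the next failure gives up with -ETIME.
 */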

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta = 0;
	int i;

	for (i = 0; i < 10; i++) {
		delta += dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;
	}
	return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (WARN_ON_ONCE(expires < 0))
		return -ETIME;

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
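
/*
 * Worked example (illustrative, not part of the original source): with
 * the 10MHz / shift = 32 / mult ~= 42949673 device from above, a delta of
 * 1000000 ns converts to clc = (1000000 * 42949673) >> 32 ~= 10000 device
 * ticks, the inverse of clockevent_delta2ns(). The clamping to
 * min/max_delta_ns keeps clc within what set_next_event() can program.
 */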

/*
 * Called after a clockevent has been added which might
 * have replaced a current regular or broadcast device. A
 * released normal device might be a suitable replacement
 * for the current broadcast device. Similarly a released
 * broadcast device might be a suitable replacement for a
 * normal device.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	/*
	 * Keep iterating as long as tick_check_new_device()
	 * replaces a device.
	 */
	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_move(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || !clockevent_state_detached(dev))
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (clockevent_state_detached(ced)) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	if (dev->cpumask == cpu_all_mask) {
		WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
		     dev->name);
		dev->cpumask = cpu_possible_mask;
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
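
/*
 * Worked example (illustrative, not part of the original source): a 32bit
 * counter at 10MHz has max_delta_ticks = 0xffffffff, so
 * sec = 4294967295 / 10000000 ~= 429 seconds. That is below the 600s cap,
 * so mult/shift are computed for a 429s range; a 64bit counter at the
 * same frequency would be clamped to 600s to keep the conversion factors
 * precise.
 */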

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
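
/*
 * Minimal usage sketch (illustrative only, compiled out): how a
 * hypothetical timer driver might fill in a clock_event_device and hand
 * it to clockevents_config_and_register(). All names and values below
 * are assumptions for demonstration and are not part of this file.
 */
#if 0
static int example_set_next_event(unsigned long ticks,
				  struct clock_event_device *ce)
{
	/* Program the hardware comparator 'ticks' device ticks ahead */
	return 0;
}

static struct clock_event_device example_ce = {
	.name			= "example-timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= example_set_next_event,
};

static void example_timer_init(void)
{
	example_ce.cpumask = cpumask_of(smp_processor_id());
	/* 10MHz clock, at least 0xf ticks, 32bit wide comparator */
	clockevents_config_and_register(&example_ce, 10000000, 0xf,
					0xffffffff);
}
#endif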

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_move(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU

/**
 * tick_offline_cpu - Shutdown all clock events related
 *                    to this CPU and take it out of the
 *                    broadcast mechanism.
 * @cpu:	The outgoing CPU
 *
 * Called by the dying CPU during teardown.
 */
void tick_offline_cpu(unsigned int cpu)
{
	struct clock_event_device *dev, *tmp;

	raw_spin_lock(&clockevents_lock);

	tick_broadcast_offline(cpu);
	tick_shutdown();

	/*
	 * Unregister the clock event devices which were
	 * released above.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);

	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}

	raw_spin_unlock(&clockevents_lock);
}
#endif

#ifdef CONFIG_SYSFS
static const struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name       = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t current_device_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = sysfs_emit(buf, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR_RO(current_device);

/* We don't support the abomination of removable broadcast devices */
static ssize_t unbind_device_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce = NULL, *iter;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(iter, &clockevent_devices, list) {
		if (!strcmp(iter->name, name)) {
			ret = __clockevents_try_unbind(iter, dev->id);
			ce = iter;
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_device);
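
/*
 * Usage note (illustrative, not part of the original source): with this
 * attribute registered, userspace can request an unbind via sysfs, e.g.
 * something like:
 *
 *   echo <device-name> > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The exact path depends on how subsys_system_register() lays out the
 * hierarchy; the per-CPU nodes are named after the "clockevent" dev_name
 * set above.
 */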

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */