/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
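
/*
 * Usage sketch (illustrative only, not built): a driver must quiesce its
 * device and then wait for in-flight handlers before freeing resources
 * the handler touches. struct my_dev, my_dev_stop() and the dma_buf
 * member are hypothetical. Note the deadlock caveat above: do not call
 * this while holding a lock the handler may take.
 */
#if 0
static void my_driver_teardown(struct my_dev *dev)
{
	my_dev_stop(dev);		/* device raises no new interrupts */
	synchronize_irq(dev->irq);	/* wait for handlers on other CPUs */
	kfree(dev->dma_buf);		/* now safe: no handler can touch it */
}
#endif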

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@mask:		cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
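
/*
 * Usage sketch (illustrative only, not built): pin an interrupt to CPU 0.
 * -EINVAL means the irq cannot be targeted (no descriptor, balancing
 * disallowed, or the chip lacks an irq_set_affinity callback).
 */
#if 0
static int pin_irq_to_cpu0(unsigned int irq)
{
	return irq_set_affinity(irq, cpumask_of(0));
}
#endif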

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
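
/*
 * Usage sketch (illustrative only, not built): a driver embeds the
 * notifier in its per-queue state to react to affinity changes, e.g. to
 * move queue resources near the new CPUs. struct my_queue and
 * my_queue_rebalance() are hypothetical; only notify/release must be
 * filled in, the rest is initialised by irq_set_affinity_notifier().
 */
#if 0
struct my_queue {
	struct irq_affinity_notify affinity_notify;
	unsigned int irq;
};

static void my_affinity_changed(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct my_queue *q = container_of(notify, struct my_queue,
					  affinity_notify);

	my_queue_rebalance(q, mask);	/* hypothetical helper */
}

static void my_affinity_release(struct kref *ref)
{
	/* Notifier is embedded in my_queue; nothing dynamic to free. */
}

static int my_queue_watch_affinity(struct my_queue *q)
{
	q->affinity_notify.notify = my_affinity_changed;
	q->affinity_notify.release = my_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}
#endif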

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
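
/*
 * Usage sketch (illustrative only, not built): because disables nest, a
 * section that must not race with the handler can simply bracket itself.
 * Each disable_irq() must be paired with exactly one enable_irq().
 * struct my_dev and my_recompute() are hypothetical.
 */
#if 0
static void my_update_shared_state(struct my_dev *dev)
{
	disable_irq(dev->irq);			/* also waits for running handlers */
	dev->state = my_recompute(dev);		/* handler cannot observe this */
	enable_irq(dev->irq);			/* matches the disable above */
}
#endif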

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
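
/*
 * Usage sketch (illustrative only, not built): arming an interrupt as a
 * wakeup source across suspend. Enables and disables must balance, just
 * like disable_irq()/enable_irq(). struct my_dev is hypothetical.
 */
#if 0
static int my_suspend(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 1);	/* arm for wakeup */
	return 0;
}

static int my_resume(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 0);	/* balance the enable */
	return 0;
}
#endif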

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action, bool force)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again, mask the line and leave early
	 * due to IRQS_INPROGRESS, and the irq line stays masked
	 * forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action, false);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action, false);
	return ret;
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);
	int wake;

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQS_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->istate |= IRQS_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			irqreturn_t action_ret;

			raw_spin_unlock_irq(&desc->lock);
			action_ret = handler_fn(desc, action);
			if (!noirqdebug)
				note_interrupt(action->irq, desc, action_ret);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action, true);

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	desc = irq_to_desc(tsk->irqaction->irq);

	/*
	 * Prevent a stale desc->threads_oneshot. Must be called
	 * before setting the IRQTF_DIED flag.
	 */
	irq_finalize_oneshot(desc, tsk->irqaction, true);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler. But is this really a
		 * problem? Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
			old_name = old->name;
			goto mismatch;
		}

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 (or 64, respectively) irqs
		 * sharing one line, but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_oneshot becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_oneshot is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
				   irq, omsk, nmsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
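
/*
 * Usage sketch (illustrative only, not built): the split handler design
 * described in the kernel-doc above. The hard handler only checks and
 * quiesces the device; the slow work runs in the thread and may sleep.
 * All my_* names are hypothetical.
 */
#if 0
static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!my_dev_irq_pending(dev))
		return IRQ_NONE;	/* not ours - line is shared */
	my_dev_mask_irq(dev);		/* stop the device re-raising it */
	return IRQ_WAKE_THREAD;		/* run my_thread_fn in a thread */
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_read_fifo(dev);		/* may sleep, e.g. on a slow bus */
	my_dev_unmask_irq(dev);
	return IRQ_HANDLED;
}

static int my_probe_irq(struct my_dev *dev)
{
	/* dev doubles as the unique cookie required for IRQF_SHARED */
	return request_threaded_irq(dev->irq, my_hardirq, my_thread_fn,
				    IRQF_SHARED, "my_dev", dev);
}
#endif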

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
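
/*
 * Usage sketch (illustrative only, not built): unlike request_irq(),
 * success here is a positive IRQC_* value, so only negative results are
 * errors. struct my_dev and my_handler are hypothetical.
 */
#if 0
static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request(struct my_dev *dev)
{
	int ret = request_any_context_irq(dev->irq, my_handler, 0,
					  "my_dev", dev);
	if (ret < 0)
		return ret;
	dev->irq_is_threaded = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif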

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources, but doesn't
 *	automatically enable the interrupt. It has to be done on each
 *	CPU using enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
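
/*
 * Usage sketch (illustrative only, not built): per-cpu interrupts as
 * used by e.g. local timers. The cookie is a percpu variable; the
 * request only allocates, each CPU then enables its own copy of the
 * line. struct my_percpu_state and my_handle_local_event() are
 * hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct my_percpu_state, my_state);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	struct my_percpu_state *s = dev_id;	/* this CPU's instance */

	my_handle_local_event(s);
	return IRQ_HANDLED;
}

static int my_percpu_init(unsigned int irq)
{
	int ret = request_percpu_irq(irq, my_percpu_handler, "my_percpu",
				     &my_state);
	if (ret)
		return ret;
	/* Typically repeated on each CPU, e.g. from a hotplug notifier: */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif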