Lines Matching full:irq

6  * This file contains driver APIs to the irq subsystem.
11 #include <linux/irq.h>
78 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
79 * @irq: interrupt number to wait for
81 * This function waits for any pending hard IRQ handlers for this
83 * function while holding a resource the IRQ handler may need you
92 * This function may be called - with care - from IRQ context.
99 bool synchronize_hardirq(unsigned int irq) in synchronize_hardirq() argument
101 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq()
113 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
114 * @irq: interrupt number to wait for
116 * This function waits for any pending IRQ handlers for this interrupt
118 * holding a resource the IRQ handler may need you will deadlock.
121 * an interrupt thread is associated to @irq.
123 * It optionally makes sure (when the irq chip supports that method)
127 void synchronize_irq(unsigned int irq) in synchronize_irq() argument
129 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq()
156 * irq_can_set_affinity - Check if the affinity of a given irq can be set
157 * @irq: Interrupt to check
160 int irq_can_set_affinity(unsigned int irq) in irq_can_set_affinity() argument
162 return __irq_can_set_affinity(irq_to_desc(irq)); in irq_can_set_affinity()
166 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
167 * @irq: Interrupt to check
172 bool irq_can_set_affinity_usr(unsigned int irq) in irq_can_set_affinity_usr() argument
174 struct irq_desc *desc = irq_to_desc(irq); in irq_can_set_affinity_usr()
181 * irq_set_thread_affinity - Notify irq threads to adjust affinity
182 * @desc: irq descriptor which has affinity changed
206 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", in irq_validate_effective_affinity()
207 chip->name, data->irq); in irq_validate_effective_affinity()
323 * Handle irq chips which can handle affinity only in activated in irq_set_affinity_deactivated()
374 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) in __irq_set_affinity() argument
376 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity()
389 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) in irq_set_affinity_hint() argument
392 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_affinity_hint()
400 __irq_set_affinity(irq, m, false); in irq_set_affinity_hint()
409 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify()
431 * irq_set_affinity_notifier - control notification of IRQ affinity changes
432 * @irq: Interrupt for which to enable/disable notification
438 * after the IRQ is allocated and must be disabled before the IRQ is
442 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) in irq_set_affinity_notifier() argument
444 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier()
456 notify->irq = irq; in irq_set_affinity_notifier()
534 * @irq: interrupt number to set affinity
539 * affinity for an irq. The vCPU specific data is passed from
543 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) in irq_set_vcpu_affinity() argument
546 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity()
580 static int __disable_irq_nosync(unsigned int irq) in __disable_irq_nosync() argument
583 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync()
593 * disable_irq_nosync - disable an irq without waiting
594 * @irq: Interrupt to disable
599 * instances of the IRQ handler have completed before returning.
601 * This function may be called from IRQ context.
603 void disable_irq_nosync(unsigned int irq) in disable_irq_nosync() argument
605 __disable_irq_nosync(irq); in disable_irq_nosync()
610 * disable_irq - disable an irq and wait for completion
611 * @irq: Interrupt to disable
615 * This function waits for any pending IRQ handlers for this interrupt
617 * holding a resource the IRQ handler may need you will deadlock.
619 * This function may be called - with care - from IRQ context.
621 void disable_irq(unsigned int irq) in disable_irq() argument
623 if (!__disable_irq_nosync(irq)) in disable_irq()
624 synchronize_irq(irq); in disable_irq()
629 * disable_hardirq - disables an irq and waits for hardirq completion
630 * @irq: Interrupt to disable
634 * This function waits for any pending hard IRQ handlers for this
636 * holding a resource the hard IRQ handler may need you will deadlock.
643 * This function may be called - with care - from IRQ context.
645 bool disable_hardirq(unsigned int irq) in disable_hardirq() argument
647 if (!__disable_irq_nosync(irq)) in disable_hardirq()
648 return synchronize_hardirq(irq); in disable_hardirq()
656 * @irq: Interrupt to disable
662 * instances of the IRQ handler have completed before returning.
664 void disable_nmi_nosync(unsigned int irq) in disable_nmi_nosync() argument
666 disable_irq_nosync(irq); in disable_nmi_nosync()
674 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", in __enable_irq()
680 /* Prevent probing on this irq: */ in __enable_irq()
698 * enable_irq - enable handling of an irq
699 * @irq: Interrupt to enable
703 * IRQ line is re-enabled.
705 * This function may be called from IRQ context only when
708 void enable_irq(unsigned int irq) in enable_irq() argument
711 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq()
716 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) in enable_irq()
727 * @irq: Interrupt to enable
732 * IRQ line is re-enabled.
734 void enable_nmi(unsigned int irq) in enable_nmi() argument
736 enable_irq(irq); in enable_nmi()
739 static int set_irq_wake_real(unsigned int irq, unsigned int on) in set_irq_wake_real() argument
741 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real()
754 * irq_set_irq_wake - control irq power management wakeup
755 * @irq: interrupt to control
762 * Wakeup mode lets this IRQ wake the system from sleep
765 * Note: irq enable/disable state is completely orthogonal
766 * to the enable/disable state of irq wake. An irq can be
768 * long as the irq has wake enabled. If this does not hold,
769 * then the underlying irq chip and the related driver need
772 int irq_set_irq_wake(unsigned int irq, unsigned int on) in irq_set_irq_wake() argument
775 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake()
792 ret = set_irq_wake_real(irq, on); in irq_set_irq_wake()
800 WARN(1, "Unbalanced IRQ %d wake disable\n", irq); in irq_set_irq_wake()
802 ret = set_irq_wake_real(irq, on); in irq_set_irq_wake()
818 * particular irq has been exclusively allocated or is available
821 int can_request_irq(unsigned int irq, unsigned long irqflags) in can_request_irq() argument
824 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq()
849 pr_debug("No set_type function for IRQ %d (%s)\n", in __irq_set_trigger()
886 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n", in __irq_set_trigger()
895 int irq_set_parent(int irq, int parent_irq) in irq_set_parent() argument
898 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent()
916 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) in irq_default_primary_handler() argument
925 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) in irq_nested_primary_handler() argument
927 WARN(1, "Primary handler called for nested irq %d\n", irq); in irq_nested_primary_handler()
931 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) in irq_forced_secondary_handler() argument
933 WARN(1, "Secondary action handler called for irq %d\n", irq); in irq_forced_secondary_handler()
963 * Oneshot interrupts keep the irq line masked until the threaded
982 * on the other CPU. If we unmask the irq line then the in irq_finalize_oneshot()
984 * to IRQS_INPROGRESS and the irq line is masked forever. in irq_finalize_oneshot()
1065 * interrupts rely on the implicit bh/preempt disable of the hard irq
1075 ret = action->thread_fn(action->irq, action->dev_id); in irq_forced_thread_fn()
1094 ret = action->thread_fn(action->irq, action->dev_id); in irq_thread_fn()
1119 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", in irq_thread_dtor()
1120 tsk->comm, tsk->pid, action->irq); in irq_thread_dtor()
1123 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1154 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread()
1192 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1193 * @irq: Interrupt line
1197 void irq_wake_thread(unsigned int irq, void *dev_id) in irq_wake_thread() argument
1199 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread()
1247 new->secondary->irq = new->irq; in irq_setup_forced_threading()
1308 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) in setup_irq_thread() argument
1313 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, in setup_irq_thread()
1316 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, in setup_irq_thread()
1359 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1373 new->irq = irq; in __setup_irq()
1412 ret = setup_irq_thread(new, irq, false); in __setup_irq()
1416 ret = setup_irq_thread(new->secondary, irq, true); in __setup_irq()
1424 * underlying irq chip implementation, so a request for a in __setup_irq()
1425 * threaded irq without a primary hard irq context handler in __setup_irq()
1426 * requires the ONESHOT flag to be set. Some irq chips like in __setup_irq()
1454 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", in __setup_irq()
1455 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1481 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", in __setup_irq()
1482 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1508 /* add new interrupt at end of irq queue */ in __setup_irq()
1546 * If no thread is woken by primary (hard irq context) in __setup_irq()
1548 * also checked for zero to unmask the irq line in the in __setup_irq()
1549 * affected hard irq flow handlers in __setup_irq()
1566 * the irq lines is reenabled, but the device still in __setup_irq()
1567 * has the level irq asserted. Rinse and repeat.... in __setup_irq()
1575 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n", in __setup_irq()
1576 new->name, irq); in __setup_irq()
1620 /* Exclude IRQ from balancing if requested */ in __setup_irq()
1646 pr_warn("irq %d uses trigger mode %u; requested %u\n", in __setup_irq()
1647 irq, omsk, nmsk); in __setup_irq()
1654 /* Reset broken irq detection when installing new handler */ in __setup_irq()
1659 * Check whether we disabled the irq via the spurious handler in __setup_irq()
1682 register_irq_proc(irq, desc); in __setup_irq()
1684 register_handler_proc(irq, new); in __setup_irq()
1689 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", in __setup_irq()
1690 irq, new->flags, new->name, old->flags, old->name); in __setup_irq()
1732 unsigned irq = desc->irq_data.irq; in __free_irq() local
1736 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); in __free_irq()
1743 * There can be multiple actions per IRQ descriptor, find the right in __free_irq()
1751 WARN(1, "Trying to free already-free IRQ %d\n", irq); in __free_irq()
1768 /* If this was the last handler, shut down the IRQ line: */ in __free_irq()
1784 * callbacks above are synced out to the irq chips which hang in __free_irq()
1793 * concurrent request_irq() of this irq so the release of resources in __free_irq()
1798 unregister_handler_proc(irq, action); in __free_irq()
1809 * It's a shared IRQ -- the driver ought to be prepared for an IRQ in __free_irq()
1814 * 'real' IRQ doesn't run in parallel with our fake. ) in __free_irq()
1818 action->handler(irq, dev_id); in __free_irq()
1868 * @irq: Interrupt line to free
1873 * On a shared IRQ the caller must ensure the interrupt is disabled
1875 * does not return until any executing interrupts for this IRQ
1882 const void *free_irq(unsigned int irq, void *dev_id) in free_irq() argument
1884 struct irq_desc *desc = irq_to_desc(irq); in free_irq()
1908 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) in __cleanup_nmi() argument
1917 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
1934 const void *free_nmi(unsigned int irq, void *dev_id) in free_nmi() argument
1936 struct irq_desc *desc = irq_to_desc(irq); in free_nmi()
1948 disable_nmi_nosync(irq); in free_nmi()
1953 devname = __cleanup_nmi(irq, desc); in free_nmi()
1962 * @irq: Interrupt line to allocate
1963 * @handler: Function to be called when the IRQ occurs.
1967 * @thread_fn: Function called from the irq handler thread
1968 * If NULL, no irq thread is created
1974 * interrupt line and IRQ handling. From the point this
1980 * If you want to set up a threaded irq handler for your device
2002 int request_threaded_irq(unsigned int irq, irq_handler_t handler, in request_threaded_irq() argument
2010 if (irq == IRQ_NOTCONNECTED) in request_threaded_irq()
2027 desc = irq_to_desc(irq); in request_threaded_irq()
2057 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
2068 * It's a shared IRQ -- the driver ought to be prepared for it in request_threaded_irq()
2070 * We disable the irq to make sure that a 'real' IRQ doesn't in request_threaded_irq()
2075 disable_irq(irq); in request_threaded_irq()
2078 handler(irq, dev_id); in request_threaded_irq()
2081 enable_irq(irq); in request_threaded_irq()
2090 * @irq: Interrupt line to allocate
2091 * @handler: Function to be called when the IRQ occurs.
2098 * interrupt line and IRQ handling. It selects either a
2105 int request_any_context_irq(unsigned int irq, irq_handler_t handler, in request_any_context_irq() argument
2111 if (irq == IRQ_NOTCONNECTED) in request_any_context_irq()
2114 desc = irq_to_desc(irq); in request_any_context_irq()
2119 ret = request_threaded_irq(irq, NULL, handler, in request_any_context_irq()
2124 ret = request_irq(irq, handler, flags, name, dev_id); in request_any_context_irq()
2131 * @irq: Interrupt line to allocate
2132 * @handler: Function to be called when the IRQ occurs.
2139 * interrupt line and IRQ handling. It sets up the IRQ line
2142 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2155 int request_nmi(unsigned int irq, irq_handler_t handler, in request_nmi() argument
2163 if (irq == IRQ_NOTCONNECTED) in request_nmi()
2176 desc = irq_to_desc(irq); in request_nmi()
2197 retval = __setup_irq(irq, desc, action); in request_nmi()
2207 __cleanup_nmi(irq, desc); in request_nmi()
2224 void enable_percpu_irq(unsigned int irq, unsigned int type) in enable_percpu_irq() argument
2228 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq()
2247 WARN(1, "failed to set type for IRQ%d\n", irq); in enable_percpu_irq()
2258 void enable_percpu_nmi(unsigned int irq, unsigned int type) in enable_percpu_nmi() argument
2260 enable_percpu_irq(irq, type); in enable_percpu_nmi()
2264 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2265 * @irq: Linux irq number to check for
2270 bool irq_percpu_is_enabled(unsigned int irq) in irq_percpu_is_enabled() argument
2277 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in irq_percpu_is_enabled()
2288 void disable_percpu_irq(unsigned int irq) in disable_percpu_irq() argument
2292 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq()
2302 void disable_percpu_nmi(unsigned int irq) in disable_percpu_nmi() argument
2304 disable_percpu_irq(irq); in disable_percpu_nmi()
2310 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) in __free_percpu_irq() argument
2312 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq()
2316 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); in __free_percpu_irq()
2325 WARN(1, "Trying to free already-free IRQ %d\n", irq); in __free_percpu_irq()
2330 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", in __free_percpu_irq()
2331 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
2342 unregister_handler_proc(irq, action); in __free_percpu_irq()
2355 * @irq: Interrupt line to free
2360 void remove_percpu_irq(unsigned int irq, struct irqaction *act) in remove_percpu_irq() argument
2362 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq()
2365 __free_percpu_irq(irq, act->percpu_dev_id); in remove_percpu_irq()
2370 * @irq: Interrupt line to free
2376 * until any executing interrupts for this IRQ have completed.
2380 void free_percpu_irq(unsigned int irq, void __percpu *dev_id) in free_percpu_irq() argument
2382 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq()
2388 kfree(__free_percpu_irq(irq, dev_id)); in free_percpu_irq()
2393 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) in free_percpu_nmi() argument
2395 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_nmi()
2403 kfree(__free_percpu_irq(irq, dev_id)); in free_percpu_nmi()
2408 * @irq: Interrupt line to setup
2413 int setup_percpu_irq(unsigned int irq, struct irqaction *act) in setup_percpu_irq() argument
2415 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq()
2425 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
2435 * @irq: Interrupt line to allocate
2436 * @handler: Function to be called when the IRQ occurs.
2450 int __request_percpu_irq(unsigned int irq, irq_handler_t handler, in __request_percpu_irq() argument
2461 desc = irq_to_desc(irq); in __request_percpu_irq()
2484 retval = __setup_irq(irq, desc, action); in __request_percpu_irq()
2497 * @irq: Interrupt line to allocate
2498 * @handler: Function to be called when the IRQ occurs.
2516 int request_percpu_nmi(unsigned int irq, irq_handler_t handler, in request_percpu_nmi() argument
2527 desc = irq_to_desc(irq); in request_percpu_nmi()
2553 retval = __setup_irq(irq, desc, action); in request_percpu_nmi()
2573 * @irq: Interrupt line to prepare for NMI delivery
2584 int prepare_percpu_nmi(unsigned int irq) in prepare_percpu_nmi() argument
2592 desc = irq_get_desc_lock(irq, &flags, in prepare_percpu_nmi()
2598 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", in prepare_percpu_nmi()
2599 irq)) { in prepare_percpu_nmi()
2606 pr_err("Failed to setup NMI delivery: irq %u\n", irq); in prepare_percpu_nmi()
2616 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2617 * @irq: Interrupt line from which CPU local NMI configuration should be
2622 * IRQ line should not be enabled for the current CPU.
2627 void teardown_percpu_nmi(unsigned int irq) in teardown_percpu_nmi() argument
2634 desc = irq_get_desc_lock(irq, &flags, in teardown_percpu_nmi()
2673 * @irq: Interrupt line that is forwarded to a VM
2684 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, in irq_get_irqchip_state() argument
2692 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
2707 * @irq: Interrupt line that is forwarded to a VM
2717 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, in irq_set_irqchip_state() argument
2726 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()