Lines Matching +full:on +full:- +full:chip

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 * This file contains the core interrupt handling code, for irq-chip based
8 * Documentation/core-api/genericirq.rst
29 * Chained handlers should never call action on their IRQ. This default
37 * irq_set_chip - set the irq chip for an irq
39 * @chip: pointer to irq chip description structure
41 int irq_set_chip(unsigned int irq, struct irq_chip *chip) in irq_set_chip() argument
47 return -EINVAL; in irq_set_chip()
49 if (!chip) in irq_set_chip()
50 chip = &no_irq_chip; in irq_set_chip()
52 desc->irq_data.chip = chip; in irq_set_chip()
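A minimal sketch of how this setter is typically used from a controller driver; the foo_* names and callbacks are hypothetical, not taken from this file:

#include <linux/irq.h>

/* Hypothetical irq_chip; only mask/unmask are filled in for brevity. */
static void foo_mask(struct irq_data *d)   { /* write the mask register */ }
static void foo_unmask(struct irq_data *d) { /* clear the mask register */ }

static struct irq_chip foo_chip = {
	.name		= "foo",
	.irq_mask	= foo_mask,
	.irq_unmask	= foo_unmask,
};

static int foo_setup_one(unsigned int irq)
{
	int ret;

	/* NULL would fall back to no_irq_chip; an invalid irq returns -EINVAL. */
	ret = irq_set_chip(irq, &foo_chip);
	if (ret)
		return ret;

	irq_set_handler(irq, handle_level_irq);
	return 0;
}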
64 * irq_set_type - set the irq trigger type for an irq
66 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
75 return -EINVAL; in irq_set_irq_type()
84 * irq_set_handler_data - set irq handler data for an irq
96 return -EINVAL; in irq_set_handler_data()
97 desc->irq_common_data.handler_data = data; in irq_set_handler_data()
104 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
118 return -EINVAL; in irq_set_msi_desc_off()
119 desc->irq_common_data.msi_desc = entry; in irq_set_msi_desc_off()
121 entry->irq = irq_base; in irq_set_msi_desc_off()
127 * irq_set_msi_desc - set MSI descriptor data for an irq
139 * irq_set_chip_data - set irq chip data for an irq
141 * @data: Pointer to chip specific data
143 * Set the hardware irq chip data for an irq
151 return -EINVAL; in irq_set_chip_data()
152 desc->irq_data.chip_data = data; in irq_set_chip_data()
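Chip data stored here is what a chip callback later retrieves via irq_data_get_irq_chip_data(). A rough sketch, with hypothetical foo_* names and a made-up register layout:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>

/* Hypothetical per-controller state. */
struct foo_priv {
	void __iomem *base;
};

static void foo_mask_line(struct irq_data *d)
{
	struct foo_priv *priv = irq_data_get_irq_chip_data(d);

	/* Illustrative only: set the mask bit for hwirq in a made-up register. */
	writel(BIT(d->hwirq), priv->base + 0x10);
}

static int foo_attach_chip_data(unsigned int irq, struct foo_priv *priv)
{
	/* The stored pointer comes back via irq_data_get_irq_chip_data(). */
	return irq_set_chip_data(irq, priv);
}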
162 return desc ? &desc->irq_data : NULL; in irq_get_irq_data()
168 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); in irq_state_clr_disabled()
173 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); in irq_state_clr_masked()
178 irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_clr_started()
183 irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_set_started()
205 * Catch code which fiddles with enable_irq() on a managed in __irq_startup_managed()
207 * installment or irq auto probing should not happen on in __irq_startup_managed()
244 if (d->chip->irq_startup) { in __irq_startup()
245 ret = d->chip->irq_startup(d); in __irq_startup()
261 desc->depth = 0; in irq_startup()
306 if (irqd_is_started(&desc->irq_data)) { in irq_shutdown()
307 desc->depth = 1; in irq_shutdown()
308 if (desc->irq_data.chip->irq_shutdown) { in irq_shutdown()
309 desc->irq_data.chip->irq_shutdown(&desc->irq_data); in irq_shutdown()
329 irq_domain_deactivate_irq(&desc->irq_data); in irq_shutdown_and_deactivate()
334 if (!irqd_irq_disabled(&desc->irq_data)) { in irq_enable()
338 if (desc->irq_data.chip->irq_enable) { in irq_enable()
339 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_enable()
349 if (irqd_irq_disabled(&desc->irq_data)) { in __irq_disable()
354 if (desc->irq_data.chip->irq_disable) { in __irq_disable()
355 desc->irq_data.chip->irq_disable(&desc->irq_data); in __irq_disable()
364 * irq_disable - Mark interrupt disabled
367 * If the chip does not implement the irq_disable callback, we
376 * If the interrupt chip does not implement the irq_disable callback,
390 if (desc->irq_data.chip->irq_enable) in irq_percpu_enable()
391 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_percpu_enable()
393 desc->irq_data.chip->irq_unmask(&desc->irq_data); in irq_percpu_enable()
394 cpumask_set_cpu(cpu, desc->percpu_enabled); in irq_percpu_enable()
399 if (desc->irq_data.chip->irq_disable) in irq_percpu_disable()
400 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_percpu_disable()
402 desc->irq_data.chip->irq_mask(&desc->irq_data); in irq_percpu_disable()
403 cpumask_clear_cpu(cpu, desc->percpu_enabled); in irq_percpu_disable()
408 if (desc->irq_data.chip->irq_mask_ack) { in mask_ack_irq()
409 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); in mask_ack_irq()
413 if (desc->irq_data.chip->irq_ack) in mask_ack_irq()
414 desc->irq_data.chip->irq_ack(&desc->irq_data); in mask_ack_irq()
420 if (irqd_irq_masked(&desc->irq_data)) in mask_irq()
423 if (desc->irq_data.chip->irq_mask) { in mask_irq()
424 desc->irq_data.chip->irq_mask(&desc->irq_data); in mask_irq()
431 if (!irqd_irq_masked(&desc->irq_data)) in unmask_irq()
434 if (desc->irq_data.chip->irq_unmask) { in unmask_irq()
435 desc->irq_data.chip->irq_unmask(&desc->irq_data); in unmask_irq()
442 struct irq_chip *chip = desc->irq_data.chip; in unmask_threaded_irq() local
444 if (chip->flags & IRQCHIP_EOI_THREADED) in unmask_threaded_irq()
445 chip->irq_eoi(&desc->irq_data); in unmask_threaded_irq()
451 * handle_nested_irq - Handle a nested irq from an irq thread
466 raw_spin_lock_irq(&desc->lock); in handle_nested_irq()
468 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_nested_irq()
470 action = desc->action; in handle_nested_irq()
471 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { in handle_nested_irq()
472 desc->istate |= IRQS_PENDING; in handle_nested_irq()
477 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_nested_irq()
478 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
482 action_ret |= action->thread_fn(action->irq, action->dev_id); in handle_nested_irq()
487 raw_spin_lock_irq(&desc->lock); in handle_nested_irq()
488 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_nested_irq()
491 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
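handle_nested_irq() is the entry point used by threaded demultiplexers (GPIO expanders behind I2C/SPI are the classic case): the children are marked nested with irq_set_nested_thread() and the parent's thread handler replays them. A hedged sketch with hypothetical foo_* names:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct foo_expander {
	struct irq_domain *domain;
};

/* Child setup: nested handlers run from the parent's irq thread. */
static void foo_setup_child(unsigned int virq)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	irq_set_nested_thread(virq, 1);
}

/* Threaded handler of the slow-bus parent interrupt. */
static irqreturn_t foo_parent_thread(int irq, void *dev_id)
{
	struct foo_expander *exp = dev_id;
	unsigned long pending = 0;	/* would be read over the bus here */
	int bit;

	for_each_set_bit(bit, &pending, 16)
		handle_nested_irq(irq_find_mapping(exp->domain, bit));

	return IRQ_HANDLED;
}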
497 if (!(desc->istate & IRQS_POLL_INPROGRESS)) in irq_check_poll()
510 if (!irqd_has_set(&desc->irq_data, mask)) in irq_may_run()
522 * Handle a potential concurrent poll on a different core. in irq_may_run()
528 * handle_simple_irq - Simple and software-decoded IRQs.
540 raw_spin_lock(&desc->lock); in handle_simple_irq()
545 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_simple_irq()
547 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_simple_irq()
548 desc->istate |= IRQS_PENDING; in handle_simple_irq()
556 raw_spin_unlock(&desc->lock); in handle_simple_irq()
561 * handle_untracked_irq - Simple and software-decoded IRQs.
577 raw_spin_lock(&desc->lock); in handle_untracked_irq()
582 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_untracked_irq()
584 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_untracked_irq()
585 desc->istate |= IRQS_PENDING; in handle_untracked_irq()
589 desc->istate &= ~IRQS_PENDING; in handle_untracked_irq()
590 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
591 raw_spin_unlock(&desc->lock); in handle_untracked_irq()
595 raw_spin_lock(&desc->lock); in handle_untracked_irq()
596 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
599 raw_spin_unlock(&desc->lock); in handle_untracked_irq()
611 * - Standard level irq (IRQF_ONESHOT is not set) in cond_unmask_irq()
612 * - Oneshot irq which did not wake the thread (caused by a in cond_unmask_irq()
616 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_irq()
617 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) in cond_unmask_irq()
622 * handle_level_irq - Level type irq handler
632 raw_spin_lock(&desc->lock); in handle_level_irq()
638 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_level_irq()
644 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_level_irq()
645 desc->istate |= IRQS_PENDING; in handle_level_irq()
655 raw_spin_unlock(&desc->lock); in handle_level_irq()
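Flow handlers such as handle_level_irq()/handle_edge_irq() are normally installed from an irq_domain map (or alloc) callback, matched to the line's trigger type. A sketch reusing the hypothetical foo_chip from the earlier example:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* foo_chip: the hypothetical chip sketched earlier. */
static int foo_domain_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

/* Edge-triggered lines would install handle_edge_irq instead, typically
 * switched from the chip's ->irq_set_type() via irq_set_handler_locked(). */

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_domain_map,
	.xlate	= irq_domain_xlate_onetwocell,
};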
659 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) in cond_unmask_eoi_irq() argument
661 if (!(desc->istate & IRQS_ONESHOT)) { in cond_unmask_eoi_irq()
662 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
667 * - Oneshot irq which did not wake the thread (caused by a in cond_unmask_eoi_irq()
671 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_eoi_irq()
672 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { in cond_unmask_eoi_irq()
673 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
675 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { in cond_unmask_eoi_irq()
676 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
681 * handle_fasteoi_irq - irq handler for transparent controllers
684 * Only a single callback will be issued to the chip: an ->eoi()
691 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_irq() local
693 raw_spin_lock(&desc->lock); in handle_fasteoi_irq()
698 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_irq()
704 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_irq()
705 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
711 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_irq()
716 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_irq()
718 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
721 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in handle_fasteoi_irq()
722 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_irq()
723 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
728 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
731 * A simple NMI-safe handler, considering the restrictions
734 * Only a single callback will be issued to the chip: an ->eoi()
741 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_fasteoi_nmi() local
742 struct irqaction *action = desc->action; in handle_fasteoi_nmi()
752 res = action->handler(irq, action->dev_id); in handle_fasteoi_nmi()
755 if (chip->irq_eoi) in handle_fasteoi_nmi()
756 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_nmi()
761 * handle_edge_irq - edge type IRQ handler
764 * Interrupt occurs on the falling and/or rising edge of a hardware
767 * interrupt can happen on the same source even before the first one
769 * might be necessary to disable (mask) the interrupt depending on the
777 raw_spin_lock(&desc->lock); in handle_edge_irq()
779 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_irq()
782 desc->istate |= IRQS_PENDING; in handle_edge_irq()
791 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_irq()
792 desc->istate |= IRQS_PENDING; in handle_edge_irq()
800 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_edge_irq()
803 if (unlikely(!desc->action)) { in handle_edge_irq()
813 if (unlikely(desc->istate & IRQS_PENDING)) { in handle_edge_irq()
814 if (!irqd_irq_disabled(&desc->irq_data) && in handle_edge_irq()
815 irqd_irq_masked(&desc->irq_data)) in handle_edge_irq()
821 } while ((desc->istate & IRQS_PENDING) && in handle_edge_irq()
822 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_irq()
825 raw_spin_unlock(&desc->lock); in handle_edge_irq()
831 * handle_edge_eoi_irq - edge eoi type IRQ handler
839 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_edge_eoi_irq() local
841 raw_spin_lock(&desc->lock); in handle_edge_eoi_irq()
843 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_eoi_irq()
846 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
854 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_eoi_irq()
855 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
862 if (unlikely(!desc->action)) in handle_edge_eoi_irq()
867 } while ((desc->istate & IRQS_PENDING) && in handle_edge_eoi_irq()
868 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_eoi_irq()
871 chip->irq_eoi(&desc->irq_data); in handle_edge_eoi_irq()
872 raw_spin_unlock(&desc->lock); in handle_edge_eoi_irq()
877 * handle_percpu_irq - Per CPU local irq handler
880 * Per CPU interrupts on SMP machines without locking requirements
884 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_irq() local
888 * desc->tot_count. in handle_percpu_irq()
892 if (chip->irq_ack) in handle_percpu_irq()
893 chip->irq_ack(&desc->irq_data); in handle_percpu_irq()
897 if (chip->irq_eoi) in handle_percpu_irq()
898 chip->irq_eoi(&desc->irq_data); in handle_percpu_irq()
902 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
905 * Per CPU interrupts on SMP machines without locking requirements. Same as
908 * action->percpu_dev_id is a pointer to percpu variables which
909 * contain the real device id for the cpu on which this handler is
914 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_irq() local
915 struct irqaction *action = desc->action; in handle_percpu_devid_irq()
921 * desc->tot_count. in handle_percpu_devid_irq()
925 if (chip->irq_ack) in handle_percpu_devid_irq()
926 chip->irq_ack(&desc->irq_data); in handle_percpu_devid_irq()
930 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_irq()
934 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in handle_percpu_devid_irq()
939 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", in handle_percpu_devid_irq()
943 if (chip->irq_eoi) in handle_percpu_devid_irq()
944 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_irq()
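handle_percpu_devid_irq() hands the handler this CPU's slice of the percpu dev_id, so the usual consumer pairs request_percpu_irq() with enable_percpu_irq() on each CPU. A hedged sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct foo_pcpu {
	unsigned long count;
};

static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_state);

static irqreturn_t foo_pcpu_handler(int irq, void *dev_id)
{
	struct foo_pcpu *p = dev_id;	/* already this CPU's instance */

	p->count++;
	return IRQ_HANDLED;
}

static int foo_pcpu_init(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, foo_pcpu_handler, "foo-timer",
				 &foo_pcpu_state);
	if (ret)
		return ret;

	/* Usually called per CPU from a cpuhp online callback. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}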
948 * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
959 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_fasteoi_ipi() local
960 struct irqaction *action = desc->action; in handle_percpu_devid_fasteoi_ipi()
966 if (chip->irq_eoi) in handle_percpu_devid_fasteoi_ipi()
967 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_fasteoi_ipi()
970 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_fasteoi_ipi()
975 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
984 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_fasteoi_nmi() local
985 struct irqaction *action = desc->action; in handle_percpu_devid_fasteoi_nmi()
992 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_fasteoi_nmi()
995 if (chip->irq_eoi) in handle_percpu_devid_fasteoi_nmi()
996 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_fasteoi_nmi()
1006 struct irq_data *irq_data = &desc->irq_data; in __irq_do_set_handler()
1010 * situation where the outermost chip is not yet set in __irq_do_set_handler()
1016 if (irq_data->chip != &no_irq_chip) in __irq_do_set_handler()
1019 * Bail out if the outer chip is not set up in __irq_do_set_handler()
1026 irq_data = irq_data->parent_data; in __irq_do_set_handler()
1029 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) in __irq_do_set_handler()
1035 if (desc->irq_data.chip != &no_irq_chip) in __irq_do_set_handler()
1039 desc->action = NULL; in __irq_do_set_handler()
1040 desc->depth = 1; in __irq_do_set_handler()
1042 desc->handle_irq = handle; in __irq_do_set_handler()
1043 desc->name = name; in __irq_do_set_handler()
1046 unsigned int type = irqd_get_trigger_type(&desc->irq_data); in __irq_do_set_handler()
1058 desc->handle_irq = handle; in __irq_do_set_handler()
1064 desc->action = &chained_action; in __irq_do_set_handler()
1094 desc->irq_common_data.handler_data = data; in irq_set_chained_handler_and_data()
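The note near the top of the file, that chained handlers never call the action on their IRQ, is reflected in how a demultiplexer uses irq_set_chained_handler_and_data(): the chained flow handler brackets the parent with chained_irq_enter()/chained_irq_exit() and forwards each pending child with generic_handle_irq(). A sketch with hypothetical foo_* names and a made-up status register:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

struct foo_mux {
	struct irq_domain *domain;
	void __iomem *status;
};

static void foo_mux_handler(struct irq_desc *desc)
{
	struct foo_mux *mux = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	chained_irq_enter(chip, desc);		/* mask/ack the parent */

	pending = readl(mux->status);
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(irq_find_mapping(mux->domain, bit));

	chained_irq_exit(chip, desc);		/* eoi/unmask the parent */
}

static void foo_mux_init(unsigned int parent_irq, struct foo_mux *mux)
{
	irq_set_chained_handler_and_data(parent_irq, foo_mux_handler, mux);
}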
1102 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, in irq_set_chip_and_handler_name() argument
1105 irq_set_chip(irq, chip); in irq_set_chip_and_handler_name()
1119 * Warn when a driver sets the no autoenable flag on an already in irq_modify_status()
1122 WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); in irq_modify_status()
1126 trigger = irqd_get_trigger_type(&desc->irq_data); in irq_modify_status()
1128 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | in irq_modify_status()
1131 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in irq_modify_status()
1133 irqd_set(&desc->irq_data, IRQD_PER_CPU); in irq_modify_status()
1135 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); in irq_modify_status()
1137 irqd_set(&desc->irq_data, IRQD_LEVEL); in irq_modify_status()
1143 irqd_set(&desc->irq_data, trigger); in irq_modify_status()
1150 * irq_cpu_online - Invoke all irq_cpu_online functions.
1152 * Iterate through all irqs and invoke the chip.irq_cpu_online()
1158 struct irq_chip *chip; in irq_cpu_online() local
1167 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_online()
1169 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_online()
1170 if (chip && chip->irq_cpu_online && in irq_cpu_online()
1171 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || in irq_cpu_online()
1172 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_online()
1173 chip->irq_cpu_online(&desc->irq_data); in irq_cpu_online()
1175 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_online()
1180 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
1182 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
1188 struct irq_chip *chip; in irq_cpu_offline() local
1197 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_offline()
1199 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_offline()
1200 if (chip && chip->irq_cpu_offline && in irq_cpu_offline()
1201 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || in irq_cpu_offline()
1202 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_offline()
1203 chip->irq_cpu_offline(&desc->irq_data); in irq_cpu_offline()
1205 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_offline()
1213 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
1214 * stacked on transparent controllers
1219 * the irq_chip also needs to have its ->irq_ack() function
1224 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_ack_irq() local
1226 raw_spin_lock(&desc->lock); in handle_fasteoi_ack_irq()
1231 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_ack_irq()
1237 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_ack_irq()
1238 desc->istate |= IRQS_PENDING; in handle_fasteoi_ack_irq()
1244 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_ack_irq()
1248 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_fasteoi_ack_irq()
1252 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_ack_irq()
1254 raw_spin_unlock(&desc->lock); in handle_fasteoi_ack_irq()
1257 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in handle_fasteoi_ack_irq()
1258 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_ack_irq()
1259 raw_spin_unlock(&desc->lock); in handle_fasteoi_ack_irq()
1264 * handle_fasteoi_mask_irq - irq handler for level hierarchy
1265 * stacked on transparent controllers
1270 * the irq_chip also needs to have its ->irq_mask_ack() function
1275 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_mask_irq() local
1277 raw_spin_lock(&desc->lock); in handle_fasteoi_mask_irq()
1283 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_mask_irq()
1289 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_mask_irq()
1290 desc->istate |= IRQS_PENDING; in handle_fasteoi_mask_irq()
1296 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_mask_irq()
1301 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_mask_irq()
1303 raw_spin_unlock(&desc->lock); in handle_fasteoi_mask_irq()
1306 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in handle_fasteoi_mask_irq()
1307 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_mask_irq()
1308 raw_spin_unlock(&desc->lock); in handle_fasteoi_mask_irq()
1315 * irq_chip_set_parent_state - set the state of a parent interrupt.
1327 data = data->parent_data; in irq_chip_set_parent_state()
1329 if (!data || !data->chip->irq_set_irqchip_state) in irq_chip_set_parent_state()
1332 return data->chip->irq_set_irqchip_state(data, which, val); in irq_chip_set_parent_state()
1337 * irq_chip_get_parent_state - get the state of a parent interrupt.
1349 data = data->parent_data; in irq_chip_get_parent_state()
1351 if (!data || !data->chip->irq_get_irqchip_state) in irq_chip_get_parent_state()
1354 return data->chip->irq_get_irqchip_state(data, which, state); in irq_chip_get_parent_state()
1359 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1365 data = data->parent_data; in irq_chip_enable_parent()
1366 if (data->chip->irq_enable) in irq_chip_enable_parent()
1367 data->chip->irq_enable(data); in irq_chip_enable_parent()
1369 data->chip->irq_unmask(data); in irq_chip_enable_parent()
1374 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1380 data = data->parent_data; in irq_chip_disable_parent()
1381 if (data->chip->irq_disable) in irq_chip_disable_parent()
1382 data->chip->irq_disable(data); in irq_chip_disable_parent()
1384 data->chip->irq_mask(data); in irq_chip_disable_parent()
1389 * irq_chip_ack_parent - Acknowledge the parent interrupt
1394 data = data->parent_data; in irq_chip_ack_parent()
1395 data->chip->irq_ack(data); in irq_chip_ack_parent()
1400 * irq_chip_mask_parent - Mask the parent interrupt
1405 data = data->parent_data; in irq_chip_mask_parent()
1406 data->chip->irq_mask(data); in irq_chip_mask_parent()
1411 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1416 data = data->parent_data; in irq_chip_mask_ack_parent()
1417 data->chip->irq_mask_ack(data); in irq_chip_mask_ack_parent()
1422 * irq_chip_unmask_parent - Unmask the parent interrupt
1427 data = data->parent_data; in irq_chip_unmask_parent()
1428 data->chip->irq_unmask(data); in irq_chip_unmask_parent()
1433 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1438 data = data->parent_data; in irq_chip_eoi_parent()
1439 data->chip->irq_eoi(data); in irq_chip_eoi_parent()
1444 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1449 * Conditional, as the underlying parent chip might not implement it.
1454 data = data->parent_data; in irq_chip_set_affinity_parent()
1455 if (data->chip->irq_set_affinity) in irq_chip_set_affinity_parent()
1456 return data->chip->irq_set_affinity(data, dest, force); in irq_chip_set_affinity_parent()
1458 return -ENOSYS; in irq_chip_set_affinity_parent()
1463 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1465 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1467 * Conditional, as the underlying parent chip might not implement it.
1471 data = data->parent_data; in irq_chip_set_type_parent()
1473 if (data->chip->irq_set_type) in irq_chip_set_type_parent()
1474 return data->chip->irq_set_type(data, type); in irq_chip_set_type_parent()
1476 return -ENOSYS; in irq_chip_set_type_parent()
1481 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1489 for (data = data->parent_data; data; data = data->parent_data) in irq_chip_retrigger_hierarchy()
1490 if (data->chip && data->chip->irq_retrigger) in irq_chip_retrigger_hierarchy()
1491 return data->chip->irq_retrigger(data); in irq_chip_retrigger_hierarchy()
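These *_parent helpers exist so a chip stacked in a hierarchical domain can delegate most operations to its parent; a typical stacked chip simply wires them into its irq_chip. A sketch with a hypothetical name:

#include <linux/irq.h>

static struct irq_chip foo_stacked_chip = {
	.name			= "foo-stacked",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_ack		= irq_chip_ack_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
};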
1498 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1504 data = data->parent_data; in irq_chip_set_vcpu_affinity_parent()
1505 if (data->chip->irq_set_vcpu_affinity) in irq_chip_set_vcpu_affinity_parent()
1506 return data->chip->irq_set_vcpu_affinity(data, vcpu_info); in irq_chip_set_vcpu_affinity_parent()
1508 return -ENOSYS; in irq_chip_set_vcpu_affinity_parent()
1512 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1514 * @on: Whether to set or reset the wake-up capability of this irq
1516 * Conditional, as the underlying parent chip might not implement it.
1518 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) in irq_chip_set_wake_parent() argument
1520 data = data->parent_data; in irq_chip_set_wake_parent()
1522 if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) in irq_chip_set_wake_parent()
1525 if (data->chip->irq_set_wake) in irq_chip_set_wake_parent()
1526 return data->chip->irq_set_wake(data, on); in irq_chip_set_wake_parent()
1528 return -ENOSYS; in irq_chip_set_wake_parent()
1533 * irq_chip_request_resources_parent - Request resources on the parent interrupt
1538 data = data->parent_data; in irq_chip_request_resources_parent()
1540 if (data->chip->irq_request_resources) in irq_chip_request_resources_parent()
1541 return data->chip->irq_request_resources(data); in irq_chip_request_resources_parent()
1543 return -ENOSYS; in irq_chip_request_resources_parent()
1548 * irq_chip_release_resources_parent - Release resources on the parent interrupt
1553 data = data->parent_data; in irq_chip_release_resources_parent()
1554 if (data->chip->irq_release_resources) in irq_chip_release_resources_parent()
1555 data->chip->irq_release_resources(data); in irq_chip_release_resources_parent()
1561 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
1565 * For hierarchical domains we find the first chip in the hierarchy
1567 * hierarchical we use the top level chip.
1574 if (data->chip && data->chip->irq_compose_msi_msg) in irq_chip_compose_msi_msg()
1579 return -ENOSYS; in irq_chip_compose_msi_msg()
1581 pos->chip->irq_compose_msi_msg(pos, msg); in irq_chip_compose_msi_msg()
1586 * irq_chip_pm_get - Enable power for an IRQ chip
1589 * Enable the power to the IRQ chip referenced by the interrupt data
1596 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) { in irq_chip_pm_get()
1597 retval = pm_runtime_get_sync(data->chip->parent_device); in irq_chip_pm_get()
1599 pm_runtime_put_noidle(data->chip->parent_device); in irq_chip_pm_get()
1608 * irq_chip_pm_put - Disable power for an IRQ chip
1611 * Disable the power to the IRQ chip referenced by the interrupt data
1619 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) in irq_chip_pm_put()
1620 retval = pm_runtime_put(data->chip->parent_device); in irq_chip_pm_put()
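irq_chip_pm_get()/irq_chip_pm_put() are invoked by the core around request_irq()/free_irq(); on this kernel version a driver opts in by pointing chip->parent_device at its struct device so the chip's registers are powered whenever a handler is installed (later kernels moved this to the irq domain). A hedged probe-time sketch with hypothetical names:

#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static struct irq_chip foo_chip = {
	.name = "foo",
	/* mask/unmask/... callbacks omitted for brevity */
};

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* The irq core now does pm_runtime_get_sync()/pm_runtime_put()
	 * around request_irq()/free_irq() via irq_chip_pm_get()/pm_put(). */
	foo_chip.parent_device = &pdev->dev;

	return 0;
}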