// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <linux/random.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit warning if such thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	int ret = -EINVAL;

	scoped_irqdesc_get_and_lock(irq, 0) {
		/* A NULL chip means "no chip"; fall back to the dummy no_irq_chip */
		scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
		ret = 0;
	}
	/* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */
	if (!ret)
		irq_mark_irq(irq);
	return ret;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL)
		return __irq_set_trigger(scoped_irqdesc, type);
	return -EINVAL;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	scoped_irqdesc_get_and_lock(irq, 0) {
		scoped_irqdesc->irq_common_data.handler_data = data;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry)
{
	scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) {
		scoped_irqdesc->irq_common_data.msi_desc = entry;
		/* Only the base interrupt records itself in the descriptor */
		if (entry && !irq_offset)
			entry->irq = irq_base;
		return 0;
	}
	return -EINVAL;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	scoped_irqdesc_get_and_lock(irq, 0) {
		scoped_irqdesc->irq_data.chip_data = data;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (!cpumask_intersects(aff, cpu_online_mask)) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}

void irq_startup_managed(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	/*
	 * Clear managed-shutdown flag, so we don't repeat managed-startup for
	 * multiple hotplugs, and cause imbalanced disable depth.
	 */
	irqd_clr_managed_shutdown(d);

	/*
	 * Only start it up when the disable depth is 1, so that a disable,
	 * hotunplug, hotplug sequence does not end up enabling it during
	 * hotplug unconditionally.
	 */
	desc->depth--;
	if (!desc->depth)
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
}

#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);

	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			desc->depth = 1;
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		clear_irq_resend(desc);
		/*
		 * Increment disable depth, so that a managed shutdown on
		 * CPU hotunplug preserves the actual disabled state when the
		 * CPU comes back online. See irq_startup_managed().
		 */
		desc->depth++;

		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}


void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'.
This can 390 * be used for devices which cannot disable the interrupt at the 391 * device level under certain circumstances and have to use 392 * disable_irq[_nosync] instead. 393 */ 394 void irq_disable(struct irq_desc *desc) 395 { 396 __irq_disable(desc, irq_settings_disable_unlazy(desc)); 397 } 398 399 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) 400 { 401 if (desc->irq_data.chip->irq_enable) 402 desc->irq_data.chip->irq_enable(&desc->irq_data); 403 else 404 desc->irq_data.chip->irq_unmask(&desc->irq_data); 405 cpumask_set_cpu(cpu, desc->percpu_enabled); 406 } 407 408 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) 409 { 410 if (desc->irq_data.chip->irq_disable) 411 desc->irq_data.chip->irq_disable(&desc->irq_data); 412 else 413 desc->irq_data.chip->irq_mask(&desc->irq_data); 414 cpumask_clear_cpu(cpu, desc->percpu_enabled); 415 } 416 417 static inline void mask_ack_irq(struct irq_desc *desc) 418 { 419 if (desc->irq_data.chip->irq_mask_ack) { 420 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); 421 irq_state_set_masked(desc); 422 } else { 423 mask_irq(desc); 424 if (desc->irq_data.chip->irq_ack) 425 desc->irq_data.chip->irq_ack(&desc->irq_data); 426 } 427 } 428 429 void mask_irq(struct irq_desc *desc) 430 { 431 if (irqd_irq_masked(&desc->irq_data)) 432 return; 433 434 if (desc->irq_data.chip->irq_mask) { 435 desc->irq_data.chip->irq_mask(&desc->irq_data); 436 irq_state_set_masked(desc); 437 } 438 } 439 440 void unmask_irq(struct irq_desc *desc) 441 { 442 if (!irqd_irq_masked(&desc->irq_data)) 443 return; 444 445 if (desc->irq_data.chip->irq_unmask) { 446 desc->irq_data.chip->irq_unmask(&desc->irq_data); 447 irq_state_clr_masked(desc); 448 } 449 } 450 451 void unmask_threaded_irq(struct irq_desc *desc) 452 { 453 struct irq_chip *chip = desc->irq_data.chip; 454 455 if (chip->flags & IRQCHIP_EOI_THREADED) 456 chip->irq_eoi(&desc->irq_data); 457 458 unmask_irq(desc); 459 } 460 461 /* Busy wait until INPROGRESS is cleared */ 462 
static bool irq_wait_on_inprogress(struct irq_desc *desc)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		do {
			/* Drop the lock so the remote handler can make progress */
			raw_spin_unlock(&desc->lock);
			while (irqd_irq_inprogress(&desc->irq_data))
				cpu_relax();
			raw_spin_lock(&desc->lock);
		} while (irqd_irq_inprogress(&desc->irq_data));

		/* Might have been disabled in meantime */
		return !irqd_irq_disabled(&desc->irq_data) && desc->action;
	}
	return false;
}

static bool irq_can_handle_pm(struct irq_desc *desc)
{
	struct irq_data *irqd = &desc->irq_data;
	const struct cpumask *aff;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(irqd, IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (unlikely(irqd_has_set(irqd, IRQD_WAKEUP_ARMED))) {
		irq_pm_handle_wakeup(desc);
		return false;
	}

	/* Check whether the interrupt is polled on another CPU */
	if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) {
		if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
			      "irq poll in progress on cpu %d for irq %d\n",
			      smp_processor_id(), desc->irq_data.irq))
			return false;
		return irq_wait_on_inprogress(desc);
	}

	/* The below works only for single target interrupts */
	if (!IS_ENABLED(CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK) ||
	    !irqd_is_single_target(irqd) || desc->handle_irq != handle_edge_irq)
		return false;

	/*
	 * If the interrupt affinity was moved to this CPU and the
	 * interrupt is currently handled on the previous target CPU, then
	 * busy wait for INPROGRESS to be cleared. Otherwise for edge type
	 * interrupts the handler might get stuck on the previous target:
	 *
	 * CPU 0			CPU 1 (new target)
	 * handle_edge_irq()
	 * repeat:
	 *	handle_event()		handle_edge_irq()
	 *				if (INPROGRESS) {
	 *				  set(PENDING);
	 *				  mask();
	 *				  return;
	 *				}
	 * if (PENDING) {
	 *   clear(PENDING);
	 *   unmask();
	 *   goto repeat;
	 * }
	 *
	 * This happens when the device raises interrupts with a high rate
	 * and always before handle_event() completes and the CPU0 handler
	 * can clear INPROGRESS. This has been observed in virtual machines.
	 */
	aff = irq_data_get_effective_affinity_mask(irqd);
	if (cpumask_first(aff) != smp_processor_id())
		return false;
	return irq_wait_on_inprogress(desc);
}

static inline bool irq_can_handle_actions(struct irq_desc *desc)
{
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		return false;
	}
	return true;
}

static inline bool irq_can_handle(struct irq_desc *desc)
{
	if (!irq_can_handle_pm(desc))
		return false;

	return irq_can_handle_actions(desc);
}

/**
 * handle_nested_irq - Handle a nested irq from a irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling threads
 * context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	scoped_guard(raw_spinlock_irq, &desc->lock) {
		if (!irq_can_handle_actions(desc))
			return;

		action = desc->action;
		kstat_incr_irqs_this_cpu(desc);
		atomic_inc(&desc->threads_active);
	}

	/* Invoke the thread functions outside of desc->lock */
	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	wake_threads_waitq(desc);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control is
 * necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and unmask
 * issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);

	if (!irq_can_handle_pm(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		return;
	}

	if (!irq_can_handle_actions(desc))
		return;

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt handler
 * when the demultiplexer does not know which device in its multiplexed irq
 * domain generated the interrupt. IRQ's handled through here are not
 * subjected to stats tracking, randomness, or spurious interrupt
 * detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle the ack,
 * clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	scoped_guard(raw_spinlock, &desc->lock) {
		if (!irq_can_handle(desc))
			return;

		desc->istate &= ~IRQS_PENDING;
		irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	}

	/* Run the actions unlocked; INPROGRESS excludes concurrent handling */
	__handle_irq_event_percpu(desc);

	scoped_guard(raw_spinlock, &desc->lock)
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has the
 * active level. This may require to mask the interrupt and unmask it after
 * the associated handler has acknowledged the device, so the interrupt
 * line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_can_handle(desc))
		return;

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data)
{
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(data);
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi() call when
 * the interrupt has been serviced. This enables support for modern forms
 * of interrupt handlers, which handle the flow details in hardware,
 * transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	guard(raw_spinlock)(&desc->lock);

	/*
	 * When an affinity change races with IRQ handling, the next interrupt
	 * can arrive on the new CPU before the original CPU has completed
	 * handling the previous one - it may need to be resent.
	 */
	if (!irq_can_handle_pm(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	if (!irq_can_handle_actions(desc)) {
		mask_irq(desc);
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	/*
	 * When the race described above happens this will resend the interrupt.
	 */
	if (unlikely(desc->istate & IRQS_PENDING))
		check_irq_resend(desc, false);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware and
 * must be acked in order to be reenabled. After the ack another interrupt
 * can happen on the same source even before the first one is handled by
 * the associated event handler. If this happens it might be necessary to
 * disable (mask) the interrupt depending on the controller hardware. This
 * requires to reenable the interrupt inside of the loop which handles the
 * interrupts which have arrived while the handler was running. If all
 * pending interrupts are handled, the loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);

	if (!irq_can_handle(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			return;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data));
}
EXPORT_SYMBOL(handle_edge_irq);

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	/* Find the action whose affinity covers this CPU */
	for (action = desc->action; action; action = action->next)
		if (cpumask_test_cpu(cpu, action->affinity))
			break;

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	add_interrupt_randomness(irq);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained) {
			desc->action = NULL;
			irq_chip_pm_put(irq_desc_get_irq_data(desc));
		}
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		       const char *name)
{
	scoped_irqdesc_get_and_buslock(irq, 0)
		__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				      void *data)
{
	scoped_irqdesc_get_and_buslock(irq, 0) {
		struct irq_desc *desc = scoped_irqdesc;

		desc->irq_common_data.handler_data = data;
		__irq_do_set_handler(desc, handle, 1, NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	scoped_irqdesc_get_and_lock(irq, 0) {
		struct irq_desc *desc = scoped_irqdesc;
		unsigned long trigger, tmp;
		/*
		 * Warn when a driver sets the no autoenable flag on an already
		 * active interrupt.
		 */
		WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

		irq_settings_clr_and_set(desc, clr, set);

		trigger = irqd_get_trigger_type(&desc->irq_data);

		irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
			   IRQD_TRIGGER_MASK | IRQD_LEVEL);
		if (irq_settings_has_no_balance_set(desc))
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		if (irq_settings_is_per_cpu(desc))
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
		if (irq_settings_is_level(desc))
			irqd_set(&desc->irq_data, IRQD_LEVEL);

		tmp = irq_settings_get_trigger_mask(desc);
		if (tmp != IRQ_TYPE_NONE)
			trigger = tmp;

		irqd_set(&desc->irq_data, trigger);
	}
}
EXPORT_SYMBOL_GPL(irq_modify_status);

#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;

		if (!desc)
			continue;

		guard(raw_spinlock_irqsave)(&desc->lock);
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;

		if (!desc)
			continue;

		guard(raw_spinlock_irqsave)(&desc->lock);
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);
	}
}
#endif

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on
 *			    transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip
 * also needs to have its ->irq_ack() function called.
1139 */ 1140 void handle_fasteoi_ack_irq(struct irq_desc *desc) 1141 { 1142 struct irq_chip *chip = desc->irq_data.chip; 1143 1144 guard(raw_spinlock)(&desc->lock); 1145 1146 if (!irq_can_handle_pm(desc)) { 1147 cond_eoi_irq(chip, &desc->irq_data); 1148 return; 1149 } 1150 1151 if (unlikely(!irq_can_handle_actions(desc))) { 1152 mask_irq(desc); 1153 cond_eoi_irq(chip, &desc->irq_data); 1154 return; 1155 } 1156 1157 kstat_incr_irqs_this_cpu(desc); 1158 if (desc->istate & IRQS_ONESHOT) 1159 mask_irq(desc); 1160 1161 desc->irq_data.chip->irq_ack(&desc->irq_data); 1162 1163 handle_irq_event(desc); 1164 1165 cond_unmask_eoi_irq(desc, chip); 1166 } 1167 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); 1168 1169 /** 1170 * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on 1171 * transparent controllers 1172 * 1173 * @desc: the interrupt description structure for this irq 1174 * 1175 * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip 1176 * also needs to have its ->irq_mask_ack() function called. 1177 */ 1178 void handle_fasteoi_mask_irq(struct irq_desc *desc) 1179 { 1180 struct irq_chip *chip = desc->irq_data.chip; 1181 1182 guard(raw_spinlock)(&desc->lock); 1183 mask_ack_irq(desc); 1184 1185 if (!irq_can_handle(desc)) { 1186 cond_eoi_irq(chip, &desc->irq_data); 1187 return; 1188 } 1189 1190 kstat_incr_irqs_this_cpu(desc); 1191 1192 handle_irq_event(desc); 1193 1194 cond_unmask_eoi_irq(desc, chip); 1195 } 1196 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); 1197 1198 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ 1199 1200 #ifdef CONFIG_SMP 1201 void irq_chip_pre_redirect_parent(struct irq_data *data) 1202 { 1203 data = data->parent_data; 1204 data->chip->irq_pre_redirect(data); 1205 } 1206 EXPORT_SYMBOL_GPL(irq_chip_pre_redirect_parent); 1207 #endif 1208 1209 /** 1210 * irq_chip_set_parent_state - set the state of a parent interrupt. 
1211 * 1212 * @data: Pointer to interrupt specific data 1213 * @which: State to be restored (one of IRQCHIP_STATE_*) 1214 * @val: Value corresponding to @which 1215 * 1216 * Conditional success, if the underlying irqchip does not implement it. 1217 */ 1218 int irq_chip_set_parent_state(struct irq_data *data, 1219 enum irqchip_irq_state which, 1220 bool val) 1221 { 1222 data = data->parent_data; 1223 1224 if (!data || !data->chip->irq_set_irqchip_state) 1225 return 0; 1226 1227 return data->chip->irq_set_irqchip_state(data, which, val); 1228 } 1229 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); 1230 1231 /** 1232 * irq_chip_get_parent_state - get the state of a parent interrupt. 1233 * 1234 * @data: Pointer to interrupt specific data 1235 * @which: one of IRQCHIP_STATE_* the caller wants to know 1236 * @state: a pointer to a boolean where the state is to be stored 1237 * 1238 * Conditional success, if the underlying irqchip does not implement it. 1239 */ 1240 int irq_chip_get_parent_state(struct irq_data *data, 1241 enum irqchip_irq_state which, 1242 bool *state) 1243 { 1244 data = data->parent_data; 1245 1246 if (!data || !data->chip->irq_get_irqchip_state) 1247 return 0; 1248 1249 return data->chip->irq_get_irqchip_state(data, which, state); 1250 } 1251 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); 1252 1253 /** 1254 * irq_chip_shutdown_parent - Shutdown the parent interrupt 1255 * @data: Pointer to interrupt specific data 1256 * 1257 * Invokes the irq_shutdown() callback of the parent if available or falls 1258 * back to irq_chip_disable_parent(). 
1259 */ 1260 void irq_chip_shutdown_parent(struct irq_data *data) 1261 { 1262 struct irq_data *parent = data->parent_data; 1263 1264 if (parent->chip->irq_shutdown) 1265 parent->chip->irq_shutdown(parent); 1266 else 1267 irq_chip_disable_parent(data); 1268 } 1269 EXPORT_SYMBOL_GPL(irq_chip_shutdown_parent); 1270 1271 /** 1272 * irq_chip_startup_parent - Startup the parent interrupt 1273 * @data: Pointer to interrupt specific data 1274 * 1275 * Invokes the irq_startup() callback of the parent if available or falls 1276 * back to irq_chip_enable_parent(). 1277 */ 1278 unsigned int irq_chip_startup_parent(struct irq_data *data) 1279 { 1280 struct irq_data *parent = data->parent_data; 1281 1282 if (parent->chip->irq_startup) 1283 return parent->chip->irq_startup(parent); 1284 1285 irq_chip_enable_parent(data); 1286 return 0; 1287 } 1288 EXPORT_SYMBOL_GPL(irq_chip_startup_parent); 1289 1290 /** 1291 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if 1292 * NULL) 1293 * @data: Pointer to interrupt specific data 1294 */ 1295 void irq_chip_enable_parent(struct irq_data *data) 1296 { 1297 data = data->parent_data; 1298 if (data->chip->irq_enable) 1299 data->chip->irq_enable(data); 1300 else 1301 data->chip->irq_unmask(data); 1302 } 1303 EXPORT_SYMBOL_GPL(irq_chip_enable_parent); 1304 1305 /** 1306 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if 1307 * NULL) 1308 * @data: Pointer to interrupt specific data 1309 */ 1310 void irq_chip_disable_parent(struct irq_data *data) 1311 { 1312 data = data->parent_data; 1313 if (data->chip->irq_disable) 1314 data->chip->irq_disable(data); 1315 else 1316 data->chip->irq_mask(data); 1317 } 1318 EXPORT_SYMBOL_GPL(irq_chip_disable_parent); 1319 1320 /** 1321 * irq_chip_ack_parent - Acknowledge the parent interrupt 1322 * @data: Pointer to interrupt specific data 1323 */ 1324 void irq_chip_ack_parent(struct irq_data *data) 1325 { 1326 data = data->parent_data; 1327 
data->chip->irq_ack(data); 1328 } 1329 EXPORT_SYMBOL_GPL(irq_chip_ack_parent); 1330 1331 /** 1332 * irq_chip_mask_parent - Mask the parent interrupt 1333 * @data: Pointer to interrupt specific data 1334 */ 1335 void irq_chip_mask_parent(struct irq_data *data) 1336 { 1337 data = data->parent_data; 1338 data->chip->irq_mask(data); 1339 } 1340 EXPORT_SYMBOL_GPL(irq_chip_mask_parent); 1341 1342 /** 1343 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt 1344 * @data: Pointer to interrupt specific data 1345 */ 1346 void irq_chip_mask_ack_parent(struct irq_data *data) 1347 { 1348 data = data->parent_data; 1349 data->chip->irq_mask_ack(data); 1350 } 1351 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); 1352 1353 /** 1354 * irq_chip_unmask_parent - Unmask the parent interrupt 1355 * @data: Pointer to interrupt specific data 1356 */ 1357 void irq_chip_unmask_parent(struct irq_data *data) 1358 { 1359 data = data->parent_data; 1360 data->chip->irq_unmask(data); 1361 } 1362 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); 1363 1364 /** 1365 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt 1366 * @data: Pointer to interrupt specific data 1367 */ 1368 void irq_chip_eoi_parent(struct irq_data *data) 1369 { 1370 data = data->parent_data; 1371 data->chip->irq_eoi(data); 1372 } 1373 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); 1374 1375 /** 1376 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt 1377 * @data: Pointer to interrupt specific data 1378 * @dest: The affinity mask to set 1379 * @force: Flag to enforce setting (disable online checks) 1380 * 1381 * Conditional, as the underlying parent chip might not implement it. 
1382 */ 1383 int irq_chip_set_affinity_parent(struct irq_data *data, 1384 const struct cpumask *dest, bool force) 1385 { 1386 data = data->parent_data; 1387 if (data->chip->irq_set_affinity) 1388 return data->chip->irq_set_affinity(data, dest, force); 1389 1390 return -ENOSYS; 1391 } 1392 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); 1393 1394 /** 1395 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt 1396 * @data: Pointer to interrupt specific data 1397 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 1398 * 1399 * Conditional, as the underlying parent chip might not implement it. 1400 */ 1401 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) 1402 { 1403 data = data->parent_data; 1404 1405 if (data->chip->irq_set_type) 1406 return data->chip->irq_set_type(data, type); 1407 1408 return -ENOSYS; 1409 } 1410 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); 1411 1412 /** 1413 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1414 * @data: Pointer to interrupt specific data 1415 * 1416 * Iterate through the domain hierarchy of the interrupt and check 1417 * whether a hw retrigger function exists. If yes, invoke it. 
1418 */ 1419 int irq_chip_retrigger_hierarchy(struct irq_data *data) 1420 { 1421 for (data = data->parent_data; data; data = data->parent_data) 1422 if (data->chip && data->chip->irq_retrigger) 1423 return data->chip->irq_retrigger(data); 1424 1425 return 0; 1426 } 1427 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); 1428 1429 /** 1430 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt 1431 * @data: Pointer to interrupt specific data 1432 * @vcpu_info: The vcpu affinity information 1433 */ 1434 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) 1435 { 1436 data = data->parent_data; 1437 if (data->chip->irq_set_vcpu_affinity) 1438 return data->chip->irq_set_vcpu_affinity(data, vcpu_info); 1439 1440 return -ENOSYS; 1441 } 1442 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); 1443 /** 1444 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt 1445 * @data: Pointer to interrupt specific data 1446 * @on: Whether to set or reset the wake-up capability of this irq 1447 * 1448 * Conditional, as the underlying parent chip might not implement it. 
1449 */ 1450 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) 1451 { 1452 data = data->parent_data; 1453 1454 if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) 1455 return 0; 1456 1457 if (data->chip->irq_set_wake) 1458 return data->chip->irq_set_wake(data, on); 1459 1460 return -ENOSYS; 1461 } 1462 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); 1463 1464 /** 1465 * irq_chip_request_resources_parent - Request resources on the parent interrupt 1466 * @data: Pointer to interrupt specific data 1467 */ 1468 int irq_chip_request_resources_parent(struct irq_data *data) 1469 { 1470 data = data->parent_data; 1471 1472 if (data->chip->irq_request_resources) 1473 return data->chip->irq_request_resources(data); 1474 1475 /* no error on missing optional irq_chip::irq_request_resources */ 1476 return 0; 1477 } 1478 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); 1479 1480 /** 1481 * irq_chip_release_resources_parent - Release resources on the parent interrupt 1482 * @data: Pointer to interrupt specific data 1483 */ 1484 void irq_chip_release_resources_parent(struct irq_data *data) 1485 { 1486 data = data->parent_data; 1487 if (data->chip->irq_release_resources) 1488 data->chip->irq_release_resources(data); 1489 } 1490 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); 1491 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 1492 1493 #ifdef CONFIG_SMP 1494 int irq_chip_redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) 1495 { 1496 struct irq_redirect *redir = &irq_data_to_desc(data)->redirect; 1497 1498 WRITE_ONCE(redir->target_cpu, cpumask_first(dest)); 1499 irq_data_update_effective_affinity(data, dest); 1500 1501 return IRQ_SET_MASK_OK_DONE; 1502 } 1503 EXPORT_SYMBOL_GPL(irq_chip_redirect_set_affinity); 1504 #endif 1505 1506 /** 1507 * irq_chip_compose_msi_msg - Compose msi message for a irq chip 1508 * @data: Pointer to interrupt specific data 1509 * @msg: Pointer to the MSI message 1510 * 1511 * For hierarchical domains we 
find the first chip in the hierarchy 1512 * which implements the irq_compose_msi_msg callback. For non 1513 * hierarchical we use the top level chip. 1514 */ 1515 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 1516 { 1517 struct irq_data *pos; 1518 1519 for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { 1520 if (data->chip && data->chip->irq_compose_msi_msg) 1521 pos = data; 1522 } 1523 1524 if (!pos) 1525 return -ENOSYS; 1526 1527 pos->chip->irq_compose_msi_msg(pos, msg); 1528 return 0; 1529 } 1530 1531 static struct device *irq_get_pm_device(struct irq_data *data) 1532 { 1533 if (data->domain) 1534 return data->domain->pm_dev; 1535 1536 return NULL; 1537 } 1538 1539 /** 1540 * irq_chip_pm_get - Enable power for an IRQ chip 1541 * @data: Pointer to interrupt specific data 1542 * 1543 * Enable the power to the IRQ chip referenced by the interrupt data 1544 * structure. 1545 */ 1546 int irq_chip_pm_get(struct irq_data *data) 1547 { 1548 struct device *dev = irq_get_pm_device(data); 1549 int retval = 0; 1550 1551 if (IS_ENABLED(CONFIG_PM) && dev) 1552 retval = pm_runtime_resume_and_get(dev); 1553 1554 return retval; 1555 } 1556 1557 /** 1558 * irq_chip_pm_put - Drop a PM reference on an IRQ chip 1559 * @data: Pointer to interrupt specific data 1560 * 1561 * Drop a power management reference, acquired via irq_chip_pm_get(), on the IRQ 1562 * chip represented by the interrupt data structure. 1563 * 1564 * Note that this will not disable power to the IRQ chip until this function 1565 * has been called for all IRQs that have called irq_chip_pm_get() and it may 1566 * not disable power at all (if user space prevents that, for example). 1567 */ 1568 void irq_chip_pm_put(struct irq_data *data) 1569 { 1570 struct device *dev = irq_get_pm_device(data); 1571 1572 if (dev) 1573 pm_runtime_put(dev); 1574 } 1575