Lines matching "full:info" in the Xen event-channel core (drivers/xen/events/events_base.c); the number at the start of each match is its line number in that source file.

177 static bool (*pirq_needs_eoi)(struct irq_info *info);
250 /* Get info for IRQ */
259 static void set_info_for_irq(unsigned int irq, struct irq_info *info)
262 legacy_info_ptrs[irq] = info;
264 irq_set_chip_data(irq, info);
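The set_info_for_irq() matches above show two storage paths for the per-IRQ info pointer: legacy IRQs keep it in the static legacy_info_ptrs[] array, while all other IRQs carry it as chip data via irq_set_chip_data(). A minimal userspace sketch of that split; NR_LEGACY, the chip_data[] array, and the lookup body are stand-ins for the kernel's nr_legacy_irqs() and irq-descriptor storage:

    #include <stdio.h>

    #define NR_LEGACY 16   /* illustrative; the kernel asks nr_legacy_irqs() */

    struct irq_info { unsigned int irq; };

    /* Legacy IRQs live in a fixed table... */
    static struct irq_info *legacy_info_ptrs[NR_LEGACY];
    /* ...while dynamic IRQs hang their info off the irq descriptor; a
     * plain array stands in for irq_set_chip_data()/irq_get_chip_data(). */
    static struct irq_info *chip_data[256];

    static void set_info_for_irq(unsigned int irq, struct irq_info *info)
    {
        if (irq < NR_LEGACY)
            legacy_info_ptrs[irq] = info;
        else
            chip_data[irq] = info;
    }

    static struct irq_info *info_for_irq(unsigned int irq)
    {
        return irq < NR_LEGACY ? legacy_info_ptrs[irq] : chip_data[irq];
    }

    int main(void)
    {
        struct irq_info timer = { .irq = 0 }, dyn = { .irq = 42 };

        set_info_for_irq(0, &timer);
        set_info_for_irq(42, &dyn);
        printf("irq 42 -> info for irq %u\n", info_for_irq(42)->irq);
        return 0;
    }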
281 static void channels_on_cpu_dec(struct irq_info *info)
283 if (!info->is_accounted)
286 info->is_accounted = 0;
288 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
291 WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
294 static void channels_on_cpu_inc(struct irq_info *info)
296 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
299 if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
303 info->is_accounted = 1;
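channels_on_cpu_dec()/channels_on_cpu_inc() above maintain a saturating per-CPU channel count: atomic_add_unless() refuses an increment once the limit is reached and a decrement once the counter is already 0, and is_accounted ensures each channel is counted at most once. bind_evtchn_to_cpu() (lines 525-527 below) migrates a channel by pairing a dec on the old CPU with an inc on the new one. A sketch of the same pattern with C11 atomics in place of atomic_t; MAX_CHANNELS and the struct layout are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS      4
    #define MAX_CHANNELS 2          /* illustrative cap, not the kernel's */

    static atomic_int channels_on_cpu[NR_CPUS];

    /* Add 'a' to '*v' unless '*v' equals 'u'; mirrors the kernel's
     * atomic_add_unless() semantics. Returns true if the add happened. */
    static bool add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);
        while (c != u) {
            if (atomic_compare_exchange_weak(v, &c, c + a))
                return true;
        }
        return false;
    }

    struct channel { unsigned int cpu; bool is_accounted; };

    static void channel_account_inc(struct channel *ch)
    {
        if (add_unless(&channels_on_cpu[ch->cpu], 1, MAX_CHANNELS))
            ch->is_accounted = true;   /* counted exactly once */
    }

    static void channel_account_dec(struct channel *ch)
    {
        if (!ch->is_accounted)
            return;
        ch->is_accounted = false;
        add_unless(&channels_on_cpu[ch->cpu], -1, 0);  /* never below 0 */
    }

    int main(void)
    {
        struct channel a = { .cpu = 1 }, b = { .cpu = 1 };

        channel_account_inc(&a);
        channel_account_inc(&b);
        channel_account_dec(&a);
        printf("cpu1: %d channel(s)\n", atomic_load(&channels_on_cpu[1]));
        return 0;
    }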
315 struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
317 unsigned int irq = info->irq;
319 /* Remove the info pointer only now, with no potential users left. */
322 kfree(info);
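delayed_free_irq() above frees the info structure from RCU work: as the comment at line 319 notes, the lookup pointer is removed only once no potential users remain, and kfree() runs after a grace period so lockless readers never touch freed memory. A single-threaded sketch of just the ordering; the grace-period wait is reduced to a comment (userspace code could use liburcu's synchronize_rcu() there):

    #include <stdio.h>
    #include <stdlib.h>

    struct irq_info { unsigned int irq; };

    static struct irq_info *info_table[64];   /* lockless-read lookup table */

    static void free_irq_info(unsigned int irq)
    {
        struct irq_info *info = info_table[irq];

        /* 1. Unpublish: new readers can no longer find the object. */
        info_table[irq] = NULL;

        /* 2. Wait out readers that already hold the pointer. The kernel
         *    defers this via queue_rcu_work(); userspace could call
         *    liburcu's synchronize_rcu() here. */

        /* 3. Only now is it safe to reclaim. */
        free(info);
    }

    int main(void)
    {
        info_table[3] = malloc(sizeof(struct irq_info));
        info_table[3]->irq = 3;
        free_irq_info(3);
        printf("irq 3 entry: %p\n", (void *)info_table[3]);   /* (nil) */
        return 0;
    }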
328 static int xen_irq_info_common_setup(struct irq_info *info,
335 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
337 info->type = type;
338 info->evtchn = evtchn;
339 info->cpu = cpu;
340 info->mask_reason = EVT_MASK_REASON_EXPLICIT;
341 raw_spin_lock_init(&info->lock);
343 ret = set_evtchn_to_irq(evtchn, info->irq);
347 irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
352 static int xen_irq_info_evtchn_setup(struct irq_info *info,
358 ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
359 info->u.interdomain = dev;
366 static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
369 info->u.ipi = ipi;
371 per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
374 return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
377 static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
380 info->u.virq = virq;
382 per_cpu(virq_to_irq, cpu)[virq] = info->irq;
384 return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
387 static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
391 info->u.pirq.pirq = pirq;
392 info->u.pirq.gsi = gsi;
393 info->u.pirq.domid = domid;
394 info->u.pirq.flags = flags;
396 return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
399 static void xen_irq_info_cleanup(struct irq_info *info)
401 set_evtchn_to_irq(info->evtchn, -1);
402 xen_evtchn_port_remove(info->evtchn, info->cpu);
403 info->evtchn = 0;
404 channels_on_cpu_dec(info);
412 const struct irq_info *info = NULL;
415 info = info_for_irq(irq);
416 if (!info)
419 return info->evtchn;
424 struct irq_info *info = evtchn_to_info(evtchn);
426 return info ? info->irq : -1;
440 static enum ipi_vector ipi_from_irq(struct irq_info *info)
442 BUG_ON(info == NULL);
443 BUG_ON(info->type != IRQT_IPI);
445 return info->u.ipi;
448 static unsigned int virq_from_irq(struct irq_info *info)
450 BUG_ON(info == NULL);
451 BUG_ON(info->type != IRQT_VIRQ);
453 return info->u.virq;
456 static unsigned int pirq_from_irq(struct irq_info *info)
458 BUG_ON(info == NULL);
459 BUG_ON(info->type != IRQT_PIRQ);
461 return info->u.pirq.pirq;
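Taken together, the xen_irq_info_*_setup() helpers and the *_from_irq() accessors above describe a tagged union: info->type selects which member of info->u is live, and every accessor BUG_ON()s a mismatched tag before touching the union. A condensed userspace model with assert() in place of BUG_ON; the field set is trimmed to what the matches show:

    #include <assert.h>
    #include <stdio.h>

    enum xen_irq_type { IRQT_UNBOUND, IRQT_EVTCHN, IRQT_IPI, IRQT_VIRQ, IRQT_PIRQ };

    struct irq_info {
        enum xen_irq_type type;   /* tag: selects the active union member */
        unsigned int evtchn;      /* common: the bound event channel port */
        union {
            unsigned int ipi;     /* IRQT_IPI */
            unsigned int virq;    /* IRQT_VIRQ */
            struct {
                unsigned int pirq, gsi;   /* IRQT_PIRQ */
            } pirq;
        } u;
    };

    /* Accessors check the tag before touching the union, as the
     * BUG_ON(info->type != ...) lines above do in the kernel. */
    static unsigned int virq_from_irq(const struct irq_info *info)
    {
        assert(info && info->type == IRQT_VIRQ);
        return info->u.virq;
    }

    static unsigned int pirq_from_irq(const struct irq_info *info)
    {
        assert(info && info->type == IRQT_PIRQ);
        return info->u.pirq.pirq;
    }

    int main(void)
    {
        struct irq_info timer = { .type = IRQT_VIRQ, .u.virq = 0 };

        printf("virq %u\n", virq_from_irq(&timer));
        (void)pirq_from_irq;   /* would assert on &timer: wrong tag */
        return 0;
    }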
466 struct irq_info *info = evtchn_to_info(evtchn);
468 return info ? info->cpu : 0;
471 static void do_mask(struct irq_info *info, u8 reason)
475 raw_spin_lock_irqsave(&info->lock, flags);
477 if (!info->mask_reason)
478 mask_evtchn(info->evtchn);
480 info->mask_reason |= reason;
482 raw_spin_unlock_irqrestore(&info->lock, flags);
485 static void do_unmask(struct irq_info *info, u8 reason)
489 raw_spin_lock_irqsave(&info->lock, flags);
491 info->mask_reason &= ~reason;
493 if (!info->mask_reason)
494 unmask_evtchn(info->evtchn);
496 raw_spin_unlock_irqrestore(&info->lock, flags);
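do_mask()/do_unmask() above layer reason bits over the single event-channel mask: the port is masked when the first reason appears and unmasked only when the last reason clears, all under info->lock. That is what lets the retrigger path (lines 1909/1911 below) pulse EVT_MASK_REASON_TEMPORARY without disturbing an explicit mask. A userspace sketch with a pthread mutex standing in for the raw spinlock; the reason values here are illustrative bit assignments:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative reason bits; the kernel defines its own values. */
    #define REASON_EXPLICIT    0x01
    #define REASON_TEMPORARY   0x02
    #define REASON_EOI_PENDING 0x04

    struct channel {
        pthread_mutex_t lock;
        uint8_t mask_reason;  /* why the port is masked, as a bitmask */
        bool hw_masked;       /* stands in for the event-channel mask bit */
    };

    static void do_mask(struct channel *ch, uint8_t reason)
    {
        pthread_mutex_lock(&ch->lock);
        if (!ch->mask_reason)          /* first reason: mask the port */
            ch->hw_masked = true;
        ch->mask_reason |= reason;
        pthread_mutex_unlock(&ch->lock);
    }

    static void do_unmask(struct channel *ch, uint8_t reason)
    {
        pthread_mutex_lock(&ch->lock);
        ch->mask_reason &= ~reason;
        if (!ch->mask_reason)          /* last reason gone: unmask */
            ch->hw_masked = false;
        pthread_mutex_unlock(&ch->lock);
    }

    int main(void)
    {
        struct channel ch = { .lock = PTHREAD_MUTEX_INITIALIZER };

        do_mask(&ch, REASON_EXPLICIT);
        do_mask(&ch, REASON_EOI_PENDING);
        do_unmask(&ch, REASON_EXPLICIT);      /* still masked: EOI pending */
        printf("masked=%d\n", ch.hw_masked);  /* 1 */
        do_unmask(&ch, REASON_EOI_PENDING);
        printf("masked=%d\n", ch.hw_masked);  /* 0 */
        return 0;
    }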
500 static bool pirq_check_eoi_map(struct irq_info *info)
502 return test_bit(pirq_from_irq(info), pirq_eoi_map);
506 static bool pirq_needs_eoi_flag(struct irq_info *info)
508 BUG_ON(info->type != IRQT_PIRQ);
510 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
513 static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
517 struct irq_data *data = irq_get_irq_data(info->irq);
523 xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
525 channels_on_cpu_dec(info);
526 info->cpu = cpu;
527 channels_on_cpu_inc(info);
555 static void lateeoi_list_del(struct irq_info *info)
557 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
561 list_del_init(&info->eoi_list);
565 static void lateeoi_list_add(struct irq_info *info)
567 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
573 if (now < info->eoi_time)
574 delay = info->eoi_time - now;
582 if (!elem || info->eoi_time < elem->eoi_time) {
583 list_add(&info->eoi_list, &eoi->eoi_list);
584 mod_delayed_work_on(info->eoi_cpu, system_wq,
588 if (elem->eoi_time <= info->eoi_time)
591 list_add(&info->eoi_list, &elem->eoi_list);
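lateeoi_list_add() above keeps each CPU's deferred-EOI list sorted by eoi_time, re-arming the delayed work only when the new entry becomes the earliest deadline. A minimal sketch of that ordered insert using plain pointers instead of list_head (the kernel walks the list in reverse; a printf stands in for mod_delayed_work_on()):

    #include <stdint.h>
    #include <stdio.h>

    struct lateeoi_entry {
        uint64_t eoi_time;            /* absolute deadline, e.g. jiffies */
        struct lateeoi_entry *next;
    };

    /* Insert keeping ascending eoi_time; only a new head re-arms the
     * timer, as mod_delayed_work_on() does in the matches above. */
    static void lateeoi_add(struct lateeoi_entry **head, struct lateeoi_entry *e)
    {
        if (!*head || e->eoi_time < (*head)->eoi_time) {
            e->next = *head;
            *head = e;
            printf("re-arm delayed work for t=%llu\n",
                   (unsigned long long)e->eoi_time);
            return;
        }
        struct lateeoi_entry *pos = *head;
        while (pos->next && pos->next->eoi_time <= e->eoi_time)
            pos = pos->next;
        e->next = pos->next;
        pos->next = e;                /* timer already armed for the head */
    }

    int main(void)
    {
        struct lateeoi_entry a = { .eoi_time = 30 }, b = { .eoi_time = 10 },
                             c = { .eoi_time = 20 };
        struct lateeoi_entry *head = NULL;

        lateeoi_add(&head, &a);       /* re-arms: first entry */
        lateeoi_add(&head, &b);       /* re-arms: new earliest deadline */
        lateeoi_add(&head, &c);       /* middle insert, no re-arm */
        for (struct lateeoi_entry *p = head; p; p = p->next)
            printf("t=%llu\n", (unsigned long long)p->eoi_time);
        return 0;
    }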
597 static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
603 evtchn = info->evtchn;
604 if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
608 struct xenbus_device *dev = info->u.interdomain;
614 if ((1 << info->spurious_cnt) < (HZ << 2)) {
615 if (info->spurious_cnt != 0xFF)
616 info->spurious_cnt++;
618 if (info->spurious_cnt > threshold) {
619 delay = 1 << (info->spurious_cnt - 1 - threshold);
622 if (!info->eoi_time)
623 info->eoi_cpu = smp_processor_id();
624 info->eoi_time = get_jiffies_64() + delay;
631 info->spurious_cnt = 0;
634 cpu = info->eoi_cpu;
635 if (info->eoi_time &&
636 (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
637 lateeoi_list_add(info);
641 info->eoi_time = 0;
644 smp_store_release(&info->is_active, 0);
645 do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
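The spurious-event branch of xen_irq_lateeoi_locked() above implements exponential backoff: spurious_cnt stops growing once the implied delay reaches four seconds' worth of jiffies (and saturates at 0xFF regardless), and beyond a threshold, taken from the interdomain device at line 608 when one exists, the port is kept delayed for 1 << (spurious_cnt - 1 - threshold) jiffies. A small sketch of just that arithmetic; HZ and the threshold of 1 are placeholders:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 250   /* placeholder tick rate; configuration-dependent */

    /* Returns the delay in jiffies before re-enabling a port that keeps
     * raising spurious events; mirrors the arithmetic shown above. */
    static unsigned int spurious_delay(uint8_t *spurious_cnt,
                                       unsigned int threshold)
    {
        /* Stop counting once the delay reaches ~4s; saturate at 0xFF. */
        if ((1u << *spurious_cnt) < (HZ << 2)) {
            if (*spurious_cnt != 0xFF)
                (*spurious_cnt)++;
        }
        if (*spurious_cnt > threshold)
            return 1u << (*spurious_cnt - 1 - threshold);
        return 0;   /* below threshold: no delay yet */
    }

    int main(void)
    {
        uint8_t cnt = 0;

        /* Delay doubles with each consecutive spurious event. */
        for (int i = 0; i < 6; i++)
            printf("event %d: delay %u jiffies\n",
                   i, spurious_delay(&cnt, 1));
        return 0;
    }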
651 struct irq_info *info;
662 info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
665 if (info == NULL)
668 if (now < info->eoi_time) {
669 mod_delayed_work_on(info->eoi_cpu, system_wq,
671 info->eoi_time - now);
675 list_del_init(&info->eoi_list);
679 info->eoi_time = 0;
681 xen_irq_lateeoi_locked(info, false);
700 struct irq_info *info;
704 info = info_for_irq(irq);
706 if (info)
707 xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
715 struct irq_info *info;
717 info = kzalloc(sizeof(*info), GFP_KERNEL);
718 if (info) {
719 info->irq = irq;
720 info->type = IRQT_UNBOUND;
721 info->refcnt = -1;
722 INIT_RCU_WORK(&info->rwork, delayed_free_irq);
724 set_info_for_irq(irq, info);
725 INIT_LIST_HEAD(&info->eoi_list);
726 list_add_tail(&info->list, &xen_irq_list_head);
729 return info;
735 struct irq_info *info = NULL;
738 info = xen_irq_init(irq);
739 if (!info)
743 return info;
749 struct irq_info *info;
766 info = xen_irq_init(irq);
767 if (!info)
770 return info;
773 static void xen_free_irq(struct irq_info *info)
775 if (WARN_ON(!info))
778 if (!list_empty(&info->eoi_list))
779 lateeoi_list_del(info);
781 list_del(&info->list);
783 WARN_ON(info->refcnt > 0);
785 queue_rcu_work(system_wq, &info->rwork);
789 static void event_handler_exit(struct irq_info *info)
791 smp_store_release(&info->is_active, 0);
792 clear_evtchn(info->evtchn);
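event_handler_exit() above is one half of a claim/release protocol on is_active: the upcall claims the channel with xchg_acquire(&info->is_active, 1) (line 1676 below), and this release store publishes the handler's writes before the flag clears, so a re-raised event never observes stale handler state. A C11 sketch of the handshake:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uchar is_active;

    /* Upcall side: claim the channel; a second event arriving while the
     * handler runs sees is_active already set and is dropped/deferred. */
    static bool handler_enter(void)
    {
        return atomic_exchange_explicit(&is_active, 1,
                                        memory_order_acquire) == 0;
    }

    /* Handler exit: publish all handler writes before clearing the flag.
     * The kernel also clears the pending bit here (clear_evtchn). */
    static void handler_exit(void)
    {
        atomic_store_explicit(&is_active, 0, memory_order_release);
    }

    int main(void)
    {
        printf("first event claimed: %d\n", handler_enter());   /* 1 */
        printf("nested event claimed: %d\n", handler_enter());  /* 0 */
        handler_exit();
        printf("next event claimed: %d\n", handler_enter());    /* 1 */
        return 0;
    }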
795 static void pirq_query_unmask(struct irq_info *info)
799 irq_status.irq = pirq_from_irq(info);
803 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
805 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
808 static void do_eoi_pirq(struct irq_info *info)
810 struct physdev_eoi eoi = { .irq = pirq_from_irq(info) };
813 if (!VALID_EVTCHN(info->evtchn))
816 event_handler_exit(info);
818 if (pirq_needs_eoi(info)) {
826 struct irq_info *info = info_for_irq(data->irq);
828 do_eoi_pirq(info);
831 static void do_disable_dynirq(struct irq_info *info)
833 if (VALID_EVTCHN(info->evtchn))
834 do_mask(info, EVT_MASK_REASON_EXPLICIT);
839 struct irq_info *info = info_for_irq(data->irq);
841 if (info)
842 do_disable_dynirq(info);
847 struct irq_info *info = info_for_irq(data->irq);
849 if (info) {
850 do_disable_dynirq(info);
851 do_eoi_pirq(info);
855 static unsigned int __startup_pirq(struct irq_info *info)
858 evtchn_port_t evtchn = info->evtchn;
864 bind_pirq.pirq = pirq_from_irq(info);
866 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
870 pr_warn("Failed to obtain physical IRQ %d\n", info->irq);
875 pirq_query_unmask(info);
877 rc = set_evtchn_to_irq(evtchn, info->irq);
881 info->evtchn = evtchn;
882 bind_evtchn_to_cpu(info, 0, false);
889 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
891 do_eoi_pirq(info);
896 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq,
904 struct irq_info *info = info_for_irq(data->irq);
906 return __startup_pirq(info);
911 struct irq_info *info = info_for_irq(data->irq);
912 evtchn_port_t evtchn = info->evtchn;
914 BUG_ON(info->type != IRQT_PIRQ);
919 do_mask(info, EVT_MASK_REASON_EXPLICIT);
920 xen_irq_info_cleanup(info);
936 struct irq_info *info;
938 list_for_each_entry(info, &xen_irq_list_head, list) {
939 if (info->type != IRQT_PIRQ)
942 if (info->u.pirq.gsi == gsi)
943 return info->irq;
950 static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
955 if (!info) {
960 if (info->refcnt > 0) {
961 info->refcnt--;
962 if (info->refcnt != 0)
966 evtchn = info->evtchn;
969 unsigned int cpu = info->cpu;
972 if (!info->is_static)
975 switch (info->type) {
977 per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
980 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
981 per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
984 dev = info->u.interdomain;
992 xen_irq_info_cleanup(info);
998 xen_free_irq(info);
1014 struct irq_info *info;
1027 info = xen_allocate_irq_gsi(gsi);
1028 if (!info)
1031 irq_op.irq = info->irq;
1039 xen_free_irq(info);
1044 ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
1047 __unbind_from_irq(info, info->irq);
1051 pirq_query_unmask(info);
1068 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1071 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1074 ret = info->irq;
1101 struct irq_info *info;
1110 info = xen_irq_init(irq + i);
1111 if (!info) {
1118 ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
1133 info = info_for_irq(irq + nvec);
1134 __unbind_from_irq(info, irq + nvec);
1144 struct irq_info *info = info_for_irq(irq);
1154 if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
1155 unmap_irq.pirq = info->u.pirq.pirq;
1156 unmap_irq.domid = info->u.pirq.domid;
1162 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
1164 info->u.pirq.domid, info->u.pirq.pirq);
1171 xen_free_irq(info);
1180 struct irq_info *info = info_for_irq(irq);
1182 return pirq_from_irq(info);
1190 struct irq_info *info;
1197 info = evtchn_to_info(evtchn);
1199 if (!info) {
1200 info = xen_allocate_irq_dynamic();
1201 if (!info)
1204 irq_set_chip_and_handler_name(info->irq, chip,
1207 ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
1209 __unbind_from_irq(info, info->irq);
1219 bind_evtchn_to_cpu(info, 0, false);
1220 } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
1221 if (shared && !WARN_ON(info->refcnt < 0))
1222 info->refcnt++;
1225 ret = info->irq;
1249 struct irq_info *info;
1257 info = xen_allocate_irq_dynamic();
1258 if (!info)
1261 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1270 ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
1272 __unbind_from_irq(info, info->irq);
1279 bind_evtchn_to_cpu(info, cpu, true);
1280 ret = info->irq;
1282 info = info_for_irq(ret);
1283 WARN_ON(info == NULL || info->type != IRQT_IPI);
1357 struct irq_info *info;
1365 info = xen_allocate_irq_dynamic();
1366 if (!info)
1370 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1373 irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
1388 ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1390 __unbind_from_irq(info, info->irq);
1398 bind_evtchn_to_cpu(info, cpu, percpu);
1399 ret = info->irq;
1401 info = info_for_irq(ret);
1402 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1413 struct irq_info *info;
1416 info = info_for_irq(irq);
1417 __unbind_from_irq(info, irq);
1542 struct irq_info *info = info_for_irq(irq);
1544 if (WARN_ON(!info))
1570 struct irq_info *info = evtchn_to_info(evtchn);
1572 if (!info)
1575 WARN_ON(info->refcnt != -1);
1577 info->refcnt = 1;
1578 info->is_static = is_static;
1586 struct irq_info *info;
1594 info = evtchn_to_info(evtchn);
1596 if (!info)
1600 if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
1603 info->refcnt++;
1614 struct irq_info *info = evtchn_to_info(evtchn);
1616 if (WARN_ON(!info))
1618 unbind_from_irq(info->irq);
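The refcnt matches collected above encode three states: -1 (set at allocation, line 721) means the binding is not refcounted; evtchn_make_refcounted() moves it to 1 (line 1577); gets then increment unless the count is unowned (<= 0) or saturated at SHRT_MAX (line 1600); and __unbind_from_irq() (lines 960-962) tears the binding down only once the count falls to 0. A compact sketch of those state rules; the function names here are illustrative:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct binding { short refcnt; };   /* -1: not refcounted */

    static void make_refcounted(struct binding *b)
    {
        if (b->refcnt == -1)     /* kernel WARNs if already refcounted */
            b->refcnt = 1;
    }

    static bool binding_get(struct binding *b)
    {
        if (b->refcnt <= 0 || b->refcnt == SHRT_MAX)
            return false;        /* unowned, unbound, or saturated */
        b->refcnt++;
        return true;
    }

    /* Returns true when the last reference is dropped and the caller
     * should actually unbind, as __unbind_from_irq() does at refcnt 0. */
    static bool binding_put(struct binding *b)
    {
        if (b->refcnt > 0) {
            b->refcnt--;
            return b->refcnt == 0;
        }
        return true;             /* not refcounted: unbind immediately */
    }

    int main(void)
    {
        struct binding b = { .refcnt = -1 };

        make_refcounted(&b);
        binding_get(&b);                            /* refcnt 2 */
        printf("teardown? %d\n", binding_put(&b));  /* 0: still referenced */
        printf("teardown? %d\n", binding_put(&b));  /* 1: last ref gone */
        return 0;
    }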
1648 struct irq_info *info = evtchn_to_info(port);
1651 if (!info)
1676 if (xchg_acquire(&info->is_active, 1))
1679 dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
1684 info->eoi_cpu = smp_processor_id();
1685 info->irq_epoch = __this_cpu_read(irq_epoch);
1686 info->eoi_time = get_jiffies_64() + event_eoi_delay;
1689 generic_handle_irq(info->irq);
1735 struct irq_info *info = info_for_irq(irq);
1737 if (WARN_ON(!info))
1750 BUG_ON(info->type == IRQT_UNBOUND);
1752 info->irq = irq;
1753 (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
1757 bind_evtchn_to_cpu(info, info->cpu, false);
1764 static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
1767 evtchn_port_t evtchn = info ? info->evtchn : 0;
1783 do_mask(info, EVT_MASK_REASON_TEMPORARY);
1791 bind_evtchn_to_cpu(info, tcpu, false);
1793 do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1841 struct irq_info *info = info_for_irq(data->irq);
1842 evtchn_port_t evtchn = info ? info->evtchn : 0;
1845 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
1848 static void do_ack_dynirq(struct irq_info *info)
1850 evtchn_port_t evtchn = info->evtchn;
1853 event_handler_exit(info);
1858 struct irq_info *info = info_for_irq(data->irq);
1860 if (info)
1861 do_ack_dynirq(info);
1866 struct irq_info *info = info_for_irq(data->irq);
1868 if (info) {
1869 do_disable_dynirq(info);
1870 do_ack_dynirq(info);
1876 struct irq_info *info = info_for_irq(data->irq);
1877 evtchn_port_t evtchn = info ? info->evtchn : 0;
1880 do_mask(info, EVT_MASK_REASON_EOI_PENDING);
1892 struct irq_info *info = info_for_irq(data->irq);
1893 evtchn_port_t evtchn = info ? info->evtchn : 0;
1896 do_mask(info, EVT_MASK_REASON_EXPLICIT);
1897 event_handler_exit(info);
1903 struct irq_info *info = info_for_irq(data->irq);
1904 evtchn_port_t evtchn = info ? info->evtchn : 0;
1909 do_mask(info, EVT_MASK_REASON_TEMPORARY);
1911 do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1920 struct irq_info *info;
1922 list_for_each_entry(info, &xen_irq_list_head, list) {
1923 if (info->type != IRQT_PIRQ)
1926 pirq = info->u.pirq.pirq;
1927 gsi = info->u.pirq.gsi;
1928 irq = info->irq;
1944 xen_free_irq(info);
1950 __startup_pirq(info);
1958 struct irq_info *info;
1964 info = info_for_irq(irq);
1966 BUG_ON(virq_from_irq(info) != virq);
1977 xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1979 bind_evtchn_to_cpu(info, cpu, false);
1987 struct irq_info *info;
1993 info = info_for_irq(irq);
1995 BUG_ON(ipi_from_irq(info) != ipi);
2005 xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
2007 bind_evtchn_to_cpu(info, cpu, false);
2014 struct irq_info *info = info_for_irq(irq);
2015 evtchn_port_t evtchn = info ? info->evtchn : 0;
2018 event_handler_exit(info);
2061 struct irq_info *info = info_for_irq(irq);
2064 if (WARN_ON(!info))
2067 irq_status.irq = info->u.pirq.pirq;
2078 struct irq_info *info;
2084 list_for_each_entry(info, &xen_irq_list_head, list) {
2086 info->evtchn = 0;
2088 channels_on_cpu_dec(info);