Lines Matching +full:0 +full:xd

44 #define DBG_VERBOSE(fmt...)	do { } while(0)
78 * or 0 if there is no new entry.
87 return 0; in xive_read_eq()
92 return 0; in xive_read_eq()
100 if (q->idx == 0) in xive_read_eq()
104 return cur & 0x7fffffff; in xive_read_eq()
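The fragments above come from the event-queue consumer. Each 32-bit EQ entry carries a generation ("toggle") bit in bit 31 whose expected polarity flips every time the index wraps back to 0, so leftover entries from the previous pass around the ring are rejected; the low 31 bits hold the interrupt number. A minimal sketch of such a consumer, assuming a struct xive_q with the qpage, idx, msk and toggle fields visible in the fragments:

static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
        u32 cur;

        if (!q->qpage)
                return 0;
        cur = be32_to_cpup(q->qpage + q->idx);

        /* Check the valid bit (31) against the current toggle polarity */
        if ((cur >> 31) == q->toggle)
                return 0;

        /* If consuming (not just peeking), advance the index */
        if (!just_peek) {
                q->idx = (q->idx + 1) & q->msk;

                /* Wrapped around: flip the expected valid polarity */
                if (q->idx == 0)
                        q->toggle ^= 1;
        }

        /* Mask out the valid bit (31) */
        return cur & 0x7fffffff;
}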
114 * (0xff if none) and return what was found (0 if none).
132 u32 irq = 0; in xive_scan_interrupts()
133 u8 prio = 0; in xive_scan_interrupts()
136 while (xc->pending_prio != 0) { in xive_scan_interrupts()
170 int p = atomic_xchg(&q->pending_count, 0); in xive_scan_interrupts()
178 /* If nothing was found, set CPPR to 0xff */ in xive_scan_interrupts()
179 if (irq == 0) in xive_scan_interrupts()
180 prio = 0xff; in xive_scan_interrupts()
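xive_scan_interrupts() walks the per-CPU pending-priority bitmap, most favored (numerically lowest) priority first, and tries to fetch from the corresponding queue. A condensed sketch consistent with the fragments; the debug prints and irq_to_desc() sanity checks of the full function are elided:

static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
        u32 irq = 0;
        u8 prio = 0;

        /* Walk the priorities that have their pending bit set */
        while (xc->pending_prio != 0) {
                struct xive_q *q;

                prio = ffs(xc->pending_prio) - 1;

                /* Try to fetch from that priority's queue */
                irq = xive_read_eq(&xc->queue[prio], just_peek);
                if (irq)
                        break;

                /* Queue is empty: clear its pending bit */
                xc->pending_prio &= ~(1 << prio);

                /*
                 * Fold counts accumulated by interrupts re-routed away
                 * from this queue back into q->count.
                 */
                q = &xc->queue[prio];
                if (atomic_read(&q->pending_count)) {
                        int p = atomic_xchg(&q->pending_count, 0);

                        if (p)
                                atomic_sub(p, &q->count);
                }
        }

        /* If nothing was found, set CPPR to 0xff */
        if (irq == 0)
                prio = 0xff;

        /* Push the new CPPR to the thread management area if it changed */
        if (prio != xc->cppr) {
                xc->cppr = prio;
                out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
        }
        return irq;
}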
196 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) in xive_esb_read()
200 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_esb_read()
204 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) in xive_esb_read()
207 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_read()
208 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); in xive_esb_read()
210 val = in_be64(xd->eoi_mmio + offset); in xive_esb_read()
215 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) in xive_esb_write()
218 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) in xive_esb_write()
221 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_write()
222 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); in xive_esb_write()
224 out_be64(xd->eoi_mmio + offset, data); in xive_esb_write()
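All ESB manipulation funnels through this accessor pair so per-source quirks are applied in one place: XIVE_IRQ_FLAG_SHIFT_BUG replicates the offset four bits higher for a HW erratum, XIVE_IRQ_FLAG_H_INT_ESB redirects the access through a firmware/hypervisor call instead of MMIO, and XIVE_IRQ_FLAG_STORE_EOI adjusts the PQ_10 load for ordering against the EOI store. A sketch of the read side, assuming XIVE_ESB_LD_ST_MO is the load-after-store ordering offset modifier:

static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        /* With StoreEOI, order the PQ_10 load after a preceding EOI store */
        if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                offset |= XIVE_ESB_LD_ST_MO;

        /* HW erratum: the offset must be replicated 4 bits higher */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        /* Some platforms only expose the ESB through a firmware call */
        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
        else
                val = in_be64(xd->eoi_mmio + offset);

        return (u8)val;
}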
254 xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, in xmon_xive_do_dump()
277 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); in xmon_xive_get_irq_config()
281 xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", in xmon_xive_get_irq_config()
285 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xmon_xive_get_irq_config()
286 u64 val = xive_esb_read(xd, XIVE_ESB_GET); in xmon_xive_get_irq_config()
289 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', in xmon_xive_get_irq_config()
290 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', in xmon_xive_get_irq_config()
291 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', in xmon_xive_get_irq_config()
297 return 0; in xmon_xive_get_irq_config()
328 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", in xive_get_irq()
333 return 0; in xive_get_irq()
349 if (xive_scan_interrupts(xc, true) != 0) { in xive_do_queue_eoi()
350 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
359 static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) in xive_do_source_eoi()
361 xd->stale_p = false; in xive_do_source_eoi()
363 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_do_source_eoi()
364 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); in xive_do_source_eoi()
365 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { in xive_do_source_eoi()
370 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge in xive_do_source_eoi()
392 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_do_source_eoi()
393 xive_esb_read(xd, XIVE_ESB_LOAD_EOI); in xive_do_source_eoi()
395 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); in xive_do_source_eoi()
399 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) in xive_do_source_eoi()
400 out_be64(xd->trig_mmio, 0); in xive_do_source_eoi()
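xive_do_source_eoi() picks one of three EOI mechanisms depending on the source flags: a single StoreEOI MMIO store, a firmware call for sources needing extra HW whacking, or the generic ESB path. A condensed sketch of the decision tree implied by the fragments:

static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
        u8 eoi_val;

        xd->stale_p = false;

        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
                /* Fast path: a single MMIO store performs the EOI */
                xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
        } else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                /* The FW told us to call it (extra HW work needed) */
                xive_ops->eoi(hw_irq);
        } else {
                /*
                 * LSIs: a load from the LOAD_EOI page suffices.
                 * MSIs: set PQ back to 00; if Q was set, a trigger was
                 * coalesced while the source was off, so replay it by
                 * writing the trigger page.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI) {
                        xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
                } else {
                        eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
                        if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
                                out_be64(xd->trig_mmio, 0);
                }
        }
}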
408 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_eoi()
411 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", in xive_irq_eoi()
419 !(xd->flags & XIVE_IRQ_NO_EOI)) in xive_irq_eoi()
420 xive_do_source_eoi(irqd_to_hwirq(d), xd); in xive_irq_eoi()
422 xd->stale_p = true; in xive_irq_eoi()
428 xd->saved_p = false; in xive_irq_eoi()
439 static void xive_do_source_set_mask(struct xive_irq_data *xd, in xive_do_source_set_mask()
453 val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); in xive_do_source_set_mask()
454 if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) in xive_do_source_set_mask()
455 xd->saved_p = true; in xive_do_source_set_mask()
456 xd->stale_p = false; in xive_do_source_set_mask()
457 } else if (xd->saved_p) { in xive_do_source_set_mask()
458 xive_esb_read(xd, XIVE_ESB_SET_PQ_10); in xive_do_source_set_mask()
459 xd->saved_p = false; in xive_do_source_set_mask()
461 xive_esb_read(xd, XIVE_ESB_SET_PQ_00); in xive_do_source_set_mask()
462 xd->stale_p = false; in xive_do_source_set_mask()
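The ESB exports a two-bit P/Q state per source: P is set once an event has been presented, Q records triggers coalesced in the meantime, and the SET_PQ_xx pages atomically return the old state while installing a new one. Masking parks the source at PQ=01 and remembers a set P bit in saved_p, so the pending event can be restored to PQ=10 on unmask instead of being lost. A sketch reconstructing the function from the fragments:

static void xive_do_source_set_mask(struct xive_irq_data *xd, bool mask)
{
        u64 val;

        if (mask) {
                /* Park the source at PQ=01 (masked) ... */
                val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
                /* ... remembering whether an event was pending */
                if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
                        xd->saved_p = true;
                xd->stale_p = false;
        } else if (xd->saved_p) {
                /* Restore a pending event: PQ=10 until it is EOId */
                xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
                xd->saved_p = false;
        } else {
                /* Plain enable: PQ=00 */
                xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
                xd->stale_p = false;
        }
}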
500 if (WARN_ON(cpu < 0 || !xc)) { in xive_dec_target_count()
527 for (i = 0; i < first && cpu < nr_cpu_ids; i++) in xive_find_target_in_mask()
566 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_pick_irq_target()
574 if (xd->src_chip != XIVE_INVALID_CHIP_ID && in xive_pick_irq_target()
579 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
588 if (cpu >= 0) in xive_pick_irq_target()
599 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_startup()
603 xd->saved_p = false; in xive_irq_startup()
604 xd->stale_p = false; in xive_irq_startup()
605 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", in xive_irq_startup()
633 xd->target = target; in xive_irq_startup()
646 xive_do_source_set_mask(xd, false); in xive_irq_startup()
648 return 0; in xive_irq_startup()
654 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_shutdown()
657 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", in xive_irq_shutdown()
660 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) in xive_irq_shutdown()
664 xive_do_source_set_mask(xd, true); in xive_irq_shutdown()
671 get_hard_smp_processor_id(xd->target), in xive_irq_shutdown()
672 0xff, XIVE_BAD_IRQ); in xive_irq_shutdown()
674 xive_dec_target_count(xd->target); in xive_irq_shutdown()
675 xd->target = XIVE_INVALID_TARGET; in xive_irq_shutdown()
680 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_unmask()
682 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); in xive_irq_unmask()
687 * be fixed by P9 DD2.0, if that is the case, firmware in xive_irq_unmask()
690 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { in xive_irq_unmask()
693 get_hard_smp_processor_id(xd->target), in xive_irq_unmask()
698 xive_do_source_set_mask(xd, false); in xive_irq_unmask()
703 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_mask()
705 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); in xive_irq_mask()
710 * be fixed by P9 DD2.0, if that is the case, firmware in xive_irq_mask()
713 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { in xive_irq_mask()
716 get_hard_smp_processor_id(xd->target), in xive_irq_mask()
717 0xff, d->irq); in xive_irq_mask()
721 xive_do_source_set_mask(xd, true); in xive_irq_mask()
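The mask and unmask chip callbacks are mirror images: when XIVE_IRQ_FLAG_MASK_FW is set (the P9 DD1.x workaround mentioned in the comments), masking retargets the source to priority 0xff through the firmware, and unmasking restores the real target; otherwise both fall through to the ESB-based xive_do_source_set_mask(). A sketch of the mask side:

static void xive_irq_mask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

        /*
         * P9 DD1.x workaround: ESB masking is unreliable, so ask the
         * firmware to retarget the source at priority 0xff instead.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        0xff, d->irq);
                return;
        }
        xive_do_source_set_mask(xd, true);
}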
728 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_affinity()
731 int rc = 0; in xive_irq_set_affinity()
747 if (xd->target != XIVE_INVALID_TARGET && in xive_irq_set_affinity()
748 cpu_online(xd->target) && in xive_irq_set_affinity()
749 cpumask_test_cpu(xd->target, cpumask)) in xive_irq_set_affinity()
763 old_target = xd->target; in xive_irq_set_affinity()
773 if (rc < 0) { in xive_irq_set_affinity()
778 pr_devel(" target: 0x%x\n", target); in xive_irq_set_affinity()
779 xd->target = target; in xive_irq_set_affinity()
790 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_type()
817 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { in xive_irq_set_type()
818 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", in xive_irq_set_type()
821 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); in xive_irq_set_type()
829 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_retrigger()
832 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_retrigger()
833 return 0; in xive_irq_retrigger()
839 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); in xive_irq_retrigger()
842 * Note: We pass "0" to the hw_irq argument in order to in xive_irq_retrigger()
847 xive_do_source_eoi(0, xd); in xive_irq_retrigger()
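Retriggering an MSI is done entirely through the ESB: force PQ to 11 (pending and queued), then EOI, which replays the queued trigger. A sketch consistent with the fragments:

static int xive_irq_retrigger(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /* This should only be used for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return 0;

        /* Set PQ to 11 so the following EOI sees a queued event */
        xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

        /*
         * Pass "0" as hw_irq to avoid the backend EOI code, which is
         * not wanted on a re-trigger (backends typically only need it
         * for LSIs anyway).
         */
        xive_do_source_eoi(0, xd);

        return 1;
}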
858 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_vcpu_affinity()
867 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) in xive_irq_set_vcpu_affinity()
878 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); in xive_irq_set_vcpu_affinity()
879 if (!xd->stale_p) { in xive_irq_set_vcpu_affinity()
880 xd->saved_p = !!(pq & XIVE_ESB_VAL_P); in xive_irq_set_vcpu_affinity()
881 xd->stale_p = !xd->saved_p; in xive_irq_set_vcpu_affinity()
885 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
890 WARN_ON(xd->saved_p); in xive_irq_set_vcpu_affinity()
892 return 0; in xive_irq_set_vcpu_affinity()
910 if (xd->saved_p) { in xive_irq_set_vcpu_affinity()
911 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); in xive_irq_set_vcpu_affinity()
929 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
930 xive_do_source_set_mask(xd, true); in xive_irq_set_vcpu_affinity()
931 return 0; in xive_irq_set_vcpu_affinity()
951 get_hard_smp_processor_id(xd->target), in xive_irq_set_vcpu_affinity()
968 if (!xd->saved_p) in xive_irq_set_vcpu_affinity()
969 xive_do_source_eoi(hw_irq, xd); in xive_irq_set_vcpu_affinity()
972 return 0; in xive_irq_set_vcpu_affinity()
979 struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); in xive_get_irqchip_state()
984 pq = xive_esb_read(xd, XIVE_ESB_GET); in xive_get_irqchip_state()
993 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && in xive_get_irqchip_state()
994 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P)); in xive_get_irqchip_state()
995 return 0; in xive_get_irqchip_state()
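For IRQCHIP_STATE_ACTIVE, the state is computed from a non-destructive XIVE_ESB_GET read combined with the saved_p/stale_p bookkeeping, so an interrupt still sitting in a queue keeps reporting as active. A sketch:

static int xive_get_irqchip_state(struct irq_data *data,
                                  enum irqchip_irq_state which, bool *state)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
        u8 pq;

        switch (which) {
        case IRQCHIP_STATE_ACTIVE:
                pq = xive_esb_read(xd, XIVE_ESB_GET);
                /*
                 * Active if P is set, or if saved_p still marks the
                 * source as parked in a queue awaiting EOI.
                 */
                *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
                        (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
                return 0;
        default:
                return -EINVAL;
        }
}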
1021 void xive_cleanup_irq_data(struct xive_irq_data *xd) in xive_cleanup_irq_data()
1023 if (xd->eoi_mmio) { in xive_cleanup_irq_data()
1024 unmap_kernel_range((unsigned long)xd->eoi_mmio, in xive_cleanup_irq_data()
1025 1u << xd->esb_shift); in xive_cleanup_irq_data()
1026 iounmap(xd->eoi_mmio); in xive_cleanup_irq_data()
1027 if (xd->eoi_mmio == xd->trig_mmio) in xive_cleanup_irq_data()
1028 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
1029 xd->eoi_mmio = NULL; in xive_cleanup_irq_data()
1031 if (xd->trig_mmio) { in xive_cleanup_irq_data()
1032 unmap_kernel_range((unsigned long)xd->trig_mmio, in xive_cleanup_irq_data()
1033 1u << xd->esb_shift); in xive_cleanup_irq_data()
1034 iounmap(xd->trig_mmio); in xive_cleanup_irq_data()
1035 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
1042 struct xive_irq_data *xd; in xive_irq_alloc_data()
1045 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); in xive_irq_alloc_data()
1046 if (!xd) in xive_irq_alloc_data()
1048 rc = xive_ops->populate_irq_data(hw, xd); in xive_irq_alloc_data()
1050 kfree(xd); in xive_irq_alloc_data()
1053 xd->target = XIVE_INVALID_TARGET; in xive_irq_alloc_data()
1054 irq_set_handler_data(virq, xd); in xive_irq_alloc_data()
1063 xive_esb_read(xd, XIVE_ESB_SET_PQ_01); in xive_irq_alloc_data()
1065 return 0; in xive_irq_alloc_data()
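Per-IRQ data is allocated at mapping time; note the final PQ=01 read, which both masks the freshly mapped source and faults its ESB page into the kernel address space so later mask/unmask paths cannot hit an unmapped page. A sketch, assuming the usual -ENOMEM convention:

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
        struct xive_irq_data *xd;
        int rc;

        xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
        if (!xd)
                return -ENOMEM;
        rc = xive_ops->populate_irq_data(hw, xd);
        if (rc) {
                kfree(xd);
                return rc;
        }
        xd->target = XIVE_INVALID_TARGET;
        irq_set_handler_data(virq, xd);

        /* Turn the source OFF by default (and map its ESB page) */
        xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

        return 0;
}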
1070 struct xive_irq_data *xd = irq_get_handler_data(virq); in xive_irq_free_data()
1072 if (!xd) in xive_irq_free_data()
1075 xive_cleanup_irq_data(xd); in xive_irq_free_data()
1076 kfree(xd); in xive_irq_free_data()
1084 struct xive_irq_data *xd; in xive_cause_ipi()
1088 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", in xive_cause_ipi()
1091 xd = &xc->ipi_data; in xive_cause_ipi()
1092 if (WARN_ON(!xd->trig_mmio)) in xive_cause_ipi()
1094 out_be64(xd->trig_mmio, 0); in xive_cause_ipi()
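Raising an IPI needs no queue manipulation at all: a single store to the target CPU's trigger page injects the event. A minimal sketch, assuming the per-CPU area is reachable through a per_cpu(xive_cpu, cpu) variable (not shown in this listing):

static void xive_cause_ipi(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_irq_data *xd = &xc->ipi_data;

        /* A single MMIO store to the trigger page raises the IPI */
        if (WARN_ON(!xd->trig_mmio))
                return;
        out_be64(xd->trig_mmio, 0);
}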
1110 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", in xive_ipi_eoi()
1145 virq = irq_create_mapping(xive_irq_domain, 0); in xive_request_ipi()
1163 return 0; in xive_setup_cpu_ipi()
1191 return 0; in xive_setup_cpu_ipi()
1213 0xff, xive_ipi_irq); in xive_cleanup_cpu_ipi()
1244 /* IPIs are special and come up with HW number 0 */ in xive_irq_domain_map()
1245 if (hw == 0) { in xive_irq_domain_map()
1252 return 0; in xive_irq_domain_map()
1262 return 0; in xive_irq_domain_map()
1283 *out_hwirq = intspec[0]; in xive_irq_domain_xlate()
1297 return 0; in xive_irq_domain_xlate()
1330 int rc = 0; in xive_setup_cpu_queues()
1372 /* Set CPPR to 0xff to enable flow of interrupts */ in xive_setup_cpu()
1373 xc->cppr = 0xff; in xive_setup_cpu()
1374 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_setup_cpu()
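The CPPR (Current Processor Priority Register) in the thread management area gates delivery: only events whose priority is more favored (numerically lower) than the CPPR are presented. Hence 0xff opens the flow completely and 0 shuts it off, as the setup and teardown fragments in this listing show. Illustrative stores, using the TIMA accessors from the fragments:

/* Accept every priority (0 .. 0xfe): interrupts flow */
out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);

/* Accept nothing: flow of interrupts is disabled */
out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);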
1410 while ((irq = xive_scan_interrupts(xc, false)) != 0) { in xive_flush_cpu_queue()
1417 struct xive_irq_data *xd; in xive_flush_cpu_queue()
1424 if (d->domain != xive_irq_domain || hw_irq == 0) in xive_flush_cpu_queue()
1437 xd = irq_desc_get_handler_data(desc); in xive_flush_cpu_queue()
1442 xd->saved_p = false; in xive_flush_cpu_queue()
1448 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_flush_cpu_queue()
1449 xive_do_source_eoi(irqd_to_hwirq(d), xd); in xive_flush_cpu_queue()
1465 /* Set CPPR to 0 to disable flow of interrupts */ in xive_smp_disable_cpu()
1466 xc->cppr = 0; in xive_smp_disable_cpu()
1467 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_smp_disable_cpu()
1473 xc->cppr = 0xff; in xive_smp_disable_cpu()
1474 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_smp_disable_cpu()
1495 /* Set CPPR to 0 to disable flow of interrupts */ in xive_teardown_cpu()
1496 xc->cppr = 0; in xive_teardown_cpu()
1497 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_teardown_cpu()
1556 memset(qpage, 0, 1 << queue_shift); in xive_queue_page_alloc()
1564 return 0; in xive_off()
1580 seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi, in xive_debug_show_cpu()
1615 seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); in xive_debug_show_irq()
1619 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", in xive_debug_show_irq()
1623 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_debug_show_irq()
1624 u64 val = xive_esb_read(xd, XIVE_ESB_GET); in xive_debug_show_irq()
1627 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', in xive_debug_show_irq()
1628 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', in xive_debug_show_irq()
1629 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', in xive_debug_show_irq()
1657 /* IPIs are special (HW number 0) */ in xive_core_debug_show()
1661 return 0; in xive_core_debug_show()
1670 return 0; in xive_core_debug_init()