Lines matching full:if: occurrences of "if" in QEMU's hw/i386/kvm/xen_evtchn.c (Xen event-channel emulation). Each entry shows the source line number, the matching line, and the enclosing function as reported by the indexer.
206 if (s->callback_param) { in xen_evtchn_post_load()
214 if (p->type == EVTCHNSTAT_pirq) { in xen_evtchn_post_load()
229 if (s->gsi_pirq[i]) { in xen_evtchn_post_load()
302 if (vi) { in gsi_assert_bh()
372 if (domain || !pcms) { in type_init()
377 if (!pdev) { in type_init()
382 if (r.mode != PCI_INTX_ENABLED) { in type_init()
397 if (!s) { in xen_evtchn_set_callback_level()
415 * • From guest vCPU context in the KVM exit handler, if the upcall in xen_evtchn_set_callback_level()
421 * Whichever context we come from, if we aren't already holding the BQL in xen_evtchn_set_callback_level()
430 if (!bql_locked()) { in xen_evtchn_set_callback_level()
435 if (s->callback_gsi && s->callback_gsi < s->nr_callback_gsis) { in xen_evtchn_set_callback_level()
442 /* Do not deassert the line if an external device is asserting it. */ in xen_evtchn_set_callback_level()
448 * If the callback GSI is the only one asserted, ensure the status in xen_evtchn_set_callback_level()
451 if (level && !s->extern_gsi_level) { in xen_evtchn_set_callback_level()
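Taken together, the xen_evtchn_set_callback_level() fragments above (source lines 415-451) describe a policy for a callback GSI that may be shared with an external device: never deassert the line while the external source still drives it, and only record "callback asserted" when Xen is the sole asserter (the function also defers to a bottom half, gsi_assert_bh, when called without the BQL, per line 430). A minimal sketch of that policy, assuming hypothetical stub helpers in place of QEMU's real GSI and KVM calls:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stubs standing in for qemu_set_irq() on the callback
     * GSI and for telling KVM the callback is asserted. */
    static void set_gsi_line(bool level) { printf("GSI level: %d\n", level); }
    static void mark_callback_asserted(void) { puts("callback asserted"); }

    static void set_callback_level(bool xen_level, bool extern_gsi_level)
    {
        /* Do not deassert the line while an external device (PCI INTx)
         * still drives it: the output is the OR of both sources. */
        set_gsi_line(xen_level || extern_gsi_level);

        /* Record the assertion only when Xen alone drives the line, so
         * the GSI can be deasserted later when the guest clears its
         * evtchn_upcall_pending flag. */
        if (xen_level && !extern_gsi_level) {
            mark_callback_asserted();
        }
    }

    int main(void)
    {
        set_callback_level(true, false);   /* assert: Xen only */
        set_callback_level(false, true);   /* line stays up for INTx */
        return 0;
    }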
469 if (!s) { in xen_evtchn_set_callback_param()
485 if (!ret && kvm_xen_has_cap(EVTCHN_SEND)) { in xen_evtchn_set_callback_param()
503 /* Xen doesn't return an error even if you set something bogus */ in xen_evtchn_set_callback_param()
508 /* If the guest has set a per-vCPU callback vector, prefer that. */ in xen_evtchn_set_callback_param()
509 if (gsi && kvm_xen_has_vcpu_callback_vector()) { in xen_evtchn_set_callback_param()
514 if (!ret) { in xen_evtchn_set_callback_param()
515 /* If vector delivery was turned *off* then tell the kernel */ in xen_evtchn_set_callback_param()
516 if ((s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) == in xen_evtchn_set_callback_param()
523 if (gsi != s->callback_gsi) { in xen_evtchn_set_callback_param()
529 if (gsi && vi && vi->evtchn_upcall_pending) { in xen_evtchn_set_callback_param()
557 if (ret) { in deassign_kernel_port()
569 if (!cpu) { in assign_kernel_port()
600 if (!port) { in valid_port()
604 if (xen_is_long_mode()) { in valid_port()
623 if (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu) { in unbind_backend_ports()
626 if (s->be_handles[be_port]) { in unbind_backend_ports()
632 if (kvm_xen_has_cap(EVTCHN_SEND)) { in unbind_backend_ports()
646 if (!s) { in xen_evtchn_status_op()
650 if (status->dom != DOMID_SELF && status->dom != xen_domid) { in xen_evtchn_status_op()
654 if (!valid_port(status->port)) { in xen_evtchn_status_op()
707 if (idx >= bits_per_word) { in do_unmask_port_lm()
711 if (do_unmask) { in do_unmask_port_lm()
713 * If this is a true unmask operation, clear the mask bit. If in do_unmask_port_lm()
716 if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) { in do_unmask_port_lm()
722 * change the mask bit, and if it's *masked* we have nothing in do_unmask_port_lm()
725 if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { in do_unmask_port_lm()
730 /* If the event was not pending, we're done. */ in do_unmask_port_lm()
731 if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) { in do_unmask_port_lm()
738 /* If a port in this word was already pending for this vCPU, all done. */ in do_unmask_port_lm()
739 if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { in do_unmask_port_lm()
744 if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { in do_unmask_port_lm()
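Source lines 707-744 above trace the long-mode unmask path of the 2-level event-channel ABI: clear (or merely test) the mask bit, check the first-level pending bit, then set the per-word selector bit and the per-vCPU upcall flag, delivering an upcall only on the first 0 -> 1 transition at each level. A self-contained sketch of that sequence using C11 atomics; the struct layout and names here are illustrative, not Xen's ABI. (The _compat variant below repeats the same sequence; in the real ABI it operates on 32-bit words.)

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define BITS_PER_WORD 64

    /* Illustrative stand-ins for the shared info page and vcpu_info. */
    struct shinfo_2l {
        _Atomic uint64_t pending[BITS_PER_WORD];   /* evtchn_pending */
        _Atomic uint64_t mask[BITS_PER_WORD];      /* evtchn_mask */
    };

    struct vcpu_2l {
        _Atomic uint64_t pending_sel;      /* evtchn_pending_sel */
        _Atomic uint8_t  upcall_pending;   /* evtchn_upcall_pending */
    };

    /* Returns true if an upcall should be injected into the vCPU. */
    bool unmask_port_2l(struct shinfo_2l *sh, struct vcpu_2l *v,
                        unsigned port, bool do_unmask)
    {
        unsigned idx = port / BITS_PER_WORD;
        uint64_t bit = UINT64_C(1) << (port % BITS_PER_WORD);

        if (do_unmask) {
            /* True unmask: clear the mask bit; if it was already clear,
             * nothing was held back and there is nothing to do. */
            if (!(atomic_fetch_and(&sh->mask[idx], ~bit) & bit)) {
                return false;
            }
        } else if (atomic_load(&sh->mask[idx]) & bit) {
            /* Test only: a still-masked port delivers nothing. */
            return false;
        }

        /* If the event was not pending, we're done. */
        if (!(atomic_load(&sh->pending[idx]) & bit)) {
            return false;
        }

        /* Second level: one selector bit per first-level word. If a
         * port in this word was already pending for the vCPU, all done. */
        uint64_t sel = UINT64_C(1) << idx;
        if (atomic_fetch_or(&v->pending_sel, sel) & sel) {
            return false;
        }

        /* Raise the per-vCPU flag; inject only on the 0 -> 1 edge. */
        return !atomic_fetch_or(&v->upcall_pending, 1);
    }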
765 if (idx >= bits_per_word) { in do_unmask_port_compat()
769 if (do_unmask) { in do_unmask_port_compat()
771 * If this is a true unmask operation, clear the mask bit. If in do_unmask_port_compat()
774 if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) { in do_unmask_port_compat()
780 * change the mask bit, and if it's *masked* we have nothing in do_unmask_port_compat()
783 if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { in do_unmask_port_compat()
788 /* If the event was not pending, we're done. */ in do_unmask_port_compat()
789 if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) { in do_unmask_port_compat()
796 /* If a port in this word was already pending for this vCPU, all done. */ in do_unmask_port_compat()
797 if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { in do_unmask_port_compat()
802 if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { in do_unmask_port_compat()
815 if (s->port_table[port].type == EVTCHNSTAT_closed) { in unmask_port()
820 if (!shinfo) { in unmask_port()
825 if (!vcpu_info) { in unmask_port()
829 if (xen_is_long_mode()) { in unmask_port()
847 if (idx >= bits_per_word) { in do_set_port_lm()
851 /* Update the pending bit itself. If it was already set, we're done. */ in do_set_port_lm()
852 if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) { in do_set_port_lm()
856 /* Check if it's masked. */ in do_set_port_lm()
857 if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { in do_set_port_lm()
864 /* If a port in this word was already pending for this vCPU, all done. */ in do_set_port_lm()
865 if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { in do_set_port_lm()
870 if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { in do_set_port_lm()
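The do_set_port_lm() fragments (source lines 847-870) are the delivery-side twin of the unmask path: set the pending bit unconditionally, stop if the port is masked, then walk the same second level. A companion sketch under the same illustrative layout, reusing struct shinfo_2l, struct vcpu_2l and BITS_PER_WORD from the unmask sketch above:

    /* Returns true if an upcall should be injected into the vCPU. */
    bool set_port_2l(struct shinfo_2l *sh, struct vcpu_2l *v, unsigned port)
    {
        unsigned idx = port / BITS_PER_WORD;
        uint64_t bit = UINT64_C(1) << (port % BITS_PER_WORD);

        /* Update the pending bit itself; if it was already set, done. */
        if (atomic_fetch_or(&sh->pending[idx], bit) & bit) {
            return false;
        }

        /* A masked port latches the pending bit but delivers nothing;
         * a later unmask (sketch above) picks it up. */
        if (atomic_load(&sh->mask[idx]) & bit) {
            return false;
        }

        uint64_t sel = UINT64_C(1) << idx;
        if (atomic_fetch_or(&v->pending_sel, sel) & sel) {
            return false;
        }
        return !atomic_fetch_or(&v->upcall_pending, 1);
    }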
890 if (idx >= bits_per_word) { in do_set_port_compat()
894 /* Update the pending bit itself. If it was already set, we're done. */ in do_set_port_compat()
895 if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) { in do_set_port_compat()
899 /* Check if it's masked. */ in do_set_port_compat()
900 if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { in do_set_port_compat()
907 /* If a port in this word was already pending for this vCPU, all done. */ in do_set_port_compat()
908 if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { in do_set_port_compat()
913 if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { in do_set_port_compat()
926 if (s->port_table[port].type == EVTCHNSTAT_closed) { in set_port_pending()
930 if (s->evtchn_in_kernel) { in set_port_pending()
935 if (!cpu) { in set_port_pending()
947 if (!shinfo) { in set_port_pending()
952 if (!vcpu_info) { in set_port_pending()
956 if (xen_is_long_mode()) { in set_port_pending()
967 if (!p) { in clear_port_pending()
971 if (xen_is_long_mode()) { in clear_port_pending()
1001 if (s->nr_ports == port + 1) { in free_port()
1018 if (s->port_table[p].type == EVTCHNSTAT_closed) { in allocate_port()
1025 if (s->nr_ports < p + 1) { in allocate_port()
1063 if (s->pirq[p->u.pirq].is_translated) { in close_port()
1074 if (s->evtchn_in_kernel) { in close_port()
1080 if (p->u.interdomain.to_qemu) { in close_port()
1083 if (xc) { in close_port()
1084 if (kvm_xen_has_cap(EVTCHN_SEND)) { in close_port()
1092 if (!valid_port(p->u.interdomain.port) || in close_port()
1118 if (!s) { in xen_evtchn_soft_reset()
1132 if (flush_kvm_routes) { in xen_evtchn_soft_reset()
1141 if (reset->dom != DOMID_SELF && reset->dom != xen_domid) { in xen_evtchn_reset_op()
1155 if (!s) { in xen_evtchn_close_op()
1159 if (!valid_port(close->port)) { in xen_evtchn_close_op()
1170 if (flush_kvm_routes) { in xen_evtchn_close_op()
1182 if (!s) { in xen_evtchn_unmask_op()
1186 if (!valid_port(unmask->port)) { in xen_evtchn_unmask_op()
1205 if (!s) { in xen_evtchn_bind_vcpu_op()
1209 if (!valid_port(vcpu->port)) { in xen_evtchn_bind_vcpu_op()
1213 if (!valid_vcpu(vcpu->vcpu)) { in xen_evtchn_bind_vcpu_op()
1221 if (p->type == EVTCHNSTAT_interdomain || in xen_evtchn_bind_vcpu_op()
1227 * on the new vCPU if the port was already pending. in xen_evtchn_bind_vcpu_op()
1244 if (!s) { in xen_evtchn_bind_virq_op()
1248 if (virq->virq >= NR_VIRQS) { in xen_evtchn_bind_virq_op()
1253 if (virq_is_global(virq->virq) && virq->vcpu != 0) { in xen_evtchn_bind_virq_op()
1257 if (!valid_vcpu(virq->vcpu)) { in xen_evtchn_bind_virq_op()
1265 if (!ret) { in xen_evtchn_bind_virq_op()
1267 if (ret) { in xen_evtchn_bind_virq_op()
1282 if (!s) { in xen_evtchn_bind_pirq_op()
1286 if (pirq->pirq >= s->nr_pirqs) { in xen_evtchn_bind_pirq_op()
1292 if (s->pirq[pirq->pirq].port) { in xen_evtchn_bind_pirq_op()
1300 if (ret) { in xen_evtchn_bind_pirq_op()
1314 if (s->pirq[pirq->pirq].gsi == IRQ_MSI_EMU) { in xen_evtchn_bind_pirq_op()
1315 if (s->pirq[pirq->pirq].is_masked) { in xen_evtchn_bind_pirq_op()
1323 if (s->pirq[pirq->pirq].is_msix) { in xen_evtchn_bind_pirq_op()
1328 } else if (s->pirq[pirq->pirq].is_translated) { in xen_evtchn_bind_pirq_op()
1330 * If KVM had attempted to translate this one before, make it try in xen_evtchn_bind_pirq_op()
1331 * again. If we unmasked, then the notifier on the MSI(-X) vector in xen_evtchn_bind_pirq_op()
1346 if (!s) { in xen_evtchn_bind_ipi_op()
1350 if (!valid_vcpu(ipi->vcpu)) { in xen_evtchn_bind_ipi_op()
1357 if (!ret && s->evtchn_in_kernel) { in xen_evtchn_bind_ipi_op()
1371 if (!s) { in xen_evtchn_bind_interdomain_op()
1375 if (interdomain->remote_dom != DOMID_QEMU && in xen_evtchn_bind_interdomain_op()
1381 if (!valid_port(interdomain->remote_port)) { in xen_evtchn_bind_interdomain_op()
1390 if (ret) { in xen_evtchn_bind_interdomain_op()
1394 if (interdomain->remote_dom == DOMID_QEMU) { in xen_evtchn_bind_interdomain_op()
1398 if (!xc) { in xen_evtchn_bind_interdomain_op()
1403 if (xc->guest_port) { in xen_evtchn_bind_interdomain_op()
1410 if (kvm_xen_has_cap(EVTCHN_SEND)) { in xen_evtchn_bind_interdomain_op()
1427 if (interdomain->local_port != interdomain->remote_port && in xen_evtchn_bind_interdomain_op()
1441 if (ret) { in xen_evtchn_bind_interdomain_op()
1455 if (!s) { in xen_evtchn_alloc_unbound_op()
1459 if (alloc->dom != DOMID_SELF && alloc->dom != xen_domid) { in xen_evtchn_alloc_unbound_op()
1463 if (alloc->remote_dom != DOMID_QEMU && in xen_evtchn_alloc_unbound_op()
1473 if (!ret && alloc->remote_dom == DOMID_QEMU) { in xen_evtchn_alloc_unbound_op()
1489 if (!s) { in xen_evtchn_send_op()
1493 if (!valid_port(send->port)) { in xen_evtchn_send_op()
1503 if (p->u.interdomain.to_qemu) { in xen_evtchn_send_op()
1510 if (xc) { in xen_evtchn_send_op()
1546 if (!s) { in xen_evtchn_set_port()
1550 if (!valid_port(port)) { in xen_evtchn_set_port()
1559 if (p->type == EVTCHNSTAT_virq || in xen_evtchn_set_port()
1580 if (type == MAP_PIRQ_TYPE_GSI) { in allocate_pirq()
1582 if (pirq_inuse(s, pirq)) { in allocate_pirq()
1592 if (pirq_inuse_word(s, pirq) == UINT64_MAX) { in allocate_pirq()
1596 if (pirq_inuse(s, pirq)) { in allocate_pirq()
1606 if (gsi >= 0) { in allocate_pirq()
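The allocate_pirq() fragments (source lines 1580-1606) show a first-fit scan over the PIRQ in-use bitmap that skips fully-occupied 64-bit words in one step (the UINT64_MAX test on line 1592), with low PIRQ numbers reserved for GSI-type mappings. A standalone sketch of just the bitmap scan; NR_PIRQS and the names here are illustrative, not QEMU's:

    #include <stdint.h>

    #define NR_PIRQS 256   /* illustrative; QEMU sizes this dynamically */

    static uint64_t pirq_inuse_bitmap[NR_PIRQS / 64];

    /* First-fit scan from 'start'; returns the claimed PIRQ or -1. */
    int find_free_pirq(int start)
    {
        for (int pirq = start; pirq < NR_PIRQS; pirq++) {
            /* An all-ones word is 64 busy slots: jump to its last bit
             * so the loop increment lands on the next word. */
            if (pirq_inuse_bitmap[pirq / 64] == UINT64_MAX) {
                pirq |= 63;
                continue;
            }
            if (!(pirq_inuse_bitmap[pirq / 64] & (UINT64_C(1) << (pirq & 63)))) {
                pirq_inuse_bitmap[pirq / 64] |= UINT64_C(1) << (pirq & 63);
                return pirq;
            }
        }
        return -1;   /* no free PIRQ (-ENOSPC in QEMU) */
    }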
1621 if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) { in xen_evtchn_set_gsi()
1631 if (gsi && gsi == s->callback_gsi) { in xen_evtchn_set_gsi()
1633 if (!s->setting_callback_gsi) { in xen_evtchn_set_gsi()
1637 * Don't allow the external device to deassert the line if the in xen_evtchn_set_gsi()
1640 if (!s->extern_gsi_level) { in xen_evtchn_set_gsi()
1642 if (vi && vi->evtchn_upcall_pending) { in xen_evtchn_set_gsi()
1652 * no sense. It could also deadlock on s->port_lock, if we proceed. in xen_evtchn_set_gsi()
1661 if (!pirq) { in xen_evtchn_set_gsi()
1665 if (*level) { in xen_evtchn_set_gsi()
1669 if (port) { in xen_evtchn_set_gsi()
1681 if (data & 0xff) { in msi_pirq_target()
1703 if (!(pirq & 63) && !(pirq_inuse_word(s, pirq))) { in do_remove_pci_vector()
1707 if (except_pirq && pirq == except_pirq) { in do_remove_pci_vector()
1710 if (s->pirq[pirq].dev != dev) { in do_remove_pci_vector()
1713 if (vector != -1 && s->pirq[pirq].vector != vector) { in do_remove_pci_vector()
1731 if (!s) { in xen_evtchn_remove_pci_device()
1745 if (!s) { in xen_evtchn_snoop_msi()
1757 if (!pirq || pirq >= s->nr_pirqs || !pirq_inuse(s, pirq) || in xen_evtchn_snoop_msi()
1763 if (pirq) { in xen_evtchn_snoop_msi()
1782 if (!s) { in xen_evtchn_translate_pirq_msi()
1789 if (!pirq || pirq >= s->nr_pirqs) { in xen_evtchn_translate_pirq_msi()
1793 if (!kvm_xen_has_cap(EVTCHN_2LEVEL)) { in xen_evtchn_translate_pirq_msi()
1797 if (s->pirq[pirq].gsi != IRQ_MSI_EMU) { in xen_evtchn_translate_pirq_msi()
1807 if (!valid_port(port)) { in xen_evtchn_translate_pirq_msi()
1812 if (!cpu) { in xen_evtchn_translate_pirq_msi()
1829 if (!s) { in xen_evtchn_deliver_pirq_msi()
1836 if (!pirq || pirq >= s->nr_pirqs) { in xen_evtchn_deliver_pirq_msi()
1843 if (!valid_port(port)) { in xen_evtchn_deliver_pirq_msi()
1857 if (!s) { in xen_physdev_map_pirq()
1864 if (map->domid != DOMID_SELF && map->domid != xen_domid) { in xen_physdev_map_pirq()
1867 if (map->type != MAP_PIRQ_TYPE_GSI) { in xen_physdev_map_pirq()
1870 if (gsi < 0 || gsi >= IOAPIC_NUM_PINS) { in xen_physdev_map_pirq()
1874 if (pirq < 0) { in xen_physdev_map_pirq()
1876 if (pirq < 0) { in xen_physdev_map_pirq()
1880 } else if (pirq > s->nr_pirqs) { in xen_physdev_map_pirq()
1884 * User specified a valid-looking PIRQ#. Allow it if it is in xen_physdev_map_pirq()
1885 * allocated and not yet bound, or if it is unallocated in xen_physdev_map_pirq()
1887 if (pirq_inuse(s, pirq)) { in xen_physdev_map_pirq()
1888 if (s->pirq[pirq].gsi != IRQ_UNBOUND) { in xen_physdev_map_pirq()
1892 /* If it was unused, mark it used now. */ in xen_physdev_map_pirq()
1910 if (!s) { in xen_physdev_unmap_pirq()
1914 if (unmap->domid != DOMID_SELF && unmap->domid != xen_domid) { in xen_physdev_unmap_pirq()
1917 if (pirq < 0 || pirq >= s->nr_pirqs) { in xen_physdev_unmap_pirq()
1924 if (!pirq_inuse(s, pirq)) { in xen_physdev_unmap_pirq()
1932 if (gsi < 0) { in xen_physdev_unmap_pirq()
1944 if (gsi == IRQ_MSI_EMU) { in xen_physdev_unmap_pirq()
1957 if (!s) { in xen_physdev_eoi_pirq()
1964 if (!pirq_inuse(s, pirq)) { in xen_physdev_eoi_pirq()
1969 if (gsi < 0) { in xen_physdev_eoi_pirq()
1973 /* Reassert a level IRQ if needed */ in xen_physdev_eoi_pirq()
1974 if (s->pirq_gsi_set & (1U << gsi)) { in xen_physdev_eoi_pirq()
1976 if (port) { in xen_physdev_eoi_pirq()
1989 if (!s) { in xen_physdev_query_pirq()
1996 if (!pirq_inuse(s, pirq)) { in xen_physdev_query_pirq()
2000 if (s->pirq[pirq].gsi >= 0) { in xen_physdev_query_pirq()
2014 if (!s) { in xen_physdev_get_free_pirq()
2021 if (pirq < 0) { in xen_physdev_get_free_pirq()
2035 if (xc->fd < 0) { in xen_be_evtchn_open()
2048 if (!s->be_handles[i]) { in find_be_port()
2065 if (!s) { in xen_be_evtchn_bind_interdomain()
2069 if (!xc) { in xen_be_evtchn_bind_interdomain()
2073 if (domid != xen_domid) { in xen_be_evtchn_bind_interdomain()
2077 if (!valid_port(guest_port)) { in xen_be_evtchn_bind_interdomain()
2088 /* Allow rebinding after migration, preserve port # if possible */ in xen_be_evtchn_bind_interdomain()
2091 if (!s->be_handles[be_port]) { in xen_be_evtchn_bind_interdomain()
2095 if (kvm_xen_has_cap(EVTCHN_SEND)) { in xen_be_evtchn_bind_interdomain()
2104 if (!be_port) { in xen_be_evtchn_bind_interdomain()
2113 if (kvm_xen_has_cap(EVTCHN_SEND)) { in xen_be_evtchn_bind_interdomain()
2135 if (!s) { in xen_be_evtchn_unbind()
2139 if (!xc) { in xen_be_evtchn_unbind()
2145 if (port && port != xc->be_port) { in xen_be_evtchn_unbind()
2150 if (xc->guest_port) { in xen_be_evtchn_unbind()
2154 if (gp->type == EVTCHNSTAT_interdomain) { in xen_be_evtchn_unbind()
2159 if (kvm_xen_has_cap(EVTCHN_SEND)) { in xen_be_evtchn_unbind()
2175 if (!xc) { in xen_be_evtchn_close()
2188 if (!xc) { in xen_be_evtchn_fd()
2199 if (!s) { in xen_be_evtchn_notify()
2203 if (!xc) { in xen_be_evtchn_notify()
2209 if (xc->guest_port) { in xen_be_evtchn_notify()
2225 if (!xc) { in xen_be_evtchn_pending()
2229 if (!xc->be_port) { in xen_be_evtchn_pending()
2233 if (eventfd_read(xc->fd, &val)) { in xen_be_evtchn_pending()
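xen_be_evtchn_pending() drains the backend port's eventfd (source line 2233): an eventfd read returns the accumulated notification count and resets it to zero. A self-contained illustration of those eventfd semantics, not QEMU's code:

    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        eventfd_t val;

        /* A notifier (the kernel via the EVTCHN_SEND capability, or
         * another thread) signals the port by adding to the counter. */
        eventfd_write(fd, 1);

        /* Draining reads the accumulated count and resets it to zero;
         * with EFD_NONBLOCK an empty counter fails with EAGAIN. */
        if (eventfd_read(fd, &val) == 0) {
            printf("%llu notification(s) were pending\n",
                   (unsigned long long)val);
        }
        close(fd);
        return 0;
    }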
2242 if (!xc) { in xen_be_evtchn_unmask()
2246 if (xc->be_port != port) { in xen_be_evtchn_unmask()
2269 if (!s) { in qmp_xen_event_list()
2275 if (!shinfo) { in qmp_xen_event_list()
2280 if (xen_is_long_mode()) { in qmp_xen_event_list()
2294 if (p->type == EVTCHNSTAT_closed) { in qmp_xen_event_list()
2309 if (p->type == EVTCHNSTAT_interdomain) { in qmp_xen_event_list()
2330 if (!s) { in qmp_xen_event_inject()
2335 if (!valid_port(port)) { in qmp_xen_event_inject()
2341 if (set_port_pending(s, port)) { in qmp_xen_event_inject()
2353 if (err) { in hmp_xen_event_list()
2363 if (info->type != EVTCHN_PORT_TYPE_IPI) { in hmp_xen_event_list()
2365 if (info->remote_domain) { in hmp_xen_event_list()
2370 if (info->pending) { in hmp_xen_event_list()
2373 if (info->masked) { in hmp_xen_event_list()
2388 if (err) { in hmp_xen_event_inject()