Lines Matching full:irq

7 #include <linux/irq.h>
56 * matching interrupt ID and return a reference to the IRQ structure.
61 struct vgic_irq *irq = NULL; in vgic_get_lpi() local
66 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { in vgic_get_lpi()
67 if (irq->intid != intid) in vgic_get_lpi()
72 * call vgic_put_irq() later once it's finished with the IRQ. in vgic_get_lpi()
74 vgic_get_irq_kref(irq); in vgic_get_lpi()
77 irq = NULL; in vgic_get_lpi()
82 return irq; in vgic_get_lpi()
88 * to call vgic_put_irq() once it's finished with this IRQ.
125 void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq) in __vgic_put_lpi_locked() argument
129 if (!kref_put(&irq->refcount, vgic_irq_release)) in __vgic_put_lpi_locked()
132 list_del(&irq->lpi_list); in __vgic_put_lpi_locked()
135 kfree(irq); in __vgic_put_lpi_locked()
138 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) in vgic_put_irq() argument
143 if (irq->intid < VGIC_MIN_LPI) in vgic_put_irq()
147 __vgic_put_lpi_locked(kvm, irq); in vgic_put_irq()
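The get/put contract spelled out above (every reference obtained through vgic_get_lpi()/vgic_get_irq() must be dropped with vgic_put_irq() once the caller is done) typically shows up in callers as below. This is a minimal sketch with a hypothetical helper, not code from this file:

static void example_poke_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
{
	struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
	unsigned long flags;

	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	/* ... read or update per-IRQ state under irq_lock ... */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(kvm, irq);		/* balances the reference from vgic_get_irq() */
}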
154 struct vgic_irq *irq, *tmp; in vgic_flush_pending_lpis() local
159 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_pending_lpis()
160 if (irq->intid >= VGIC_MIN_LPI) { in vgic_flush_pending_lpis()
161 raw_spin_lock(&irq->irq_lock); in vgic_flush_pending_lpis()
162 list_del(&irq->ap_list); in vgic_flush_pending_lpis()
163 irq->vcpu = NULL; in vgic_flush_pending_lpis()
164 raw_spin_unlock(&irq->irq_lock); in vgic_flush_pending_lpis()
165 vgic_put_irq(vcpu->kvm, irq); in vgic_flush_pending_lpis()
172 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) in vgic_irq_set_phys_pending() argument
174 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_pending()
179 bool vgic_get_phys_line_level(struct vgic_irq *irq) in vgic_get_phys_line_level() argument
183 BUG_ON(!irq->hw); in vgic_get_phys_line_level()
185 if (irq->get_input_level) in vgic_get_phys_line_level()
186 return irq->get_input_level(irq->intid); in vgic_get_phys_line_level()
188 WARN_ON(irq_get_irqchip_state(irq->host_irq, in vgic_get_phys_line_level()
195 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) in vgic_irq_set_phys_active() argument
198 BUG_ON(!irq->hw); in vgic_irq_set_phys_active()
199 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_active()
205 * vgic_target_oracle - compute the target vcpu for an irq
207 * @irq: The irq to route. Must be already locked.
213 * Requires the IRQ lock to be held.
215 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) in vgic_target_oracle() argument
217 lockdep_assert_held(&irq->irq_lock); in vgic_target_oracle()
220 if (irq->active) in vgic_target_oracle()
221 return irq->vcpu ? : irq->target_vcpu; in vgic_target_oracle()
224 * If the IRQ is not active but enabled and pending, we should direct in vgic_target_oracle()
229 if (irq->enabled && irq_is_pending(irq)) { in vgic_target_oracle()
230 if (unlikely(irq->target_vcpu && in vgic_target_oracle()
231 !irq->target_vcpu->kvm->arch.vgic.enabled)) in vgic_target_oracle()
234 return irq->target_vcpu; in vgic_target_oracle()
237 /* If the IRQ is neither active nor both pending and enabled, it should not in vgic_target_oracle()
308 * rising edge, and in-kernel connected IRQ lines can only be controlled by
311 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) in vgic_validate_injection() argument
313 if (irq->owner != owner) in vgic_validate_injection()
316 switch (irq->config) { in vgic_validate_injection()
318 return irq->line_level != level; in vgic_validate_injection()
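Only the owner check and the level-sensitive branch of vgic_validate_injection() appear in the matches above. As a hedged reconstruction (the edge branch is an assumption based on the "rising edge" comment), the complete check presumably reads roughly like this hypothetical version:

static bool example_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)	/* only the registered owner may drive the line */
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;	/* only an actual level change is an event */
	case VGIC_CONFIG_EDGE:
		return level;				/* assumed: only a rising edge latches */
	}

	return false;
}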
327 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
329 * Returns true when the IRQ was queued, false otherwise.
331 * Needs to be entered with the IRQ lock already held, but will return
334 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, in vgic_queue_irq_unlock() argument
339 lockdep_assert_held(&irq->irq_lock); in vgic_queue_irq_unlock()
342 vcpu = vgic_target_oracle(irq); in vgic_queue_irq_unlock()
343 if (irq->vcpu || !vcpu) { in vgic_queue_irq_unlock()
345 * If this IRQ is already on a VCPU's ap_list, then it in vgic_queue_irq_unlock()
349 * Otherwise, if the irq is not both pending and enabled, it does in vgic_queue_irq_unlock()
353 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
359 * while the IRQ is already on the VCPU's AP list, the in vgic_queue_irq_unlock()
372 * We must unlock the irq lock to take the ap_list_lock where in vgic_queue_irq_unlock()
375 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
380 raw_spin_lock(&irq->irq_lock); in vgic_queue_irq_unlock()
386 * 1) The irq lost its pending state or was disabled behind our in vgic_queue_irq_unlock()
388 * 2) Someone changed the affinity on this irq behind our in vgic_queue_irq_unlock()
394 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { in vgic_queue_irq_unlock()
395 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
399 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
404 * Grab a reference to the irq to reflect the fact that it is in vgic_queue_irq_unlock()
407 vgic_get_irq_kref(irq); in vgic_queue_irq_unlock()
408 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); in vgic_queue_irq_unlock()
409 irq->vcpu = vcpu; in vgic_queue_irq_unlock()
411 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
421 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
429 * @owner: The opaque pointer to the owner of the IRQ being raised to verify
430 * that the caller is allowed to inject this IRQ. Userspace
441 struct vgic_irq *irq; in kvm_vgic_inject_irq() local
455 irq = vgic_get_irq(kvm, vcpu, intid); in kvm_vgic_inject_irq()
456 if (!irq) in kvm_vgic_inject_irq()
459 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
461 if (!vgic_validate_injection(irq, level, owner)) { in kvm_vgic_inject_irq()
463 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
464 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
468 if (irq->config == VGIC_CONFIG_LEVEL) in kvm_vgic_inject_irq()
469 irq->line_level = level; in kvm_vgic_inject_irq()
471 irq->pending_latch = true; in kvm_vgic_inject_irq()
473 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_inject_irq()
474 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
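kvm_vgic_inject_irq() is the single entry point other code uses to raise or lower a virtual interrupt line. A minimal usage sketch with a hypothetical caller, assuming the prototype kvm_vgic_inject_irq(kvm, cpuid, intid, level, owner) (the exact signature is not visible in the matches above):

static int example_assert_line(struct kvm *kvm, void *owner, unsigned int intid)
{
	/*
	 * Raise the line on vcpu 0 (the cpuid only matters for private
	 * interrupts); pass level = false later to deassert a level IRQ.
	 * 'owner' must match what was registered with kvm_vgic_set_owner(),
	 * or be NULL for userspace-style injection.
	 */
	return kvm_vgic_inject_irq(kvm, 0, intid, true, owner);
}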
479 /* @irq->irq_lock must be held */
480 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in kvm_vgic_map_irq() argument
488 * Find the physical IRQ number corresponding to @host_irq in kvm_vgic_map_irq()
499 irq->hw = true; in kvm_vgic_map_irq()
500 irq->host_irq = host_irq; in kvm_vgic_map_irq()
501 irq->hwintid = data->hwirq; in kvm_vgic_map_irq()
502 irq->get_input_level = get_input_level; in kvm_vgic_map_irq()
506 /* @irq->irq_lock must be held */
507 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq) in kvm_vgic_unmap_irq() argument
509 irq->hw = false; in kvm_vgic_unmap_irq()
510 irq->hwintid = 0; in kvm_vgic_unmap_irq()
511 irq->get_input_level = NULL; in kvm_vgic_unmap_irq()
517 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_phys_irq() local
521 BUG_ON(!irq); in kvm_vgic_map_phys_irq()
523 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
524 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); in kvm_vgic_map_phys_irq()
525 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
526 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_phys_irq()
532 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
542 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_reset_mapped_irq() local
545 if (!irq->hw) in kvm_vgic_reset_mapped_irq()
548 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
549 irq->active = false; in kvm_vgic_reset_mapped_irq()
550 irq->pending_latch = false; in kvm_vgic_reset_mapped_irq()
551 irq->line_level = false; in kvm_vgic_reset_mapped_irq()
552 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
554 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_reset_mapped_irq()
559 struct vgic_irq *irq; in kvm_vgic_unmap_phys_irq() local
565 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_unmap_phys_irq()
566 BUG_ON(!irq); in kvm_vgic_unmap_phys_irq()
568 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
569 kvm_vgic_unmap_irq(irq); in kvm_vgic_unmap_phys_irq()
570 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
571 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_unmap_phys_irq()
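The map/unmap pair above ties a virtual INTID to a host interrupt so that hardware pending/active state can be forwarded (as the arch timer does for its PPI). A sketch with hypothetical names, assuming the prototypes implied by the fragments, kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, get_input_level) and kvm_vgic_unmap_phys_irq(vcpu, vintid):

/* Hypothetical line-level callback; a real device would sample its input here. */
static bool example_get_input_level(int vintid)
{
	return false;
}

static int example_start_forwarding(struct kvm_vcpu *vcpu,
				    unsigned int host_irq, u32 vintid)
{
	/* Associate the host IRQ with the guest INTID. */
	return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid,
				     example_get_input_level);
}

static void example_stop_forwarding(struct kvm_vcpu *vcpu, u32 vintid)
{
	/* Break the association again on teardown. */
	kvm_vgic_unmap_phys_irq(vcpu, vintid);
}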
588 struct vgic_irq *irq; in kvm_vgic_set_owner() local
599 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in kvm_vgic_set_owner()
600 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_set_owner()
601 if (irq->owner && irq->owner != owner) in kvm_vgic_set_owner()
604 irq->owner = owner; in kvm_vgic_set_owner()
605 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_set_owner()
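kvm_vgic_set_owner() is what provides the owner token that kvm_vgic_inject_irq() later checks. A minimal sketch, assuming the prototype kvm_vgic_set_owner(vcpu, intid, owner) implied by the fragments above:

static int example_claim_private_irq(struct kvm_vcpu *vcpu, unsigned int intid,
				     void *token)
{
	/*
	 * 'token' is an opaque per-device cookie; the same pointer must be
	 * passed to kvm_vgic_inject_irq() for every later injection.
	 * A nonzero return means the interrupt already has a different owner.
	 */
	return kvm_vgic_set_owner(vcpu, intid, token);
}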
621 struct vgic_irq *irq, *tmp; in vgic_prune_ap_list() local
628 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_prune_ap_list()
632 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
634 BUG_ON(vcpu != irq->vcpu); in vgic_prune_ap_list()
636 target_vcpu = vgic_target_oracle(irq); in vgic_prune_ap_list()
643 list_del(&irq->ap_list); in vgic_prune_ap_list()
644 irq->vcpu = NULL; in vgic_prune_ap_list()
645 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
651 * we remove the irq from the list, we drop in vgic_prune_ap_list()
654 vgic_put_irq(vcpu->kvm, irq); in vgic_prune_ap_list()
660 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
666 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
684 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
695 if (target_vcpu == vgic_target_oracle(irq)) { in vgic_prune_ap_list()
698 list_del(&irq->ap_list); in vgic_prune_ap_list()
699 irq->vcpu = target_vcpu; in vgic_prune_ap_list()
700 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
704 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
729 struct vgic_irq *irq, int lr) in vgic_populate_lr() argument
731 lockdep_assert_held(&irq->irq_lock); in vgic_populate_lr()
734 vgic_v2_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
736 vgic_v3_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
760 struct vgic_irq *irq; in compute_ap_list_depth() local
767 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in compute_ap_list_depth()
770 raw_spin_lock(&irq->irq_lock); in compute_ap_list_depth()
772 w = vgic_irq_get_lr_count(irq); in compute_ap_list_depth()
773 raw_spin_unlock(&irq->irq_lock); in compute_ap_list_depth()
785 struct vgic_irq *irq; in vgic_flush_lr_state() local
799 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_lr_state()
800 raw_spin_lock(&irq->irq_lock); in vgic_flush_lr_state()
804 * guarantee that they are all seen before any IRQ of in vgic_flush_lr_state()
809 if (multi_sgi && irq->priority > prio) { in vgic_flush_lr_state()
810 raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
814 if (likely(vgic_target_oracle(irq) == vcpu)) { in vgic_flush_lr_state()
815 vgic_populate_lr(vcpu, irq, count++); in vgic_flush_lr_state()
817 if (irq->source) in vgic_flush_lr_state()
818 prio = irq->priority; in vgic_flush_lr_state()
821 raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
824 if (!list_is_last(&irq->ap_list, in vgic_flush_lr_state()
956 struct vgic_irq *irq; in kvm_vgic_vcpu_pending_irq() local
971 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in kvm_vgic_vcpu_pending_irq()
972 raw_spin_lock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
973 pending = irq_is_pending(irq) && irq->enabled && in kvm_vgic_vcpu_pending_irq()
974 !irq->active && in kvm_vgic_vcpu_pending_irq()
975 irq->priority < vmcr.pmr; in kvm_vgic_vcpu_pending_irq()
976 raw_spin_unlock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
1006 struct vgic_irq *irq; in kvm_vgic_map_is_active() local
1013 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_is_active()
1014 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1015 map_is_active = irq->hw && irq->active; in kvm_vgic_map_is_active()
1016 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1017 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_is_active()