vgic-mmio.c (KVM ARM vGIC distributor/redistributor MMIO handlers), selected lines
// SPDX-License-Identifier: GPL-2.0-only

#include "vgic-mmio.h"

/* in vgic_mmio_read_rao(): reserved/unimplemented registers that read as all ones */
	return -1UL;

/* in vgic_mmio_read_group(): one bit per INTID, set if the IRQ is in group 1 */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	if (irq->group)
		/* ... */

	vgic_put_irq(vcpu->kvm, irq);
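
The read handlers in this file share one shape: loop over the interrupts covered by the access, take a reference with vgic_get_irq(), fold one state bit per interrupt into the returned value, and drop the reference with vgic_put_irq(). A minimal user-space sketch of that accumulation pattern (struct fake_irq and read_bitmap are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-IRQ state in struct vgic_irq. */
struct fake_irq {
	bool group;
};

/* One bit per interrupt, exactly the loop shape of the vgic readers. */
static uint32_t read_bitmap(const struct fake_irq *irqs, uint32_t intid,
			    unsigned int nbits)
{
	uint32_t value = 0;

	for (unsigned int i = 0; i < nbits; i++)
		if (irqs[intid + i].group)
			value |= 1U << i;

	return value;
}

int main(void)
{
	struct fake_irq irqs[32] = { [3].group = true, [5].group = true };

	/* Bits 3 and 5 set: prints 0x00000028. */
	printf("0x%08x\n", (unsigned int)read_bitmap(irqs, 0, 32));
	return 0;
}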

/* in vgic_update_vsgi(): push group/priority changes down to a GICv4.1 vSGI */
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));

/* in vgic_mmio_write_group() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->group = !!(val & BIT(i));
	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/* ... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	} else {
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	}

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_read_enable() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	if (irq->enabled)
		/* ... */

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_write_senable() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (!irq->enabled) {
			/* ... */
			irq->enabled = true;
			data = &irq_to_desc(irq->host_irq)->irq_data;
			/* ... */
			enable_irq(irq->host_irq);
		}
		/* ... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
		/* ... */
	}
	/* ... */
	bool was_high = irq->line_level;
	/*
	 * Resample the physical line: the device may have changed state
	 * while the interrupt was disabled at the VGIC level.
	 */
	irq->line_level = vgic_get_phys_line_level(irq);
	/* ... */
	if (!irq->active && was_high && !irq->line_level)
		/* ... */
	irq->enabled = true;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	/* ... */
	vgic_put_irq(vcpu->kvm, irq);
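
When a mapped level interrupt is re-enabled, the handler resamples the physical line and, if the line dropped while the interrupt was disabled and the virtual interrupt is not active, clears the stale physical active state. A condensed restatement of that condition as a pure predicate (illustrative helper, not a kernel function):

#include <assert.h>
#include <stdbool.h>

/*
 * Mirrors the "!irq->active && was_high && !irq->line_level" test above:
 * only deactivate the physical interrupt if the virtual one is not
 * active and the line fell while the interrupt was disabled.
 */
static bool needs_phys_deactivate(bool active, bool was_high, bool line_level)
{
	return !active && was_high && !line_level;
}

int main(void)
{
	assert(needs_phys_deactivate(false, true, false));	/* line dropped */
	assert(!needs_phys_deactivate(true, true, false));	/* still active */
	assert(!needs_phys_deactivate(false, true, true));	/* line still high */
	assert(!needs_phys_deactivate(false, false, false));	/* never asserted */
	return 0;
}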

/* in vgic_mmio_write_cenable() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
		disable_irq_nosync(irq->host_irq);

	irq->enabled = false;

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_uaccess_write_senable() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->enabled = true;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_uaccess_write_cenable() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->enabled = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_read_pending(): HW SGIs have their pending state read back from the GIC */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/* ... */
		err = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING, &val);
		WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
	}
	/* ... */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);

/* in is_vgic_v2_sgi() */
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
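
SGIs occupy INTIDs 0..15, so the helper reduces to a range check plus the VM's GIC model. A user-space restatement, assuming vgic_irq_is_sgi() is the usual INTID < 16 test (all names below are stand-ins, not kernel definitions):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum gic_model { GIC_V2, GIC_V3 };	/* stand-ins for the KVM device types */

#define NR_SGIS 16			/* SGIs are INTIDs 0..15 */

static bool is_v2_sgi(uint32_t intid, enum gic_model model)
{
	return intid < NR_SGIS && model == GIC_V2;
}

int main(void)
{
	assert(is_v2_sgi(5, GIC_V2));
	assert(!is_v2_sgi(5, GIC_V3));	/* v3 SGIs take the normal path */
	assert(!is_v2_sgi(42, GIC_V2));	/* an SPI, never an SGI */
	return 0;
}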

/* in vgic_mmio_write_spending(): GICv2 SGI bits are write-ignored here */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
		vgic_put_irq(vcpu->kvm, irq);
	/* ... */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/* HW SGI? Ask the GIC to set its pending bit */
		err = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING, true);
		WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		/* ... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
		/* ... */
	}

	irq->pending_latch = true;
	if (irq->hw)
		/* ... */

	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_uaccess_write_spending() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	/* ... */
		irq->source |= BIT(vcpu->vcpu_id);

	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_hw_irq_cpending(); must be called with irq->irq_lock held */
	irq->pending_latch = false;
	/* ... */
	if (!irq->active)
		/* ... */

/* in vgic_mmio_write_cpending() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
		vgic_put_irq(vcpu->kvm, irq);
	/* ... */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/* HW SGI? Ask the GIC to clear its pending bit */
		err = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING, false);
		WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		/* ... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
		/* ... */
	}

	if (irq->hw)
		/* ... */
	else
		irq->pending_latch = false;

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_uaccess_write_cpending() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	/* ... */
		irq->source = 0;

	irq->pending_latch = false;

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);
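
On GICv2 each pending SGI carries an 8-bit source mask, one bit per sending vCPU: the uaccess set-pending path ORs in the writer's bit, and the clear-pending path wipes the whole mask because the right source cannot be known. A sketch of that bookkeeping (struct fake_sgi and the helpers are illustrative):

#include <assert.h>
#include <stdint.h>

/* Illustrative per-SGI source mask, one bit per sending vCPU (GICv2). */
struct fake_sgi {
	uint8_t source;
};

static void uaccess_set_pending(struct fake_sgi *sgi, unsigned int vcpu_id)
{
	sgi->source |= 1U << vcpu_id;	/* irq->source |= BIT(vcpu->vcpu_id) */
}

static void uaccess_clear_pending(struct fake_sgi *sgi)
{
	sgi->source = 0;		/* irq->source = 0: blow the set away */
}

int main(void)
{
	struct fake_sgi sgi = { 0 };

	uaccess_set_pending(&sgi, 2);
	uaccess_set_pending(&sgi, 5);
	assert(sgi.source == 0x24);

	uaccess_clear_pending(&sgi);
	assert(sgi.source == 0);
	return 0;
}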

/* in vgic_access_active_prepare(): stop all vCPUs before touching shared active state */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);

/* in vgic_access_active_finish() */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);

/* in __vgic_mmio_read_active(): only the virtual active state is reported */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
	if (irq->active)
		/* ... */

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_read_active(): the read runs with the guest halted */
	mutex_lock(&vcpu->kvm->lock);
	/* ... */
	mutex_unlock(&vcpu->kvm->lock);

/* in vgic_hw_irq_change_active(); must be called with irq->irq_lock held */
	irq->active = active;

/* in vgic_mmio_change_active() */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		/* ... */
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/* GICv4.1 vSGIs track no active state, nothing to do */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		/* ... */
		irq->active = active;

		/*
		 * ... this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source ...
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
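
For a GICv2 SGI made active by a write, the source CPU has to be invented, since the architecture never exposes it: the code uses the requesting vCPU when one exists and falls back to vCPU 0 for userspace accesses. A sketch of that fallback (names are illustrative):

#include <assert.h>

/*
 * Choosing the recorded source CPU when ISACTIVER is written for a
 * GICv2 SGI: use the requester when one exists, otherwise vCPU 0,
 * because the real source cannot be recovered. Illustrative only.
 */
static int pick_active_source(const int *requester_vcpu_id)
{
	return requester_vcpu_id ? *requester_vcpu_id : 0;
}

int main(void)
{
	int vcpu3 = 3;

	assert(pick_active_source(&vcpu3) == 3);	/* guest-initiated */
	assert(pick_active_source(0) == 0);		/* userspace access */
	return 0;
}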

/* in __vgic_mmio_write_cactive() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_write_cactive(): deactivation also runs with the guest halted */
	mutex_lock(&vcpu->kvm->lock);
	/* ... */
	mutex_unlock(&vcpu->kvm->lock);

/* in __vgic_mmio_write_sactive() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_write_sactive() */
	mutex_lock(&vcpu->kvm->lock);
	/* ... */
	mutex_unlock(&vcpu->kvm->lock);

/* in vgic_mmio_read_priority(): one byte per INTID */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	val |= (u64)irq->priority << (i * 8);

	vgic_put_irq(vcpu->kvm, irq);

/*
 * ... we would need to make this VCPU exit and re-evaluate the
 * priorities, potentially ...
 */
/* in vgic_mmio_write_priority() */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	/* Narrow the priority range to what we actually support */
	irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
	if (irq->hw && vgic_irq_is_sgi(irq->intid))
		/* ... */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);
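
The priority write keeps only the implemented upper bits of each 8-bit field. Assuming VGIC_PRI_BITS is 5, GENMASK(7, 3) yields the mask 0xf8; the arithmetic can be checked in user space (GENMASK32 is a local stand-in for the kernel's GENMASK):

#include <assert.h>
#include <stdint.h>

/* Local stand-in for the kernel's GENMASK(): bits h..l set. */
#define GENMASK32(h, l)	((~0U >> (31 - (h))) & (~0U << (l)))

#define VGIC_PRI_BITS	5	/* assumed number of implemented priority bits */

int main(void)
{
	uint32_t val = 0xddccbbaa;	/* four packed 8-bit priority fields */
	unsigned int i = 2;		/* pick the third byte, 0xcc */
	uint8_t prio;

	assert(GENMASK32(7, 8 - VGIC_PRI_BITS) == 0xf8);

	/* Same expression as the handler: keep only the top 5 bits. */
	prio = (val >> (i * 8)) & GENMASK32(7, 8 - VGIC_PRI_BITS);
	assert(prio == 0xc8);
	return 0;
}

Lower bits read back as zero, which is how real GICs with fewer than 8 implemented priority bits behave.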

/* in vgic_mmio_read_config(): two bits per INTID */
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

	if (irq->config == VGIC_CONFIG_EDGE)
		/* ... */

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_mmio_write_config() */
	/*
	 * ... for PPIs this is IMPLEMENTATION DEFINED. The arch timer
	 * code relies on PPIs being level triggered, so we also
	 * make them read-only here.
	 */
	/* ... */
	irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (test_bit(i * 2 + 1, &val))
		irq->config = VGIC_CONFIG_EDGE;
	else
		irq->config = VGIC_CONFIG_LEVEL;

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
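
Each GICD_ICFGRn field is two bits wide and only the upper (odd) bit is meaningful: 1 means edge-triggered, 0 means level. A sketch of the decode feeding the VGIC_CONFIG_EDGE/VGIC_CONFIG_LEVEL assignment above (decode_cfg is an illustrative name):

#include <assert.h>
#include <stdint.h>

enum cfg { CFG_LEVEL, CFG_EDGE };

/* Two bits per interrupt; the odd bit of each pair selects edge (1). */
static enum cfg decode_cfg(uint32_t icfgr, unsigned int i)
{
	return ((icfgr >> (i * 2 + 1)) & 1) ? CFG_EDGE : CFG_LEVEL;
}

int main(void)
{
	uint32_t icfgr = 0x00000008;	/* field 1 = 0b10, i.e. edge */

	assert(decode_cfg(icfgr, 1) == CFG_EDGE);
	assert(decode_cfg(icfgr, 0) == CFG_LEVEL);
	return 0;
}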

/* in vgic_read_irq_line_level_info() */
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	/* ... */
	irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
		/* ... */

	vgic_put_irq(vcpu->kvm, irq);

/* in vgic_write_irq_line_level_info() */
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	/* ... */
	irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
	/* ... */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->line_level = new_level;
	if (new_level)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);

/* in match_region(): three-way comparator for bsearch() over the region table */
	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;
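
match_region() is written as a three-way comparator so the region table can be searched with bsearch(), with the offset smuggled in through the key pointer. A self-contained sketch of the same idiom (struct region here is a stand-in for struct vgic_register_region):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for struct vgic_register_region. */
struct region {
	unsigned int reg_offset;
	unsigned int len;
};

/* Three-way comparator in the style of match_region() above. */
static int match_region(const void *key, const void *elt)
{
	unsigned int offset = (unsigned int)(uintptr_t)key;
	const struct region *region = elt;

	if (offset < region->reg_offset)
		return -1;
	if (offset >= region->reg_offset + region->len)
		return 1;
	return 0;	/* the offset falls inside this region */
}

int main(void)
{
	/* Must be sorted by reg_offset and non-overlapping for bsearch(). */
	static const struct region regions[] = {
		{ 0x000, 0x10 }, { 0x080, 0x80 }, { 0x400, 0x400 },
	};
	const struct region *hit;

	hit = bsearch((void *)(uintptr_t)0x0a0, regions, 3,
		      sizeof(regions[0]), match_region);
	assert(hit && hit->reg_offset == 0x080);
	return 0;
}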

/* in check_region(): validate access width, alignment and INTID range */
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	/* ... */
	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}
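
check_region() maps the faulting offset back to an INTID to reject accesses beyond the allocated IRQs. Assuming the usual VGIC_ADDR_TO_INTID definition from vgic-mmio.h (a register file with B bits per interrupt covers 1024 interrupts, so INTID = offset * 8 / B), the arithmetic can be checked in user space:

#include <assert.h>

/*
 * Assumed to mirror VGIC_ADDR_TO_INTID/VGIC_ADDR_IRQ_MASK: a register
 * file with B bits per interrupt describes 1024 interrupts in
 * B * 1024 / 8 bytes, so a byte offset maps back to offset * 8 / B.
 */
#define ADDR_IRQ_MASK(bits)	(((bits) * 1024 / 8) - 1)
#define ADDR_TO_INTID(addr, bits) \
	((((addr) & ADDR_IRQ_MASK(bits)) * 8) / (bits))

int main(void)
{
	assert(ADDR_TO_INTID(0x04, 1) == 32);	/* ISENABLER:  1 bit/IRQ  */
	assert(ADDR_TO_INTID(0x08, 2) == 32);	/* ICFGR:      2 bits/IRQ */
	assert(ADDR_TO_INTID(0x20, 8) == 32);	/* IPRIORITYR: 8 bits/IRQ */
	return 0;
}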

/* in vgic_get_mmio_region() */
	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

/* in vgic_uaccess_read() */
	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

/* in vgic_uaccess_write() */
	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);

/* in vgic_uaccess(): dispatch a userspace access to the right helper */
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
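
Both uaccess helpers prefer a region's dedicated uaccess callback and fall back to the normal MMIO handler when none is set. A user-space sketch of that optional-override dispatch (struct region and the handlers are illustrative):

#include <assert.h>
#include <stdint.h>

/* Illustrative region with an optional userspace-access override. */
struct region {
	uint32_t (*read)(uint64_t addr);
	uint32_t (*uaccess_read)(uint64_t addr);	/* may be NULL */
};

static uint32_t mmio_read(uint64_t addr)    { (void)addr; return 0x11; }
static uint32_t special_read(uint64_t addr) { (void)addr; return 0x22; }

/* Prefer the uaccess handler when present, as vgic_uaccess_read() does. */
static uint32_t do_uaccess_read(const struct region *r, uint64_t addr)
{
	if (r->uaccess_read)
		return r->uaccess_read(addr);
	return r->read(addr);
}

int main(void)
{
	struct region plain   = { .read = mmio_read };
	struct region special = { .read = mmio_read,
				  .uaccess_read = special_read };

	assert(do_uaccess_read(&plain, 0) == 0x11);
	assert(do_uaccess_read(&special, 0) == 0x22);
	return 0;
}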

/* in dispatch_mmio_read() */
	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

/* in dispatch_mmio_write() */
	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

/* in vgic_register_dist_iodev(): hook the distributor into the KVM MMIO bus */
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	/* ... */
	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);