Lines matching "level-sensitive" in vgic-v2.c (KVM's GICv2 virtual interface code).
Excerpts below are grouped by function; "..." marks elided source lines.

// SPDX-License-Identifier: GPL-2.0-only
...
#include <linux/irqchip/arm-gic.h>
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

        cpuif->vgic_hcr |= GICH_HCR_UIE;
}
/*
 * Transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
        int lr;
        ...
        cpuif->vgic_hcr &= ~GICH_HCR_UIE;

        for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
                u32 val = cpuif->vgic_lr[lr];
                u32 cpuid, intid = val & GICH_LR_VIRTUALID;
                struct vgic_irq *irq;
                ...
                /* Notify fds when the guest EOI'ed a level-triggered SPI */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

                raw_spin_lock(&irq->irq_lock);

                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);

                if (irq->active && vgic_irq_is_sgi(intid))
                        irq->active_source = cpuid;

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & GICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (vgic_irq_is_sgi(intid))
                                irq->source |= (1 << cpuid);
                }

                /* Clear soft pending state when level irqs have been acked. */
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
                        irq->pending_latch = false;

                /*
                 * Level-triggered mapped IRQs are special because we only
                 * observe rising edges as input to the VGIC.  If the guest
                 * never acked the interrupt, we have to sample the physical
                 * line and set the line level, because the device state may
                 * have changed.  If this causes us to lower the level, we
                 * have to also clear the physical active state, since we
                 * would otherwise never be told when the line is asserted
                 * again.
                 */
                if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
                        irq->line_level = vgic_get_phys_line_level(irq);

                        if (!irq->line_level)
                                vgic_irq_set_phys_active(irq, false);
                }

                raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        cpuif->used_lrs = 0;
}
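The fold above is bitfield surgery on the cached LR words. As a sanity check,
the decode can be reproduced outside the kernel; the following is a minimal
userspace sketch (not kernel code) that restates the GICH_LR field layout from
the GICv2 architecture (the kernel takes these constants from
<linux/irqchip/arm-gic.h>) and folds one made-up LR value the way
vgic_v2_fold_lr_state() does.

/* Standalone sketch: decode one GICH_LR value (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LR_VIRTUALID            0x3ffU          /* bits [9:0]   */
#define LR_PHYSID_CPUID_SHIFT   10
#define LR_PHYSID_CPUID         (0x3ffU << LR_PHYSID_CPUID_SHIFT)
#define LR_PENDING_BIT          (1U << 28)
#define LR_ACTIVE_BIT           (1U << 29)
#define LR_STATE                (3U << 28)

int main(void)
{
        /* Made-up LR: SGI 1 from source vCPU 2, pending and active. */
        uint32_t val = 1 | (2U << LR_PHYSID_CPUID_SHIFT) |
                       LR_PENDING_BIT | LR_ACTIVE_BIT;

        uint32_t intid = val & LR_VIRTUALID;
        uint32_t cpuid = (val & LR_PHYSID_CPUID) >> LR_PHYSID_CPUID_SHIFT;
        bool active = val & LR_ACTIVE_BIT;
        bool pending = val & LR_PENDING_BIT;

        printf("intid=%u cpuid=%u active=%d pending=%d empty=%d\n",
               (unsigned)intid, (unsigned)(cpuid & 7), active, pending,
               !(val & LR_STATE));
        return 0;
}

For SGIs only the low three bits of the CPUID field are meaningful, which is
why the sketch masks with 7, mirroring the cpuid extraction elided above.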
/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 val = irq->intid;
        bool allow_pending = true;

        if (irq->active) {
                val |= GICH_LR_ACTIVE_BIT;
                if (vgic_irq_is_sgi(irq->intid))
                        val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
                ...
        }

        if (irq->group)
                val |= GICH_LR_GROUP1;

        if (irq->hw) {
                val |= GICH_LR_HW;
                val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
                /* Never set pending+active on a HW interrupt, as the pending
                 * state is kept at the physical distributor level. */
                if (irq->active)
                        allow_pending = false;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        val |= GICH_LR_EOI;
                        /* Software resampling doesn't work well with P+A */
                        if (irq->active)
                                allow_pending = false;
                }
        }

        if (allow_pending && irq_is_pending(irq)) {
                val |= GICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid)) {
                        u32 src = ffs(irq->source);

                        if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
                                           irq->intid))
                                return;

                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
                                irq->pending_latch = true;
                                val |= GICH_LR_EOI;
                        }
                }
        }

        /*
         * Level-triggered mapped IRQs are special because we only observe
         * rising edges as input to the VGIC.  We therefore lower the line
         * level here, so that we can take new virtual IRQs.  See
         * vgic_v2_fold_lr_state() for more info.
         */
        if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
                irq->line_level = false;

        /* The GICv2 LR only holds five bits of priority */
        val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
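The SGI path above is the subtle case: an SGI can be pending from several
source vCPUs at once, so ffs() picks the lowest-numbered source, clears its
bit, and re-latches the pending state if other sources remain, making each
source take its own trip through an LR. A minimal userspace sketch of that
bookkeeping, with the POSIX ffs() from <strings.h> standing in for the
kernel's:

#include <stdio.h>
#include <strings.h>

int main(void)
{
        unsigned int source = 0x0b;     /* SGI pending from vCPUs 0, 1 and 3 */

        /* Drain one source per iteration, lowest-numbered vCPU first. */
        while (source) {
                int src = ffs(source);          /* 1-based bit index */

                source &= ~(1U << (src - 1));
                printf("deliver SGI from vCPU %d, still pending: %s\n",
                       src - 1, source ? "yes" : "no");
        }
        return 0;
}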
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;

        vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
                GICH_VMCR_ENABLE_GRP0_MASK;
        vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
                GICH_VMCR_ENABLE_GRP1_MASK;
        vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
                GICH_VMCR_ACK_CTL_MASK;
        vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
                GICH_VMCR_FIQ_EN_MASK;
        vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
                GICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
                GICH_VMCR_EOI_MODE_MASK;
        vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
        vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
                 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

        cpu_if->vgic_vmcr = vmcr;
}
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
                GICH_VMCR_ENABLE_GRP0_SHIFT;
        vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
                GICH_VMCR_ENABLE_GRP1_SHIFT;
        vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
                GICH_VMCR_ACK_CTL_SHIFT;
        vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
                GICH_VMCR_FIQ_EN_SHIFT;
        vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
                GICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
                GICH_VMCR_EOI_MODE_SHIFT;
        vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
                GICH_VMCR_ALIAS_BINPOINT_SHIFT;
        vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
                GICH_VMCR_BINPOINT_SHIFT;
        vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
                      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
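vgic_v2_get_vmcr() is the exact inverse of vgic_v2_set_vmcr(): the same
shift/mask pairs, applied in the opposite direction. The one asymmetric field
is pmr, because GICH_VMCR stores only the top five bits of the eight-bit
priority, hence the extra GICV_PMR_PRIORITY_SHIFT (a shift by 3). A minimal
round-trip sketch of the pattern; the field positions below are illustrative
stand-ins, not the real GICH_VMCR layout:

#include <assert.h>
#include <stdint.h>

/* Illustrative fields: a 1-bit enable at bit 0, 5 priority bits at [31:27]. */
#define EN_SHIFT        0
#define EN_MASK         (0x1U << EN_SHIFT)
#define PRI_SHIFT       27
#define PRI_MASK        (0x1fU << PRI_SHIFT)
#define PMR_PRIO_SHIFT  3       /* only the top 5 of 8 priority bits survive */

int main(void)
{
        uint32_t grpen0 = 1, pmr = 0xa0;
        uint32_t vmcr;

        /* Pack, as vgic_v2_set_vmcr() does. */
        vmcr = (grpen0 << EN_SHIFT) & EN_MASK;
        vmcr |= ((pmr >> PMR_PRIO_SHIFT) << PRI_SHIFT) & PRI_MASK;

        /* Unpack, as vgic_v2_get_vmcr() does. */
        assert(((vmcr & EN_MASK) >> EN_SHIFT) == grpen0);
        assert((((vmcr & PRI_MASK) >> PRI_SHIFT) << PMR_PRIO_SHIFT) == pmr);
        return 0;
}

The round trip only holds because the sample priority has its low three bits
clear; those bits are dropped on packing, which is exactly what happens to
guest priorities in the five-bit GICv2 format.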
void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
        /* Start from a clean VMCR, then enable the virtual interface */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
        vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}
int vgic_v2_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
        ...
        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
                kvm_err("VGIC CPU and dist frames overlap\n");
                ret = -EINVAL;
                goto out;
        }
        ...
        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
        ...
        ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
                                    kvm_vgic_global_state.vcpu_base,
                                    KVM_VGIC_V2_CPU_SIZE, true);
        ...
        dist->ready = true;
        ...
}
/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, an error code
 * otherwise.
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
        int ret;
        ...
        if (!info->vctrl.start) {
                kvm_err("GICH not present in the firmware table\n");
                return -ENXIO;
        }

        if (!PAGE_ALIGNED(info->vcpu.start) ||
            !PAGE_ALIGNED(resource_size(&info->vcpu))) {
                ...
                ret = create_hyp_io_mappings(info->vcpu.start,
                                             resource_size(&info->vcpu),
                                             ...);
                ...
        }

        ret = create_hyp_io_mappings(info->vctrl.start,
                                     resource_size(&info->vctrl),
                                     ...);
        ...
        kvm_vgic_global_state.vcpu_base = info->vcpu.start;
        ...
        kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
        ...
}
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u64 used_lrs = cpu_if->used_lrs;
        u64 elrsr;
        int i;

        elrsr = readl_relaxed(base + GICH_ELRSR0);
        ...
        for (i = 0; i < used_lrs; i++) {
                if (elrsr & (1UL << i))
                        cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
                else
                        cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

                writel_relaxed(0, base + GICH_LR0 + (i * 4));
        }
}
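The point of the elrsr test above is to skip MMIO: GICH_ELRSR flags the LRs
the hardware has already emptied, so for those the cached copy only needs its
state bits cleared instead of paying for a readl_relaxed() of the register. A
userspace mock of that control flow, with a plain array standing in for the
GICH_LR register file and a hard-coded elrsr value (no real MMIO involved):

#include <stdint.h>
#include <stdio.h>

#define LR_STATE        (3U << 28)
#define NR_LRS          4

int main(void)
{
        uint32_t hw_lr[NR_LRS]  = { 0x10000001, 0x20000002, 0, 0x30000003 };
        uint32_t cached[NR_LRS] = { 0x10000001, 0x20000002, 0x30000005,
                                    0x30000003 };
        uint64_t elrsr = 1UL << 2;      /* hardware says LR2 is empty */

        for (int i = 0; i < NR_LRS; i++) {
                if (elrsr & (1UL << i))
                        cached[i] &= ~LR_STATE; /* skip the expensive read */
                else
                        cached[i] = hw_lr[i];   /* stands in for readl_relaxed() */

                hw_lr[i] = 0;                   /* stands in for writel_relaxed(0, ...) */
        }

        for (int i = 0; i < NR_LRS; i++)
                printf("LR%d = %#x\n", i, (unsigned)cached[i]);
        return 0;
}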
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
        void __iomem *base = kvm_vgic_global_state.vctrl_base;
        u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
        ...
        if (used_lrs) {
                save_lrs(vcpu, base);
                writel_relaxed(0, base + GICH_HCR);
        }
}
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        void __iomem *base = kvm_vgic_global_state.vctrl_base;
        u64 used_lrs = cpu_if->used_lrs;
        int i;
        ...
        if (used_lrs) {
                writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
                for (i = 0; i < used_lrs; i++) {
                        writel_relaxed(cpu_if->vgic_lr[i],
                                       base + GICH_LR0 + (i * 4));
                }
        }
}
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

        writel_relaxed(cpu_if->vgic_vmcr,
                       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
        writel_relaxed(cpu_if->vgic_apr,
                       kvm_vgic_global_state.vctrl_base + GICH_APR);
}
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

        cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
}
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

        vgic_v2_vmcr_sync(vcpu);
        cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}