
1 // SPDX-License-Identifier: GPL-2.0-only
42 * On a non-nesting VM (only running at EL0/EL1), the host hypervisor
44 * Consequently, most of the state that is modified by the guest (by ACK-ing
45 * and EOI-ing interrupts) is synced by KVM on each entry/exit, so that we
46 * keep a semi-consistent view of the interrupts.
58 * - on L2 load: move the in-memory L1 vGIC configuration into a shadow,
59 * per-CPU data structure that is used to populate the actual LRs. This is
65 * - on L2 put: perform the inverse transformation, so that the result of L2
66 * running becomes visible to L1 in the VNCR-accessible registers.
68 * - there is nothing to do on L2 entry, as everything will have happened
72 * - on L2 exit: emulate the HW bit, and deactivate the corresponding L1
81 * - on delivery of an MI to L0 while L2 is running: make the L1 MI pending,
85 * - L1 MI is a fully virtual interrupt, not linked to the host's MI. Its
89 * - because most of the ICH_*_EL2 registers live in the VNCR page, the
98 * - those backed by memory (LRs, APRs, HCR, VMCR): L1 can freely access
101 * - those that always trap (ELRSR, EISR, MISR): these are status registers
102 * that are built on the fly based on the in-memory state.
104 * Only L1 can access the ICH_*_EL2 registers. A non-NV L2 obviously cannot,
106 * based registers), or see the access redirected to L1 (registers that
117 "Separate virtual IRQ/FIQ settings not supported\n"); in vgic_state_is_nested()
150 mi_state->eisr = eisr; in vgic_compute_mi_state()
151 mi_state->elrsr = elrsr; in vgic_compute_mi_state()
152 mi_state->pend = pend; in vgic_compute_mi_state()
187 used_lrs -= hweight16(mi_state.elrsr); in vgic_v3_get_misr()
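
The status registers (ELRSR, EISR, MISR) are the "always trap" class described in the comment above: they are never read back from hardware but recomputed from the VNCR-resident copies of the L1 LRs. The fragment below is a self-contained sketch of that computation, not the kernel function; the bit positions follow the architected ICH_LR<n>_EL2 layout, and compute_mi_state()/struct mi_state are stand-ins for the vgic_compute_mi_state() logic whose fragments appear above.

#include <stdint.h>

/* ICH_LR<n>_EL2 fields, per the GICv3 architected layout */
#define LR_STATE	(3ULL << 62)	/* 00 means the LR is empty      */
#define LR_HW		(1ULL << 61)	/* backed by a physical INTID    */
#define LR_EOI		(1ULL << 41)	/* EOI maintenance irq requested */

struct mi_state {			/* sketch of the helper's state */
	uint16_t eisr;
	uint16_t elrsr;
};

/*
 * Rebuild EISR/ELRSR from an in-memory copy of the L1 LRs: an empty,
 * purely virtual LR that asked for an EOI maintenance interrupt sets
 * its EISR bit; any other empty LR is simply free again (ELRSR).
 * MISR.EOI can then be derived from eisr being non-zero.
 */
static void compute_mi_state(const uint64_t *lr, unsigned int nr_lr,
			     struct mi_state *mi)
{
	mi->eisr = mi->elrsr = 0;

	for (unsigned int i = 0; i < nr_lr; i++) {
		if (lr[i] & LR_STATE)
			continue;		/* still holds an interrupt */

		if (!(lr[i] & LR_HW) && (lr[i] & LR_EOI))
			mi->eisr |= 1U << i;
		else
			mi->elrsr |= 1U << i;
	}
}

This also gives a reading of the used_lrs -= hweight16(mi_state.elrsr) line above: LRs whose ELRSR bit is set are empty, so they presumably no longer count as "used" when evaluating MISR conditions that depend on how many LRs still hold a valid interrupt.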
235 if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI) { in vgic_v3_create_shadow_lr()
239 vgic_put_irq(vcpu->kvm, irq); in vgic_v3_create_shadow_lr()
248 lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid); in vgic_v3_create_shadow_lr()
250 vgic_put_irq(vcpu->kvm, irq); in vgic_v3_create_shadow_lr()
253 s_cpu_if->vgic_lr[index] = lr; in vgic_v3_create_shadow_lr()
260 container_of(s_cpu_if, struct shadow_if, cpuif)->lr_map = lr_map; in vgic_v3_create_shadow_lr()
261 s_cpu_if->used_lrs = index; in vgic_v3_create_shadow_lr()
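
These fragments implement the "on L2 load" step from the design comment: each L1 LR is folded into a shadow LR that can safely be handed to hardware. The interesting case is HW=1, where the pINTID programmed by L1 names one of L1's own virtual interrupts; KVM must either substitute the real physical INTID backing it or, failing that, run without the HW bit and emulate its effects. The following is a simplified, self-contained sketch of that folding, with lookup_host_intid() as a hypothetical helper standing in for the irq->hw / irq->hwintid lookup seen in the fragments above.

#include <stdbool.h>
#include <stdint.h>

#define LR_HW			(1ULL << 61)
#define LR_PHYS_ID_SHIFT	32
#define LR_PHYS_ID_MASK		(0x1fffULL << LR_PHYS_ID_SHIFT)	/* pINTID, bits [44:32] */

/* Hypothetical: find the host physical INTID backing an L1 virtual INTID. */
bool lookup_host_intid(uint32_t l1_intid, uint32_t *host_intid);

/* Fold one L1 LR into a shadow LR that hardware can consume. */
static uint64_t fold_l1_lr(uint64_t l1_lr)
{
	uint64_t lr = l1_lr;
	uint32_t host_intid;

	if (!(lr & LR_HW))
		return lr;	/* purely virtual: usable as-is */

	if (lookup_host_intid((lr & LR_PHYS_ID_MASK) >> LR_PHYS_ID_SHIFT,
			      &host_intid)) {
		/* point the shadow LR at the real physical interrupt */
		lr &= ~LR_PHYS_ID_MASK;
		lr |= ((uint64_t)host_intid << LR_PHYS_ID_SHIFT) & LR_PHYS_ID_MASK;
	} else {
		/* nothing backs it: clear HW and emulate it instead */
		lr &= ~(LR_HW | LR_PHYS_ID_MASK);
	}

	return lr;
}

The lr_map assignment at the end of vgic_v3_create_shadow_lr() records which L1 LRs were folded this way, so the exit and put paths can find them again.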
269 for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) { in vgic_v3_sync_nested()
287 irq->active = false; in vgic_v3_sync_nested()
289 vgic_put_irq(vcpu->kvm, irq); in vgic_v3_sync_nested()
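
vgic_v3_sync_nested() is the "on L2 exit" half of the HW-bit emulation: for every LR recorded in lr_map, if L1 had programmed a hardware-backed interrupt and the shadow LR comes back empty after L2 EOI'd it, the L1 interrupt standing in for the physical one has to be deactivated by hand, which is what the irq->active = false line above does. A rough, self-contained model of that check follows (reusing the LR_* constants from the earlier sketches); deactivate_l1_irq() is a hypothetical callback in place of the vgic_irq lookup and vgic_put_irq() pair, and the exact condition is an assumption, not the kernel's code.

/*
 * Sketch of the exit-time check: compare what L1 programmed with what
 * came back from hardware after L2 ran, and deactivate the backing L1
 * interrupt once it is no longer resident.
 */
static void sync_hw_bit_on_exit(uint64_t l1_lr, uint64_t shadow_lr_after,
				void (*deactivate_l1_irq)(uint32_t l1_intid))
{
	if (!(l1_lr & LR_HW) || !(l1_lr & LR_STATE))
		return;		/* nothing to emulate for this LR */

	if (!(shadow_lr_after & LR_STATE))
		deactivate_l1_irq((l1_lr & LR_PHYS_ID_MASK) >> LR_PHYS_ID_SHIFT);
}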
298 struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_create_shadow_state()
309 val = host_if->vgic_hcr & (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | in vgic_v3_create_shadow_state()
311 s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val; in vgic_v3_create_shadow_state()
312 s_cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2); in vgic_v3_create_shadow_state()
313 s_cpu_if->vgic_sre = host_if->vgic_sre; in vgic_v3_create_shadow_state()
316 s_cpu_if->vgic_ap0r[i] = __vcpu_sys_reg(vcpu, ICH_AP0RN(i)); in vgic_v3_create_shadow_state()
317 s_cpu_if->vgic_ap1r[i] = __vcpu_sys_reg(vcpu, ICH_AP1RN(i)); in vgic_v3_create_shadow_state()
326 struct vgic_v3_cpu_if *cpu_if = &shadow_if->cpuif; in vgic_v3_load_nested()
341 vcpu->arch.vgic_cpu.vgic_v3.used_lrs = cpu_if->used_lrs; in vgic_v3_load_nested()
347 struct vgic_v3_cpu_if *s_cpu_if = &shadow_if->cpuif; in vgic_v3_put_nested()
361 val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK); in vgic_v3_put_nested()
363 __vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr; in vgic_v3_put_nested()
366 __vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i]; in vgic_v3_put_nested()
367 __vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i]; in vgic_v3_put_nested()
370 for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) { in vgic_v3_put_nested()
374 val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE; in vgic_v3_put_nested()
377 s_cpu_if->vgic_lr[i] = 0; in vgic_v3_put_nested()
380 shadow_if->lr_map = 0; in vgic_v3_put_nested()
381 vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0; in vgic_v3_put_nested()
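
vgic_v3_put_nested() is the inverse transformation described in the comment at the top: EOIcount is copied from the shadow ICH_HCR_EL2 back into L1's VNCR-resident copy, VMCR and the APRs are written back, and the live STATE bits of each shadow LR are folded back into the L1 LR they were built from, as tracked by lr_map. A minimal sketch of the LR write-back, again reusing the LR_* constants from above (the function name is made up):

/* Make the outcome of running L2 visible to L1: copy the current STATE
 * bits of each shadow LR back into the corresponding L1 LR. */
static void unfold_shadow_lrs(uint64_t *l1_lrs, uint64_t *shadow_lrs,
			      unsigned long lr_map, unsigned int nr_lr)
{
	for (unsigned int i = 0; i < nr_lr; i++) {
		if (!(lr_map & (1UL << i)))
			continue;

		l1_lrs[i] &= ~LR_STATE;
		l1_lrs[i] |= shadow_lrs[i] & LR_STATE;

		shadow_lrs[i] = 0;	/* shadow copy is now stale */
	}
}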
386 * then we need to forward this to L1 so that it can re-sync the appropriate
394 kvm_vgic_inject_irq(vcpu->kvm, vcpu, in vgic_v3_handle_nested_maint_irq()
395 vcpu->kvm->arch.vgic.mi_intid, state, vcpu); in vgic_v3_handle_nested_maint_irq()
407 kvm_vgic_inject_irq(vcpu->kvm, vcpu, in vgic_v3_nested_update_mi()
408 vcpu->kvm->arch.vgic.mi_intid, level, vcpu); in vgic_v3_nested_update_mi()
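
Both vgic_v3_handle_nested_maint_irq() and vgic_v3_nested_update_mi() come down to the same idea from the comment block: the L1 maintenance interrupt is fully virtual, so its level boils down to "L1 has enabled its virtual interface and at least one maintenance condition is set", and it is injected like any other interrupt via kvm_vgic_inject_irq(). A compact sketch, with inject_l1_irq() as a hypothetical stand-in for that call:

#include <stdbool.h>
#include <stdint.h>

#define ICH_HCR_EN	(1ULL << 0)	/* ICH_HCR_EL2.En */

/* Hypothetical injection hook standing in for kvm_vgic_inject_irq(). */
void inject_l1_irq(uint32_t intid, bool level);

/* Recompute and (re)assert the level of L1's maintenance interrupt. */
static void update_l1_mi(uint64_t l1_ich_hcr, uint16_t l1_misr,
			 uint32_t mi_intid)
{
	bool level = (l1_ich_hcr & ICH_HCR_EN) && l1_misr;

	inject_l1_irq(mi_intid, level);
}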