Lines Matching +full:spe +full:- +full:pmu

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2015 - ARM Ltd
8 #include <hyp/sysreg-sr.h>
10 #include <linux/arm-smccc.h>
26 #include <asm/debug-monitors.h>
32 /* Non-VHE specific context */
46 val = vcpu->arch.cptr_el2; in __activate_traps()
70 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; in __activate_traps()
109 write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); in __deactivate_traps()
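Only one line of each trap routine matched (46 and 109). A minimal sketch of the surrounding shape, assuming the usual nVHE pattern — everything outside the two matched lines, including the signatures, is an assumption:

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	/* matched line 46: the trap configuration is precomputed on the vcpu */
	u64 val = vcpu->arch.cptr_el2;

	write_sysreg(val, cptr_el2);	/* assumed: install it */
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/* matched line 109: restore the host HCR_EL2 saved in the per-CPU init params */
	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
}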
115 /* Save VGICv3 state on non-VHE systems */
119 __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_save_state()
120 __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_save_state()
124 /* Restore VGICv3 state on non-VHE systems */
128 __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_restore_state()
129 __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_restore_state()
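Read together, the four vGICv3 matches encode an ordering rule: on the way out, save the state and only then drop the traps; on the way in, raise the traps before restoring the state. Reassembled as a sketch — the GICv3 static-key guard is an assumption, only the four inner calls are matches:

static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		/* save first, while the traps are still in force */
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		/* traps up before any state goes back in */
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}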
139 struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events; in __pmu_switch_to_guest() local
141 if (pmu->events_host) in __pmu_switch_to_guest()
142 write_sysreg(pmu->events_host, pmcntenclr_el0); in __pmu_switch_to_guest()
144 if (pmu->events_guest) in __pmu_switch_to_guest()
145 write_sysreg(pmu->events_guest, pmcntenset_el0); in __pmu_switch_to_guest()
147 return (pmu->events_host || pmu->events_guest); in __pmu_switch_to_guest()
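The matches at 139–147 are essentially the whole body of __pmu_switch_to_guest(): disable whatever counters the host had enabled, enable the guest's set, and report whether any switching took place so the exit path knows to undo it. Reassembled — only the signature is an assumption:

static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	/* true if anything changed, i.e. a switch back is needed on exit */
	return (pmu->events_host || pmu->events_guest);
}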
155 struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events; in __pmu_switch_to_host() local
157 if (pmu->events_guest) in __pmu_switch_to_host()
158 write_sysreg(pmu->events_guest, pmcntenclr_el0); in __pmu_switch_to_host()
160 if (pmu->events_host) in __pmu_switch_to_host()
161 write_sysreg(pmu->events_host, pmcntenset_el0); in __pmu_switch_to_host()
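Its counterpart at 155–161 is the exact mirror: the guest's counters are cleared first, then the host's re-armed, so no counter is ever enabled for both worlds at once. Reassembled the same way, signature assumed:

static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}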
212 if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)))) in kvm_get_exit_handler_array()
231 struct kvm *kvm = kern_hyp_va(vcpu->kvm); in early_exit_filter()
235 * As we have caught the guest red-handed, decide that it isn't in early_exit_filter()
237 * can try and fix it by re-initializing the vcpu with in early_exit_filter()
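The two comment fragments in early_exit_filter() describe demoting a misbehaving guest: mark the vcpu invalid so it cannot be entered again until the VMM re-initializes it. A guess at the shape this implies — the condition, the flag, and the exit-code constants are all assumptions, not matches:

	if (unlikely(vcpu_mode_is_32bit(vcpu) && !kvm_supports_32bit_el0())) {
		/* caught red-handed: invalidate the vcpu (assumed flag name) */
		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}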
247 /* Switch to the guest for legacy non-VHE systems */
267 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; in __kvm_vcpu_run()
268 host_ctxt->__hyp_running_vcpu = vcpu; in __kvm_vcpu_run()
269 guest_ctxt = &vcpu->arch.ctxt; in __kvm_vcpu_run()
275 * We must flush and disable the SPE buffer for nVHE, as in __kvm_vcpu_run()
285 * ongoing page-table walks that have started before we in __kvm_vcpu_run()
287 * above disabling of SPE and TRBE. in __kvm_vcpu_run()
289 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes", in __kvm_vcpu_run()
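The fragments at 275–289 encode two entry-side ordering rules: drain and disable the host's SPE/TRBE buffers before the EL1&0 translation regime changes, then synchronize so that in-flight page-table walks have observed the disable. A sketch of that sequence — the helper name and the barrier choice are assumptions:

	__debug_save_host_buffers_nvhe(vcpu);	/* flush + disable SPE/TRBE */

	/*
	 * Let walks started under the host's translation regime finish,
	 * having observed the disable above (DDI0487I.a D8.1.5).
	 */
	dsb(nsh);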
297 * We must restore the 32-bit state before the sysregs, thanks in __kvm_vcpu_run()
298 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72). in __kvm_vcpu_run()
301 * and #1319367 (A72), we must ensure that all VM-related sysregs are in __kvm_vcpu_run()
307 mmu = kern_hyp_va(vcpu->arch.hw_mmu); in __kvm_vcpu_run()
308 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in __kvm_vcpu_run()
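Together with the two stage-2 lines just above, the erratum comments fix the restore order on entry: AArch32 state first (#852523/#853709), then the remaining VM sysregs, and stage-2 translation only after that (#1319537/#1319367). As a sketch — the two sysreg helpers are assumptions, only the __load_stage2() call is a match:

	__sysreg32_restore_state(vcpu);		/* 32-bit state before the sysregs */
	__sysreg_restore_state_nvhe(guest_ctxt);	/* all VM sysregs in place... */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));	/* ...before S2 is enabled */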
340 if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) in __kvm_vcpu_run()
345 * This must come after restoring the host sysregs, since a non-VHE in __kvm_vcpu_run()
346 * system may enable SPE here and make use of the TTBRs. in __kvm_vcpu_run()
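The exit path has the mirror constraint: the host's sysregs, and with them the host TTBRs, must be back in place before SPE may be switched on again. A sketch of that tail, helper names assumed:

	__sysreg_restore_state_nvhe(host_ctxt);		/* host TTBRs valid again */
	__debug_restore_host_buffers_nvhe(vcpu);	/* only now re-enable SPE/TRBE */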
357 host_ctxt->__hyp_running_vcpu = NULL; in __kvm_vcpu_run()
370 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; in hyp_panic()
371 vcpu = host_ctxt->__hyp_running_vcpu; in hyp_panic()
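The last fragments close the loop on __hyp_running_vcpu: __kvm_vcpu_run() records the vcpu in the host context on the way in (line 268), clears it on the way out (line 357), and hyp_panic() reads it back to decide whether the panic happened while a guest was loaded. A sketch of that panic-side use — the cleanup calls inside the if are assumptions:

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		/* assumed: unwind the guest context before reporting */
		__deactivate_traps(vcpu);
		__sysreg_restore_state_nvhe(host_ctxt);
	}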