Lines matching +full:ipa +full:- +full:shared (code-search excerpt; the fragments below appear to come from the arm64 KVM nVHE hypervisor entry code, arch/arm64/kvm/hyp/nvhe/hyp-main.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 */

#include <asm/pgtable-types.h>

In __hyp_sve_save_guest():

        sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
        __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
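
Before the save, ZCR_EL2 is programmed for the vCPU's maximum vector length: the in-memory layout of saved SVE registers depends on the effective VL, so a consistent (maximum) VL must be used. Once the save completes, ZCR_EL2 is put back to the host's maximum shared VL (kvm_host_sve_max_vl).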

In __hyp_sve_restore_host():

        /*
         * Note that this constrains the PE to the maximum shared VL
         * ...
         */
        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
        __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
                            &sve_state->fpsr,
                            true);
        write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
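
The converse path restores the host's SVE state, again at kvm_host_sve_max_vl so the layout matches what was saved. As the in-code comment records, this pins the PE to the maximum VL shared across CPUs and would need revisiting to support larger VLs. The host's own ZCR_EL1 is written back last, after the registers have been reloaded at the wider VL.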

In fpsimd_sve_sync():

        __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);

        has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
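
Note the kern_hyp_va() translation: the hypervisor at EL2 runs under its own translation regime, so host kernel pointers (here vcpu->kvm) must be converted to hyp virtual addresses before they can be dereferenced.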

In flush_debug_state():

        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;

        if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
                hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
        else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
                hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;

In sync_debug_state():

        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
                host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
        else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
                host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
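
The two helpers mirror each other: flush_debug_state() copies debug state from the host's vCPU into the hypervisor's private copy before the guest runs, and sync_debug_state() copies it back afterwards. Which register file moves depends on debug_owner, i.e. whether the guest or the host currently owns the debug registers.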

In flush_hyp_vcpu():

        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
        ...
        hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;

        hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
        hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);

        hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
        hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
        hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
                                       (HCR_TWI | HCR_TWE);

        hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;

        hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;

        hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
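
flush_hyp_vcpu() pulls the host's view of the vCPU into the hypervisor's trusted copy, sanitizing host-controlled inputs on the way in: the SVE buffer pointer goes through kern_hyp_va(), the vector length is clamped with min() against kvm_host_sve_max_vl, and only the WFI/WFE trap bits are accepted from the host's hcr_el2, snapshotted with READ_ONCE() so the mask is applied to a single consistent value. A minimal sketch of that last pattern; the helper name is illustrative, not from the kernel:

        static inline u64 sanitise_host_hcr(u64 hyp_hcr, const struct kvm_vcpu *host_vcpu)
        {
                /* Single snapshot of the host-writable field. */
                u64 host_hcr = READ_ONCE(host_vcpu->arch.hcr_el2);

                hyp_hcr &= ~(HCR_TWI | HCR_TWE);           /* drop stale trap bits */
                hyp_hcr |= host_hcr & (HCR_TWI | HCR_TWE); /* accept only WFI/WFE traps */
                return hyp_hcr;
        }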

In sync_hyp_vcpu():

        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
        struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
        struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
        unsigned int i;

        fpsimd_sve_sync(&hyp_vcpu->vcpu);
        ...
        host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;

        host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;

        host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;

        host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;

        host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
        for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
                host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
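
sync_hyp_vcpu() is the return trip: after fpsimd_sve_sync() writes back the FP/SIMD/SVE state, the register context, HCR_EL2, fault information and flags are copied out to the host's vCPU. For the vGIC, only vgic_hcr and the list registers actually in use are transferred, and the used_lrs bound comes from the hypervisor's own copy rather than from host-writable memory.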

In handle___pkvm_vcpu_load():

        hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
        hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
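
The same WFx-trap filter appears at vCPU load time: the host may toggle HCR_TWE/HCR_TWI between runs, but no other HCR_EL2 bits are taken from it.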

In handle___kvm_vcpu_run():

        ret = -EINVAL;
        ...
        ret = -EINVAL;
        ...
        ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
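
ret defaults to failure here too: the two -EINVAL assignments are early-out paths for validation failures in the elided context (for instance, no hyp vCPU currently loaded), and only a fully validated request reaches __kvm_vcpu_run().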

In pkvm_refill_memcache():

        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
                               host_vcpu->arch.pkvm_memcache.nr_pages,
                               &host_vcpu->arch.pkvm_memcache);
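
Reading the call, refill_memcache() moves pages from the host-provided memcache (third argument) into the hypervisor's private per-vCPU one (first argument) until it holds as many pages as the host advertises in nr_pages; those donated pages then back the hypervisor's stage-2 page-table allocations for the guest.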

In handle___pkvm_host_share_guest(), handle___pkvm_host_unshare_guest(),
handle___pkvm_host_relax_perms_guest(), handle___pkvm_host_wrprotect_guest(),
handle___pkvm_host_test_clear_young_guest() and handle___pkvm_host_mkyoung_guest():

        int ret = -EINVAL;
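
Every host-facing memory-sharing hypercall starts from ret = -EINVAL and only overwrites it once its arguments have been validated, so any early exit denies the request. A minimal sketch of that shared shape, assuming a hypothetical handler and operation (handle___pkvm_host_foo_guest() and __pkvm_host_foo_guest() are made up; the lookup follows the pattern used elsewhere in this file):

        static void handle___pkvm_host_foo_guest(struct kvm_cpu_context *host_ctxt)
        {
                DECLARE_REG(u64, gfn, host_ctxt, 1);    /* argument from host's x1 */
                struct pkvm_hyp_vcpu *hyp_vcpu;
                int ret = -EINVAL;                      /* deny unless fully validated */

                hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
                if (!hyp_vcpu)
                        goto out;                       /* no vCPU loaded: fail closed */

                ret = __pkvm_host_foo_guest(gfn, hyp_vcpu);     /* hypothetical op */
        out:
                cpu_reg(host_ctxt, 1) = ret;            /* result returned in x1 */
        }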

In handle___kvm_tlb_flush_vmid_ipa():

        DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
        ...
        __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);

In handle___kvm_tlb_flush_vmid_ipa_nsh():

        DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
        ...
        __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
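
DECLARE_REG() is how these handlers unpack typed arguments from the host's saved general-purpose registers; in this file it is defined as, approximately:

        #define DECLARE_REG(type, name, ctxt, reg) \
                type name = (type)cpu_reg(ctxt, (reg))

so DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2) declares a local ipa loaded from the host's x2. The _nsh variant performs the same invalidation with non-shareable scope, for callers that only need the local CPU's TLB entries for that IPA dropped.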

In handle___pkvm_tlb_flush_vmid():

        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);

In handle___pkvm_init():

        /*
         * ... will tail-call in __pkvm_init_finalise() which will have to deal
         * ...
         */
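
The comment refers to the fact that __pkvm_init() cannot simply return on success: it switches the hypervisor onto its newly created page tables and stack, so control instead continues in __pkvm_init_finalise(), which must then construct the return to the host context by hand.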

In handle_host_hcall():

        /*
         * ... finalisation and (2) finalisation is performed on a per-CPU
         * ... returns -EPERM after the first call for a given CPU.
         */
        ...
        id -= KVM_HOST_SMCCC_ID(0);
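
Subtracting KVM_HOST_SMCCC_ID(0) rebases the SMCCC function ID to zero so it can index the hypercall table directly. A condensed sketch of that table-dispatch pattern, roughly as this file builds it (entries and the dispatch lines are abridged):

        typedef void (*hcall_t)(struct kvm_cpu_context *);

        #define HANDLE_FUNC(x)  [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

        static const hcall_t host_hcall[] = {
                HANDLE_FUNC(__kvm_vcpu_run),
                HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
                /* ... one entry per hypercall ... */
        };

        /* dispatch (abridged): bounds-check, then indirect call */
        hcall_t hfn;

        if (unlikely(id >= ARRAY_SIZE(host_hcall)))
                goto inval;
        hfn = host_hcall[id];
        if (unlikely(!hfn))
                goto inval;
        hfn(host_ctxt);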