xref: /linux/arch/arm64/include/asm/kvm_hyp.h (revision 4f9786035f9e519db41375818e1d0b5f20da2f10)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#if defined(__KVM_VHE_HYPERVISOR__)

#define read_sysreg_el0(r)	read_sysreg_s(r##_EL02)
#define write_sysreg_el0(v,r)	write_sysreg_s(v, r##_EL02)
#define read_sysreg_el1(r)	read_sysreg_s(r##_EL12)
#define write_sysreg_el1(v,r)	write_sysreg_s(v, r##_EL12)
#define read_sysreg_el2(r)	read_sysreg_s(r##_EL1)
#define write_sysreg_el2(v,r)	write_sysreg_s(v, r##_EL1)

#else // !__KVM_VHE_HYPERVISOR__

#if defined(__KVM_NVHE_HYPERVISOR__)
#define VHE_ALT_KEY	ARM64_KVM_HVHE
#else
#define VHE_ALT_KEY	ARM64_HAS_VIRT_HOST_EXTN
#endif

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 VHE_ALT_KEY)			\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 VHE_ALT_KEY)			\
					 : : "rZ" (__val));		\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)

#endif	// __KVM_VHE_HYPERVISOR__
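/*
 * Usage sketch (illustrative only, not part of the original header):
 *
 *	u64 sctlr = read_sysreg_el1(SYS_SCTLR);
 *
 * On a VHE hypervisor this expands to read_sysreg_s(SYS_SCTLR_EL12), i.e.
 * SCTLR_EL1 is reached through its *_EL12 alias while the hypervisor itself
 * runs with HCR_EL2.E2H set. In the other configurations, the ALTERNATIVE
 * above patches at runtime between the plain SYS_SCTLR_EL1 access and the
 * *_EL12 form, keyed on VHE_ALT_KEY.
 */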

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as it's always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)

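/*
 * Example (illustrative, not part of the original header):
 * __kvm_swab32(0x12345678) expands entirely in place and evaluates to
 * 0x78563412, with no possibility of an out-of-line byteswap call from
 * hyp code.
 */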
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

u64 __gic_v3_get_lr(unsigned int lr);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu);
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);

u64 __guest_enter(struct kvm_vcpu *vcpu);

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
		void (*fn)(void));
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif

extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);

extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);

#endif /* __ARM64_KVM_HYP_H__ */