/linux/arch/arm64/kvm/hyp/nvhe/

hyp-main.c
    25  void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
    90  __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));  in fpsimd_sve_sync()
   164  static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
   166  DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);  in handle___pkvm_vcpu_load()
   167  DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);  in handle___pkvm_vcpu_load()
   168  DECLARE_REG(u64, hcr_el2, host_ctxt, 3);  in handle___pkvm_vcpu_load()
   185  static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
   197  static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
   199  DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);  in handle___kvm_vcpu_run()
   235  cpu_reg(host_ctxt,  in handle___kvm_vcpu_run()
   247  handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
   272  handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
   293  handle___pkvm_host_relax_perms_guest(struct kvm_cpu_context *host_ctxt)
   312  handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt)
   333  handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *host_ctxt)
   355  handle___pkvm_host_mkyoung_guest(struct kvm_cpu_context *host_ctxt)
   373  handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
   380  handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
   385  handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
   394  handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
   404  handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
   413  handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
   420  handle___pkvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
   436  handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
   443  handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
   448  handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
   457  handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
   462  handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
   467  handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
   474  handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
   481  handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
   498  handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
   505  handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
   512  handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
   519  handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
   544  handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
   549  handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
   559  handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
   569  handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
   617  handle_host_hcall(struct kvm_cpu_context *host_ctxt)
   653  default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
   658  handle_host_smc(struct kvm_cpu_context *host_ctxt)
   675  handle_trap(struct kvm_cpu_context *host_ctxt)
   [all...]
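The handle___*() entries above all follow the same nVHE hypercall convention: the trapped host's general-purpose registers arrive in host_ctxt, arguments are pulled out of x1..xN with DECLARE_REG(), and the result is written back into the host's x1 before returning. A minimal sketch of that shape, assuming the DECLARE_REG()/cpu_reg() helpers visible in the listing; the handler name and the pkvm_do_something() callee are hypothetical placeholders:

	#include <linux/kvm_host.h>
	#include <asm/kvm_hyp.h>		/* cpu_reg(), DECLARE_REG() */

	/* Hypothetical hyp-side operation standing in for the real pKVM helpers. */
	static int pkvm_do_something(pkvm_handle_t handle, u64 arg)
	{
		return 0;
	}

	/* Hypothetical handler following the pattern of the entries above. */
	static void handle___pkvm_do_something(struct kvm_cpu_context *host_ctxt)
	{
		DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);	/* host's x1 */
		DECLARE_REG(u64, arg, host_ctxt, 2);			/* host's x2 */

		/* The return value travels back to the host in x1. */
		cpu_reg(host_ctxt, 1) = pkvm_do_something(handle, arg);
	}

handle_host_hcall() (line 617) dispatches a trapped HVC to one of these handlers by hypercall ID, which is why every handler takes only host_ctxt and decodes its own arguments.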
psci-relay.c
    20  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
    72  static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
    74  return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),  in psci_forward()
    75  cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));  in psci_forward()
   107  static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
   109  DECLARE_REG(u64, mpidr, host_ctxt, 1);  in psci_cpu_on()
   110  DECLARE_REG(unsigned long, pc, host_ctxt, 2);  in psci_cpu_on()
   111  DECLARE_REG(unsigned long, r0, host_ctxt,  in psci_cpu_on()
   151  psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
   179  psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
   206  struct kvm_cpu_context *host_ctxt;  in __kvm_host_psci_cpu_entry()
   227  psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
   239  psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
   265  psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
   281  kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
   [all...]
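psci_forward() (lines 72-75) shows the relay pattern of this file: PSCI SMCs trapped from the host are re-issued to EL3 with the host's original x0..x3 and the result handed straight back. A sketch of that forwarding, assuming cpu_reg() and the kernel's arm_smccc_1_1_smc() helper; the real code goes through a local psci_call() wrapper and the sketch function name is made up:

	#include <linux/arm-smccc.h>
	#include <asm/kvm_hyp.h>		/* cpu_reg() */

	/* Re-issue the host's PSCI SMC at EL3 with its original x0..x3. */
	static unsigned long psci_forward_sketch(struct kvm_cpu_context *host_ctxt)
	{
		struct arm_smccc_res res;

		arm_smccc_1_1_smc(cpu_reg(host_ctxt, 0),	/* PSCI function ID */
				  cpu_reg(host_ctxt, 1),
				  cpu_reg(host_ctxt, 2),
				  cpu_reg(host_ctxt, 3),
				  &res);
		return res.a0;
	}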
switch.c
   238  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run()
   255  host_ctxt = host_data_ptr(host_ctxt);  in __kvm_vcpu_run()
   256  host_ctxt->__hyp_running_vcpu = vcpu;  in __kvm_vcpu_run()
   261  __sysreg_save_state_nvhe(host_ctxt);  in __kvm_vcpu_run()
   326  __sysreg_restore_state_nvhe(host_ctxt);  in __kvm_vcpu_run()
   345  host_ctxt->__hyp_running_vcpu = NULL;  in __kvm_vcpu_run()
   355  struct kvm_cpu_context *host_ctxt;  in hyp_panic()
   358  host_ctxt = host_data_ptr(host_ctxt);  in hyp_panic()
   [all...]
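The __kvm_vcpu_run() hits above outline the nVHE bracket around a guest entry: publish the running vCPU in host_ctxt (so hyp_panic() can report it), save the host's EL1 system registers, run the guest, then restore and clear the pointer. A condensed sketch of that bracket, assuming host_data_ptr(), the nVHE sysreg helpers and __guest_enter(); trap handling, FP/SVE state and exit fixups are omitted:

	#include <linux/kvm_host.h>
	#include <asm/kvm_hyp.h>

	static int __kvm_vcpu_run_sketch(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
		struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
		u64 exit_code;

		host_ctxt->__hyp_running_vcpu = vcpu;	/* visible to hyp_panic() */

		__sysreg_save_state_nvhe(host_ctxt);	/* stash host EL1 sysregs */
		__sysreg_restore_state_nvhe(guest_ctxt);

		exit_code = __guest_enter(vcpu);	/* returns on the next guest exit */

		__sysreg_save_state_nvhe(guest_ctxt);
		__sysreg_restore_state_nvhe(host_ctxt);

		host_ctxt->__hyp_running_vcpu = NULL;
		return exit_code;
	}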
tlb.c
    24  struct kvm_cpu_context *host_ctxt;  in enter_vmid_context()
    27  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;  in enter_vmid_context()
    28  vcpu = host_ctxt->__hyp_running_vcpu;  in enter_vmid_context()
   121  struct kvm_cpu_context *host_ctxt;  in exit_vmid_context()
   124  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;  in exit_vmid_context()
   125  vcpu = host_ctxt->__hyp_running_vcpu;  in exit_vmid_context()
ffa.c
   792  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
   820  if (!do_ffa_features(&res, host_ctxt))  in kvm_host_ffa_handler()
   825  do_ffa_rxtx_map(&res, host_ctxt);  in kvm_host_ffa_handler()
   828  do_ffa_rxtx_unmap(&res, host_ctxt);  in kvm_host_ffa_handler()
   832  do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);  in kvm_host_ffa_handler()
   835  do_ffa_mem_reclaim(&res, host_ctxt);  in kvm_host_ffa_handler()
   839  do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);  in kvm_host_ffa_handler()
   842  do_ffa_mem_frag_tx(&res, host_ctxt);  in kvm_host_ffa_handler()
   845  do_ffa_version(&res, host_ctxt);  in kvm_host_ffa_handler()
   848  do_ffa_part_get(&res, host_ctxt);  in kvm_host_ffa_handler()
   [all...]
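kvm_host_ffa_handler() is a dispatcher keyed on the FF-A function ID: each do_ffa_*() helper (static to ffa.c) fills a struct arm_smccc_res, which is then copied back into the host's registers. A trimmed sketch of that shape, keeping only the two memory-transfer cases visible above; the ffa_set_retval() copy-out helper is an assumption:

	#include <linux/arm_ffa.h>
	#include <linux/arm-smccc.h>
	#include <asm/kvm_hyp.h>

	/* Trimmed dispatcher sketch: route a trapped FF-A SMC by function ID. */
	static bool kvm_host_ffa_handler_sketch(struct kvm_cpu_context *host_ctxt,
						u32 func_id)
	{
		struct arm_smccc_res res;

		switch (func_id) {
		case FFA_FN64_MEM_SHARE:
			do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
			break;
		case FFA_FN64_MEM_LEND:
			do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
			break;
		default:
			return false;	/* not ours; let the default SMC path run */
		}

		/* Copy res.a0..a3 back into the host's x0..x3 (helper name assumed). */
		ffa_set_retval(host_ctxt, &res);
		return true;
	}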
setup.c
   281  struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);  in __pkvm_init_finalise()
   331  cpu_reg(host_ctxt, 1) = ret;  in __pkvm_init_finalise()
   333  __host_enter(host_ctxt);  in __pkvm_init_finalise()
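The setup.c lines show how the one-shot __pkvm_init_finalise() path returns to the host: the result is placed in the host's x1 (the hypercall return slot) and __host_enter() resumes host execution without ever returning. A sketch of that tail, assuming cpu_reg() and the __host_enter() declaration listed under kvm_hyp.h below; the wrapper name is hypothetical:

	#include <linux/compiler.h>
	#include <asm/kvm_hyp.h>		/* cpu_reg(), __host_enter() */

	/* Hypothetical tail of a finalisation path: report 'ret' and resume the host. */
	static void __noreturn return_to_host(struct kvm_cpu_context *host_ctxt, int ret)
	{
		cpu_reg(host_ctxt, 1) = ret;	/* hypercall return value, host's x1 */
		__host_enter(host_ctxt);	/* restores host registers; never returns */
	}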
mem_protect.c
   593  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
/linux/arch/arm64/kvm/hyp/vhe/

switch.c
   208  host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;  in kvm_vcpu_load_vhe()
   220  host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;  in kvm_vcpu_put_vhe()
   564  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run_vhe()
   568  host_ctxt = host_data_ptr(host_ctxt);  in __kvm_vcpu_run_vhe()
   573  sysreg_save_host_state_vhe(host_ctxt);  in __kvm_vcpu_run_vhe()
   599  sysreg_restore_host_state_vhe(host_ctxt);  in __kvm_vcpu_run_vhe()
   650  struct kvm_cpu_context *host_ctxt;  in __hyp_call_panic()
   653  host_ctxt = host_data_ptr(host_ctxt);  in __hyp_call_panic()
   [all...]
/linux/arch/arm64/include/asm/

kvm_hyp.h
   121  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
   124  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
   133  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
kvm_asm.h
   291  void handle_trap(struct kvm_cpu_context *host_ctxt);
/linux/arch/arm64/kvm/hyp/include/hyp/

switch.h
   325  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);  in __activate_traps_hfgxtr()
   360  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);  in __deactivate_traps_hfgxtr()
   415  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);  in __activate_traps_common()
   455  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);  in __deactivate_traps_common()
   655  __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));  in kvm_hyp_save_fpsimd_host()
sysreg-sr.h
    33  return host_data_ptr(host_ctxt) != ctxt;  in ctxt_is_guest()
/linux/arch/arm64/kvm/hyp/include/nvhe/

mem_protect.h
    55  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
/linux/arch/arm64/kvm/

pmu.c
   190  hctxt = host_data_ptr(host_ctxt);  in kvm_set_pmuserenr()
arm.c
  2105  kvm_init_host_cpu_context(host_data_ptr(host_ctxt));  in cpu_hyp_init_context()
/linux/arch/arm64/kernel/

asm-offsets.c
   117  DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));  in main()
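asm-offsets.c is where the HOST_DATA_CONTEXT constant used by the EL2 entry assembly comes from: the byte offset of host_ctxt inside struct kvm_host_data is computed in C and emitted for the assembler. A sketch of the kbuild asm-offsets idiom, assuming DEFINE() from linux/kbuild.h:

	#include <linux/kbuild.h>		/* DEFINE() */
	#include <linux/kvm_host.h>		/* struct kvm_host_data */
	#include <linux/stddef.h>		/* offsetof() */

	int main(void)
	{
		/* Emitted as an assembler constant usable from the EL2 entry code. */
		DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
		return 0;
	}

The hyp entry code can then add HOST_DATA_CONTEXT to the per-CPU kvm_host_data pointer to reach the embedded host kvm_cpu_context without calling into C.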