Searched refs:hw_mmu (Results 1 – 10 of 10) sorted by relevance
62 if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu)) in enter_vmid_context()
65 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
301 mmu = kern_hyp_va(vcpu->arch.hw_mmu); in __kvm_vcpu_run()
519 hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu; in init_pkvm_hyp_vcpu()
1224 .hw_mmu = &selftest_vm.kvm.arch.mmu,
28 if (vcpu && mmu != vcpu->arch.hw_mmu) in enter_vmid_context()
29 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
222 __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch); in kvm_vcpu_load_vhe()
783 if (!vcpu->arch.hw_mmu) in kvm_vcpu_load_hw_mmu()
784 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_vcpu_load_hw_mmu()
786 if (!vcpu->arch.hw_mmu) { in kvm_vcpu_load_hw_mmu()
788 vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu); in kvm_vcpu_load_hw_mmu()
817 if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu)) in kvm_vcpu_put_hw_mmu()
818 atomic_dec(&vcpu->arch.hw_mmu->refcnt); in kvm_vcpu_put_hw_mmu()
820 vcpu->arch.hw_mmu = NULL; in kvm_vcpu_put_hw_mmu()
1838 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; in check_nested_vcpu_requests()
1528 min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu); in prepare_mmu_memcache()
1574 struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt; in gmem_abort()
1860 pgt = vcpu->arch.hw_mmu->pgt; in user_mem_abort()
1954 mmu = vcpu->arch.hw_mmu; in handle_access_fault()
2084 if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) { in kvm_handle_guest_abort()
2121 if (kvm_is_nested_s2_mmu(vcpu->kvm,vcpu->arch.hw_mmu) && in kvm_handle_guest_abort()
2122 vcpu->arch.hw_mmu->nested_stage2_enabled) { in kvm_handle_guest_abort()
2197 VM_BUG_ON(ipa >= kvm_phys_size(vcpu->arch.hw_mmu)); in kvm_handle_guest_abort()
533 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
641 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
842 struct kvm_s2_mmu *hw_mmu; member