Lines Matching full:mmu
14 struct kvm_s2_mmu *mmu; member
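The mmu member at line 14 belongs to the per-flush context structure that the switch helpers below use to remember which stage-2 MMU was loaded before the TLB operation. A rough sketch of that structure, assuming the fields usually saved around a flush (interrupt flags plus TCR_EL1/SCTLR_EL1 for the SPECULATIVE_AT workaround; the exact field set may differ by kernel version):

struct tlb_inv_context {
	struct kvm_s2_mmu	*mmu;	/* stage-2 context to restore on exit, NULL if none */
	unsigned long		flags;	/* saved interrupt state */
	u64			tcr;	/* saved TCR_EL1 (SPECULATIVE_AT workaround) */
	u64			sctlr;	/* saved SCTLR_EL1 (SPECULATIVE_AT workaround) */
};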
20 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, in __tlb_switch_to_guest() argument
28 if (vcpu && mmu != vcpu->arch.hw_mmu) in __tlb_switch_to_guest()
29 cxt->mmu = vcpu->arch.hw_mmu; in __tlb_switch_to_guest()
31 cxt->mmu = NULL; in __tlb_switch_to_guest()
41 * allocate IPA->PA walks, so we enable the S1 MMU... in __tlb_switch_to_guest()
63 __load_stage2(mmu, mmu->arch); in __tlb_switch_to_guest()
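Taken together, the matches at lines 20-63 outline __tlb_switch_to_guest(): if a vCPU is loaded and its hardware MMU differs from the one being flushed, the current MMU is stashed in cxt->mmu for later restoration, otherwise cxt->mmu is set to NULL; the target stage-2 context is then loaded with __load_stage2(). A simplified reconstruction under those assumptions (the SPECULATIVE_AT workaround and exact barrier placement are omitted; the vCPU accessor shown is an assumption):

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	/* assumed accessor for the currently loaded vCPU */
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	local_irq_save(cxt->flags);

	/* Remember the stage-2 MMU we are displacing, if any */
	if (vcpu && mmu != vcpu->arch.hw_mmu)
		cxt->mmu = vcpu->arch.hw_mmu;
	else
		cxt->mmu = NULL;

	/* Point the hardware at the stage-2 context to be flushed */
	__load_stage2(mmu, mmu->arch);
	isb();
}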
79 /* ... and the stage-2 MMU context that we switched away from */ in __tlb_switch_to_host()
80 if (cxt->mmu) in __tlb_switch_to_host()
81 __load_stage2(cxt->mmu, cxt->mmu->arch); in __tlb_switch_to_host()
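__tlb_switch_to_host() undoes the switch: if a stage-2 context was saved in cxt->mmu it is reloaded, otherwise there is nothing to put back. Roughly, leaving out the HCR_EL2 restore and the SPECULATIVE_AT register restore:

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/* ... and the stage-2 MMU context that we switched away from */
	if (cxt->mmu)
		__load_stage2(cxt->mmu, cxt->mmu->arch);

	local_irq_restore(cxt->flags);
}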
92 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
100 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
124 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa_nsh() argument
132 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa_nsh()
156 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_range() argument
172 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_range()
184 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
191 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid()
200 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
205 __tlb_switch_to_guest(mmu, &cxt); in __kvm_flush_cpu_context()
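All five flush entry points listed above follow the same bracketing pattern: switch to the requested guest's stage-2 context, issue the TLB invalidation, then switch back to whatever was loaded before. A hedged sketch of __kvm_tlb_flush_vmid_ipa() in that shape, reconstructed from the call sites above rather than copied from the file (comments and the exact barrier sequence are simplifications):

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/* Invalidate the stage-2 entry for this IPA, then stage-1 */
	__tlbi_level(ipas2e1is, ipa >> 12, level);
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}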