Lines matching refs:kvm (TDP MMU interface declarations)
Note: prototypes that span several source lines appear cut off below, because only the individual lines that reference "kvm" are listed.
10 void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
11 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
20 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
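The first three entries (lines 10, 11, and 20) are lifecycle calls: per-VM TDP MMU setup and teardown, plus dropping a reference on a root page. A minimal sketch of how the init/uninit pair might bracket a VM's life follows; the surrounding function names are hypothetical, and only the two kvm_mmu_*_tdp_mmu() calls come from the listing.

/*
 * Sketch: pairing TDP MMU setup and teardown over a VM's lifetime.
 * my_arch_init_vm()/my_arch_destroy_vm() are hypothetical call sites.
 */
static void my_arch_init_vm(struct kvm *kvm)
{
	/* Initialize VM-wide TDP MMU state (e.g. the list of roots). */
	kvm_mmu_init_tdp_mmu(kvm);
}

static void my_arch_destroy_vm(struct kvm *kvm)
{
	/* All roots must have been released (kvm_tdp_mmu_put_root()) by now. */
	kvm_mmu_uninit_tdp_mmu(kvm);
}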
30 static inline enum kvm_tdp_mmu_root_types kvm_gfn_range_filter_to_root_types(struct kvm *kvm,
35 if (!kvm_has_mirrored_tdp(kvm))
51 if (unlikely(!kvm_is_addr_direct(vcpu->kvm, fault->addr)))
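The entries at lines 30, 35, and 51 are fragments of inline helpers used with mirrored TDP (private memory): one translates a kvm_gfn_range filter into the set of root types an operation should walk, the other (line 51) routes a fault to the mirror root when its address is not direct. A possible shape of the first helper is sketched here; the enum constants (KVM_DIRECT_ROOTS, KVM_MIRROR_ROOTS, KVM_FILTER_*) are assumptions inferred from context, not visible in this listing.

/*
 * Sketch of a filter -> root-types mapping.  Constant names are assumed;
 * only kvm_has_mirrored_tdp(kvm) appears in the listing above.
 */
static inline enum kvm_tdp_mmu_root_types
my_filter_to_root_types(struct kvm *kvm, enum kvm_gfn_range_filter process)
{
	enum kvm_tdp_mmu_root_types types = 0;

	/* Without mirrored TDP there are only direct roots to operate on. */
	if (!kvm_has_mirrored_tdp(kvm))
		return KVM_DIRECT_ROOTS;

	if (process & KVM_FILTER_SHARED)
		types |= KVM_DIRECT_ROOTS;
	if (process & KVM_FILTER_PRIVATE)
		types |= KVM_MIRROR_ROOTS;

	return types;
}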
66 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
67 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
68 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
69 void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
71 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);
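Lines 66-71 are the zap interfaces: removing leaf SPTEs over a GFN range, zapping one shadow page, zapping everything, and the invalidate-then-reap pair used to retire roots without holding vCPUs up. Below is a minimal sketch of a leaf zap across a memslot; the wrapper name, the write-locked mmu_lock assumption, and the TLB-flush handling are mine, not taken from the listing.

/*
 * Sketch: zap all leaf SPTEs backing a memslot, then flush if anything
 * was actually removed.  Assumes kvm->mmu_lock is held for write.
 */
static void my_zap_memslot_leafs(struct kvm *kvm,
				 const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;

	if (kvm_tdp_mmu_zap_leafs(kvm, start, end, false))
		kvm_flush_remote_tlbs_memslot(kvm, slot);
}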
75 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
77 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
78 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
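Lines 75-78 are the per-range hooks that GFN-range operations (e.g. from MMU notifiers) call into: unmap a range, age it (drop accessed state), and test whether it has been accessed. A sketch of a dispatcher fanning out to them follows; the enum and wrapper are hypothetical, and only the three kvm_tdp_mmu_* calls are from the listing.

/* Sketch: dispatching a range operation to the TDP MMU handlers. */
enum my_range_op { MY_RANGE_UNMAP, MY_RANGE_AGE, MY_RANGE_TEST_AGE };

static bool my_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				enum my_range_op op)
{
	switch (op) {
	case MY_RANGE_UNMAP:
		/* Return value reports whether a TLB flush is still needed. */
		return kvm_tdp_mmu_unmap_gfn_range(kvm, range, false);
	case MY_RANGE_AGE:
		return kvm_tdp_mmu_age_gfn_range(kvm, range);
	case MY_RANGE_TEST_AGE:
		return kvm_tdp_mmu_test_age_gfn(kvm, range);
	}
	return false;
}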
80 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
82 void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
84 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
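Lines 80-84 are the dirty-logging helpers: slot-wide write protection, slot-wide dirty-bit clearing, and a masked variant for harvesting 64 pages at a time. Only the first parameter of each is visible here; the sketch below assumes the usual (slot, min_level) tail for the write-protect call, so verify it against the full header.

/*
 * Sketch: start logging writes to a memslot by write-protecting its 4KiB
 * mappings.  The (slot, PG_LEVEL_4K) arguments are assumed, not shown in
 * the truncated prototype above; flushing here is the conservative choice.
 */
static void my_slot_start_dirty_logging(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	if (kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K))
		kvm_flush_remote_tlbs_memslot(kvm, slot);
}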
88 void kvm_tdp_mmu_recover_huge_pages(struct kvm *kvm,
91 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
95 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
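The remaining entries (lines 88-95) deal with huge pages and single-GFN write protection: recovering (re-collapsing) huge mappings once dirty logging stops, write-protecting one GFN at or above a minimum level, and eagerly splitting huge pages down before write tracking begins. A sketch of the split/recover pairing around dirty-log transitions is below; every argument past 'kvm' is assumed from context, since the prototypes are truncated in this listing.

/*
 * Sketch: eager huge-page splitting when dirty logging is enabled and
 * huge-page recovery when it is disabled.  Argument tails (GFN range,
 * target level, shared-lock flag, slot) are assumptions, not from the
 * listing above.
 */
static void my_slot_dirty_log_transition(struct kvm *kvm,
					 const struct kvm_memory_slot *slot,
					 bool enable)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;

	if (enable)
		/* shared=true: caller holds mmu_lock for read. */
		kvm_tdp_mmu_try_split_huge_pages(kvm, slot, start, end,
						 PG_LEVEL_4K, true);
	else
		/* Let previously split ranges collapse back to huge pages. */
		kvm_tdp_mmu_recover_huge_pages(kvm, slot);
}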