Lines Matching defs:kvm (definitions of the kvm symbol, as function parameters and locals, in arch/x86/kvm/mmu/mmu.c)

283 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep()
521 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) in mmu_spte_clear_track_bits()
748 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed()
778 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in track_possible_nx_huge_page()
796 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, in account_nx_huge_page()
805 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed()
821 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in untrack_possible_nx_huge_page()
830 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_nx_huge_page()
934 static unsigned long kvm_rmap_lock(struct kvm *kvm, in kvm_rmap_lock()
955 static void kvm_rmap_unlock(struct kvm *kvm, in kvm_rmap_unlock()
1002 static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, in pte_list_add()
1046 static void pte_list_desc_remove_entry(struct kvm *kvm, unsigned long *rmap_val, in pte_list_desc_remove_entry()
1082 static void pte_list_remove(struct kvm *kvm, u64 *spte, in pte_list_remove()
1118 static void kvm_zap_one_rmap_spte(struct kvm *kvm, in kvm_zap_one_rmap_spte()
1126 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm, in kvm_zap_all_rmap_sptes()
1179 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove()
1277 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte()
1285 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush) in drop_large_spte()
1354 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in __rmap_clear_dirty()
1372 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked()
1395 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_mmu_clear_dirty_pt_masked()
1418 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked()
1470 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, in kvm_mmu_slot_gfn_write_protect()
1500 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_zap_rmap()
1580 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, in __walk_slot_rmaps()
1613 static __always_inline bool walk_slot_rmaps(struct kvm *kvm, in walk_slot_rmaps()
1624 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, in walk_slot_rmaps_4k()
1632 static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm, in __kvm_rmap_zap_gfn_range()
1642 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range()
1674 static void __rmap_add(struct kvm *kvm, in __rmap_add()
1706 static bool kvm_rmap_age_gfn_range(struct kvm *kvm, in kvm_rmap_age_gfn_range()
1754 static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm) in kvm_may_have_shadow_mmu_sptes()
1759 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn()
1772 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn()
1802 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_account_mmu_page()
1808 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unaccount_mmu_page()
1830 static void mmu_page_add_parent_pte(struct kvm *kvm, in mmu_page_add_parent_pte()
1840 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte()
1846 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in drop_parent_pte()
1961 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page()
2075 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, in kvm_mmu_remote_flush_or_zap()
2089 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp()
2229 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm, in kvm_mmu_find_shadow_page()
2314 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm, in kvm_mmu_alloc_shadow_page()
2350 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm, in __kvm_mmu_get_shadow_page()
2507 static void __link_shadow_page(struct kvm *kvm, in __link_shadow_page()
2571 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte()
2602 static int kvm_mmu_page_unlink_children(struct kvm *kvm, in kvm_mmu_page_unlink_children()
2615 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents()
2624 static int mmu_zap_unsync_children(struct kvm *kvm, in mmu_zap_unsync_children()
2648 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, in __kvm_mmu_prepare_zap_page()
2713 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page()
2722 static void kvm_mmu_commit_zap_page(struct kvm *kvm, in kvm_mmu_commit_zap_page()
2747 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, in kvm_mmu_zap_oldest_mmu_pages()
2784 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) in kvm_mmu_available_pages()
2820 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages()
2839 struct kvm *kvm = vcpu->kvm; in __kvm_mmu_unprotect_gfn_and_retry() local
2882 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unsync_page()
2897 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, in mmu_try_to_unsync_pages()
3195 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level()
3259 static int __kvm_mmu_max_mapping_level(struct kvm *kvm, in __kvm_mmu_max_mapping_level()
3283 int kvm_mmu_max_mapping_level(struct kvm *kvm, in kvm_mmu_max_mapping_level()
3462 static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault) in page_fault_can_be_fast()
3696 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, in mmu_free_root_page()
3721 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, in kvm_mmu_free_roots()
3786 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu) in kvm_mmu_free_guest_mode_roots()
3883 static int mmu_first_shadow_root_alloc(struct kvm *kvm) in mmu_first_shadow_root_alloc()
4468 static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, in kvm_max_private_mapping_level()
4567 struct kvm *kvm = vcpu->kvm; in kvm_mmu_faultin_pfn() local
4973 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu, in cached_root_find_and_keep_current()
5007 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu, in cached_root_find_without_current()
5028 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu, in fast_pgd_switch()
5919 struct kvm *kvm = vcpu->kvm; in kvm_mmu_unload() local
5928 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa) in is_obsolete_root()
5955 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu) in __kvm_mmu_free_obsolete_roots()
6553 static void kvm_zap_obsolete_pages(struct kvm *kvm) in kvm_zap_obsolete_pages()
6621 static void kvm_mmu_zap_all_fast(struct kvm *kvm) in kvm_mmu_zap_all_fast()
6677 void kvm_mmu_init_vm(struct kvm *kvm) in kvm_mmu_init_vm()
6696 static void mmu_free_vm_memory_caches(struct kvm *kvm) in mmu_free_vm_memory_caches()
6703 void kvm_mmu_uninit_vm(struct kvm *kvm) in kvm_mmu_uninit_vm()
6711 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_rmap_zap_gfn_range()
6745 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_zap_gfn_range()
6771 static bool slot_rmap_write_protect(struct kvm *kvm, in slot_rmap_write_protect()
6778 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, in kvm_mmu_slot_remove_write_access()
6801 static bool need_topup_split_caches_or_resched(struct kvm *kvm) in need_topup_split_caches_or_resched()
6816 static int topup_split_caches(struct kvm *kvm) in topup_split_caches()
6849 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep) in shadow_mmu_get_sp_for_split()
6876 static void shadow_mmu_split_huge_page(struct kvm *kvm, in shadow_mmu_split_huge_page()
6922 static int shadow_mmu_try_split_huge_page(struct kvm *kvm, in shadow_mmu_try_split_huge_page()
6961 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm, in shadow_mmu_try_split_huge_pages()
7007 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_shadow_mmu_try_split_huge_pages()
7026 void kvm_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_mmu_try_split_huge_pages()
7045 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, in kvm_mmu_slot_try_split_huge_pages()
7076 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, in kvm_mmu_zap_collapsible_spte()
7113 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, in kvm_rmap_zap_collapsible_sptes()
7125 void kvm_mmu_recover_huge_pages(struct kvm *kvm, in kvm_mmu_recover_huge_pages()
7141 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, in kvm_mmu_slot_leaf_clear_dirty()
7170 static void kvm_mmu_zap_all(struct kvm *kvm) in kvm_mmu_zap_all()
7195 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all()
7200 static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm, in kvm_mmu_zap_memslot_pages_and_flush()
7234 static void kvm_mmu_zap_memslot(struct kvm *kvm, in kvm_mmu_zap_memslot()
7251 static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm) in kvm_memslot_flush_zap_all()
7257 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot()
7266 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) in kvm_mmu_invalidate_mmio_sptes()
7297 static void kvm_wake_nx_recovery_thread(struct kvm *kvm) in kvm_wake_nx_recovery_thread()
7361 struct kvm *kvm; in set_nx_huge_pages() local
7500 struct kvm *kvm; in set_nx_huge_pages_recovery_param() local
7513 static void kvm_recover_nx_huge_pages(struct kvm *kvm) in kvm_recover_nx_huge_pages()
7614 struct kvm *kvm = data; in kvm_nx_huge_page_recovery_worker() local
7640 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvm_mmu_start_lpage_recovery() local
7658 int kvm_mmu_post_init_vm(struct kvm *kvm) in kvm_mmu_post_init_vm()
7666 void kvm_mmu_pre_destroy_vm(struct kvm *kvm) in kvm_mmu_pre_destroy_vm()
7691 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, in kvm_arch_pre_set_memory_attributes()
7754 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot, in hugepage_has_attrs()
7771 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, in kvm_arch_post_set_memory_attributes()
7838 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm, in kvm_mmu_init_memslot_memory_attributes()
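A large cluster above (kvm_rmap_lock at 934 through drop_spte at 1277) manages the rmap: for each gfn, the set of shadow PTEs that map it, stored either as a single spte pointer or, once a second mapping appears, as a chain of descriptors. The following is a toy model of that one-or-many scheme, not the kernel code; every toy_* name is hypothetical, the descriptor size is arbitrary, and locking, removal, and freeing are omitted for brevity.

/*
 * Toy model (hypothetical names, not kernel code) of the rmap scheme
 * behind pte_list_add()/pte_list_remove() above: a head holds either
 * one SPTE pointer inline, or a tagged pointer to a descriptor chain.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_DESC_SPTES 4     /* arbitrary; the kernel uses a tuned count */
#define TOY_MANY       1UL   /* low bit tags "head points to a desc" */

struct toy_desc {
	uint64_t *sptes[TOY_DESC_SPTES];
	int nr;
	struct toy_desc *next;
};

struct toy_rmap_head {
	unsigned long val;   /* 0, an spte pointer, or desc | TOY_MANY */
};

static void toy_rmap_add(struct toy_rmap_head *head, uint64_t *spte)
{
	struct toy_desc *desc;

	if (!head->val) {                   /* first spte: store inline */
		head->val = (unsigned long)spte;
		return;
	}
	if (!(head->val & TOY_MANY)) {      /* second spte: spill to a desc */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[desc->nr++] = (uint64_t *)head->val;
		head->val = (unsigned long)desc | TOY_MANY;
	}
	desc = (struct toy_desc *)(head->val & ~TOY_MANY);
	while (desc->nr == TOY_DESC_SPTES && desc->next)
		desc = desc->next;
	if (desc->nr == TOY_DESC_SPTES) {   /* chain a fresh descriptor */
		desc->next = calloc(1, sizeof(*desc));
		desc = desc->next;
	}
	desc->sptes[desc->nr++] = spte;     /* leaks on exit; demo only */
}

int main(void)
{
	struct toy_rmap_head head = { 0 };
	uint64_t sptes[6];

	for (int i = 0; i < 6; i++)
		toy_rmap_add(&head, &sptes[i]);
	assert(head.val & TOY_MANY);
	printf("rmap head spilled to a descriptor chain as expected\n");
	return 0;
}

The tagged low bit is possible because both malloc'd descriptors and u64-aligned spte pointers leave bit 0 clear, so a single unsigned long can encode all three states of the head.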
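A second pattern visible in the listing is the walker/handler split: __walk_slot_rmaps (1580) and its wrappers iterate rmap heads over a gfn range and page-table levels, while handlers such as __rmap_clear_dirty (1354), kvm_zap_rmap (1500), and slot_rmap_write_protect (6771) share one signature so any of them can be plugged in. Below is a minimal sketch of that callback shape under the same caveats as above: all toy_* names are hypothetical and the data layout is invented for the demo.

/*
 * Toy sketch (hypothetical names) of the walker/handler split: the
 * walker visits every rmap head in a range, the handler returns
 * whether its changes require a remote TLB flush.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rmap_head { unsigned long val; };

#define TOY_NR_GFNS   8      /* sized for the demo only */
#define TOY_NR_LEVELS 3

static struct toy_rmap_head toy_rmaps[TOY_NR_LEVELS][TOY_NR_GFNS];

typedef bool (*toy_rmap_handler)(struct toy_rmap_head *head);

/* Returns true if any handler invocation asked for a TLB flush. */
static bool toy_walk_slot_rmaps(toy_rmap_handler fn,
				int min_level, int max_level,
				unsigned long start, unsigned long end)
{
	bool flush = false;

	for (int level = min_level; level <= max_level; level++)
		for (unsigned long gfn = start; gfn < end; gfn++)
			flush |= fn(&toy_rmaps[level][gfn]);
	return flush;
}

static bool toy_zap_rmap(struct toy_rmap_head *head)
{
	bool had_sptes = head->val != 0;

	head->val = 0;           /* drop every spte on this head */
	return had_sptes;        /* flush only if something was zapped */
}

int main(void)
{
	toy_rmaps[0][3].val = 1; /* pretend one gfn has a mapping */

	if (toy_walk_slot_rmaps(toy_zap_rmap, 0, TOY_NR_LEVELS - 1,
				0, TOY_NR_GFNS))
		puts("flush remote TLBs");
	return 0;
}

Deferring the flush decision to the walker's caller is what lets helpers like kvm_mmu_remote_flush_or_zap (2075) batch TLB flushes across many handler invocations instead of flushing per head.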