Lines Matching defs:sp

285 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in kvm_flush_remote_tlbs_sptep() local
369 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in count_spte_clear() local
453 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in __get_spte_lockless() local
638 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
655 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_access()
675 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, in kvm_mmu_page_set_translation()
694 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index, in kvm_mmu_page_set_access()
748 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed()
778 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in track_possible_nx_huge_page()
796 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, in account_nx_huge_page()
805 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed()
821 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in untrack_possible_nx_huge_page()
830 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_nx_huge_page()
1183 struct kvm_mmu_page *sp; in rmap_remove() local
1287 struct kvm_mmu_page *sp; in drop_large_spte() local
1679 struct kvm_mmu_page *sp; in __rmap_add() local
1788 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp) in kvm_mmu_check_sptes_at_free()
1802 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_account_mmu_page()
1808 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unaccount_mmu_page()
1814 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp) in kvm_mmu_free_shadow_page()
1832 struct kvm_mmu_page *sp, u64 *parent_pte) in mmu_page_add_parent_pte()
1840 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte()
1846 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in drop_parent_pte()
1854 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) in kvm_mmu_mark_parents_unsync()
1866 struct kvm_mmu_page *sp; in mark_unsync() local
1880 struct kvm_mmu_page *sp; member
1886 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, in mmu_pages_add()
1902 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) in clear_unsync_child_bit()
1909 static int __mmu_unsync_walk(struct kvm_mmu_page *sp, in __mmu_unsync_walk()
1950 static int mmu_unsync_walk(struct kvm_mmu_page *sp, in mmu_unsync_walk()
1961 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page()
1974 static bool sp_has_gptes(struct kvm_mmu_page *sp) in sp_has_gptes()
1995 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in kvm_sync_page_check()
2028 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i) in kvm_sync_spte()
2037 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in __kvm_sync_page()
2065 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page()
2089 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp()
2104 #define for_each_sp(pvec, sp, parents, i) \ argument
2116 struct kvm_mmu_page *sp = pvec->page[n].sp; in mmu_pages_next() local
2133 struct kvm_mmu_page *sp; in mmu_pages_first() local
2156 struct kvm_mmu_page *sp; in mmu_pages_clear_parents() local
2175 struct kvm_mmu_page *sp; in mmu_sync_children() local
2213 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) in __clear_sp_write_flooding_count()
2235 struct kvm_mmu_page *sp; in kvm_mmu_find_shadow_page() local
2320 struct kvm_mmu_page *sp; in kvm_mmu_alloc_shadow_page() local
2357 struct kvm_mmu_page *sp; in __kvm_mmu_get_shadow_page() local
2509 struct kvm_mmu_page *sp, bool flush) in __link_shadow_page()
2543 struct kvm_mmu_page *sp) in link_shadow_page()
2571 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte()
2603 struct kvm_mmu_page *sp, in kvm_mmu_page_unlink_children()
2615 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents()
2636 struct kvm_mmu_page *sp; in mmu_zap_unsync_children() local
2649 struct kvm_mmu_page *sp, in __kvm_mmu_prepare_zap_page()
2713 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page()
2725 struct kvm_mmu_page *sp, *nsp; in kvm_mmu_commit_zap_page() local
2751 struct kvm_mmu_page *sp, *tmp; in kvm_mmu_zap_oldest_mmu_pages() local
2841 struct kvm_mmu_page *sp; in __kvm_mmu_unprotect_gfn_and_retry() local
2882 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unsync_page()
2900 struct kvm_mmu_page *sp; in mmu_try_to_unsync_pages() local
3002 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in mmu_set_spte() local
3108 struct kvm_mmu_page *sp, in direct_pte_prefetch_many()
3118 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch()
3145 struct kvm_mmu_page *sp; in direct_pte_prefetch() local
3352 struct kvm_mmu_page *sp; in direct_map() local
3575 struct kvm_mmu_page *sp; in fast_page_fault() local
3699 struct kvm_mmu_page *sp; in mmu_free_root_page() local
3789 struct kvm_mmu_page *sp; in kvm_mmu_free_guest_mode_roots() local
3817 struct kvm_mmu_page *sp; in mmu_alloc_root() local
4136 struct kvm_mmu_page *sp; in is_unsync_root() local
4172 struct kvm_mmu_page *sp; in kvm_mmu_sync_roots() local
4683 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in is_page_fault_stale() local
4950 struct kvm_mmu_page *sp; in is_root_usable() local
5082 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in kvm_mmu_new_pgd() local
5930 struct kvm_mmu_page *sp; in is_obsolete_root() local
6009 static bool detect_write_flooding(struct kvm_mmu_page *sp) in detect_write_flooding()
6026 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, in detect_write_misaligned()
6047 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) in get_written_sptes()
6082 struct kvm_mmu_page *sp; in kvm_mmu_track_write() local
6341 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep); in __kvm_mmu_invalidate_addr() local
6555 struct kvm_mmu_page *sp, *node; in kvm_zap_obsolete_pages() local
6883 struct kvm_mmu_page *sp; in shadow_mmu_split_huge_page() local
6966 struct kvm_mmu_page *sp; in shadow_mmu_try_split_huge_pages() local
7083 struct kvm_mmu_page *sp; in kvm_mmu_zap_collapsible_spte() local
7172 struct kvm_mmu_page *sp, *node; in kvm_mmu_zap_all() local
7217 struct kvm_mmu_page *sp; in kvm_mmu_zap_memslot_pages_and_flush() local
7518 struct kvm_mmu_page *sp; in kvm_recover_nx_huge_pages() local
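
Most of the locals above are initialized as sp = sptep_to_sp(sptep), which recovers the struct kvm_mmu_page metadata for the shadow page table page containing a given spte. Below is a minimal sketch of that conversion, modeled on the helpers in arch/x86/kvm/mmu/spte.h; struct kvm_mmu_page itself is defined in arch/x86/kvm/mmu/mmu_internal.h and is elided here.

	#include <linux/mm.h>         /* pfn_to_page(), page_private(), __pa() */
	#include <linux/kvm_types.h>  /* hpa_t */

	struct kvm_mmu_page;          /* shadow-page metadata (mmu_internal.h) */

	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		/*
		 * Every shadow page table page stores a back-pointer to its
		 * owning kvm_mmu_page in page_private() when it is allocated.
		 */
		return (struct kvm_mmu_page *)page_private(page);
	}

	static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		/*
		 * An spte pointer is a kernel virtual address somewhere inside
		 * a shadow page table page; __pa() maps it back to that page.
		 */
		return to_shadow_page(__pa(sptep));
	}

The root_to_sp() conversions at lines 4683 and 5082 above rest on the same page_private() back-pointer, masking the root HPA down to its containing page before the lookup.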