Lines Matching defs:sp — definitions of the symbol sp (as a local, argument, or struct member), apparently from arch/x86/kvm/mmu.c of a ~3.x kernel. A sketch of the recurring page_header(__pa(sptep)) idiom follows the listing.
334 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in count_spte_clear() local
407 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in __get_spte_lockless() local
665 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
673 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn()
982 struct kvm_mmu_page *sp; in rmap_add() local
998 struct kvm_mmu_page *sp; in rmap_remove() local
1228 struct kvm_mmu_page *sp; in rmap_recycle() local
1282 static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp) in kvm_mmu_isolate_page()
1294 static void kvm_mmu_free_page(struct kvm_mmu_page *sp) in kvm_mmu_free_page()
1307 struct kvm_mmu_page *sp, u64 *parent_pte) in mmu_page_add_parent_pte()
1315 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte()
1321 static void drop_parent_pte(struct kvm_mmu_page *sp, in drop_parent_pte()
1331 struct kvm_mmu_page *sp; in kvm_mmu_alloc_page() local
1348 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) in kvm_mmu_mark_parents_unsync()
1355 struct kvm_mmu_page *sp; in mark_unsync() local
1368 struct kvm_mmu_page *sp) in nonpaging_sync_page()
1378 struct kvm_mmu_page *sp, u64 *spte, in nonpaging_update_pte()
1388 struct kvm_mmu_page *sp; member
1399 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, in mmu_pages_add()
1415 static int __mmu_unsync_walk(struct kvm_mmu_page *sp, in __mmu_unsync_walk()
1459 static int mmu_unsync_walk(struct kvm_mmu_page *sp, in mmu_unsync_walk()
1469 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page()
1482 #define for_each_gfn_sp(kvm, sp, gfn, pos) \ argument
1487 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \ argument
1494 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __kvm_sync_page()
1515 struct kvm_mmu_page *sp) in kvm_sync_page_transient()
1534 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page()
1572 #define for_each_sp(pvec, sp, parents, i) \ argument
1585 struct kvm_mmu_page *sp = pvec->page[n].sp; in mmu_pages_next() local
1601 struct kvm_mmu_page *sp; in mmu_pages_clear_parents() local
1630 struct kvm_mmu_page *sp; in mmu_sync_children() local
1655 static void init_shadow_page_table(struct kvm_mmu_page *sp) in init_shadow_page_table()
1663 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) in __clear_sp_write_flooding_count()
1670 struct kvm_mmu_page *sp = page_header(__pa(spte)); in clear_sp_write_flooding_count() local
1685 struct kvm_mmu_page *sp; in kvm_mmu_get_page() local
1792 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) in link_shadow_page()
1832 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte()
1858 struct kvm_mmu_page *sp) in kvm_mmu_page_unlink_children()
1866 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) in kvm_mmu_put_page()
1871 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents()
1892 struct kvm_mmu_page *sp; in mmu_zap_unsync_children() local
1905 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page()
1935 struct kvm_mmu_page *sp; in kvm_mmu_isolate_pages() local
1943 struct kvm_mmu_page *next, *sp; in free_pages_rcu() local
1960 struct kvm_mmu_page *sp; in kvm_mmu_commit_zap_page() local
2017 struct kvm_mmu_page *sp; in kvm_mmu_unprotect_page() local
2041 struct kvm_mmu_page *sp = page_header(__pa(pte)); in page_header_update_slot() local
2151 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in __kvm_unsync_page()
2382 struct kvm_mmu_page *sp, in direct_pte_prefetch_many()
2408 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch()
2432 struct kvm_mmu_page *sp; in direct_pte_prefetch() local
2455 struct kvm_mmu_page *sp; in __direct_map() local
2645 struct kvm_mmu_page *sp; in mmu_free_roots() local
2698 struct kvm_mmu_page *sp; in mmu_alloc_direct_roots() local
2734 struct kvm_mmu_page *sp; in mmu_alloc_shadow_roots() local
2839 struct kvm_mmu_page *sp; in mmu_sync_roots() local
3481 struct kvm_mmu_page *sp, u64 *spte, in mmu_pte_write_new_pte()
3558 static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte) in detect_write_flooding()
3574 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, in detect_write_misaligned()
3598 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) in get_written_sptes()
3634 struct kvm_mmu_page *sp; in kvm_mmu_pte_write() local
3722 struct kvm_mmu_page *sp; in __kvm_mmu_free_some_pages() local
3848 struct kvm_mmu_page *sp; in kvm_mmu_slot_remove_write_access() local
3880 struct kvm_mmu_page *sp, *node; in kvm_mmu_zap_all() local
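
Many of the locals above are initialized with the same idiom, sp = page_header(__pa(sptep)): take the physical address of a shadow PTE, mask it down to the page that holds it, and read the owning struct kvm_mmu_page back out of that page's private field, where kvm_mmu_alloc_page() stored it with set_page_private(). A minimal sketch, modeled on mmu.c of this era; page_header() mirrors the kernel helper of that name, while example_gfn() is purely illustrative:

	static struct kvm_mmu_page *page_header(hpa_t shadow_page)
	{
		/* The sptes sit in one page-aligned table; find its struct page. */
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		/* kvm_mmu_alloc_page() stashed the metadata in page->private. */
		return (struct kvm_mmu_page *)page_private(page);
	}

	/* Illustrative caller, shaped like count_spte_clear() / __get_spte_lockless(): */
	static gfn_t example_gfn(u64 *sptep)
	{
		struct kvm_mmu_page *sp = page_header(__pa(sptep));

		/* sp now describes the shadow page containing *sptep. */
		return sp->gfn;
	}

This is why the conversion is O(1) with no lookup structure: the 512 sptes of a shadow page share one backing page, so any spte pointer identifies its kvm_mmu_page through that page alone.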