Lines Matching defs:sptep
158 u64 *sptep; member
283 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep()
291 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte()
336 static void __set_spte(u64 *sptep, u64 spte) in __set_spte()
342 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast()
348 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow()
354 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless()
367 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear()
379 static void __set_spte(u64 *sptep, u64 spte) in __set_spte()
398 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast()
417 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow()
451 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless()
481 static void mmu_spte_set(u64 *sptep, u64 new_spte) in mmu_spte_set()
492 static bool mmu_spte_update(u64 *sptep, u64 new_spte) in mmu_spte_update()
521 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) in mmu_spte_clear_track_bits()
544 static void mmu_spte_clear_no_track(u64 *sptep) in mmu_spte_clear_no_track()
549 static u64 mmu_spte_get_lockless(u64 *sptep) in mmu_spte_get_lockless()
1119 struct kvm_rmap_head *rmap_head, u64 *sptep) in kvm_zap_one_rmap_spte()
1277 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte()
1285 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush) in drop_large_spte()
1311 static bool spte_write_protect(u64 *sptep, bool pt_protect) in spte_write_protect()
1329 u64 *sptep; in rmap_write_protect() local
1339 static bool spte_clear_dirty(u64 *sptep) in spte_clear_dirty()
1357 u64 *sptep; in __rmap_clear_dirty() local
1714 u64 *sptep; in kvm_rmap_age_gfn_range() local
1856 u64 *sptep; in kvm_mmu_mark_parents_unsync() local
2385 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, in kvm_mmu_child_role()
2432 u64 *sptep, gfn_t gfn, in kvm_mmu_get_child_sp()
2508 struct kvm_mmu_memory_cache *cache, u64 *sptep, in __link_shadow_page()
2542 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, in link_shadow_page()
2548 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte()
2617 u64 *sptep; in kvm_mmu_unlink_parents() local
2999 u64 *sptep, unsigned int pte_access, gfn_t gfn, in mmu_set_spte()
3070 static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep, in kvm_mmu_prefetch_sptes()
3118 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch()
3143 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch()
3524 u64 *sptep, u64 old_spte, u64 new_spte) in fast_pf_fix_direct_spte()
3560 u64 *sptep = NULL; in fast_pf_get_last_sptep() local
3578 u64 *sptep; in fast_page_fault() local
4289 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in get_mmio_spte()
5090 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte()
6885 u64 *sptep, spte; in shadow_mmu_split_huge_page() local
7080 u64 *sptep; in kvm_mmu_zap_collapsible_spte() local
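
Note: the low-level helpers listed twice above (__set_spte, __update_clear_spte_fast, __update_clear_spte_slow, __get_spte_lockless) appear to come from KVM's x86 shadow MMU (arch/x86/kvm/mmu/mmu.c), where they exist in two build variants: one for 64-bit kernels and one for 32-bit PAE, which has to split the 64-bit SPTE write into halves (hence the extra count_spte_clear helper in that path). The following is a minimal, standalone sketch of only the pattern the 64-bit variants follow, i.e. a single atomic 64-bit read, write, or exchange through the SPTE pointer. The sketch_* names, memory orderings, and the SPTE value are hypothetical; this is not the kernel code itself.

    /*
     * Illustrative sketch only -- not the kernel implementation. It mirrors
     * the pattern of the 64-bit __set_spte()/__get_spte_lockless()/
     * __update_clear_spte_slow() variants listed above: on 64-bit builds an
     * SPTE is accessed through its pointer (sptep) as one atomic 64-bit
     * operation, so no half-by-half update is needed.
     */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef _Atomic uint64_t spte_t;

    /* Analogue of __set_spte(): store the whole 64-bit SPTE in one go. */
    static void sketch_set_spte(spte_t *sptep, uint64_t spte)
    {
    	atomic_store_explicit(sptep, spte, memory_order_relaxed);
    }

    /* Analogue of __get_spte_lockless(): a single read of the SPTE. */
    static uint64_t sketch_get_spte_lockless(spte_t *sptep)
    {
    	return atomic_load_explicit(sptep, memory_order_relaxed);
    }

    /* Analogue of __update_clear_spte_slow(): exchange, returning the old SPTE. */
    static uint64_t sketch_update_clear_spte_slow(spte_t *sptep, uint64_t spte)
    {
    	return atomic_exchange_explicit(sptep, spte, memory_order_relaxed);
    }

    int main(void)
    {
    	spte_t spte = 0;

    	/* Hypothetical SPTE bit pattern, for illustration only. */
    	sketch_set_spte(&spte, 0x80000000000000f7ULL);
    	uint64_t old = sketch_update_clear_spte_slow(&spte, 0);

    	printf("old spte %#llx, now %#llx\n",
    	       (unsigned long long)old,
    	       (unsigned long long)sketch_get_spte_lockless(&spte));
    	return 0;
    }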