Lines Matching defs:gfn

197 static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)  in mark_mmio_spte()
220 static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access) in set_mmio_spte()
673 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn()
685 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot()
696 static void account_shadowed(struct kvm *kvm, gfn_t gfn) in account_shadowed()
711 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) in unaccount_shadowed()
728 gfn_t gfn, in has_wrprotected_page()
743 static int host_mapping_level(struct kvm *kvm, gfn_t gfn) in host_mapping_level()
762 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap()
949 static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level, in __gfn_to_rmap()
964 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) in gfn_to_rmap()
980 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add()
999 gfn_t gfn; in rmap_remove() local
1014 int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, in kvm_mmu_rmap_write_protect()
1055 static int rmap_write_protect(struct kvm *kvm, u64 gfn) in rmap_write_protect()
1134 gfn_t gfn = memslot->base_gfn + gfn_offset; in kvm_handle_hva() local
1225 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle()
1301 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn()
1482 #define for_each_gfn_sp(kvm, sp, gfn, pos) \ argument
1487 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \ argument
1541 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_sync_pages()
1676 gfn_t gfn, in kvm_mmu_get_page()
2015 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page()
2038 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) in page_header_update_slot()
2139 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_get_guest_memory_type()
2160 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_unsync_pages()
2173 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, in mmu_need_write_protect()
2199 gfn_t gfn, pfn_t pfn, bool speculative, in set_spte()
2296 int *emulate, int level, gfn_t gfn, in mmu_set_spte()
2364 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, in pte_prefetch_gfn_to_pfn()
2388 gfn_t gfn; in direct_pte_prefetch_many() local
2451 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map()
2508 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page()
2523 gfn_t gfn = *gfnp; in transparent_hugepage_adjust() local
2566 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn()
2588 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, in nonpaging_map()
2944 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault_common() local
2983 gfn_t gfn; in nonpaging_page_fault() local
3004 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf()
3025 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, in try_async_pf()
3059 gfn_t gfn = gpa >> PAGE_SHIFT; in tdp_page_fault() local
3167 static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access, in sync_mmio_spte()
3632 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_pte_write() local
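A brief sketch of the arithmetic these hits revolve around may help when skimming the list: a gfn (guest frame number) is simply the guest physical address shifted down by the page shift, as the locals in tdp_page_fault() and kvm_mmu_pte_write() show (`gfn = gpa >> PAGE_SHIFT`), and helpers such as __gfn_to_rmap() then turn a gfn into a per-level index inside a memslot. The user-space sketch below is illustrative only; the type names, PAGE_SHIFT value, level shift, and function names are assumptions for demonstration, not the kernel's definitions.

```c
/*
 * Minimal user-space sketch (not KVM code): reduce a guest physical
 * address (gpa) to a guest frame number (gfn), and compute a gfn's
 * per-level index within a memslot, roughly the arithmetic behind
 * __gfn_to_rmap() in the listing above.  Constants and names here are
 * illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

#define PAGE_SHIFT      12                 /* 4 KiB base pages (assumption) */
#define LEVEL_SHIFT(l)  (((l) - 1) * 9)    /* 512 entries per paging level (assumption) */

/* gfn = gpa >> PAGE_SHIFT, as seen in tdp_page_fault()/kvm_mmu_pte_write() */
static gfn_t gpa_to_gfn(gpa_t gpa)
{
        return gpa >> PAGE_SHIFT;
}

/* Index of @gfn within a memslot starting at @base_gfn, at mapping @level */
static uint64_t gfn_to_level_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> LEVEL_SHIFT(level)) - (base_gfn >> LEVEL_SHIFT(level));
}

int main(void)
{
        gpa_t gpa = 0x12345678;
        gfn_t gfn = gpa_to_gfn(gpa);

        printf("gpa 0x%llx -> gfn 0x%llx\n",
               (unsigned long long)gpa, (unsigned long long)gfn);
        printf("4K-level index in slot at gfn 0x10000: %llu\n",
               (unsigned long long)gfn_to_level_index(gfn, 0x10000, 1));
        printf("2M-level index in slot at gfn 0x10000: %llu\n",
               (unsigned long long)gfn_to_level_index(gfn, 0x10000, 2));
        return 0;
}
```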