Lines Matching +refs:get +refs:addr +refs:attrs
79 .get = get_nx_huge_pages,
84 .get = param_get_uint,
156 u64 addr; member
2368 u64 addr) in shadow_walk_init_using_root() argument
2370 iterator->addr = addr; in shadow_walk_init_using_root()
2387 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2396 struct kvm_vcpu *vcpu, u64 addr) in shadow_walk_init() argument
2399 addr); in shadow_walk_init()
2407 iterator->index = SPTE_INDEX(iterator->addr, iterator->level); in shadow_walk_okay()
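The shadow-walk matches above (shadow_walk_init_using_root() through shadow_walk_okay()) derive a page-table index from the faulting address at each level; the pae_root line additionally selects one of four PAE roots with (addr >> 30) & 3. Below is a minimal standalone sketch of that index math, assuming standard x86-64 paging (4 KiB pages, nine index bits per level); spte_index_for_level() is a hypothetical stand-in, not the kernel's SPTE_INDEX() macro itself.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for SPTE_INDEX(addr, level): each paging level
 * consumes nine bits of the address above the 12-bit page offset
 * (assumes 4 KiB pages and 512-entry tables).
 */
static unsigned int spte_index_for_level(uint64_t addr, int level)
{
	unsigned int shift = 12 + 9 * (level - 1);

	return (addr >> shift) & 0x1ff;
}

int main(void)
{
	uint64_t addr = 0x7f1234567000ULL;
	int level;

	/* Walk from the root level (4) down to the leaf level (1). */
	for (level = 4; level >= 1; level--)
		printf("level %d -> index %u\n",
		       level, spte_index_for_level(addr, level));

	return 0;
}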
3247 for_each_shadow_entry(vcpu, fault->addr, it) { in direct_map()
3315 gva_t gva = fault->is_tdp ? 0 : fault->addr; in kvm_handle_noslot_fault()
3461 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
3463 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
4084 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) in mmio_info_in_cache() argument
4094 return vcpu_match_mmio_gpa(vcpu, addr); in mmio_info_in_cache()
4096 return vcpu_match_mmio_gva(vcpu, addr); in mmio_info_in_cache()
4105 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) in get_walk() argument
4111 for (shadow_walk_init(&iterator, vcpu, addr), in get_walk()
4125 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in get_mmio_spte() argument
4135 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root); in get_mmio_spte()
4137 leaf = get_walk(vcpu, addr, sptes, &root); in get_mmio_spte()
4164 __func__, addr); in get_mmio_spte()
4174 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) in handle_mmio_page_fault() argument
4179 if (mmio_info_in_cache(vcpu, addr, direct)) in handle_mmio_page_fault()
4182 reserved = get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
4194 addr = 0; in handle_mmio_page_fault()
4196 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault()
4197 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
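handle_mmio_page_fault() first consults the per-vCPU MMIO cache (mmio_info_in_cache()); only on a miss does it walk the SPTEs via get_mmio_spte() and then cache the resulting gfn and access bits, dropping the gva (addr = 0) for direct MMUs where only the gfn is meaningful. A simplified, self-contained model of that check-cache-then-walk flow follows; struct mmio_cache, walk_for_mmio_spte() and the return codes are illustrative stand-ins, not KVM's actual types.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the MMIO info KVM caches per vCPU. */
struct mmio_cache {
	uint64_t addr;		/* last MMIO gva (or gpa for direct MMUs) */
	uint64_t gfn;		/* cached translation */
	unsigned int access;
	bool valid;
};

/* Placeholder for get_mmio_spte(): true if the walk found an MMIO SPTE. */
static bool walk_for_mmio_spte(uint64_t addr, uint64_t *gfn, unsigned int *access)
{
	/* The real code walks sptes[] level by level and decodes the leaf. */
	*gfn = addr >> 12;
	*access = 0x3;
	return true;
}

/* Check-cache-then-walk flow, loosely mirroring handle_mmio_page_fault(). */
int model_mmio_page_fault(struct mmio_cache *cache, uint64_t addr, bool direct)
{
	uint64_t gfn;
	unsigned int access;

	if (cache->valid && cache->addr == addr)
		return 1;		/* cache hit: emulate immediately */

	if (!walk_for_mmio_spte(addr, &gfn, &access))
		return -1;		/* reserved bits set: bogus SPTE */

	if (direct)
		addr = 0;		/* direct MMUs cache only the gfn, not a gva */

	cache->addr = addr;
	cache->gfn = gfn;
	cache->access = access;
	cache->valid = true;
	return 1;
}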
4227 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) in shadow_page_table_clear_flood() argument
4233 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) in shadow_page_table_clear_flood()
4379 trace_kvm_try_async_get_page(fault->addr, fault->gfn); in __kvm_faultin_pfn()
4381 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); in __kvm_faultin_pfn()
4384 } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) { in __kvm_faultin_pfn()
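The __kvm_faultin_pfn() matches trace the async page-fault path: attempt async PF, detect a repeated fault on the same gfn, or queue a new async PF before falling back to a synchronous fault. A hedged sketch of that decision order follows; every helper below is an illustrative stand-in rather than a KVM function.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the checks made in __kvm_faultin_pfn(). */
static bool async_pf_allowed(void)
{
	return true;
}

static bool gfn_already_pending(uint64_t gfn)
{
	(void)gfn;
	return false;
}

static bool setup_async_pf(uint64_t addr, uint64_t gfn)
{
	(void)addr;
	(void)gfn;
	return true;
}

enum { FAULT_RETRY, FAULT_CONTINUE };

/*
 * Decision order loosely mirroring the traced path above: if async PF is
 * usable, either the gfn is already pending (retry and let the host finish)
 * or a new async PF is queued (retry); otherwise take the fault synchronously.
 */
int model_faultin_pfn(uint64_t addr, uint64_t gfn)
{
	if (async_pf_allowed()) {
		/* corresponds to trace_kvm_try_async_get_page(addr, gfn) */
		if (gfn_already_pending(gfn)) {
			/* corresponds to trace_kvm_async_pf_repeated_fault() */
			return FAULT_RETRY;
		} else if (setup_async_pf(addr, gfn)) {
			return FAULT_RETRY;
		}
	}
	return FAULT_CONTINUE;	/* pin the pfn synchronously */
}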
5903 u64 addr, hpa_t root_hpa) in __kvm_mmu_invalidate_addr() argument
5907 vcpu_clear_mmio_info(vcpu, addr); in __kvm_mmu_invalidate_addr()
5921 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { in __kvm_mmu_invalidate_addr()
5940 u64 addr, unsigned long roots) in kvm_mmu_invalidate_addr() argument
5949 if (is_noncanonical_address(addr, vcpu)) in kvm_mmu_invalidate_addr()
5952 static_call(kvm_x86_flush_tlb_gva)(vcpu, addr); in kvm_mmu_invalidate_addr()
5959 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); in kvm_mmu_invalidate_addr()
5963 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); in kvm_mmu_invalidate_addr()
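kvm_mmu_invalidate_addr() rejects non-canonical addresses, flushes the gva from the TLB, then invalidates the address under the current root and, when requested, each previous root. A minimal sketch of walking that root set follows; the bitmask convention (bit 0 = current root, bits 1..N = previous roots) echoes the loop visible above, but the types are simplified stand-ins.

#include <stdint.h>

#define NUM_PREV_ROOTS	3	/* mirrors KVM_MMU_NUM_PREV_ROOTS */

/* Simplified stand-in for struct kvm_mmu's root bookkeeping. */
struct mmu_model {
	uint64_t root_hpa;
	uint64_t prev_root_hpa[NUM_PREV_ROOTS];
};

static void invalidate_under_root(struct mmu_model *mmu, uint64_t addr, uint64_t root_hpa)
{
	/* The real __kvm_mmu_invalidate_addr() walks the SPTEs for addr here. */
	(void)mmu;
	(void)addr;
	(void)root_hpa;
}

/*
 * roots is a bitmask: bit 0 selects the current root, bits 1..N select the
 * previous roots, echoing the loop at the end of kvm_mmu_invalidate_addr().
 */
void model_invalidate_addr(struct mmu_model *mmu, uint64_t addr, unsigned long roots)
{
	int i;

	if (roots & 1)
		invalidate_under_root(mmu, addr, mmu->root_hpa);

	for (i = 0; i < NUM_PREV_ROOTS; i++) {
		if (roots & (1ul << (i + 1)))
			invalidate_under_root(mmu, addr, mmu->prev_root_hpa[i]);
	}
}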
7341 gfn_t gfn, int level, unsigned long attrs) in hugepage_has_attrs() argument
7347 return kvm_range_has_memory_attributes(kvm, start, end, attrs); in hugepage_has_attrs()
7351 attrs != kvm_get_memory_attributes(kvm, gfn)) in hugepage_has_attrs()
7360 unsigned long attrs = range->arg.attributes; in kvm_arch_post_set_memory_attributes() local
7392 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7414 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7450 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn); in kvm_mmu_init_memslot_memory_attributes() local
7452 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_mmu_init_memslot_memory_attributes()
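hugepage_has_attrs() asks whether every gfn covered by a candidate huge page carries the same memory attributes, which is what gates huge-page mapping in kvm_arch_post_set_memory_attributes() and kvm_mmu_init_memslot_memory_attributes(); the range-based lookup (kvm_range_has_memory_attributes) handles the fully covered case and a per-gfn comparison is the fallback. A standalone sketch of that per-gfn check follows, assuming a level-N huge page spans 512^(N-1) base pages; lookup_attrs() is a stand-in for kvm_get_memory_attributes().

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Stand-in for kvm_get_memory_attributes(kvm, gfn). */
static unsigned long lookup_attrs(gfn_t gfn)
{
	(void)gfn;
	return 0;
}

/* Base-page gfns covered by a huge page at the given level (4 KiB base pages). */
static gfn_t pages_per_level(int level)
{
	return (gfn_t)1 << (9 * (level - 1));	/* 512^(level - 1) */
}

/*
 * Per-gfn version of the check in hugepage_has_attrs(): a huge page can only
 * be mapped if every base page it covers reports the same attributes.
 */
bool model_hugepage_has_attrs(gfn_t gfn, int level, unsigned long attrs)
{
	gfn_t start = gfn & ~(pages_per_level(level) - 1);	/* align down */
	gfn_t end = start + pages_per_level(level);
	gfn_t g;

	for (g = start; g < end; g++) {
		if (lookup_attrs(g) != attrs)
			return false;
	}
	return true;
}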