Lines Matching +refs:get +refs:addr +refs:attrs (KVM x86 MMU, arch/x86/kvm/mmu/mmu.c). Each entry gives the source line number, the matched code, and, where available, the enclosing function and the identifier's role (argument, member, or local).
79 .get = get_nx_huge_pages,
84 .get = param_get_uint,
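The two .get hits above are kernel_param_ops getters for the NX-hugepage module parameters: one custom (get_nx_huge_pages) and one stock (param_get_uint). As a hedged sketch of that pattern rather than the actual mmu.c code, a module parameter with custom get/set callbacks is wired up through module_param_cb(); every demo_* name below is hypothetical.

```c
/* Module-style sketch only; builds as part of a kernel module, not standalone. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_nx_flag;   /* hypothetical knob, stands in for nx_huge_pages */

static int demo_nx_flag_get(char *buffer, const struct kernel_param *kp)
{
	/* Report the current value in the Y/N style used for bool params. */
	return sprintf(buffer, "%c\n", demo_nx_flag ? 'Y' : 'N');
}

static int demo_nx_flag_set(const char *val, const struct kernel_param *kp)
{
	/* Reuse the stock bool parser, much as param_get_uint is reused above. */
	return param_set_bool(val, kp);
}

static const struct kernel_param_ops demo_nx_flag_ops = {
	.set = demo_nx_flag_set,
	.get = demo_nx_flag_get,
};

module_param_cb(demo_nx_flag, &demo_nx_flag_ops, &demo_nx_flag, 0644);
MODULE_LICENSE("GPL");
```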
156 u64 addr; member
2446 u64 addr) in shadow_walk_init_using_root() argument
2448 iterator->addr = addr; in shadow_walk_init_using_root()
2465 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2474 struct kvm_vcpu *vcpu, u64 addr) in shadow_walk_init() argument
2477 addr); in shadow_walk_init()
2485 iterator->index = SPTE_INDEX(iterator->addr, iterator->level); in shadow_walk_okay()
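The shadow-walk iterator hits above come down to fixed bit arithmetic on the guest address: SPTE_INDEX() pulls 9 index bits per level above the 12-bit page offset, and with PAE roots bits 31:30 of the address select one of the four pae_root entries. A minimal standalone sketch of that arithmetic (plain userspace C for illustration, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not kernel code: 4 KiB pages mean a 12-bit offset,
 * then 9 index bits per paging level, which is what SPTE_INDEX() extracts. */
#define PT_SHIFT(level)        (12 + 9 * ((level) - 1))
#define PT_INDEX(addr, level)  (((addr) >> PT_SHIFT(level)) & 0x1ff)

int main(void)
{
	uint64_t addr = 0x7f1234567890ULL;
	uint32_t gva32 = 0xc0123456;   /* a 32-bit guest address for the PAE case */

	/* Index consumed at each level of a 4-level walk, root first. */
	for (int level = 4; level >= 1; level--)
		printf("level %d index %llu\n", level,
		       (unsigned long long)PT_INDEX(addr, level));

	/* With PAE paging, bits 31:30 pick one of the four pae_root entries. */
	printf("pae_root index %u\n", (gva32 >> 30) & 3);
	return 0;
}
```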
3359 for_each_shadow_entry(vcpu, fault->addr, it) { in direct_map()
3427 gva_t gva = fault->is_tdp ? 0 : fault->addr; in kvm_handle_noslot_fault()
3592 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
4232 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) in mmio_info_in_cache() argument
4242 return vcpu_match_mmio_gpa(vcpu, addr); in mmio_info_in_cache()
4244 return vcpu_match_mmio_gva(vcpu, addr); in mmio_info_in_cache()
4253 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) in get_walk() argument
4259 for (shadow_walk_init(&iterator, vcpu, addr), in get_walk()
4272 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, in get_sptes_lockless() argument
4280 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level); in get_sptes_lockless()
4282 leaf = get_walk(vcpu, addr, sptes, root_level); in get_sptes_lockless()
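get_walk() and get_sptes_lockless() above follow a simple contract: start at the root, record the entry observed at each level into sptes[level], stop at a missing or leaf entry, and return the leaf level while reporting the root level to the caller. A toy, self-contained walk over a software radix table that mimics that contract (illustrative only; the table layout and flag bits here are made up, not KVM's):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LEVELS      4
#define ENTRIES     512                      /* 9 index bits per level */
#define IDX(a, l)   (((a) >> (12 + 9 * ((l) - 1))) & (ENTRIES - 1))
#define PRESENT     0x1ULL
#define LEAF        0x80ULL                  /* stand-in for a large/leaf bit */

/* Toy 4-level table: each node holds 512 entries plus child pointers. */
struct node { uint64_t entry[ENTRIES]; struct node *child[ENTRIES]; };

/* Mirrors the get_walk() contract: fill sptes[level], return the leaf level. */
static int toy_get_walk(struct node *root, uint64_t addr, uint64_t *sptes,
			int *root_level)
{
	struct node *n = root;
	int leaf = -1;

	*root_level = LEVELS;
	for (int level = LEVELS; level >= 1 && n; level--) {
		uint64_t e = n->entry[IDX(addr, level)];

		sptes[level] = e;
		leaf = level;
		if (!(e & PRESENT) || (e & LEAF))
			break;
		n = n->child[IDX(addr, level)];
	}
	return leaf;
}

int main(void)
{
	struct node *l4 = calloc(1, sizeof(*l4));
	struct node *l3 = calloc(1, sizeof(*l3));
	uint64_t addr = 0x7f1234567000ULL, sptes[LEVELS + 1] = { 0 };
	int root_level;

	/* Wire up a present L4 entry pointing at an L3 leaf entry. */
	l4->entry[IDX(addr, 4)] = PRESENT;
	l4->child[IDX(addr, 4)] = l3;
	l3->entry[IDX(addr, 3)] = PRESENT | LEAF;

	int leaf = toy_get_walk(l4, addr, sptes, &root_level);
	printf("root level %d, leaf level %d, leaf entry %#llx\n",
	       root_level, leaf, (unsigned long long)sptes[leaf]);
	free(l3);
	free(l4);
	return 0;
}
```

The real walk is lockless and dispatches to kvm_tdp_mmu_get_walk() when the TDP MMU owns the root, as the get_sptes_lockless() hit above shows; the sketch only models the sptes[]/root_level bookkeeping.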
4289 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in get_mmio_spte() argument
4296 leaf = get_sptes_lockless(vcpu, addr, sptes, &root); in get_mmio_spte()
4320 __func__, addr); in get_mmio_spte()
4330 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) in handle_mmio_page_fault() argument
4335 if (mmio_info_in_cache(vcpu, addr, direct)) in handle_mmio_page_fault()
4338 reserved = get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
4350 addr = 0; in handle_mmio_page_fault()
4352 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault()
4353 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
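handle_mmio_page_fault() first checks mmio_info_in_cache(), which matches the faulting address against a small per-vCPU MMIO cache, and on a successful decode re-primes that cache with vcpu_cache_mmio_info(). A minimal userspace sketch of the cache-then-match idea, with hypothetical names and a page-granular match (not the KVM helpers themselves):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_MASK (~0xfffULL)

/* Hypothetical stand-in for the per-vCPU cached MMIO translation. */
struct mmio_cache {
	uint64_t gva;          /* page-aligned guest virtual address */
	uint64_t gfn;
	unsigned int access;
	bool valid;
};

static void cache_mmio_info(struct mmio_cache *c, uint64_t gva, uint64_t gfn,
			    unsigned int access)
{
	c->gva = gva & DEMO_PAGE_MASK;
	c->gfn = gfn;
	c->access = access;
	c->valid = true;
}

static bool mmio_in_cache(const struct mmio_cache *c, uint64_t gva)
{
	/* Match at page granularity, as the gva-based lookup above does. */
	return c->valid && (gva & DEMO_PAGE_MASK) == c->gva;
}

int main(void)
{
	struct mmio_cache c = { 0 };

	cache_mmio_info(&c, 0xfee000f0, 0xfee00, 0x3);
	printf("%d %d\n", mmio_in_cache(&c, 0xfee00030),   /* hit  */
			  mmio_in_cache(&c, 0xc0000000));   /* miss */
	return 0;
}
```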
4383 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) in shadow_page_table_clear_flood() argument
4389 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) in shadow_page_table_clear_flood()
4416 return kvm_setup_async_pf(vcpu, fault->addr, in kvm_arch_setup_async_pf()
4540 trace_kvm_try_async_get_page(fault->addr, fault->gfn); in __kvm_mmu_faultin_pfn()
4542 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); in __kvm_mmu_faultin_pfn()
6322 u64 addr, hpa_t root_hpa) in __kvm_mmu_invalidate_addr() argument
6326 vcpu_clear_mmio_info(vcpu, addr); in __kvm_mmu_invalidate_addr()
6340 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { in __kvm_mmu_invalidate_addr()
6359 u64 addr, unsigned long roots) in kvm_mmu_invalidate_addr() argument
6368 if (is_noncanonical_invlpg_address(addr, vcpu)) in kvm_mmu_invalidate_addr()
6371 kvm_x86_call(flush_tlb_gva)(vcpu, addr); in kvm_mmu_invalidate_addr()
6378 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); in kvm_mmu_invalidate_addr()
6382 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); in kvm_mmu_invalidate_addr()
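kvm_mmu_invalidate_addr() takes a roots bitmask and invalidates the address in the current root and/or each cached previous root. A small sketch of that dispatch, assuming the usual bit layout (bit 0 for the current root, bits 1..N for prev_roots[]); the macros below restate that assumption locally rather than using the kernel's definitions:

```c
#include <stdio.h>

#define NUM_PREV_ROOTS     3                      /* assumed cache depth */
#define ROOT_CURRENT       (1u << 0)              /* assumed bit layout   */
#define ROOT_PREVIOUS(i)   (1u << (1 + (i)))

/* Hypothetical stand-in for "invalidate this address in this root". */
static void invalidate_in_root(const char *which, unsigned long long addr)
{
	printf("invalidate %#llx in %s\n", addr, which);
}

static void invalidate_addr(unsigned long long addr, unsigned int roots)
{
	char name[32];

	if (roots & ROOT_CURRENT)
		invalidate_in_root("current root", addr);

	for (int i = 0; i < NUM_PREV_ROOTS; i++) {
		if (roots & ROOT_PREVIOUS(i)) {
			snprintf(name, sizeof(name), "prev_roots[%d]", i);
			invalidate_in_root(name, addr);
		}
	}
}

int main(void)
{
	invalidate_addr(0x7f0000001000ULL, ROOT_CURRENT | ROOT_PREVIOUS(1));
	return 0;
}
```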
7755 gfn_t gfn, int level, unsigned long attrs) in hugepage_has_attrs() argument
7761 return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs); in hugepage_has_attrs()
7765 attrs != kvm_get_memory_attributes(kvm, gfn)) in hugepage_has_attrs()
7774 unsigned long attrs = range->arg.attributes; in kvm_arch_post_set_memory_attributes() local
7807 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7829 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7865 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn); in kvm_mmu_init_memslot_memory_attributes() local
7867 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_mmu_init_memslot_memory_attributes()
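hugepage_has_attrs() asks whether every 4 KiB page backing a candidate hugepage carries the requested memory attributes, which starts with computing the gfn range the hugepage covers at the given level. That range is pure alignment arithmetic; a standalone sketch, assuming the usual 9-bits-per-level geometry (512 gfns per 2 MiB page at level 2, 512*512 at level 3):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumption: 9 index bits per paging level; level 1 is a single 4 KiB page. */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

/* gfn range [start, end) covered by the hugepage containing @gfn at @level. */
static void hugepage_gfn_range(uint64_t gfn, int level,
			       uint64_t *start, uint64_t *end)
{
	uint64_t pages = PAGES_PER_HPAGE(level);

	*start = gfn & ~(pages - 1);   /* align down to the hugepage boundary */
	*end = *start + pages;
}

int main(void)
{
	uint64_t start, end;

	hugepage_gfn_range(0x12345, 2, &start, &end);   /* 2 MiB hugepage */
	printf("level 2: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);

	hugepage_gfn_range(0x12345, 3, &start, &end);   /* 1 GiB hugepage */
	printf("level 3: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
```

The hit at line 7761 then hands that [start, end) range to kvm_range_has_memory_attributes(), so the hugepage only qualifies when every gfn in the range reports the same attributes.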