Lines matching "supervisor" and "level" in arch/x86/kvm/mmu.h (Linux KVM)

/* SPDX-License-Identifier: GPL-2.0 */

rsvd_bits():
        return ((2ULL << (e - s)) - 1) << s;
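The expression above produces a mask with bits s..e set, inclusive. A
standalone sketch of the same computation (rsvd_bits_demo() and the example
range are illustrative, not KVM's code):

#include <stdio.h>
#include <stdint.h>

/* Mask with bits s..e set, inclusive. Writing 2ULL << (e - s) rather
 * than 1ULL << (e - s + 1) keeps the shift count below 64 even when
 * the mask covers bits 0..63. */
static uint64_t rsvd_bits_demo(int s, int e)
{
        return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
        /* bits 51..62, e.g. a reserved range above the physical
         * address width in a PTE */
        printf("%#llx\n", (unsigned long long)rsvd_bits_demo(51, 62));
        /* prints 0x7ff8000000000000 */
        return 0;
}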
/* The number of non-reserved physical address bits irrespective of features */

kvm_mmu_max_gfn():
        return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
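Worked example for the line above: with 4 KiB pages (PAGE_SHIFT = 12) and a
hypothetical 52-bit guest-physical address width, the highest addressable
GFN is 2^40 - 1. A minimal sketch under those assumptions:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

int main(void)
{
        int max_gpa_bits = 52;  /* assumed guest-physical width */
        uint64_t max_gfn = (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;

        printf("max_gfn = %#llx\n", (unsigned long long)max_gfn);
        /* prints max_gfn = 0xffffffffff, i.e. 2^40 - 1 */
        return 0;
}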
kvm_mmu_reload():
        if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))

kvm_mmu_load_pgd():
        u64 root_hpa = vcpu->arch.mmu->root.hpa;
                               vcpu->arch.mmu->root_role.level);
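The kvm_mmu_reload() check above is a lazy-reload fast path: INVALID_PAGE
marks "no root loaded", so the expensive rebuild only runs when the cached
root has been invalidated. A user-space sketch of the pattern (struct and
function names here are illustrative, not KVM's):

#include <stdint.h>

#define INVALID_PAGE ((uint64_t)-1)     /* sentinel: no root loaded */

struct mmu_demo {
        uint64_t root_hpa;
};

static int load_root(struct mmu_demo *mmu)
{
        /* stands in for the real work of building page tables */
        mmu->root_hpa = 0x1000;
        return 0;
}

static int mmu_reload_demo(struct mmu_demo *mmu)
{
        if (mmu->root_hpa != INVALID_PAGE)      /* fast path: still valid */
                return 0;
        return load_root(mmu);                  /* slow path: rebuild */
}

int main(void)
{
        struct mmu_demo mmu = { .root_hpa = INVALID_PAGE };

        mmu_reload_demo(&mmu);          /* first call rebuilds the root */
        return mmu_reload_demo(&mmu);   /* second call is a no-op */
}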
kvm_mmu_refresh_passthrough_bits():
         * be stale. Refresh CR0.WP and the metadata on-demand when checking
        if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
permission_fault():
         * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
         * For implicit supervisor accesses, SMAP cannot be overridden.
         * SMAP works on supervisor accesses only, and not_smap can
        fault = (mmu->permissions[index] >> pte_access) & 1;
        if (unlikely(mmu->pkru_mask)) {
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
        return -(u32)fault & errcode;
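Two idioms in the permission_fault() lines above are worth unpacking:
(pkru >> (pkey * 2)) & 3 extracts one protection key's two-bit {AD, WD}
field from PKRU's 16 packed entries, and -(u32)fault & errcode is a
branchless select that yields errcode when fault is 1 and 0 when it is 0.
A standalone illustration (all values are examples only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* PKRU packs two bits per key: bit 2k = AD, bit 2k+1 = WD.
         * 0x8 sets WD for key 1, so the extracted field is 0b10 = 2. */
        uint32_t pkru = 0x8;
        int pte_pkey = 1;
        uint32_t pkru_bits = (pkru >> (pte_pkey * 2)) & 3;

        printf("key %d bits: %u\n", pte_pkey, pkru_bits);

        /* -(u32)1 is all-ones (keeps errcode), -(u32)0 is 0 (drops it) */
        uint32_t errcode = 0x5; /* example accumulated error bits */
        for (uint32_t fault = 0; fault <= 1; fault++)
                printf("fault=%u -> %#x\n", fault, -fault & errcode);
        /* prints "fault=0 -> 0" then "fault=1 -> 0x5" */
        return 0;
}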
kvm_shadow_root_allocated():
        return smp_load_acquire(&kvm->arch.shadow_root_allocated);
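The smp_load_acquire() above pairs with a release store on the writer side,
so a reader that observes the flag as set also observes the shadow-root
allocations the flag guards. A C11 user-space analogue of that pattern
(not KVM code; names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool root_allocated_demo;
static int guarded_data;        /* stands in for the allocated structures */

static void writer_publish(void)
{
        guarded_data = 42;      /* must be visible before the flag */
        atomic_store_explicit(&root_allocated_demo, true,
                              memory_order_release);
}

static bool reader_check(void)
{
        /* seeing true here guarantees guarded_data == 42 is visible */
        return atomic_load_explicit(&root_allocated_demo,
                                    memory_order_acquire);
}

int main(void)
{
        writer_publish();
        return reader_check() ? 0 : 1;
}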
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
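gfn_to_index() above converts a GFN to its slot-relative index at a given
page-table level. A standalone re-statement, assuming the x86 convention
that each level covers 9 more GFN bits (shift 0 for 4K, 9 for 2M, 18 for 1G):

#include <stdio.h>
#include <stdint.h>

#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)     /* x86 assumption */

static uint64_t gfn_to_index_demo(uint64_t gfn, uint64_t base_gfn, int level)
{
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        /* in a slot starting at GFN 0x1000, GFN 0x1a00 falls in the
         * 6th 2M page (index 5), since a 2M page spans 512 GFNs */
        printf("%llu\n",
               (unsigned long long)gfn_to_index_demo(0x1a00, 0x1000, 2));
        return 0;
}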
__kvm_mmu_slot_lpages():
                              int level)
        return gfn_to_index(slot->base_gfn + npages - 1,
                            slot->base_gfn, level) + 1;

kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
        return __kvm_mmu_slot_lpages(slot, slot->npages, level);
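__kvm_mmu_slot_lpages() above counts how many level-N pages a slot touches:
the index of the slot's last GFN, plus one. Note that a range smaller than
one hugepage still counts as two entries if it straddles a hugepage
boundary. A sketch under the same x86 shift assumption as above:

#include <stdio.h>
#include <stdint.h>

#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)     /* x86 assumption */

static uint64_t slot_lpages_demo(uint64_t base_gfn, uint64_t npages, int level)
{
        uint64_t last = base_gfn + npages - 1;

        return (last >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level)) + 1;
}

int main(void)
{
        /* 16 small pages starting 8 GFNs below a 2M boundary: the range
         * crosses the boundary, so it spans two level-2 entries */
        printf("%llu\n", (unsigned long long)slot_lpages_demo(504, 16, 2));
        return 0;
}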
static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
        atomic64_add(count, &kvm->stat.pages[level - 1]);
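kvm_update_page_stats() above keeps one atomic counter per page-table
level, indexed by level - 1 since levels are 1-based (4K = 1, 2M = 2,
1G = 3); callers pass a negative count when unmapping. A user-space sketch
(the array bound is illustrative, not KVM's constant):

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVEL 5

static _Atomic int64_t pages[MAX_LEVEL];        /* [0] = 4K, [1] = 2M, ... */

static void update_page_stats_demo(int level, int count)
{
        /* levels are 1-based, the stats array is 0-based */
        atomic_fetch_add(&pages[level - 1], count);
}

int main(void)
{
        update_page_stats_demo(1, 512);         /* map 512 4K pages */
        update_page_stats_demo(1, -512);        /* unmap them again */
        update_page_stats_demo(2, 1);           /* map one 2M page */

        printf("4K=%lld 2M=%lld\n", (long long)pages[0], (long long)pages[1]);
        /* prints 4K=0 2M=1 */
        return 0;
}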
kvm_translate_gpa():
        if (mmu != &vcpu->arch.nested_mmu)