Lines Matching +full:page +full:- +full:level

1 /* SPDX-License-Identifier: GPL-2.0 */
15 /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
17 #define __PT_LEVEL_SHIFT(level, bits_per_level) \
18 (PAGE_SHIFT + ((level) - 1) * (bits_per_level))
19 #define __PT_INDEX(address, level, bits_per_level) \
20 (((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))
22 #define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
23 ((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))
25 #define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
26 ((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))
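As a quick sanity check of what these macros compute, here is a minimal standalone sketch that mirrors __PT_LEVEL_SHIFT and __PT_INDEX outside the kernel, assuming PAGE_SHIFT = 12 and 9 translation bits per level (the x86-64 4-level layout); the MY_ names and the sample address are illustrative only, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Userspace mirror of __PT_LEVEL_SHIFT/__PT_INDEX, assuming PAGE_SHIFT = 12
 * and 9 bits per level, as on x86-64 4-level paging.  Not kernel code. */
#define MY_PAGE_SHIFT 12
#define MY_PT_LEVEL_SHIFT(level, bits)	(MY_PAGE_SHIFT + ((level) - 1) * (bits))
#define MY_PT_INDEX(addr, level, bits) \
	(((addr) >> MY_PT_LEVEL_SHIFT(level, bits)) & ((1ULL << (bits)) - 1))

int main(void)
{
	uint64_t addr = 0x00007f8000201000ULL;	/* arbitrary sample address */

	/* Level 1 selects address bits 12..20, level 2 bits 21..29, etc. */
	for (int level = 1; level <= 4; level++)
		printf("level %d: shift %d, index %llu\n", level,
		       MY_PT_LEVEL_SHIFT(level, 9),
		       (unsigned long long)MY_PT_INDEX(addr, level, 9));
	return 0;
}

With these assumed parameters, the four printed indexes are the slots a 4-level walk would use for that address at each level of the table hierarchy.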
32 * bit, and thus are guaranteed to be non-zero when valid. And, when a guest
55 * 64-bit kernels, keep it that way unless there's a reason not to.
70 * The shadow page can't be replaced by an equivalent huge page
71 * because it is being used to map an executable page in the guest
72 * and the NX huge page mitigation is enabled.
77 * The following two entries are used to key the shadow page in the
87 * SPTE. KVM shadows two types of guest translations: nGPA -> GPA
88 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
113 * huge page. A shadow page will have nx_huge_page_disallowed set but
114 * not be on the list if a huge page is disallowed for other reasons,
121 * Used out of the mmu-lock to avoid reading spte values while an
127 /* Number of writes since the last time traversal visited this page. */
131 /* Used for freeing the page asynchronously if it is a TDP MMU page. */
145 return kvm_mmu_role_as_id(sp->role); in kvm_mmu_page_as_id()
151 * When using the EPT page-modification log, the GPAs in the CPU dirty in kvm_mmu_page_ad_need_write_protect()
155 * being enabled is mandatory as the bits used to denote WP-only SPTEs in kvm_mmu_page_ad_need_write_protect()
156 * are reserved for PAE paging (32-bit KVM). in kvm_mmu_page_ad_need_write_protect()
158 return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode; in kvm_mmu_page_ad_need_write_protect()
161 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level()
163 return gfn & -KVM_PAGES_PER_HPAGE(level); in gfn_round_for_level()
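The gfn & -KVM_PAGES_PER_HPAGE(level) idiom relies on the page count being a power of two, so its negation is a mask that clears the low-order bits. A minimal sketch, with an assumed stand-in for KVM_PAGES_PER_HPAGE() (9 translation bits per level, so 1, 512, 262144, ... pages), shows the rounding:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Stand-in for KVM_PAGES_PER_HPAGE(): 4K pages per huge page at 'level',
 * assuming 9 translation bits per level.  Illustrative only. */
static inline gfn_t pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * 9);
}

/* Same rounding idiom as gfn_round_for_level(): because the page count is a
 * power of two, -count is an all-ones mask with the low bits cleared. */
static inline gfn_t round_gfn(gfn_t gfn, int level)
{
	return gfn & -pages_per_hpage(level);
}

int main(void)
{
	gfn_t gfn = 0x12345;

	for (int level = 1; level <= 3; level++)
		printf("level %d: base gfn 0x%llx, spans %llu pages\n", level,
		       (unsigned long long)round_gfn(gfn, level),
		       (unsigned long long)pages_per_hpage(level));
	return 0;
}

kvm_flush_remote_tlbs_gfn() below feeds exactly this rounded base and page count into the range flush.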
175 /* Flush the given page (huge or not) of guest memory. */
176 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) in kvm_flush_remote_tlbs_gfn()
178 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), in kvm_flush_remote_tlbs_gfn()
179 KVM_PAGES_PER_HPAGE(level)); in kvm_flush_remote_tlbs_gfn()
187 return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages; in is_nx_huge_page_enabled()
215 * Maximum page size that can be created for this fault; input to
221 * Page size that can be created based on the max_level and the
222 * page size used by the host mapping.
227 * Page size that will be created based on the req_level and
232 /* Shifted addr, or result of guest page table walk if addr is a gva. */
247 * is changing its own translation in the guest page tables.
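Reading the three *_level field comments above together: max_level caps what the fault handler may create, req_level additionally folds in the page size of the host mapping, and goal_level is what finally gets installed once other restrictions (such as a disallowed huge page) are applied. A toy sketch of that narrowing, with hypothetical constants and a hypothetical helper rather than KVM's actual hugepage-adjustment code:

/* Toy illustration of how the three levels narrow each other down; the
 * TOY_ constants and this helper are hypothetical, not KVM code. */
enum { TOY_LEVEL_4K = 1, TOY_LEVEL_2M = 2, TOY_LEVEL_1G = 3 };

static inline int toy_pick_goal_level(int max_level, int host_level,
				      int huge_disallowed)
{
	/* req_level: capped by both the fault's max_level and the host mapping. */
	int req_level = host_level < max_level ? host_level : max_level;

	/* goal_level: fall back to 4K if a huge page is disallowed here. */
	return huge_disallowed && req_level > TOY_LEVEL_4K ? TOY_LEVEL_4K
							   : req_level;
}

With max_level = TOY_LEVEL_1G, host_level = TOY_LEVEL_2M and no restriction this picks 2M; with huge_disallowed set it falls back to 4K.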
258 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
260 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
261 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
269 * on -errno return values. Somewhat arbitrarily use '0' for CONTINUE, which
271 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
294 .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault), in kvm_mmu_do_page_fault()
296 is_nx_huge_page_enabled(vcpu->kvm), in kvm_mmu_do_page_fault()
301 .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT), in kvm_mmu_do_page_fault()
305 if (vcpu->arch.mmu->root_role.direct) { in kvm_mmu_do_page_fault()
316 vcpu->stat.pf_taken++; in kvm_mmu_do_page_fault()
321 r = vcpu->arch.mmu->page_fault(vcpu, &fault); in kvm_mmu_do_page_fault()
332 vcpu->stat.pf_fixed++; in kvm_mmu_do_page_fault()
336 vcpu->stat.pf_emulate++; in kvm_mmu_do_page_fault()
338 vcpu->stat.pf_spurious++; in kvm_mmu_do_page_fault()
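Stitching the matched lines of kvm_mmu_do_page_fault() back together, the flow is: build a kvm_page_fault on the stack, bump pf_taken, dispatch through the per-MMU page_fault callback, then bump the counter matching the result. A self-contained sketch of that dispatch-and-count pattern, using stand-in types and names rather than KVM's:

#include <stdint.h>

/* Stand-ins for the pieces visible above; not KVM's real types. */
struct toy_fault { uint64_t addr; int is_tdp; };
struct toy_stats { uint64_t pf_taken, pf_fixed, pf_emulate, pf_spurious; };

enum { TOY_FIXED, TOY_EMULATE, TOY_SPURIOUS };

static int toy_do_page_fault(struct toy_stats *stat,
			     int (*page_fault)(struct toy_fault *),
			     uint64_t addr, int is_tdp)
{
	/* Describe the fault once, up front, then hand it to the handler. */
	struct toy_fault fault = {
		.addr = addr,
		.is_tdp = is_tdp,
	};
	int r;

	stat->pf_taken++;		/* every fault counts as taken */

	r = page_fault(&fault);		/* per-MMU handler, as in the snippet */

	/* Account the outcome exactly once, based on the return value. */
	if (r == TOY_FIXED)
		stat->pf_fixed++;
	else if (r == TOY_EMULATE)
		stat->pf_emulate++;
	else if (r == TOY_SPURIOUS)
		stat->pf_spurious++;
	return r;
}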