Lines Matching full:fault

92 	struct x86_exception fault;  member
249 ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault); in FNAME()
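
The match at line 249 is the point where the walker folds accessed/dirty updates back into the guest PTE with a compare-and-exchange, so a concurrent guest write to the same PTE makes the update fail rather than being silently overwritten. Below is a minimal userspace model of that pattern; the bit positions and the set_accessed_dirty() helper are invented for illustration and are not the kernel's, and in this model a failed exchange simply tells the caller to re-read the PTE and retry.

/*
 * Toy model: set the accessed (and, on write, dirty) bit in a PTE and
 * publish it with a compare-and-exchange against the value originally
 * read.  If the PTE changed concurrently, the exchange fails and the
 * caller should restart its walk with the fresh value.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_ACCESSED (1ull << 5)   /* illustrative bit positions */
#define PTE_DIRTY    (1ull << 6)

static bool set_accessed_dirty(_Atomic uint64_t *ptep, uint64_t orig_pte,
                               bool write_fault)
{
	uint64_t new_pte = orig_pte | PTE_ACCESSED;

	if (write_fault)
		new_pte |= PTE_DIRTY;

	/* Returns false if *ptep no longer equals orig_pte. */
	return atomic_compare_exchange_strong(ptep, &orig_pte, new_pte);
}

int main(void)
{
	_Atomic uint64_t pte = 0x1000ull | 0x1;   /* present, no A/D bits */

	if (!set_accessed_dirty(&pte, atomic_load(&pte), true))
		puts("lost the race, re-read the PTE and retry");
	printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));
	return 0;
}
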
352 * Queue a page fault for injection if this assertion fails, as callers in FNAME()
353 * assume that walker.fault contains sane info on a walk failure. I.e. in FNAME()
380 nested_access, &walker->fault); in FNAME()
384 * instruction) triggers a nested page fault. The exit in FNAME()
386 * "guest page access" as the nested page fault's cause, in FNAME()
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
458 * On a write fault, fold the dirty bit into accessed_dirty. in FNAME()
481 walker->fault.vector = PF_VECTOR; in FNAME()
482 walker->fault.error_code_valid = true; in FNAME()
483 walker->fault.error_code = errcode; in FNAME()
500 walker->fault.exit_qualification = 0; in FNAME()
503 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE; in FNAME()
505 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ; in FNAME()
507 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_INSTR; in FNAME()
513 walker->fault.exit_qualification |= EPT_VIOLATION_RWX_TO_PROT(pte_access); in FNAME()
516 walker->fault.address = addr; in FNAME()
517 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
518 walker->fault.async_page_fault = false; in FNAME()
520 trace_kvm_mmu_walker_error(walker->fault.error_code); in FNAME()
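
The matches between lines 481 and 520 show the walker's error path filling walker->fault: the #PF vector, the error code, the faulting address, and, for nested (EPT) walks, an exit qualification describing the access. The sketch below follows only that shape; struct walk_fault, fill_walk_fault() and the EPT_ACC_* values are invented stand-ins, not the kernel's struct x86_exception or its constants.

/*
 * Sketch: package a failed guest page-table walk into a fault descriptor
 * the caller can later inject or report.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR     14
#define EPT_ACC_READ  (1u << 0)
#define EPT_ACC_WRITE (1u << 1)
#define EPT_ACC_INSTR (1u << 2)

struct walk_fault {
	uint8_t  vector;
	bool     error_code_valid;
	uint32_t error_code;
	uint64_t address;
	uint64_t exit_qualification;
	bool     nested_page_fault;
	bool     async_page_fault;
};

static void fill_walk_fault(struct walk_fault *f, uint64_t addr,
			    uint32_t errcode, bool is_ept, bool write,
			    bool fetch, bool nested)
{
	f->vector = PF_VECTOR;
	f->error_code_valid = true;
	f->error_code = errcode;
	f->address = addr;
	f->nested_page_fault = nested;
	f->async_page_fault = false;
	f->exit_qualification = 0;

	if (!is_ept)
		return;
	/* Record which kind of access tripped the violation. */
	if (write)
		f->exit_qualification |= EPT_ACC_WRITE;
	else if (fetch)
		f->exit_qualification |= EPT_ACC_INSTR;
	else
		f->exit_qualification |= EPT_ACC_READ;
}

int main(void)
{
	struct walk_fault f;

	fill_walk_fault(&f, 0xfeed1000, 0x2, true, true, false, true);
	printf("vector=%u errcode=%#x qual=%#llx\n", (unsigned)f.vector,
	       f.error_code, (unsigned long long)f.exit_qualification);
	return 0;
}
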
614 static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, in FNAME()
621 gfn_t base_gfn = fault->gfn; in FNAME()
644 * loading a dummy root and handling the resulting page fault, e.g. if in FNAME()
652 for_each_shadow_entry(vcpu, fault->addr, it) { in FNAME()
686 * write-protected or unsync, wasn't modified between the fault in FNAME()
700 if (fault->write && table_gfn == fault->gfn) in FNAME()
701 fault->write_fault_to_shadow_pgtable = true; in FNAME()
710 kvm_mmu_hugepage_adjust(vcpu, fault); in FNAME()
712 trace_kvm_mmu_spte_requested(fault); in FNAME()
719 if (fault->nx_huge_page_workaround_enabled) in FNAME()
720 disallowed_hugepage_adjust(fault, *it.sptep, it.level); in FNAME()
722 base_gfn = gfn_round_for_level(fault->gfn, it.level); in FNAME()
723 if (it.level == fault->goal_level) in FNAME()
734 if (fault->huge_page_disallowed) in FNAME()
736 fault->req_level >= it.level); in FNAME()
739 if (WARN_ON_ONCE(it.level != fault->goal_level)) in FNAME()
742 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access, in FNAME()
743 base_gfn, fault->pfn, fault); in FNAME()
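
The matches from line 614 onward come from the fetch step, which descends the shadow page tables from the root toward fault->goal_level, linking in intermediate tables and finally installing the leaf SPTE via mmu_set_spte(). The toy below mirrors only that descend-and-install shape with an invented struct table; the real fetch additionally handles huge-page adjustment, the NX huge-page workaround, and write-protected or unsync guest pages, as the matches show.

/*
 * Toy multi-level table: descend from the top level, allocating and
 * linking a child table per intermediate level, then install the leaf
 * entry at the goal level.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LEVELS      4
#define ENTRIES     512              /* 9 index bits per level */
#define PRESENT_BIT (1ull << 0)

struct table { uint64_t entry[ENTRIES]; struct table *child[ENTRIES]; };

static unsigned idx(uint64_t gfn, int level)
{
	return (gfn >> ((level - 1) * 9)) & (ENTRIES - 1);
}

static void fetch_mapping(struct table *root, uint64_t gfn, uint64_t pfn,
			  int goal_level)
{
	struct table *t = root;

	for (int level = LEVELS; level > goal_level; level--) {
		unsigned i = idx(gfn, level);

		if (!t->child[i]) {
			t->child[i] = calloc(1, sizeof(*t));
			t->entry[i] = PRESENT_BIT;   /* link the new table */
		}
		t = t->child[i];                     /* descend one level */
	}
	t->entry[idx(gfn, goal_level)] = (pfn << 12) | PRESENT_BIT;
}

int main(void)
{
	struct table root = {0};
	struct table *t = &root;

	fetch_mapping(&root, 0xabcde, 0x1234, 1);

	/* Walk back down to read the leaf we just installed. */
	for (int level = LEVELS; level > 1; level--)
		t = t->child[idx(0xabcde, level)];
	printf("leaf entry = %#llx\n",
	       (unsigned long long)t->entry[idx(0xabcde, 1)]);
	return 0;
}
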
752 * Page fault handler. There are several causes for a page fault:
759 * - normal guest page fault due to the guest pte marked not present, not
765 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) in FNAME()
770 WARN_ON_ONCE(fault->is_tdp); in FNAME()
774 * If PFEC.RSVD is set, this is a shadow page fault. in FNAME()
777 r = FNAME(walk_addr)(&walker, vcpu, fault->addr, in FNAME()
778 fault->error_code & ~PFERR_RSVD_MASK); in FNAME()
784 if (!fault->prefetch) in FNAME()
785 kvm_inject_emulated_page_fault(vcpu, &walker.fault); in FNAME()
790 fault->gfn = walker.gfn; in FNAME()
791 fault->max_level = walker.level; in FNAME()
792 fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); in FNAME()
794 if (page_fault_handle_page_track(vcpu, fault)) { in FNAME()
795 shadow_page_table_clear_flood(vcpu, fault->addr); in FNAME()
803 r = kvm_mmu_faultin_pfn(vcpu, fault, walker.pte_access); in FNAME()
811 if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && in FNAME()
812 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) { in FNAME()
829 if (is_page_fault_stale(vcpu, fault)) in FNAME()
835 r = FNAME(fetch)(vcpu, fault, &walker); in FNAME()
838 kvm_mmu_finish_page_fault(vcpu, fault, r); in FNAME()
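
The matches from line 752 onward outline the top-level handler: walk the guest page tables, inject the fault back into the guest if the walk fails (except for prefetch faults), emulate accesses to write-tracked gfns, resolve the backing pfn, and only then run fetch to build the shadow mapping. The skeleton below keeps that ordering; every helper is a stub invented so the sketch compiles and runs, and the RET_PF_* names merely echo the style of KVM's return codes rather than reproducing them.

/*
 * Control-flow sketch of a shadow-paging fault handler: the ordering of
 * the steps is taken from the listing above, nothing else.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { RET_PF_RETRY = 0, RET_PF_EMULATE, RET_PF_FIXED };

struct toy_fault { uint64_t addr; uint64_t gfn; bool write; bool prefetch; };
struct toy_walk  { uint64_t gfn; int level; bool ok; };

static bool walk_guest_tables(struct toy_walk *w, uint64_t addr)
{ w->gfn = addr >> 12; w->level = 1; w->ok = true; return w->ok; }
static void inject_guest_fault(uint64_t addr) { (void)addr; }
static bool gfn_is_write_tracked(uint64_t gfn) { (void)gfn; return false; }
static int  resolve_pfn(uint64_t gfn, uint64_t *pfn) { *pfn = gfn; return 0; }
static int  fetch_shadow_mapping(const struct toy_fault *f, uint64_t pfn)
{ (void)f; (void)pfn; return RET_PF_FIXED; }

static int handle_page_fault(struct toy_fault *f)
{
	struct toy_walk walker;
	uint64_t pfn;

	/* 1. Walk the guest page tables for the faulting address. */
	if (!walk_guest_tables(&walker, f->addr)) {
		/* 2. Walk failed: reflect the fault back into the guest
		 *    (skipped for prefetch faults, as in the listing). */
		if (!f->prefetch)
			inject_guest_fault(f->addr);
		return RET_PF_RETRY;
	}
	f->gfn = walker.gfn;

	/* 3. Write-tracked gfns are emulated rather than mapped. */
	if (gfn_is_write_tracked(f->gfn))
		return RET_PF_EMULATE;

	/* 4. Resolve the host pfn backing the guest frame. */
	if (resolve_pfn(f->gfn, &pfn))
		return RET_PF_RETRY;

	/* 5. Build the shadow mapping down to the final level. */
	return fetch_shadow_mapping(f, pfn);
}

int main(void)
{
	struct toy_fault f = { .addr = 0xdeadb000, .write = true };

	printf("result = %d\n", handle_page_fault(&f));
	return 0;
}
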
875 *exception = walker.fault; in FNAME()
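
The final match, at line 875, is from the address-translation path: when a software walk fails, the fault gathered by the walker is copied into the caller-supplied exception instead of being injected immediately, leaving the caller (typically the emulator) to decide if and when to inject it. A minimal sketch of that contract, with invented toy_* names:

/*
 * Sketch: translate a guest virtual address; on failure, hand the fault
 * details back to the caller through an out-parameter.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_GPA ((uint64_t)-1)

struct toy_exception { uint8_t vector; uint32_t error_code; uint64_t address; };
struct toy_walker    { uint64_t gpa; bool ok; struct toy_exception fault; };

static uint64_t toy_gva_to_gpa(struct toy_walker *walker,
			       struct toy_exception *exception)
{
	if (walker->ok)
		return walker->gpa;

	*exception = walker->fault;   /* caller decides when to inject */
	return INVALID_GPA;
}

int main(void)
{
	struct toy_walker w = { .ok = false,
				.fault = { .vector = 14, .error_code = 0x4,
					   .address = 0xcafe000 } };
	struct toy_exception e;

	if (toy_gva_to_gpa(&w, &e) == INVALID_GPA)
		printf("translation failed, vector %u at %#llx\n",
		       (unsigned)e.vector, (unsigned long long)e.address);
	return 0;
}
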