Lines Matching full:fault
92 struct x86_exception fault; member
249 ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault); in FNAME()
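The __try_cmpxchg_user() call at line 249 sets a guest PTE's accessed/dirty bits only if the PTE still holds the value the walker originally read, so a concurrent guest update forces a retry instead of being silently overwritten. Below is a minimal userspace sketch of that compare-and-exchange pattern using C11 atomics; the helper, the bit masks, and the example PTE are illustrative stand-ins, not the kernel's __try_cmpxchg_user(), which operates on guest memory through the user-access machinery.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define PT_ACCESSED_MASK (1ull << 5)   /* x86 PTE "accessed" bit */
#define PT_DIRTY_MASK    (1ull << 6)   /* x86 PTE "dirty" bit */

/*
 * Set the A (and, for a write, D) bit in *ptep only if it still contains
 * orig_pte; a concurrent update makes the exchange fail and the caller
 * retries the walk.
 */
static bool set_accessed_dirty(_Atomic uint64_t *ptep, uint64_t orig_pte,
                               bool write_fault)
{
    uint64_t new_pte = orig_pte | PT_ACCESSED_MASK;

    if (write_fault)
        new_pte |= PT_DIRTY_MASK;

    return atomic_compare_exchange_strong(ptep, &orig_pte, new_pte);
}

int main(void)
{
    _Atomic uint64_t pte = 0x00000000000a1007ull; /* present, writable PTE */
    uint64_t snapshot = pte;

    return set_accessed_dirty(&pte, snapshot, /*write_fault=*/true) ? 0 : 1;
}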
352 * Queue a page fault for injection if this assertion fails, as callers in FNAME()
353 * assume that walker.fault contains sane info on a walk failure. I.e. in FNAME()
380 nested_access, &walker->fault); in FNAME()
384 * instruction) triggers a nested page fault. The exit in FNAME()
386 * "guest page access" as the nested page fault's cause, in FNAME()
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
458 * On a write fault, fold the dirty bit into accessed_dirty. in FNAME()
481 walker->fault.vector = PF_VECTOR; in FNAME()
482 walker->fault.error_code_valid = true; in FNAME()
483 walker->fault.error_code = errcode; in FNAME()
500 walker->fault.exit_qualification = 0; in FNAME()
503 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE; in FNAME()
505 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ; in FNAME()
507 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_INSTR; in FNAME()
513 walker->fault.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) << in FNAME()
517 walker->fault.address = addr; in FNAME()
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
519 walker->fault.async_page_fault = false; in FNAME()
521 trace_kvm_mmu_walker_error(walker->fault.error_code); in FNAME()
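Lines 481-521 show the walker describing a failed guest-page-table walk in walker->fault: the #PF vector, an architectural error code, and, for a nested EPT walk, the access bits of an EPT-violation exit qualification. The sketch below composes those fields under the assumption that the PFERR_* masks follow the x86 #PF error-code layout and that the EPT_VIOLATION_ACC_* masks are bits 0-2 of the VMX exit qualification; the struct, helpers, and example values are illustrative, not KVM's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR 14

/* x86 #PF error-code bits (architectural layout). */
#define PFERR_PRESENT_MASK (1u << 0)
#define PFERR_WRITE_MASK   (1u << 1)
#define PFERR_USER_MASK    (1u << 2)
#define PFERR_FETCH_MASK   (1u << 4)

/* VMX EPT-violation exit-qualification access bits ([2:0]). */
#define EPT_VIOLATION_ACC_READ  (1u << 0)
#define EPT_VIOLATION_ACC_WRITE (1u << 1)
#define EPT_VIOLATION_ACC_INSTR (1u << 2)

/* Illustrative stand-in for the "struct x86_exception fault" member (line 92). */
struct walk_fault {
    uint8_t  vector;
    bool     error_code_valid;
    uint32_t error_code;
    uint64_t address;
    uint64_t exit_qualification;
    bool     nested_page_fault;
    bool     async_page_fault;
};

/* Compose the architectural #PF error code for the failed access. */
static uint32_t pf_error_code(bool present, bool write, bool user, bool fetch)
{
    return (present ? PFERR_PRESENT_MASK : 0) |
           (write   ? PFERR_WRITE_MASK   : 0) |
           (user    ? PFERR_USER_MASK    : 0) |
           (fetch   ? PFERR_FETCH_MASK   : 0);
}

/* Record the walk failure the way lines 481-519 do. */
static void fill_walk_fault(struct walk_fault *f, uint64_t addr,
                            uint32_t error_code, uint64_t exit_qual,
                            bool nested)
{
    f->vector = PF_VECTOR;
    f->error_code_valid = true;
    f->error_code = error_code;
    f->exit_qualification = exit_qual;
    f->address = addr;
    f->nested_page_fault = nested;
    f->async_page_fault = false;
}

int main(void)
{
    struct walk_fault fault;

    /* A supervisor write hit a non-present guest PTE: error code 0x2. */
    fill_walk_fault(&fault, 0xffffc90000000000ull,
                    pf_error_code(false, true, false, false), 0, false);
    printf("vector=%u error_code=%#x\n",
           (unsigned)fault.vector, fault.error_code);
    return 0;
}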
627 static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, in FNAME()
634 gfn_t base_gfn = fault->gfn; in FNAME()
657 * loading a dummy root and handling the resulting page fault, e.g. if in FNAME()
665 for_each_shadow_entry(vcpu, fault->addr, it) { in FNAME()
699 * write-protected or unsync, wasn't modified between the fault in FNAME()
713 if (fault->write && table_gfn == fault->gfn) in FNAME()
714 fault->write_fault_to_shadow_pgtable = true; in FNAME()
723 kvm_mmu_hugepage_adjust(vcpu, fault); in FNAME()
725 trace_kvm_mmu_spte_requested(fault); in FNAME()
732 if (fault->nx_huge_page_workaround_enabled) in FNAME()
733 disallowed_hugepage_adjust(fault, *it.sptep, it.level); in FNAME()
735 base_gfn = gfn_round_for_level(fault->gfn, it.level); in FNAME()
736 if (it.level == fault->goal_level) in FNAME()
747 if (fault->huge_page_disallowed) in FNAME()
749 fault->req_level >= it.level); in FNAME()
752 if (WARN_ON_ONCE(it.level != fault->goal_level)) in FNAME()
755 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access, in FNAME()
756 base_gfn, fault->pfn, fault); in FNAME()
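In the fetch path (lines 627-756), the walker descends the shadow page tables toward fault->goal_level and, at each level, rounds fault->gfn down to the first page of the region a mapping at that level would cover (line 735). Below is a standalone sketch of that rounding, assuming the usual x86 layout of nine gfn bits per paging level (4K at level 1, 2M at level 2, 1G at level 3); gfn_round_for_level() is reimplemented here purely for illustration.

#include <assert.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Number of 4K pages covered by one entry at @level: 9 gfn bits per level. */
static gfn_t pages_per_level(int level)
{
    return 1ull << ((level - 1) * 9);
}

/* Round a gfn down to the first page of the region mapped at @level. */
static gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
    return gfn & ~(pages_per_level(level) - 1);
}

int main(void)
{
    /* gfn 0x12345 falls inside the 2M region that starts at gfn 0x12200. */
    assert(gfn_round_for_level(0x12345, 1) == 0x12345);
    assert(gfn_round_for_level(0x12345, 2) == 0x12200);
    assert(gfn_round_for_level(0x12345, 3) == 0x00000);
    return 0;
}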
765 * Page fault handler. There are several causes for a page fault:
772 * - normal guest page fault due to the guest pte marked not present, not
778 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) in FNAME()
783 WARN_ON_ONCE(fault->is_tdp); in FNAME()
787 * If PFEC.RSVD is set, this is a shadow page fault. in FNAME()
790 r = FNAME(walk_addr)(&walker, vcpu, fault->addr, in FNAME()
791 fault->error_code & ~PFERR_RSVD_MASK); in FNAME()
797 if (!fault->prefetch) in FNAME()
798 kvm_inject_emulated_page_fault(vcpu, &walker.fault); in FNAME()
803 fault->gfn = walker.gfn; in FNAME()
804 fault->max_level = walker.level; in FNAME()
805 fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); in FNAME()
807 if (page_fault_handle_page_track(vcpu, fault)) { in FNAME()
808 shadow_page_table_clear_flood(vcpu, fault->addr); in FNAME()
816 r = kvm_faultin_pfn(vcpu, fault, walker.pte_access); in FNAME()
824 if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && in FNAME()
825 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) { in FNAME()
842 if (is_page_fault_stale(vcpu, fault)) in FNAME()
848 r = FNAME(fetch)(vcpu, fault, &walker); in FNAME()
852 kvm_release_pfn_clean(fault->pfn); in FNAME()
888 *exception = walker.fault; in FNAME()
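Line 888 is from the gva_to_gpa path: when the walk fails, the fault is handed back to the caller through an exception out-parameter rather than injected directly, and the caller decides whether to inject or retry. A small, hypothetical sketch of that contract follows; translate_gva(), the identity mapping, and the exception_info type are invented for illustration and are not KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_GPA (~0ull)

/* Illustrative stand-in for the exception reported back to the caller. */
struct exception_info {
    uint8_t  vector;
    uint32_t error_code;
    uint64_t address;
};

/*
 * Hypothetical translator: on success return the gpa, on failure return
 * INVALID_GPA and report the walk's fault through *exception, mirroring
 * the "*exception = walker.fault" snippet above.
 */
static uint64_t translate_gva(uint64_t gva, struct exception_info *exception)
{
    /* Pretend the guest maps nothing below 1 MiB. */
    if (gva < 0x100000) {
        exception->vector = 14;     /* #PF */
        exception->error_code = 0;  /* not present, read, supervisor */
        exception->address = gva;
        return INVALID_GPA;
    }
    return gva;                     /* identity map, for the sketch only */
}

int main(void)
{
    struct exception_info ex = {0};
    uint64_t gpa = translate_gva(0x1000, &ex);

    if (gpa == INVALID_GPA)
        printf("walk failed: vector=%u addr=%#llx\n",
               (unsigned)ex.vector, (unsigned long long)ex.address);
    return 0;
}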