Lines Matching full:fault

215 	 * Maximum page size that can be created for this fault; input to
252 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
258 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
259 * RET_PF_RETRY: let the CPU fault again on the address.
260 * RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
263 * RET_PF_INVALID: the SPTE is invalid, let the real page fault path update it.
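
The RET_PF_* codes above are produced by the fault handlers (for example kvm_tdp_page_fault()) and dispatched on by their callers. Below is a minimal, self-contained sketch of that dispatch pattern; the enum values and the handle_fault() helper are illustrative stand-ins for this sketch, not the kernel's actual definitions.

/* Illustrative user-space model of the RET_PF_* dispatch; the enum and
 * handle_fault() are stand-ins, not the kernel's definitions. */
#include <stdio.h>

enum pf_ret {
	RET_PF_CONTINUE,   /* so far so good, keep handling the fault */
	RET_PF_RETRY,      /* re-enter the guest and let it fault again */
	RET_PF_EMULATE,    /* MMIO fault, emulate the instruction */
	RET_PF_INVALID,    /* stale SPTE, take the full fault path */
};

/* Hypothetical handler standing in for kvm_tdp_page_fault() and friends. */
static enum pf_ret handle_fault(unsigned long gpa)
{
	return (gpa & 0xfff) ? RET_PF_EMULATE : RET_PF_RETRY;
}

int main(void)
{
	switch (handle_fault(0x1234)) {
	case RET_PF_RETRY:
		puts("resume the vCPU; the access will fault again");
		break;
	case RET_PF_EMULATE:
		puts("hand the instruction to the emulator");
		break;
	case RET_PF_INVALID:
		puts("fall back to the full page fault path");
		break;
	default:
		puts("continue handling the fault");
		break;
	}
	return 0;
}
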
286 struct kvm_page_fault *fault) in kvm_mmu_prepare_memory_fault_exit() argument
288 kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT, in kvm_mmu_prepare_memory_fault_exit()
289 PAGE_SIZE, fault->write, fault->exec, in kvm_mmu_prepare_memory_fault_exit()
290 fault->is_private); in kvm_mmu_prepare_memory_fault_exit()
297 struct kvm_page_fault fault = { in kvm_mmu_do_page_fault() local
321 fault.gfn = fault.addr >> PAGE_SHIFT; in kvm_mmu_do_page_fault()
322 fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn); in kvm_mmu_do_page_fault()
325 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp) in kvm_mmu_do_page_fault()
326 r = kvm_tdp_page_fault(vcpu, &fault); in kvm_mmu_do_page_fault()
328 r = vcpu->arch.mmu->page_fault(vcpu, &fault); in kvm_mmu_do_page_fault()
335 if (r == RET_PF_EMULATE && fault.is_private) { in kvm_mmu_do_page_fault()
337 kvm_mmu_prepare_memory_fault_exit(vcpu, &fault); in kvm_mmu_do_page_fault()
341 if (fault.write_fault_to_shadow_pgtable && emulation_type) in kvm_mmu_do_page_fault()
344 *level = fault.goal_level; in kvm_mmu_do_page_fault()
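
The kvm_mmu_do_page_fault() fragment above converts the faulting address to a gfn, resolves the memslot, dispatches to the TDP or legacy handler, and, when RET_PF_EMULATE is returned for a private fault, prepares a memory-fault exit instead of emulating. The address arithmetic it relies on (addr >> PAGE_SHIFT for the gfn, gfn << PAGE_SHIFT for the page-aligned gpa reported in the exit) is modeled in the short sketch below; the 4 KiB PAGE_SHIFT matches x86 but is otherwise an assumption of this sketch.

/* Self-contained model of the gfn/gpa conversions used above;
 * PAGE_SHIFT is assumed to be 12 (4 KiB pages), as on x86. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x12345678;          /* faulting guest physical address */
	unsigned long gfn  = addr >> PAGE_SHIFT;  /* guest frame number, as in fault.gfn */
	unsigned long gpa  = gfn << PAGE_SHIFT;   /* page-aligned gpa for the exit */

	printf("addr=%#lx gfn=%#lx gpa=%#lx size=%lu\n", addr, gfn, gpa, PAGE_SIZE);
	return 0;
}
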
352 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
353 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);