Searched refs:walk_mmu (Results 1 – 9 of 9) sorted by relevance
143 return vcpu->arch.walk_mmu->pdptrs[index]; in kvm_pdptr_read()
148 vcpu->arch.walk_mmu->pdptrs[index] = value; in kvm_pdptr_write()
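The two hits above are the PDPTE cache accessors: both simply read or write walk_mmu->pdptrs[]. A minimal sketch reconstructed from the matched lines; the signatures are assumptions, and the real helpers may also do register-cache bookkeeping (e.g. around VCPU_EXREG_PDPTR) that is omitted here:

```c
#include <linux/kvm_host.h>

/* Sketch of the PDPTR accessors implied by the hits above: the cached
 * PDPTEs always live in whichever MMU walk_mmu currently points at. */
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
```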
208 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; in mmu_is_nested()
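This single hit is effectively the whole helper: a vCPU is considered to be doing nested walks exactly when walk_mmu has been pointed at nested_mmu. Reconstructed from the matched line; the static inline wrapper is an assumption:

```c
#include <linux/kvm_host.h>

/* True when guest virtual addresses are being walked through the nested
 * (L2) MMU context rather than the vCPU's root MMU. */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
```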
993 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
1058 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
7582 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7592 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7604 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7613 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7646 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7705 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7811 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7821 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
[all …]
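Every hit in this group starts by grabbing vcpu->arch.walk_mmu, which is the common shape of KVM's linear-to-physical translation helpers: pick whichever MMU is the current walker, then hand the address to its gva_to_gpa callback. A hedged sketch of that shared pattern; the access-bit computation and the exact callback parameter list are assumptions and vary across kernel versions:

```c
#include <linux/kvm_host.h>

/* Sketch of the pattern shared by kvm_mmu_gva_to_gpa_read() and friends:
 * resolving through walk_mmu lets the same code serve both an ordinary L1
 * guest (root MMU) and a nested L2 guest (nested MMU). */
static gpa_t sketch_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
				    struct x86_exception *exception)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	u64 access = 0;	/* the real helpers OR in PFERR_USER_MASK, PFERR_WRITE_MASK, ... */

	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
```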
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
879 WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu); in FNAME()
6262 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); in kvm_mmu_invlpg()
6398 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
103 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_svm_init_mmu_context()
109 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_svm_uninit_mmu_context()
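These two SVM hits show the switching mechanism itself: setting up the nested MMU context points walk_mmu at nested_mmu, and tearing it down points it back at root_mmu (the nested VMX/EPT hits further down do the same thing). A hedged sketch of that pattern; everything other than the two matched assignments is an assumption, and the real init/uninit functions also set up the MMU callbacks around these lines:

```c
#include <linux/kvm_host.h>

/* When a nested guest's virtual addresses must be translated through L2's
 * paging context, the walker pointer is retargeted; on teardown it is
 * restored so L1's own page tables are walked again. */
static void sketch_nested_init_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void sketch_nested_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
```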
809 struct kvm_mmu *walk_mmu; member
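The lone header hit is the declaration itself: walk_mmu is a pointer inside the per-vCPU arch state, not an MMU of its own. A simplified sketch of the surrounding layout; member order and comments are assumptions, and only the walk_mmu pointer comes from the hit:

```c
#include <linux/kvm_host.h>

/* Assumed shape of the relevant part of struct kvm_vcpu_arch: the vCPU
 * owns concrete MMU contexts plus pointers that select among them. */
struct kvm_vcpu_arch_sketch {
	struct kvm_mmu *mmu;       /* MMU used to build shadow/TDP page tables */
	struct kvm_mmu *walk_mmu;  /* MMU used to walk guest virtual addresses */

	struct kvm_mmu root_mmu;   /* ordinary L1 context */
	struct kvm_mmu guest_mmu;  /* shadowing context for nested TDP */
	struct kvm_mmu nested_mmu; /* L2 walk context; walk_mmu's target when nested */
};
```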
484 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
490 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
3265 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vmx_ept_load_pdptrs()
3280 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
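These VMX hits are where the walker MMU's cached PDPTEs meet the hardware: when the guest uses PAE paging under EPT, the values in walk_mmu->pdptrs are written into the VMCS guest state before entry and read back after exit. A hedged sketch of the load half, written as if it lived inside KVM's VMX code where vmcs_write64() and the GUEST_PDPTRn fields are visible; the is_pae_paging() and register-dirty guards of the real function are assumptions and omitted:

```c
/* Sketch: propagate the walker MMU's cached PDPTEs into the VMCS so the
 * CPU sees the same PDPTEs the software walker used. */
static void sketch_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
	vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
	vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
	vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
}
```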