Searched refs:hw_mmu (Results 1 – 10 of 10) sorted by relevance
62 if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu)) in enter_vmid_context()
65 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
106 hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu; in flush_hyp_vcpu()
329 mmu = kern_hyp_va(vcpu->arch.hw_mmu); in __kvm_vcpu_run()
336 hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu; in init_pkvm_hyp_vcpu()
28 if (vcpu && mmu != vcpu->arch.hw_mmu) in enter_vmid_context()
29 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
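The two enter_vmid_context() clusters above (lines 62/65 and 28/29) show the same save-and-switch idea: when a TLB operation targets a stage-2 MMU other than the one the running vCPU has loaded (vcpu->arch.hw_mmu), the currently loaded MMU is stashed in the context so it can be restored afterwards. A minimal stand-alone sketch of that pattern follows; all names (s2_mmu_ctx, load_stage2, enter_ctx/exit_ctx) are hypothetical stand-ins, not the kernel's kvm_s2_mmu, __load_stage2() or tlb_inv_context.

```c
/*
 * Illustrative save/switch/restore pattern, assuming a single "currently
 * loaded" stage-2 MMU per CPU.  Names are stand-ins, not kernel identifiers.
 */
#include <stddef.h>

struct s2_mmu_ctx { unsigned long vmid; };

struct inv_ctx {
	struct s2_mmu_ctx *saved;	/* MMU to restore on exit, NULL if none */
};

static struct s2_mmu_ctx *loaded_mmu;	/* stands in for the per-CPU loaded stage-2 */

static void load_stage2(struct s2_mmu_ctx *mmu)
{
	loaded_mmu = mmu;		/* the real code programs VTTBR_EL2 here */
}

static void enter_ctx(struct s2_mmu_ctx *target, struct inv_ctx *cxt)
{
	/* Switch only when the target differs from what is currently loaded. */
	if (loaded_mmu && loaded_mmu != target) {
		cxt->saved = loaded_mmu;
		load_stage2(target);
	} else {
		cxt->saved = NULL;
	}
}

static void exit_ctx(struct inv_ctx *cxt)
{
	if (cxt->saved)
		load_stage2(cxt->saved);
}
```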
248 __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch); in kvm_vcpu_load_vhe()
680 if (vcpu->arch.hw_mmu) in kvm_vcpu_load_hw_mmu()
684 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_vcpu_load_hw_mmu()
687 vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu); in kvm_vcpu_load_hw_mmu()
702 if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu)) in kvm_vcpu_put_hw_mmu()
703 atomic_dec(&vcpu->arch.hw_mmu->refcnt); in kvm_vcpu_put_hw_mmu()
705 vcpu->arch.hw_mmu = NULL; in kvm_vcpu_put_hw_mmu()
1216 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; in check_nested_vcpu_requests()
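The nested-virtualisation hits above (kvm_vcpu_load_hw_mmu()/kvm_vcpu_put_hw_mmu()) sketch how hw_mmu is chosen: keep a previously loaded MMU if the vCPU still has one, otherwise use either the VM's canonical MMU or a nested shadow MMU obtained via get_s2_mmu_nested(), which is reference counted and dropped on put. A simplified, self-contained sketch of that lifecycle follows; my_vcpu, my_s2_mmu and pick_nested_mmu are hypothetical names, and the condition used to choose between the two MMUs is an assumption, not the kernel's actual test.

```c
/*
 * Sketch of the hw_mmu load/put lifecycle seen above.  Everything here is
 * a stand-in: my_s2_mmu is not kvm_s2_mmu, pick_nested_mmu() is not
 * get_s2_mmu_nested(), and the nested/canonical decision is simplified.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct my_s2_mmu {
	atomic_int refcnt;		/* nested MMUs are reference counted */
	bool nested;
};

struct my_vcpu {
	struct my_s2_mmu *hw_mmu;	/* stage-2 MMU the hardware will use */
	struct my_s2_mmu *canonical;	/* stands in for &vcpu->kvm->arch.mmu */
	bool needs_shadow_s2;		/* simplified "use a nested MMU" flag */
};

static struct my_s2_mmu shadow = { .nested = true };

/* Stand-in for get_s2_mmu_nested(): find or allocate a shadow stage-2 MMU. */
static struct my_s2_mmu *pick_nested_mmu(struct my_vcpu *vcpu)
{
	(void)vcpu;
	return &shadow;
}

static void my_vcpu_load_hw_mmu(struct my_vcpu *vcpu)
{
	if (vcpu->hw_mmu)		/* kept its MMU since the last put */
		return;

	if (!vcpu->needs_shadow_s2) {
		vcpu->hw_mmu = vcpu->canonical;
	} else {
		vcpu->hw_mmu = pick_nested_mmu(vcpu);
		atomic_fetch_add(&vcpu->hw_mmu->refcnt, 1);
	}
}

static void my_vcpu_put_hw_mmu(struct my_vcpu *vcpu)
{
	/* Only a shadow MMU holds a reference, mirroring lines 702-705 above. */
	if (vcpu->hw_mmu && vcpu->hw_mmu->nested)
		atomic_fetch_sub(&vcpu->hw_mmu->refcnt, 1);

	vcpu->hw_mmu = NULL;
}
```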
1463 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu)); in user_mem_abort()
1625 pgt = vcpu->arch.hw_mmu->pgt; in user_mem_abort()
1717 mmu = vcpu->arch.hw_mmu; in handle_access_fault()
1761 if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) { in kvm_handle_guest_abort()
1813 if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu) && in kvm_handle_guest_abort()
1814 vcpu->arch.hw_mmu->nested_stage2_enabled) { in kvm_handle_guest_abort()
1885 VM_BUG_ON(ipa >= kvm_phys_size(vcpu->arch.hw_mmu)); in kvm_handle_guest_abort()
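Among the fault-handling hits, line 1761 is a compact invariant worth spelling out: a faulting IPA is only valid if it falls below 1 << ia_bits of the stage-2 page-table attached to vcpu->arch.hw_mmu. A tiny illustration follows; ipa_in_range() is a hypothetical helper, not a kernel function.

```c
/*
 * The check at line 1761 in essence: BIT_ULL(ia_bits) is the first address
 * outside the stage-2 input-address space.  ipa_in_range() is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ipa_in_range(uint64_t fault_ipa, unsigned int ia_bits)
{
	return fault_ipa < (UINT64_C(1) << ia_bits);	/* BIT_ULL(ia_bits) */
}
/* Example: with ia_bits = 40, any IPA at or above 1 TiB is out of range. */
```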
484 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
584 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
1177 if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && in kvm_arch_vcpu_ioctl_run()
1179 __load_stage2(vcpu->arch.hw_mmu, in kvm_arch_vcpu_ioctl_run()
1180 vcpu->arch.hw_mmu->arch); in kvm_arch_vcpu_ioctl_run()
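The kvm_arch_vcpu_ioctl_run() hits (lines 1177-1180) capture another pattern: just before entering the guest, the VMID attached to hw_mmu is revalidated and, if it had to change while stage-2 was already loaded (the VHE case), stage-2 is reloaded so the new VMID takes effect. A hedged sketch follows; vmid_refresh(), reload_stage2() and running_vhe are stand-ins for the kernel's kvm_arm_vmid_update(), __load_stage2() and has_vhe(), and the allocation logic is deliberately trivial.

```c
/*
 * Sketch of the pre-entry VMID refresh at lines 1177-1180.  All names are
 * illustrative stand-ins, not the kernel's helpers.
 */
#include <stdbool.h>

struct vmid { unsigned long id; };
struct s2_mmu { struct vmid vmid; };

static bool running_vhe;		/* stage-2 stays loaded while in the kernel */
static unsigned long next_vmid = 1;

/* Returns true when a fresh VMID had to be allocated (here: only the first time). */
static bool vmid_refresh(struct vmid *vmid)
{
	if (vmid->id)
		return false;
	vmid->id = next_vmid++;
	return true;
}

static void reload_stage2(struct s2_mmu *mmu)
{
	(void)mmu;			/* the real code re-programs VTTBR_EL2/VTCR_EL2 */
}

static void pre_guest_entry(struct s2_mmu *hw_mmu)
{
	/* Reload only if the VMID changed and stage-2 is already in use (VHE). */
	if (vmid_refresh(&hw_mmu->vmid) && running_vhe)
		reload_stage2(hw_mmu);
}
```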
696 struct kvm_s2_mmu *hw_mmu; member