Searched refs:kern_hyp_va (Results 1 – 20 of 20) sorted by relevance
/linux-6.12.1/arch/arm64/kvm/hyp/nvhe/ |
D | hyp-main.c |
    79   has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));  in fpsimd_sve_sync()
    102  hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);  in flush_hyp_vcpu()
    113  hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);  in flush_hyp_vcpu()
    147  host_vcpu = kern_hyp_va(host_vcpu);  in handle___kvm_vcpu_run()
    164  host_kvm = kern_hyp_va(host_vcpu->kvm);  in handle___kvm_vcpu_run()
    191  __kvm_adjust_pc(kern_hyp_va(vcpu));  in handle___kvm_adjust_pc()
    205  __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);  in handle___kvm_tlb_flush_vmid_ipa()
    214  __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);  in handle___kvm_tlb_flush_vmid_ipa_nsh()
    224  __kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);  in handle___kvm_tlb_flush_vmid_range()
    231  __kvm_tlb_flush_vmid(kern_hyp_va(mmu));  in handle___kvm_tlb_flush_vmid()
    [all …]
|
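Note: every hyp-main.c match follows the same rule. Hypercall arguments arrive in the host context registers as host kernel VAs and must be translated before EL2 dereferences them. A minimal sketch of the pattern, modelled on the match at line 231 (DECLARE_REG and the handler shape follow hyp-main.c; treat this as illustrative, not a verbatim copy):

    static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
    {
            /* The host passed this pointer in its own address space. */
            DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

            /* Translate to the hypervisor's view before using it at EL2. */
            __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
    }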
D | switch.c |
    201  if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {  in kvm_hyp_save_fpsimd_host()
    329  mmu = kern_hyp_va(vcpu->arch.hw_mmu);  in __kvm_vcpu_run()
    330  __load_stage2(mmu, kern_hyp_va(mmu->arch));  in __kvm_vcpu_run()
|
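The two __kvm_vcpu_run() matches show a double translation worth calling out: vcpu->arch.hw_mmu is a host VA, and the arch pointer stored inside the translated structure is itself still a host-written kernel VA, so each hop through host-written pointers needs its own kern_hyp_va(). A sketch under that reading:

    /* First hop: the vcpu stores its stage-2 MMU as a host VA. */
    struct kvm_s2_mmu *mmu = kern_hyp_va(vcpu->arch.hw_mmu);

    /* Second hop: the arch pointer inside it is also host-written. */
    __load_stage2(mmu, kern_hyp_va(mmu->arch));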
D | tlb.c |
    113  __load_stage2(mmu, kern_hyp_va(mmu->arch));  in enter_vmid_context()
    131  __load_stage2(mmu, kern_hyp_va(mmu->arch));  in exit_vmid_context()
|
D | hyp-smp.c | 37 this_cpu_base = kern_hyp_va(cpu_base_array[cpu]); in __hyp_per_cpu_offset()
|
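For context, the surrounding function computes this CPU's per-CPU offset; the base array is filled by the host with kernel VAs, hence the translation. A hedged sketch (the declarations of cpu_base_array and __per_cpu_start are elided, and the bounds check from hyp-smp.c is omitted):

    unsigned long __hyp_per_cpu_offset(unsigned int cpu)
    {
            /* cpu_base_array[] holds host kernel VAs published at init. */
            unsigned long this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
            unsigned long elf_base = (unsigned long)&__per_cpu_start;

            /* Offset of this CPU's copy from the linked per-CPU section. */
            return this_cpu_base - elf_base;
    }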
D | timer-sr.c | 51 !kern_hyp_va(vcpu->kvm)->arch.timer_data.poffset) in __timer_enable_traps()
|
D | setup.c |
    82   start = kern_hyp_va(sve_state);  in pkvm_create_host_sve_mappings()
    139  start = (void *)kern_hyp_va(per_cpu_base[i]);  in recreate_hyp_mappings()
|
D | sys_regs.c | 99 const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); in get_pvm_id_aa64pfr1()
|
D | pkvm.c | 411 void *va = (void *)kern_hyp_va(host_va); in map_donated_memory_noclear()
|
/linux-6.12.1/arch/arm64/kvm/hyp/include/hyp/ |
D | sysreg-sr.h |
    50   return kvm_has_mte(kern_hyp_va(vcpu->kvm));  in ctxt_has_mte()
    61   return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);  in ctxt_has_s1pie()
    72   return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP);  in ctxt_has_tcrx()
    83   return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP);  in ctxt_has_s1poe()
|
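All four helpers share one shape: vcpu->kvm is stored as a host VA, so it is translated before the VM's feature bits are read. A sketch of the first match (the ctxt_to_vcpu() step is an assumption based on the surrounding header, not quoted from it):

    static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
    {
            struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);

            /* vcpu->kvm is a host VA; translate before reading VM state. */
            return kvm_has_mte(kern_hyp_va(vcpu->kvm));
    }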
D | switch.h |
    153  struct kvm *kvm = kern_hyp_va(vcpu->kvm);  in __activate_traps_hfgxtr()
    189  struct kvm *kvm = kern_hyp_va(vcpu->kvm);  in __deactivate_traps_hfgxtr()
    406  if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))  in kvm_hyp_handle_fpsimd()
    513  val -= *kern_hyp_va(ctxt->offset.vm_offset);  in kvm_hyp_handle_cntpct()
    515  val -= *kern_hyp_va(ctxt->offset.vcpu_offset);  in kvm_hyp_handle_cntpct()
|
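The kvm_hyp_handle_cntpct() matches differ from the rest: here kern_hyp_va() is applied to a pointer that is immediately dereferenced, because the vcpu's timer context stores host-VA pointers to the counter offsets rather than the offsets themselves. A sketch under that reading (ctxt stands for the timer context; the surrounding trap decoding is omitted):

    u64 val = read_sysreg(cntpct_el0);

    /* offset.vm_offset is a host-VA u64 pointer: translate, then load. */
    if (ctxt->offset.vm_offset)
            val -= *kern_hyp_va(ctxt->offset.vm_offset);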
D | debug-sr.h |
    141  guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);  in __debug_switch_to_guest_common()
    160  guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);  in __debug_switch_to_host_common()
|
/linux-6.12.1/arch/arm64/kvm/ |
D | fpsimd.c |
    65   *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);  in kvm_arch_vcpu_load_fp()
    66   *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);  in kvm_arch_vcpu_load_fp()
|
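Unlike the other results, these two lines run on the host at EL1: kern_hyp_va() is pure address arithmetic with no dereference, so the host can safely precompute the EL2 view of current's FP/SIMD state and hand the hypervisor pointers it can use directly. The matched lines, with that reading spelled out:

    /* Runs at EL1: compute (but do not touch) the EL2 VA of each object,
     * so nVHE hyp code can later dereference the stored pointers as-is. */
    *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
    *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);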
D | mmu.c |
    586   unsigned long start = kern_hyp_va((unsigned long)from);  in create_hyp_mappings()
    587   unsigned long end = kern_hyp_va((unsigned long)to);  in create_hyp_mappings()
    2022  kern_hyp_va(PAGE_OFFSET),  in kvm_mmu_init()
    2023  kern_hyp_va((unsigned long)high_memory - 1));  in kvm_mmu_init()
    2025  if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&  in kvm_mmu_init()
    2026  hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&  in kvm_mmu_init()
|
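create_hyp_mappings() shows the range form of the idiom: both endpoints of a kernel linear-map range are translated to the HYP VA range they must occupy, then mapped page by page; the kvm_mmu_init() matches use the same translation to check that the HYP idmap page does not collide with that range. A condensed sketch (example_share_with_hyp is a hypothetical name; __create_hyp_mappings() exists in mmu.c, but the loop body here is simplified):

    static int example_share_with_hyp(void *from, void *to,
                                      enum kvm_pgtable_prot prot)
    {
            /* Translate both endpoints into the HYP VA range. */
            unsigned long start = kern_hyp_va((unsigned long)from);
            unsigned long end = kern_hyp_va((unsigned long)to);
            unsigned long virt_addr;

            for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                    /* Walk the kernel range in lock-step to find each
                     * page's physical address. */
                    unsigned long phys = __pa(from + (virt_addr - start));
                    int err = __create_hyp_mappings(virt_addr, PAGE_SIZE,
                                                    phys, prot);
                    if (err)
                            return err;
            }

            return 0;
    }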
D | arm.c |
    1991  base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));  in kvm_init_vector_slots()
    1994  base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));  in kvm_init_vector_slots()
    2382  num_possible_cpus(), kern_hyp_va(per_cpu_base),  in do_pkvm_init()
    2492  kern_hyp_va(sve_state);  in finalize_init_hyp_mode()
    2500  kern_hyp_va(fpsimd_state);  in finalize_init_hyp_mode()
|
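The kvm_init_vector_slots() matches add one wrinkle: hyp symbols are linked into the kernel image, so their address is first resolved through the kernel alias (kvm_ksym_ref()) and only then converted to the VA EL2 will run them at. A sketch (kvm_init_vector_slot() and HYP_VECTOR_DIRECT are taken from arm.c as I read it, hedged):

    void *base;

    /* Resolve the hyp symbol via its kernel alias, then get its EL2 VA. */
    base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
    kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);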
/linux-6.12.1/arch/arm64/kvm/hyp/ |
D | vgic-v2-cpuif-proxy.c | 39 struct kvm *kvm = kern_hyp_va(vcpu->kvm); in __vgic_v2_perform_cpuif_access()
|
D | exception.c | 134 if (kvm_has_mte(kern_hyp_va(vcpu->kvm))) in enter_exception64()
|
D | vgic-v3-sr.c | 1123 if (kern_hyp_va(vcpu->kvm)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3) in __vgic_v3_perform_cpuif_access()
|
/linux-6.12.1/arch/arm64/include/asm/ |
D | kvm_mmu.h | 140 #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) macro
|
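The macro at line 140 is deliberately type-preserving: the typeof() cast means a pointer goes in and a pointer of the same type comes out, with the actual translation done by __kern_hyp_va() on the unsigned long representation (an instruction sequence rewritten at boot by kvm_update_va_mask()). Usage sketch, reusing expressions from the matches above:

    /* Same type in, same type out; only the address bits change. */
    struct kvm *kvm = kern_hyp_va(vcpu->kvm);
    u64 *vm_offset = kern_hyp_va(ctxt->offset.vm_offset);
    unsigned long start = kern_hyp_va((unsigned long)from);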
D | kvm_host.h | 926 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
|
/linux-6.12.1/Documentation/arch/arm64/ |
D | memory.rst | 93 random) offset from the linear mapping. See the kern_hyp_va macro and
|
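The memory.rst excerpt is the why behind everything above: in nVHE mode the hypervisor maps kernel pages at a fixed (and, with randomization enabled, random) offset from the linear mapping. A freestanding sketch of the transform, with va_mask and tag_val standing in for the values the kernel actually patches into the kern_hyp_va() instruction stream at boot (both names are hypothetical):

    #include <stdint.h>

    /* Illustrative only: keep the low bits of the kernel VA and replace
     * the top bits with the hypervisor's (possibly randomized) tag. */
    static uint64_t example_kern_hyp_va(uint64_t kernel_va,
                                        uint64_t va_mask, uint64_t tag_val)
    {
            return (kernel_va & va_mask) | tag_val;
    }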