/linux-6.12.1/arch/arm64/kvm/hyp/nvhe/ |
D | hyp-main.c |
     24  void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
    142  static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)  in handle___kvm_vcpu_run() argument
    144  DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);  in handle___kvm_vcpu_run()
    184  cpu_reg(host_ctxt, 1) = ret;  in handle___kvm_vcpu_run()
    187  static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)  in handle___kvm_adjust_pc() argument
    189  DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);  in handle___kvm_adjust_pc()
    194  static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)  in handle___kvm_flush_vm_context() argument
    199  static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)  in handle___kvm_tlb_flush_vmid_ipa() argument
    201  DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);  in handle___kvm_tlb_flush_vmid_ipa()
    202  DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);  in handle___kvm_tlb_flush_vmid_ipa()
    [all …]
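The hyp-main.c hits show the nVHE hypercall-handler convention: each handle___*() routine receives the host's saved kvm_cpu_context, pulls its arguments out of the saved general-purpose registers, and writes the result back into x1 for the host to read after the HVC returns. A minimal sketch of that pattern, with cpu_reg()/DECLARE_REG() shown as simplified stand-ins for the real helpers and handle___example()/do_example() as hypothetical names:

	/* Simplified stand-ins: cpu_reg() indexes the saved GPRs in the context,
	 * DECLARE_REG() casts one of them into a typed local. */
	#define cpu_reg(ctxt, r)	((ctxt)->regs.regs[(r)])
	#define DECLARE_REG(type, name, ctxt, reg) \
		type name = (type)cpu_reg(ctxt, (reg))

	/* Hypothetical handler following the hyp-main.c pattern. */
	static void handle___example(struct kvm_cpu_context *host_ctxt)
	{
		DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);	/* host's x1 */
		int ret;

		ret = do_example(host_vcpu);		/* hypothetical body */

		cpu_reg(host_ctxt, 1) = ret;		/* result travels back in x1 */
	}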
|
D | psci-relay.c |
     20  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
     72  static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)  in psci_forward() argument
     74  return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),  in psci_forward()
     75  cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));  in psci_forward()
    107  static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_cpu_on() argument
    109  DECLARE_REG(u64, mpidr, host_ctxt, 1);  in psci_cpu_on()
    110  DECLARE_REG(unsigned long, pc, host_ctxt, 2);  in psci_cpu_on()
    111  DECLARE_REG(unsigned long, r0, host_ctxt, 3);  in psci_cpu_on()
    151  static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_cpu_suspend() argument
    153  DECLARE_REG(u64, power_state, host_ctxt, 1);  in psci_cpu_suspend()
    [all …]
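psci-relay.c intercepts the host's PSCI SMCs at EL2: psci_forward() passes the host's x0..x3 straight through, while psci_cpu_on()/psci_cpu_suspend() decode their arguments so the hypervisor can substitute its own entry point before calling the firmware. The EL3 call itself is a thin SMCCC wrapper; its assumed shape is sketched below along with the forwarding path visible in the hits.

	#include <linux/arm-smccc.h>

	/* Assumed shape of psci_call(): issue the SMC with the host-supplied
	 * function ID and arguments, hand the firmware's x0 back to the caller. */
	static unsigned long psci_call(unsigned long fn, unsigned long arg0,
				       unsigned long arg1, unsigned long arg2)
	{
		struct arm_smccc_res res;

		arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);

		return res.a0;
	}

	/* Forward an unmodified PSCI call using the host's saved x0..x3. */
	static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
	{
		return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
				 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
	}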
|
D | switch.c |
    272  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run() local
    289  host_ctxt = host_data_ptr(host_ctxt);  in __kvm_vcpu_run()
    290  host_ctxt->__hyp_running_vcpu = vcpu;  in __kvm_vcpu_run()
    295  __sysreg_save_state_nvhe(host_ctxt);  in __kvm_vcpu_run()
    360  __sysreg_restore_state_nvhe(host_ctxt);  in __kvm_vcpu_run()
    379  host_ctxt->__hyp_running_vcpu = NULL;  in __kvm_vcpu_run()
    389  struct kvm_cpu_context *host_ctxt;  in hyp_panic() local
    392  host_ctxt = host_data_ptr(host_ctxt);  in hyp_panic()
    393  vcpu = host_ctxt->__hyp_running_vcpu;  in hyp_panic()
    399  __sysreg_restore_state_nvhe(host_ctxt);  in hyp_panic()
    [all …]
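The nVHE switch.c hits outline how host_ctxt brackets a guest run: it is fetched per-CPU, tagged with the vcpu about to run (hyp_panic() later reads __hyp_running_vcpu to learn what was on the CPU when things went wrong), the host's EL1 system registers are stashed in it before entering the guest, and restored from it afterwards. A heavily condensed sketch of that bracketing; trap configuration, VGIC/timer state, FPSIMD and the exit fixup loop are all omitted.

	/* Condensed sketch of the nVHE run path implied by the hits above
	 * (not the full __kvm_vcpu_run()). */
	int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
		struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
		u64 exit_code;

		host_ctxt->__hyp_running_vcpu = vcpu;	/* breadcrumb for hyp_panic() */

		__sysreg_save_state_nvhe(host_ctxt);	/* park the host's EL1 sysregs */
		__sysreg_restore_state_nvhe(guest_ctxt);

		exit_code = __guest_enter(vcpu);	/* run the guest until it traps */

		__sysreg_save_state_nvhe(guest_ctxt);
		__sysreg_restore_state_nvhe(host_ctxt);	/* back to the host's EL1 view */

		host_ctxt->__hyp_running_vcpu = NULL;

		return exit_code;
	}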
|
D | tlb.c |
     24  struct kvm_cpu_context *host_ctxt;  in enter_vmid_context() local
     27  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;  in enter_vmid_context()
     28  vcpu = host_ctxt->__hyp_running_vcpu;  in enter_vmid_context()
    121  struct kvm_cpu_context *host_ctxt;  in exit_vmid_context() local
    124  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;  in exit_vmid_context()
    125  vcpu = host_ctxt->__hyp_running_vcpu;  in exit_vmid_context()
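tlb.c reaches the same per-CPU context without the host_data_ptr() wrapper and consults __hyp_running_vcpu to decide whether a guest context is currently installed on this CPU, which determines whether the TLB maintenance needs a temporary VMID switch. A small sketch of that probe; guest_context_loaded() is a hypothetical name for the check.

	/* Hypothetical helper distilling the probe at the top of
	 * enter_vmid_context()/exit_vmid_context(). */
	static bool guest_context_loaded(void)
	{
		struct kvm_cpu_context *host_ctxt;
		struct kvm_vcpu *vcpu;

		host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		vcpu = host_ctxt->__hyp_running_vcpu;	/* non-NULL while a vcpu is loaded */

		return vcpu != NULL;
	}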
|
D | ffa.c |
    792  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)  in kvm_host_ffa_handler() argument
    819  if (!do_ffa_features(&res, host_ctxt))  in kvm_host_ffa_handler()
    824  do_ffa_rxtx_map(&res, host_ctxt);  in kvm_host_ffa_handler()
    827  do_ffa_rxtx_unmap(&res, host_ctxt);  in kvm_host_ffa_handler()
    831  do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);  in kvm_host_ffa_handler()
    834  do_ffa_mem_reclaim(&res, host_ctxt);  in kvm_host_ffa_handler()
    838  do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);  in kvm_host_ffa_handler()
    841  do_ffa_mem_frag_tx(&res, host_ctxt);  in kvm_host_ffa_handler()
    844  do_ffa_version(&res, host_ctxt);  in kvm_host_ffa_handler()
    847  do_ffa_part_get(&res, host_ctxt);  in kvm_host_ffa_handler()
    [all …]
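ffa.c is the pKVM FF-A proxy: kvm_host_ffa_handler() filters the host's FF-A SMCs, each do_ffa_*() helper reads its arguments out of host_ctxt and fills an arm_smccc_res, and that result is copied back into the host's x0..x3 before returning true (returning false lets the call fall through as a plain SMC). A reduced sketch of the dispatch shape, showing only two of the cases and none of the error handling:

	/* Reduced sketch of the FF-A dispatch; most cases and the error/unclaimed
	 * paths are left out. */
	bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
	{
		struct arm_smccc_res res;

		switch (func_id) {
		case FFA_FN64_RXTX_MAP:
			do_ffa_rxtx_map(&res, host_ctxt);
			break;
		case FFA_FN64_MEM_SHARE:
			do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
			break;
		default:
			return false;	/* not proxied here; forward as a plain SMC */
		}

		/* Whatever the helper produced goes back to the host in x0..x3. */
		cpu_reg(host_ctxt, 0) = res.a0;
		cpu_reg(host_ctxt, 1) = res.a1;
		cpu_reg(host_ctxt, 2) = res.a2;
		cpu_reg(host_ctxt, 3) = res.a3;

		return true;
	}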
|
D | setup.c |
    284  struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);  in __pkvm_init_finalise() local
    332  cpu_reg(host_ctxt, 1) = ret;  in __pkvm_init_finalise()
    334  __host_enter(host_ctxt);  in __pkvm_init_finalise()
|
D | mem_protect.c | 529 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) in handle_host_mem_abort() argument
|
/linux-6.12.1/arch/arm64/kvm/hyp/vhe/ |
D | sysreg-sr.c |
     68  struct kvm_cpu_context *host_ctxt;  in __vcpu_load_switch_sysregs() local
     70  host_ctxt = host_data_ptr(host_ctxt);  in __vcpu_load_switch_sysregs()
     71  __sysreg_save_user_state(host_ctxt);  in __vcpu_load_switch_sysregs()
    111  struct kvm_cpu_context *host_ctxt;  in __vcpu_put_switch_sysregs() local
    113  host_ctxt = host_data_ptr(host_ctxt);  in __vcpu_put_switch_sysregs()
    120  __sysreg_restore_user_state(host_ctxt);  in __vcpu_put_switch_sysregs()
|
D | switch.c |
    244  host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;  in kvm_vcpu_load_vhe()
    256  host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;  in kvm_vcpu_put_vhe()
    465  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run_vhe() local
    469  host_ctxt = host_data_ptr(host_ctxt);  in __kvm_vcpu_run_vhe()
    472  sysreg_save_host_state_vhe(host_ctxt);  in __kvm_vcpu_run_vhe()
    498  sysreg_restore_host_state_vhe(host_ctxt);  in __kvm_vcpu_run_vhe()
    545  struct kvm_cpu_context *host_ctxt;  in __hyp_call_panic() local
    548  host_ctxt = host_data_ptr(host_ctxt);  in __hyp_call_panic()
    549  vcpu = host_ctxt->__hyp_running_vcpu;  in __hyp_call_panic()
    552  sysreg_restore_host_state_vhe(host_ctxt);  in __hyp_call_panic()
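On VHE the expensive EL1 sysreg switch happens at vcpu load/put time (the sysreg-sr.c hits above), and kvm_vcpu_load_vhe()/kvm_vcpu_put_vhe() set and clear __hyp_running_vcpu there; each actual run is then only bracketed by the lighter host-state save/restore. A condensed sketch of that per-run bracketing; traps, the guest context and the exit loop are omitted.

	/* Condensed sketch of the VHE per-run bracketing implied by the hits above
	 * (not the full __kvm_vcpu_run_vhe()). */
	static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
		u64 exit_code;

		sysreg_save_host_state_vhe(host_ctxt);

		exit_code = __guest_enter(vcpu);	/* run until the next exit */

		sysreg_restore_host_state_vhe(host_ctxt);

		return exit_code;
	}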
|
/linux-6.12.1/arch/arm64/kvm/hyp/include/hyp/ |
D | debug-sr.h |
    130  struct kvm_cpu_context *host_ctxt;  in __debug_switch_to_guest_common() local
    138  host_ctxt = host_data_ptr(host_ctxt);  in __debug_switch_to_guest_common()
    143  __debug_save_state(host_dbg, host_ctxt);  in __debug_switch_to_guest_common()
    149  struct kvm_cpu_context *host_ctxt;  in __debug_switch_to_host_common() local
    157  host_ctxt = host_data_ptr(host_ctxt);  in __debug_switch_to_host_common()
    163  __debug_restore_state(host_dbg, host_ctxt);  in __debug_switch_to_host_common()
|
D | switch.h |
    152  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);  in __activate_traps_hfgxtr()
    188  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);  in __deactivate_traps_hfgxtr()
    223  hctxt = host_data_ptr(host_ctxt);  in __activate_traps_common()
    257  hctxt = host_data_ptr(host_ctxt);  in __deactivate_traps_common()
|
/linux-6.12.1/arch/arm64/include/asm/ |
D | kvm_hyp.h |
    119  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
    122  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
    131  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
|
D | kvm_asm.h | 286 void handle_trap(struct kvm_cpu_context *host_ctxt);
|
D | kvm_host.h | 604 struct kvm_cpu_context host_ctxt; member
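The kvm_host.h entry is where the above converges: host_ctxt is one field of the per-CPU struct kvm_host_data, and the host_data_ptr() accessor seen throughout these hits resolves to a pointer to a field of the current CPU's copy (under protected KVM it selects the hypervisor-private per-CPU data). A rough, simplified sketch of that relationship; the real struct carries more fields, the kernel declares the per-CPU variable with a hyp-aware macro, and the real accessor is more involved.

	/* Rough shape of the per-CPU container (simplified; the real struct also
	 * carries FP/debug state and flags). */
	struct kvm_host_data {
		struct kvm_cpu_context host_ctxt;	/* host GPRs + EL1 sysregs for this CPU */
		/* ... */
	};

	DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);	/* hyp-aware macro in the kernel */

	/* Simplified stand-in for host_data_ptr(): a pointer to the named field
	 * of this CPU's kvm_host_data. */
	#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)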
|
/linux-6.12.1/arch/arm64/kvm/hyp/include/nvhe/ |
D | ffa.h | 15 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
|
D | mem_protect.h | 78 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
|
/linux-6.12.1/arch/arm64/kvm/ |
D | pmu.c | 190 hctxt = host_data_ptr(host_ctxt); in kvm_set_pmuserenr()
|
D | arm.c |
    2131  kvm_init_host_cpu_context(host_data_ptr(host_ctxt));  in cpu_hyp_init_context()
    2498  fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;  in finalize_init_hyp_mode()
|
/linux-6.12.1/arch/arm64/kernel/ |
D | asm-offsets.c | 140 DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); in main()
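The asm-offsets.c hit is what lets the hyp entry assembly locate host_ctxt without knowing the C layout: DEFINE() embeds the field offset in the compiler's assembly output, which the build post-processes into a HOST_DATA_CONTEXT constant in the generated asm-offsets.h. Sketched below with DEFINE() shown in its usual kbuild form; treat it as illustrative rather than a verbatim copy of the kernel headers.

	#include <linux/stddef.h>	/* offsetof() */

	/* Usual shape of the kbuild DEFINE() helper: emit "->SYM value" into the
	 * asm output for the build scripts to turn into a #define. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	int main(void)
	{
		DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
		return 0;
	}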
|