/linux-6.12.1/arch/s390/kvm/

priv.c:

    /* Lazily enable the runtime-instrumentation facility for the guest. */
    static int handle_ri(struct kvm_vcpu *vcpu)
    {
        vcpu->stat.instruction_ri++;

        if (test_kvm_facility(vcpu->kvm, 64)) {
            VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
            vcpu->arch.sie_block->ecb3 |= ECB3_RI;
            kvm_s390_retry_instr(vcpu);
            return 0;
        } else
            return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
    }

    int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
    {
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
            return handle_ri(vcpu);
        ...

intercept.c:

    /* Return the length, in bytes, of the last intercepted instruction. */
    u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
    {
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
        u8 ilen = 0;

        switch (vcpu->arch.sie_block->icptcode) {
        ...
            /* instruction intercepts: decode the length from the first opcode byte */
            ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
        ...
            /* program interrupts: bits 1-2 of pgmilc already hold the ilc */
            ilen = vcpu->arch.sie_block->pgmilc & 0x6;
        ...
    }

    static int handle_stop(struct kvm_vcpu *vcpu)
    {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        ...
        vcpu->stat.exit_stop_request++;

        /* delay the stop while any non-stop irq is still pending */
        if (kvm_s390_vcpu_has_irq(vcpu, 1))
            return 0;
        ...
        stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
        ...

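The interesting step above is insn_length(ipa >> 8): on s390 the two high bits of an instruction's first opcode byte encode its total length (00 is 2 bytes, 01/10 is 4, 11 is 6). Below is a minimal user-space sketch of that decoding rule; the formula is my reconstruction of the architectural convention, not a copy of the kernel's insn_length() helper, which lives elsewhere in the tree.

    #include <stdio.h>

    /* The top two bits of the first opcode byte select 2-, 4-, or 6-byte insns. */
    static int insn_length(unsigned char code)
    {
        return ((((int)code + 64) >> 7) + 1) << 1;
    }

    int main(void)
    {
        printf("opcode 0x07 -> %d bytes\n", insn_length(0x07)); /* BR: 2 */
        printf("opcode 0x58 -> %d bytes\n", insn_length(0x58)); /* L:  4 */
        printf("opcode 0xe3 -> %d bytes\n", insn_length(0xe3)); /* LG: 6 */
        return 0;
    }
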
diag.c:

    /* DIAGNOSE 0x10: the guest asks the host to discard a range of its pages. */
    static int diag_release_pages(struct kvm_vcpu *vcpu)
    {
        unsigned long start, end;
        unsigned long prefix = kvm_s390_get_prefix(vcpu);

        /* the two nibbles of the instruction's ipa field name the operand GPRs */
        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
        vcpu->stat.instruction_diagnose_10++;

        ...
            return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
        ...
            /* fast path: the range does not touch the prefix area */
            gmap_discard(vcpu->arch.gmap, start, end);
        ...
            /* slow path: split the discard around the prefix pages */
            gmap_discard(vcpu->arch.gmap, start, prefix);
        ...
            gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
        ...

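diag_release_pages() never sees explicit operands: the SIE block's ipa field carries the first two bytes of the intercepted instruction, and the two nibbles of its low byte name the GPRs holding the start and end addresses. A small stand-alone model of that decode; the ipa value and register contents are made up for illustration.

    #include <stdio.h>

    int main(void)
    {
        unsigned long gprs[16] = { 0 };
        unsigned short ipa = 0x835a;  /* hypothetical DIAG, register fields 5 and 10 */

        gprs[5]  = 0x2000;            /* first page to release */
        gprs[10] = 0x5000;            /* last page to release  */

        unsigned long start = gprs[(ipa & 0xf0) >> 4];
        unsigned long end   = gprs[ipa & 0x0f] + 4096; /* + PAGE_SIZE */

        printf("release guest pages [%#lx, %#lx)\n", start, end);
        return 0;
    }
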
guestdbg.c:

    /* Mirror the guest's hardware breakpoints into control registers 9-11. */
    static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
    {
        unsigned long start, len;
        u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
        u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
        u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
        int i;

        if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
            vcpu->arch.guestdbg.hw_bp_info == NULL)
            return;
        ...
        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
            start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
            len = vcpu->arch.guestdbg.hw_bp_info[i].len;
        ...
    }

    static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
    ...

/linux-6.12.1/arch/riscv/kvm/

vcpu.c:

    static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
        bool loaded;
        ...
        /* if the vCPU is currently loaded on a physical CPU, put it first */
        loaded = (vcpu->cpu != -1);
        if (loaded)
            kvm_arch_vcpu_put(vcpu);

        vcpu->arch.last_exit_cpu = -1;
        ...
        /* copy the reset context over the live one under the reset lock */
        spin_lock(&vcpu->arch.reset_cntx_lock);
        memcpy(cntx, reset_cntx, sizeof(*cntx));
        spin_unlock(&vcpu->arch.reset_cntx_lock);
        ...

/linux-6.12.1/arch/arm64/kvm/

debug.c:

    /* Preserve the guest's MDSCR_EL1 and SS bit while userspace debugging runs. */
    static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
    {
        u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

        vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

        trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
                                 vcpu->arch.guest_debug_preserved.mdscr_el1);

        vcpu->arch.guest_debug_preserved.pstate_ss =
                                 (*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
    }

    static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
    {
        u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

        vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

        trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
                                 vcpu_read_sys_reg(vcpu, MDSCR_EL1));
    }
    ...

inject_fault.c:

    /* Decide whether a synchronous exception targets virtual EL1 or EL2. */
    static void pend_sync_exception(struct kvm_vcpu *vcpu)
    {
        /* If not nesting, EL1 is the only possible exception target */
        if (likely(!vcpu_has_nv(vcpu))) {
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
            return;
        }
        ...
        switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
        case PSR_MODE_EL2h:
        case PSR_MODE_EL2t:
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
            break;
        case PSR_MODE_EL1h:
        case PSR_MODE_EL1t:
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
            break;
        case PSR_MODE_EL0t:
            /* from EL0, HCR_EL2.TGE routes the exception to EL2 */
            if (vcpu_el2_tge_is_set(vcpu))
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
            else
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
            break;
        ...
        }
    }

    static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
    ...

handle_exit.c:

    static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
    {
        ...
        kvm_inject_vabt(vcpu);
    }

    static int handle_hvc(struct kvm_vcpu *vcpu)
    {
        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
        vcpu->stat.hvc_exit_stat++;

        /* forward HVC to the virtual EL2 when the guest runs its own hypervisor */
        if (vcpu_has_nv(vcpu)) {
            /* the guest hypervisor disabled HVC via HCR_EL2.HCD */
            if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
                kvm_inject_undefined(vcpu);
            else
                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
        ...

/linux-6.12.1/arch/powerpc/kvm/

book3s_emulate.c:

    /* May the guest access an SPR of the given privilege level? */
    static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
    {
        /* PAPR VMs only touch supervisor-level SPRs */
        if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
            return false;

        /* problem state (MSR_PR set) is limited to problem-level SPRs */
        if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
            return false;
        ...
    }

    /* Checkpoint the live register state into the transactional-memory copies. */
    static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
    {
        memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
               sizeof(vcpu->arch.gpr_tm));
        memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
               sizeof(struct thread_fp_state));
        memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
               sizeof(struct thread_vr_state));
        vcpu->arch.ppr_tm = vcpu->arch.ppr;
        vcpu->arch.dscr_tm = vcpu->arch.dscr;
        ...

booke.c:

    /* Dump the vCPU register state to the kernel log. */
    void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
    {
        int i;

        printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
               vcpu->arch.shared->msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
               vcpu->arch.regs.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
               vcpu->arch.shared->srr1);
        ...
        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        /* four GPRs per line */
        for (i = 0; i < 32; i += 4) {
            printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                   kvmppc_get_gpr(vcpu, i),
                   kvmppc_get_gpr(vcpu, i+1),
                   kvmppc_get_gpr(vcpu, i+2),
                   kvmppc_get_gpr(vcpu, i+3));
        }
    }
    ...

booke_emulate.c:

    /* rfi: return from interrupt, reloading PC and MSR from SRR0/SRR1. */
    static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
    {
        vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
        kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
    }

    /* rfdi: return from debug interrupt via DSRR0/DSRR1. */
    static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
    {
        vcpu->arch.regs.nip = vcpu->arch.dsrr0;
        kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
    }

    /* rfci: return from critical interrupt via CSRR0/CSRR1. */
    static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
    {
        vcpu->arch.regs.nip = vcpu->arch.csrr0;
        kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
    }

    int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
    ...

book3s_pr.c:

    static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                                 ulong msr);
    static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
    ...
    /* "Split real mode": instruction relocation off, data relocation on. */
    static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
    {
        ulong msr = kvmppc_get_msr(vcpu);
        return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
    }

    static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
    {
        ulong msr = kvmppc_get_msr(vcpu);
        ulong pc = kvmppc_get_pc(vcpu);
        ...
        /* don't fix up a guest we have already patched */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
            return;
        ...
        vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
    }
    ...

emulate_loadstore.c:

    /* If the guest has FP disabled, queue an FP-unavailable interrupt instead. */
    static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
    {
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
            kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
            return true;
        }

        return false;
    }

    static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
    {
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
            kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
            return true;
        }

        return false;
    }

    static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
    {
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
            kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
            return true;
        }

        return false;
    }
    ...
    int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
    ...

book3s_hv_tm.c:

    /* Build TFIAR/TEXASR to reflect a failed transaction. */
    static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
    {
        u64 texasr, tfiar;
        u64 msr = vcpu->arch.shregs.msr;

        tfiar = vcpu->arch.regs.nip & ~0x3ull;
        ...
        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
            texasr |= TEXASR_SUSP;
        ...
        vcpu->arch.tfiar = tfiar;
        /* preserve the low (ROT/TL) fields of the existing TEXASR */
        vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
    }
    ...
    int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
    {
        u32 instr = vcpu->arch.emul_inst;
        u64 msr = vcpu->arch.shregs.msr;
        ...
        /*
         * The TM softpatch interrupt sets NIP to the instruction following
         * the TM instruction, so back it up 4 bytes.
         */
        vcpu->arch.regs.nip -= 4;
        ...

book3s.c:

    static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                                                 ulong pending_now, ulong old_pending)
    {
        /* HV guests get interrupts delivered directly; nothing to track */
        if (is_kvmppc_hv_enabled(vcpu->kvm))
            return;
        if (pending_now)
            kvmppc_set_int_pending(vcpu, 1);
        else if (old_pending)
            kvmppc_set_int_pending(vcpu, 0);
    }

    /* Is the guest inside a critical section (shared-page crit == r1)? */
    static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
    {
        ulong crit_raw;
        ulong crit_r1;
        bool crit;

        if (is_kvmppc_hv_enabled(vcpu->kvm))
            return false;

        crit_raw = kvmppc_get_critical(vcpu);
        crit_r1 = kvmppc_get_gpr(vcpu, 1);

        /* truncate the indicators in 32-bit mode */
        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
            crit_raw &= 0xffffffff;
            crit_r1 &= 0xffffffff;
        }

        /* critical section when crit == r1 ... */
        crit = (crit_raw == crit_r1);
        /* ... and only while in supervisor mode */
        crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
        ...

/linux-6.12.1/arch/arm64/include/asm/

kvm_emulate.h:

    bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
    void kvm_skip_instr32(struct kvm_vcpu *vcpu);

    void kvm_inject_undefined(struct kvm_vcpu *vcpu);
    void kvm_inject_vabt(struct kvm_vcpu *vcpu);
    void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
    void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
    void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

    void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

    void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
    int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
    ...

/linux-6.12.1/arch/mips/kvm/

emulate.c:

    /* Work out the PC to resume at when an exception hit a branch delay slot. */
    static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
                                      unsigned long *out)
    {
        ...
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        ...
        /* read the branch instruction preceding the delay slot */
        err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
        ...
    }

    enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
    {
        ...
        /* Cause.BD set: the fault was in a delay slot, re-resolve the branch */
        err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
                                     &vcpu->arch.pc);
        ...
        /* otherwise simply step over the emulated instruction */
        vcpu->arch.pc += 4;
        ...
        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
        ...
    }

    /* Fetch the faulting instruction word saved from CP0 BadInstr at entry. */
    int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
    {
        ...
        *out = vcpu->arch.host_cp0_badinstr;
        ...

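update_pc() has to cope with MIPS branch delay slots: if Cause.BD is set, the faulting instruction sat in a delay slot, so the resume PC must be recomputed from the branch at EPC; otherwise the PC just steps forward one 4-byte instruction. A user-space model of the two paths, with the branch decoding stubbed out (in the kernel that work is done by kvm_compute_return_epc(), which decodes the actual branch):

    #include <stdio.h>

    #define CAUSEF_BD (1u << 31)  /* Cause register "branch delay" bit */

    /* Stand-in for kvm_compute_return_epc(): pretend every branch jumps +0x40. */
    static int compute_return_epc(unsigned long *pc)
    {
        *pc += 0x40;
        return 0;
    }

    static int update_pc(unsigned long *pc, unsigned int cause)
    {
        if (cause & CAUSEF_BD)
            return compute_return_epc(pc); /* fault was in a delay slot */
        *pc += 4;                          /* ordinary case: step over the insn */
        return 0;
    }

    int main(void)
    {
        unsigned long pc = 0x1000;

        update_pc(&pc, 0);          /* no delay slot */
        printf("pc = %#lx\n", pc);  /* 0x1004 */

        update_pc(&pc, CAUSEF_BD);  /* delay slot: follow the branch */
        printf("pc = %#lx\n", pc);  /* 0x1044 */
        return 0;
    }
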
/linux-6.12.1/arch/x86/kvm/vmx/

x86_ops.h:

    int vmx_vcpu_create(struct kvm_vcpu *vcpu);
    int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
    fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
    void vmx_vcpu_free(struct kvm_vcpu *vcpu);
    void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
    void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
    void vmx_vcpu_put(struct kvm_vcpu *vcpu);
    int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
    void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
    int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
    ...

/linux-6.12.1/arch/arm64/kvm/hyp/vhe/

switch.c:

    /* Compute the HCR_EL2 value to run the guest with. */
    static u64 __compute_hcr(struct kvm_vcpu *vcpu)
    {
        u64 hcr = vcpu->arch.hcr_el2;

        if (!vcpu_has_nv(vcpu))
            return hcr;

        if (is_hyp_ctxt(vcpu)) {
            ...
            if (!vcpu_el2_e2h_is_set(vcpu))
                hcr |= HCR_NV1;

            write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
        }

        /* fold in the guest hypervisor's HCR_EL2 bits, minus the excluded ones */
        return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
    }

    static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
    {
        ...
        if (vcpu_has_sve(vcpu))
        ...
        __activate_traps_fpsimd32(vcpu);
        ...

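The return statement of __compute_hcr() is a three-way bit merge: the bits KVM itself needs, plus whatever the guest hypervisor wrote into its virtual HCR_EL2, minus the bits KVM refuses to delegate. A toy model of that merge, with invented bit positions rather than real HCR_EL2 fields:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1ull << (n))

    int main(void)
    {
        uint64_t host_hcr  = BIT(0) | BIT(3);          /* bits KVM always sets     */
        uint64_t guest_hcr = BIT(1) | BIT(3) | BIT(7); /* guest's virtual HCR_EL2  */
        uint64_t excluded  = BIT(7);                   /* never guest-controllable */

        uint64_t hcr = host_hcr | (guest_hcr & ~excluded);
        printf("effective HCR: %#llx\n", (unsigned long long)hcr); /* 0xb */
        return 0;
    }
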
/linux-6.12.1/arch/x86/kvm/

hyperv.h:

    static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
    {
        return vcpu->arch.hyperv;
    }

    static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        return &hv_vcpu->synic;
    }

    static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
    {
        ...
        return hv_vcpu->vcpu;
    }

    static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
    {
        return &vcpu->kvm->arch.hyperv.hv_syndbg;
    }

    static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        /* fall back to the vCPU index when Hyper-V state was never set up */
        return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
    }
    ...

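kvm_hv_get_vpindex() illustrates the header's NULL-tolerant accessor convention: vcpu->arch.hyperv stays NULL until the guest actually enables Hyper-V emulation, so the VP index falls back to the plain vCPU index. A mock-up of the pattern with deliberately simplified structures (not the kernel's real layout):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct vcpu_hv { uint32_t vp_index; };
    struct vcpu {
        uint32_t vcpu_idx;
        struct vcpu_hv *hyperv;  /* NULL until Hyper-V emulation is enabled */
    };

    static uint32_t get_vpindex(struct vcpu *v)
    {
        return v->hyperv ? v->hyperv->vp_index : v->vcpu_idx;
    }

    int main(void)
    {
        struct vcpu_hv hv = { .vp_index = 7 };
        struct vcpu a = { .vcpu_idx = 0, .hyperv = &hv };
        struct vcpu b = { .vcpu_idx = 1, .hyperv = NULL };

        printf("a -> %u, b -> %u\n", (unsigned)get_vpindex(&a),
               (unsigned)get_vpindex(&b)); /* 7, 1 */
        return 0;
    }
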
x86.c:

    #define emul_to_vcpu(ctxt) \
        ((struct kvm_vcpu *)(ctxt)->vcpu)
    ...
    static void update_cr8_intercept(struct kvm_vcpu *vcpu);
    static void process_nmi(struct kvm_vcpu *vcpu);
    static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
    static void store_regs(struct kvm_vcpu *vcpu);
    static int sync_regs(struct kvm_vcpu *vcpu);
    static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

    static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
    static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
    ...
    typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
                                bool host_initiated);
    ...

kvm_cache_regs.h:

    #define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                  \
    static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
    {                                                                              \
        return vcpu->arch.regs[VCPU_REGS_##uname];                                 \
    }                                                                              \
    static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,         \
                                                    unsigned long val)             \
    {                                                                              \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;                                  \
    }
    ...
    static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                                 enum kvm_reg reg)
    {
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
    }

    static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
                                             enum kvm_reg reg)
    {
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
    }

    static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                                                   enum kvm_reg reg)
    {
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
    }
    ...

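BUILD_KVM_GPR_ACCESSORS stamps out one read/write pair per register through token pasting, so a single invocation per GPR replaces a page of hand-written accessors. A compilable sketch of the same trick with a mocked-up vcpu struct; the kernel's real enum values, struct layout, and macro name arguments differ:

    #include <stdio.h>

    enum reg { REGS_RAX, REGS_RBX, NR_REGS };
    struct vcpu { unsigned long regs[NR_REGS]; };

    /* lname##_read / lname##_write are generated by pasting the register name. */
    #define BUILD_GPR_ACCESSORS(lname, uname)                              \
    static inline unsigned long lname##_read(struct vcpu *vcpu)            \
    {                                                                      \
        return vcpu->regs[REGS_##uname];                                   \
    }                                                                      \
    static inline void lname##_write(struct vcpu *vcpu, unsigned long val) \
    {                                                                      \
        vcpu->regs[REGS_##uname] = val;                                    \
    }

    BUILD_GPR_ACCESSORS(rax, RAX)
    BUILD_GPR_ACCESSORS(rbx, RBX)

    int main(void)
    {
        struct vcpu v = { { 0 } };

        rax_write(&v, 42);
        printf("rax = %lu\n", rax_read(&v)); /* 42 */
        return 0;
    }
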
/linux-6.12.1/arch/loongarch/kvm/

vcpu.c:

    /* Save the host's PMU state into the per-CPU guest context. */
    static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
    {
        struct kvm_context *context;

        context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
        ...
    }

    static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
    {
        struct kvm_context *context;

        context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
        ...
    }

    static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
    {
        struct loongarch_csrs *csr = vcpu->arch.csr;
        ...
    }

    static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
    {
        struct loongarch_csrs *csr = vcpu->arch.csr;
        ...
    }

    static int kvm_own_pmu(struct kvm_vcpu *vcpu)
    {
        ...
        if (!kvm_guest_has_pmu(&vcpu->arch))
            return -EINVAL;
        ...

/linux-6.12.1/arch/arm64/kvm/hyp/include/hyp/

switch.h:

    /* For 32-bit guests, FPEXC32_EL2 must be saved alongside the FP state. */
    static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
    {
        if (!vcpu_el1_is_32bit(vcpu))
            return;

        __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
    }

    static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
    {
        ...
        if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
        ...
    }

    #define compute_clr_set(vcpu, reg, clr, set)                          \
        do {                                                              \
            u64 hfg;                                                      \
            hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0;        \
            ...
    ...
    #define compute_undef_clr_set(vcpu, kvm, reg, clr, set)               \
        ...
    #define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set)          \
        ...
        if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))                      \
        ...

/linux-6.12.1/arch/powerpc/include/asm/

kvm_book3s.h:

    extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
    extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
    extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
    extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
    extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
    extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
    extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
    extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                                   bool iswrite);
    extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
    extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
    ...