Lines matching full:vcpu — arch/loongarch/kvm/vcpu.c

  19         STATS_DESC_COUNTER(VCPU, int_exits),
  20         STATS_DESC_COUNTER(VCPU, idle_exits),
  21         STATS_DESC_COUNTER(VCPU, cpucfg_exits),
  22         STATS_DESC_COUNTER(VCPU, signal_exits),
  23         STATS_DESC_COUNTER(VCPU, hypercall_exits)
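The five entries above are per-vCPU exit counters: each STATS_DESC_COUNTER(VCPU, name) descriptor binds a name to a u64 field of the arch's struct kvm_vcpu_stat by offset, so generic KVM code can walk the table and export every counter without knowing the struct layout. A minimal standalone model of that pattern (struct and table names here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the arch-specific stats structure. */
struct vcpu_stat {
        uint64_t int_exits;
        uint64_t idle_exits;
        uint64_t cpucfg_exits;
        uint64_t signal_exits;
        uint64_t hypercall_exits;
};

/* A descriptor records only a name and the field's offset. */
struct stat_desc {
        const char *name;
        size_t offset;
};

#define STAT_COUNTER(field) { #field, offsetof(struct vcpu_stat, field) }

static const struct stat_desc descs[] = {
        STAT_COUNTER(int_exits),
        STAT_COUNTER(idle_exits),
        STAT_COUNTER(cpucfg_exits),
        STAT_COUNTER(signal_exits),
        STAT_COUNTER(hypercall_exits),
};

int main(void)
{
        struct vcpu_stat st = { .int_exits = 3, .idle_exits = 7 };

        /* Generic code can dump every counter through the table alone. */
        for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
                printf("%s = %llu\n", descs[i].name,
                       (unsigned long long)*(const uint64_t *)
                                ((const char *)&st + descs[i].offset));
        return 0;
}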
  35 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
  39         context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
  50 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
  54         context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
  66 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
  68         struct loongarch_csrs *csr = vcpu->arch.csr;
  80 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
  82         struct loongarch_csrs *csr = vcpu->arch.csr;
  94 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
  98         if (!kvm_guest_has_pmu(&vcpu->arch))
 101         kvm_save_host_pmu(vcpu);
 105         val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
 108         kvm_restore_guest_pmu(vcpu);
 113 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
 116         struct loongarch_csrs *csr = vcpu->arch.csr;
 118         if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
 121         kvm_save_guest_pmu(vcpu);
 136         vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
 138         kvm_restore_host_pmu(vcpu);
 141 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
 143         if (vcpu->arch.aux_inuse & KVM_LARCH_PMU)
 144                 kvm_make_request(KVM_REQ_PMU, vcpu);
 147 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
 149         if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
 150                 kvm_own_pmu(vcpu);
 151                 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
 155 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 164         ghc = &vcpu->arch.st.cache;
 165         gpa = vcpu->arch.st.guest_addr;
 170         slots = kvm_memslots(vcpu->kvm);
 172         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
 188         steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
 189         vcpu->arch.st.last_steal = current->sched_info.run_delay;
 196         mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
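kvm_update_stolen_time() adds the growth of current->sched_info.run_delay since the last update to a steal-time record shared with the guest through a gfn_to_hva_cache, then marks the backing page dirty. Paravirt steal-time records are conventionally published with a version counter that is odd while an update is in flight, so the guest can detect a torn read; a compilable sketch of that convention (the field layout is illustrative, not necessarily LoongArch's exact struct kvm_steal_time):

#include <stdint.h>

struct steal_time {
        uint64_t steal;    /* accumulated steal time, in ns */
        uint32_t version;  /* even = stable, odd = update in progress */
        uint32_t flags;
};

static void publish_steal(struct steal_time *st,
                          uint64_t *last_run_delay, uint64_t run_delay)
{
        st->version += 1;             /* now odd: update in progress */
        __sync_synchronize();         /* order the version bump first */

        st->steal += run_delay - *last_run_delay; /* add the new delta */
        *last_run_delay = run_delay;

        __sync_synchronize();
        st->version += 1;             /* even again: record is stable */
}

int main(void)
{
        struct steal_time st = { 0 };
        uint64_t last = 0;

        publish_steal(&st, &last, 1000); /* vCPU waited 1000 ns for a pCPU */
        return st.version == 2 ? 0 : 1;
}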
 200  * kvm_check_requests - check and handle pending vCPU requests
 205 static int kvm_check_requests(struct kvm_vcpu *vcpu)
 207         if (!kvm_request_pending(vcpu))
 210         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 211                 vcpu->arch.vpid = 0;    /* Drop vpid for this vCPU */
 213         if (kvm_dirty_ring_check_request(vcpu))
 216         if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
 217                 kvm_update_stolen_time(vcpu);
 222 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
 225         if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
 226                 if (vcpu->arch.flush_gpa != INVALID_GPA) {
 227                         kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
 228                         vcpu->arch.flush_gpa = INVALID_GPA;
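Both functions above drain request bits with kvm_check_request(), which atomically tests and clears a bit that kvm_make_request() set from another context, so each request runs exactly once before the next guest entry. A toy userspace model of the mechanism (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { REQ_TLB_FLUSH, REQ_STEAL_UPDATE };

static atomic_uint requests;

static void make_request(int req)
{
        atomic_fetch_or(&requests, 1u << req);
}

static bool check_request(int req)
{
        unsigned int bit = 1u << req;

        /* Atomically clear the bit and report whether it was set. */
        return atomic_fetch_and(&requests, ~bit) & bit;
}

int main(void)
{
        make_request(REQ_STEAL_UPDATE);

        if (check_request(REQ_TLB_FLUSH))
                puts("flush TLB");          /* not pending: skipped */
        if (check_request(REQ_STEAL_UPDATE))
                puts("update stolen time"); /* pending: runs once */
        if (check_request(REQ_STEAL_UPDATE))
                puts("never printed");      /* already consumed */
        return 0;
}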
 233  * Check and handle pending signals and vCPU requests, etc.
 241 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
 248         ret = xfer_to_guest_mode_handle_work(vcpu);
 252         ret = kvm_check_requests(vcpu);
 263 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
 268         ret = kvm_enter_guest_check(vcpu);
 273          * Handle the vCPU timer and interrupts, check pending requests,
 274          * and check the VMID before the vCPU enters the guest.
 277         kvm_deliver_intr(vcpu);
 278         kvm_deliver_exception(vcpu);
 279         /* Make sure the vCPU mode has been written */
 280         smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 281         kvm_check_vpid(vcpu);
 282         kvm_check_pmu(vcpu);
 289         kvm_late_check_requests(vcpu);
 290         vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
 292         vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
 294         if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
 295                 /* Make sure the vCPU mode has been written */
 296                 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
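The two smp_store_mb() calls above form the guest-entry handshake: publish IN_GUEST_MODE with a full barrier, then re-check for pending requests or work, and roll back to OUTSIDE_GUEST_MODE if anything slipped in. A kicker does the mirror image, setting a request first and then reading vcpu->mode, which is why kvm_arch_vcpu_should_kick() (source line 355) only IPIs vCPUs seen in guest mode. A simplified single-file model of the ordering:

#include <stdatomic.h>
#include <stdbool.h>

enum mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct vcpu {
        _Atomic enum mode mode;
        atomic_uint requests;
};

static bool try_enter_guest(struct vcpu *v)
{
        /* Equivalent of smp_store_mb(): store, then full barrier. */
        atomic_store(&v->mode, IN_GUEST_MODE);
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&v->requests)) {
                /* Lost the race with a kick: back out and retry later. */
                atomic_store(&v->mode, OUTSIDE_GUEST_MODE);
                return false;
        }
        return true; /* safe to enter the guest */
}

static bool kick_needs_ipi(struct vcpu *v)
{
        atomic_fetch_or(&v->requests, 1);
        atomic_thread_fence(memory_order_seq_cst);

        /* Only a vCPU already in guest mode needs an IPI to exit. */
        return atomic_load(&v->mode) == IN_GUEST_MODE;
}

int main(void)
{
        struct vcpu v = { OUTSIDE_GUEST_MODE, 0 };

        kick_needs_ipi(&v);                 /* request lands before entry */
        return try_enter_guest(&v) ? 1 : 0; /* entry must back out -> 0 */
}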
 308 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 311         unsigned long estat = vcpu->arch.host_estat;
 315         vcpu->mode = OUTSIDE_GUEST_MODE;
 320         kvm_lose_pmu(vcpu);
 326         trace_kvm_exit(vcpu, ecode);
 328         ret = kvm_handle_fault(vcpu, ecode);
 331         ++vcpu->stat.int_exits;
 335         ret = kvm_pre_enter_guest(vcpu);
 344         trace_kvm_reenter(vcpu);
 349 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 351         return !!(vcpu->arch.irq_pending) &&
 352                 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
 355 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 357         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 360 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 365 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 370 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 376 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 382         ret = kvm_pending_timer(vcpu) ||
 389 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
 393         kvm_debug("vCPU Register Dump:\n");
 394         kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
 395         kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
 399                           vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
 400                           vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
 412 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 415         *mp_state = vcpu->arch.mp_state;
 420 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 427                 vcpu->arch.mp_state = *mp_state;
 436 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 443                 vcpu->guest_debug = dbg->control;
 445                 vcpu->guest_debug = 0;
 450 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
 454         struct loongarch_csrs *csr = vcpu->arch.csr;
 459         map = vcpu->kvm->arch.phyid_map;
 462         spin_lock(&vcpu->kvm->arch.phyid_map_lock);
 466                 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
 474                 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
 480                 if (vcpu == map->phys_map[val].vcpu) {
 481                         spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
 486                  * New CPUID is already in use by another vCPU
 489                 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
 495         map->phys_map[val].vcpu = vcpu;
 496         spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
 501 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
 505         struct loongarch_csrs *csr = vcpu->arch.csr;
 507         map = vcpu->kvm->arch.phyid_map;
 513         spin_lock(&vcpu->kvm->arch.phyid_map_lock);
 515         map->phys_map[cpuid].vcpu = NULL;
 519         spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
 533         return map->phys_map[cpuid].vcpu;        /* in kvm_get_vcpu_by_cpuid() */
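kvm_set_cpuid() and kvm_drop_cpuid() above maintain a map from guest physical CPU id to owning vCPU under phyid_map_lock, so an id can be claimed by at most one vCPU and kvm_get_vcpu_by_cpuid() can resolve it back to a vCPU. A simplified userspace model of the claim/release/lookup logic (bounds and types illustrative):

#include <pthread.h>
#include <stddef.h>

#define MAX_PHYID 256

struct vcpu;

static struct {
        pthread_mutex_t lock;
        struct vcpu *phys_map[MAX_PHYID];
} phyid_map = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Returns 0 on success, -1 if the id is out of range or already taken. */
int set_cpuid(struct vcpu *vcpu, unsigned long val)
{
        int ret = 0;

        if (val >= MAX_PHYID)
                return -1;

        pthread_mutex_lock(&phyid_map.lock);
        if (!phyid_map.phys_map[val] || phyid_map.phys_map[val] == vcpu)
                phyid_map.phys_map[val] = vcpu; /* claim (idempotent) */
        else
                ret = -1;                       /* owned by another vCPU */
        pthread_mutex_unlock(&phyid_map.lock);
        return ret;
}

struct vcpu *get_vcpu_by_cpuid(unsigned long cpuid)
{
        return cpuid < MAX_PHYID ? phyid_map.phys_map[cpuid] : NULL;
}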
 536 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
 539         struct loongarch_csrs *csr = vcpu->arch.csr;
 546                 vcpu_load(vcpu);
 551                 kvm_deliver_intr(vcpu);
 552                 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
 553                 vcpu_put(vcpu);
 571 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
 574         struct loongarch_csrs *csr = vcpu->arch.csr;
 580                 return kvm_set_cpuid(vcpu, val);
 596          * After the vCPU's PMU CSR register value has been modified,
 608                 kvm_make_request(KVM_REQ_PMU, vcpu);
 725 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
 734                 ret = _kvm_getcsr(vcpu, id, v);
 739                         *v = vcpu->arch.cpucfg[id];
 744                 if (!kvm_guest_has_lbt(&vcpu->arch))
 749                         *v = vcpu->arch.lbt.scr0;
 752                         *v = vcpu->arch.lbt.scr1;
 755                         *v = vcpu->arch.lbt.scr2;
 758                         *v = vcpu->arch.lbt.scr3;
 761                         *v = vcpu->arch.lbt.eflags;
 764                         *v = vcpu->arch.fpu.ftop;
 774                         *v = drdtime() + vcpu->kvm->arch.time_offset;
 792 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 799         ret = kvm_get_one_reg(vcpu, reg, &v);
 812 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
 821                 ret = _kvm_setcsr(vcpu, id, v);
 828                         vcpu->arch.cpucfg[id] = (u32)v;
 830                                 vcpu->arch.max_pmu_csrid =
 831                                         LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
 834                 if (!kvm_guest_has_lbt(&vcpu->arch))
 839                         vcpu->arch.lbt.scr0 = v;
 842                         vcpu->arch.lbt.scr1 = v;
 845                         vcpu->arch.lbt.scr2 = v;
 848                         vcpu->arch.lbt.scr3 = v;
 851                         vcpu->arch.lbt.eflags = v;
 854                         vcpu->arch.fpu.ftop = v;
 865                          * gftoffset is board-wide, not per-vCPU
 868                         if (vcpu->vcpu_id == 0)
 869                                 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
 872                 vcpu->arch.st.guest_addr = 0;
 873                 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
 874                 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
 889 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 904         return kvm_set_one_reg(vcpu, reg, v);
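kvm_get_reg()/kvm_set_reg() above service the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls: userspace passes a struct kvm_one_reg whose id selects a register and whose addr points at a u64 in its own address space. A minimal userspace wrapper (LoongArch register ids, e.g. for CSRs or the guest counter, come from the uapi <asm/kvm.h>; error handling trimmed):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

int get_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = id,
                .addr = (uint64_t)(unsigned long)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

int set_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
        struct kvm_one_reg reg = {
                .id   = id,
                .addr = (uint64_t)(unsigned long)&val,
        };

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}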
 907 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 912 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 917 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 921         for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
 922                 regs->gpr[i] = vcpu->arch.gprs[i];
 924         regs->pc = vcpu->arch.pc;
 929 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 933         for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
 934                 vcpu->arch.gprs[i] = regs->gpr[i];
 936         vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
 937         vcpu->arch.pc = regs->pc;
 942 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 949 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
 965 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
 968         if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
 975 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
 982                 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
 985                 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
 994 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1008                 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1019 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1025         if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1029         gpa = vcpu->arch.st.guest_addr;
1036 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1043                 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1046                 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1055 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1060         struct kvm *kvm = vcpu->kvm;
1082 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1087         struct kvm *kvm = vcpu->kvm;
1089         if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1100                 vcpu->arch.st.guest_addr = gpa;
1111         vcpu->arch.st.guest_addr = gpa;
1112         vcpu->arch.st.last_steal = current->sched_info.run_delay;
1113         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1119 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1126                 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1129                 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1144         struct kvm_vcpu *vcpu = filp->private_data;        /* in kvm_arch_vcpu_ioctl() */
1150          * should be used. Since the CSR registers are owned by this vCPU, if we switch
1154          * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check
1167                 r = kvm_set_reg(vcpu, &reg);
1168                 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1170                 r = kvm_get_reg(vcpu, &reg);
1179                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1186                 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1193                 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1200                 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1211 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1215         fpu->fcc = vcpu->arch.fpu.fcc;
1216         fpu->fcsr = vcpu->arch.fpu.fcsr;
1218                 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1223 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1227         vcpu->arch.fpu.fcc = fpu->fcc;
1228         vcpu->arch.fpu.fcsr = fpu->fcsr;
1230                 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1236 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1238         if (!kvm_guest_has_lbt(&vcpu->arch))
1243         _restore_lbt(&vcpu->arch.lbt);
1244         vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1250 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1253         if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1254                 _save_lbt(&vcpu->arch.lbt);
1256                 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1261 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1268                 kvm_own_lbt(vcpu);
1271 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1273         if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1274                 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1276                 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1280 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1281 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1282 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1286 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1294         kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1297         kvm_restore_fpu(&vcpu->arch.fpu);
1298         vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1299         trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1306 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1308         if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1314         kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1316         switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1322                 _restore_lsx_upper(&vcpu->arch.fpu);
1328                 kvm_restore_lsx(&vcpu->arch.fpu);
1332         trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1333         vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1342 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1344         if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1349         kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1351         switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1355                 _restore_lasx_upper(&vcpu->arch.fpu);
1359                 _restore_lsx_upper(&vcpu->arch.fpu);
1360                 _restore_lasx_upper(&vcpu->arch.fpu);
1364                 kvm_restore_lasx(&vcpu->arch.fpu);
1368         trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1369         vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1377 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1381         kvm_check_fcsr_alive(vcpu);
1382         if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1383                 kvm_save_lasx(&vcpu->arch.fpu);
1384                 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1385                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1389         } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1390                 kvm_save_lsx(&vcpu->arch.fpu);
1391                 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1392                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1396         } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1397                 kvm_save_fpu(&vcpu->arch.fpu);
1398                 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1399                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1404         kvm_lose_lbt(vcpu);
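kvm_own_fpu/lsx/lasx() and kvm_lose_fpu() above implement lazy context switching: aux_inuse records which guest register state is currently live in hardware, owning LASX implies LSX and FPU (the wider unit contains the narrower registers), and losing state saves only the widest live set. A toy model of the flag logic (flag names mirror the kernel's; the save/restore bodies are stubbed out):

#include <stdio.h>

#define LARCH_FPU  (1u << 0)
#define LARCH_LSX  (1u << 1)
#define LARCH_LASX (1u << 2)

static unsigned int aux_inuse;

static void own_lasx(void)
{
        /* ...restore the full 256-bit register file here... */
        aux_inuse |= LARCH_LASX | LARCH_LSX | LARCH_FPU;
}

static void lose_fpu(void)
{
        /* One save of the widest live state covers the narrower ones. */
        if (aux_inuse & LARCH_LASX)
                puts("save lasx (covers lsx + fpu)");
        else if (aux_inuse & LARCH_LSX)
                puts("save lsx (covers fpu)");
        else if (aux_inuse & LARCH_FPU)
                puts("save fpu");
        aux_inuse &= ~(LARCH_LASX | LARCH_LSX | LARCH_FPU);
}

int main(void)
{
        own_lasx();
        lose_fpu(); /* prints a single "save lasx" line */
        return 0;
}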
1409 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1414                 kvm_queue_irq(vcpu, intr);
1416                 kvm_dequeue_irq(vcpu, -intr);
1422         kvm_vcpu_kick(vcpu);
1431         struct kvm_vcpu *vcpu = filp->private_data;        /* in kvm_arch_vcpu_async_ioctl() */
1439                 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1441                 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
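kvm_vcpu_ioctl_interrupt() above is reached through the KVM_INTERRUPT ioctl dispatched by kvm_arch_vcpu_async_ioctl(): a positive irq number queues the interrupt, a negative one dequeues it (the handler passes -intr to kvm_dequeue_irq()). A minimal userspace sketch:

#include <linux/kvm.h>
#include <sys/ioctl.h>

int assert_irq(int vcpu_fd, int irq)
{
        struct kvm_interrupt intr = { .irq = irq };

        return ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
}

int deassert_irq(int vcpu_fd, int irq)
{
        struct kvm_interrupt intr = { .irq = -irq }; /* negative = clear */

        return ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
}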
1452 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1457         vcpu->arch.vpid = 0;
1458         vcpu->arch.flush_gpa = INVALID_GPA;
1460         hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1461         vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
1463         vcpu->arch.handle_exit = kvm_handle_exit;
1464         vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1465         vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1466         if (!vcpu->arch.csr)
1473         vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1476         vcpu->arch.last_sched_cpu = -1;
1482         kvm_init_timer(vcpu, timer_hz);
1485         csr = vcpu->arch.csr;
1489         kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1498 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1502 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1507         hrtimer_cancel(&vcpu->arch.swtimer);
1508         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1509         kvm_drop_cpuid(vcpu);
1510         kfree(vcpu->arch.csr);
1513          * If the vCPU is freed and reused as another vCPU, we don't want the
1517         context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1518         if (context->last_vcpu == vcpu)
1523 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1527         struct loongarch_csrs *csr = vcpu->arch.csr;
1533         migrated = (vcpu->arch.last_sched_cpu != cpu);
1536          * Was this the last vCPU to run on this CPU?
1537          * If not, any old guest state from this vCPU will have been clobbered.
1539         context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1540         if (migrated || (context->last_vcpu != vcpu))
1541                 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1542         context->last_vcpu = vcpu;
1545         kvm_restore_timer(vcpu);
1549         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1552         kvm_restore_pmu(vcpu);
1555         if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1558         write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1611          * prevents an SC on the next vCPU from succeeding by matching an LL on
1612          * the previous vCPU.
1614         if (vcpu->kvm->created_vcpus > 1)
1617         vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
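_kvm_vcpu_load() above decides whether guest CSR state already loaded in hardware can be reused: only if this vCPU did not migrate and was also the last vCPU to run on this physical CPU (tracked through context->last_vcpu). A simplified model of that check:

#include <stdbool.h>

#define NR_CPUS      64
#define HWCSR_USABLE (1u << 0)

struct vcpu {
        int last_sched_cpu;
        unsigned int aux_inuse;
};

static struct vcpu *last_vcpu[NR_CPUS]; /* one slot per physical CPU */

void vcpu_load(struct vcpu *v, int cpu)
{
        bool migrated = (v->last_sched_cpu != cpu);

        /* Hardware state was clobbered if another vCPU ran here, or if
         * this vCPU moved to a different physical CPU. */
        if (migrated || last_vcpu[cpu] != v)
                v->aux_inuse &= ~HWCSR_USABLE;
        last_vcpu[cpu] = v;

        if (!(v->aux_inuse & HWCSR_USABLE)) {
                /* ...reload guest CSRs into hardware here... */
                v->aux_inuse |= HWCSR_USABLE;
        }
}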
1622 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1628         _kvm_vcpu_load(vcpu, cpu);
1632 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1634         struct loongarch_csrs *csr = vcpu->arch.csr;
1636         kvm_lose_fpu(vcpu);
1644         if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1695         vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1698         kvm_save_timer(vcpu);
1705 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1712         vcpu->arch.last_sched_cpu = cpu;
1715         _kvm_vcpu_put(vcpu, cpu);
1719 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1722         struct kvm_run *run = vcpu->run;
1724         if (vcpu->mmio_needed) {
1725                 if (!vcpu->mmio_is_write)
1726                         kvm_complete_mmio_read(vcpu, run);
1727                 vcpu->mmio_needed = 0;
1732                 kvm_complete_iocsr_read(vcpu, run);
1735         if (!vcpu->wants_to_run)
1741         vcpu_load(vcpu);
1742         kvm_sigset_activate(vcpu);
1743         r = kvm_pre_enter_guest(vcpu);
1749                 trace_kvm_enter(vcpu);
1750                 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1752                 trace_kvm_out(vcpu);
1759         kvm_sigset_deactivate(vcpu);
1760         vcpu_put(vcpu);
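kvm_arch_vcpu_ioctl_run() above first completes any MMIO or IOCSR read left pending from the previous exit, then loops through kvm_pre_enter_guest() and enter_guest(). From userspace this is the standard KVM_RUN loop over the mmap'ed struct kvm_run; an illustrative skeleton (vm/vcpu setup and most exit reasons omitted):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

void run_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                        perror("KVM_RUN");
                        return;
                }

                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        /* Emulate the access; for a read, fill run->mmio.data
                         * so the kernel completes the instruction on the next
                         * KVM_RUN (see kvm_complete_mmio_read above). */
                        break;
                case KVM_EXIT_INTR:
                        continue; /* interrupted by a signal; just re-enter */
                default:
                        fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
                        return;
                }
        }
}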