Lines Matching +full:num +full:- +full:guest +full:- +full:ids

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
7 #include <linux/entry-kvm.h>
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
40 context->perf_cntr[0] = read_csr_perfcntr0(); in kvm_save_host_pmu()
41 context->perf_cntr[1] = read_csr_perfcntr1(); in kvm_save_host_pmu()
42 context->perf_cntr[2] = read_csr_perfcntr2(); in kvm_save_host_pmu()
43 context->perf_cntr[3] = read_csr_perfcntr3(); in kvm_save_host_pmu()
44 context->perf_ctrl[0] = write_csr_perfctrl0(0); in kvm_save_host_pmu()
45 context->perf_ctrl[1] = write_csr_perfctrl1(0); in kvm_save_host_pmu()
46 context->perf_ctrl[2] = write_csr_perfctrl2(0); in kvm_save_host_pmu()
47 context->perf_ctrl[3] = write_csr_perfctrl3(0); in kvm_save_host_pmu()
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
55 write_csr_perfcntr0(context->perf_cntr[0]); in kvm_restore_host_pmu()
56 write_csr_perfcntr1(context->perf_cntr[1]); in kvm_restore_host_pmu()
57 write_csr_perfcntr2(context->perf_cntr[2]); in kvm_restore_host_pmu()
58 write_csr_perfcntr3(context->perf_cntr[3]); in kvm_restore_host_pmu()
59 write_csr_perfctrl0(context->perf_ctrl[0]); in kvm_restore_host_pmu()
60 write_csr_perfctrl1(context->perf_ctrl[1]); in kvm_restore_host_pmu()
61 write_csr_perfctrl2(context->perf_ctrl[2]); in kvm_restore_host_pmu()
62 write_csr_perfctrl3(context->perf_ctrl[3]); in kvm_restore_host_pmu()
68 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu()
82 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu()
98 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
99 return -EINVAL; in kvm_own_pmu()
103 /* Set PM0-PM(num) to guest */ in kvm_own_pmu()
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
116 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu()
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
123 /* Disable pmu access from guest */ in kvm_lose_pmu()
127 * Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when in kvm_lose_pmu()
128 * exiting the guest, so that the next trap into the guest need not deal with the PMU CSR context. in kvm_lose_pmu()
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_restore_pmu()
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
164 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
165 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
170 slots = kvm_memslots(vcpu->kvm); in kvm_update_stolen_time()
171 if (slots->generation != ghc->generation || gpa != ghc->gpa) { in kvm_update_stolen_time()
172 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { in kvm_update_stolen_time()
173 ghc->gpa = INVALID_GPA; in kvm_update_stolen_time()
178 st = (struct kvm_steal_time __user *)ghc->hva; in kvm_update_stolen_time()
179 unsafe_get_user(version, &st->version, out); in kvm_update_stolen_time()
184 unsafe_put_user(version, &st->version, out); in kvm_update_stolen_time()
187 unsafe_get_user(steal, &st->steal, out); in kvm_update_stolen_time()
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
189 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
190 unsafe_put_user(steal, &st->steal, out); in kvm_update_stolen_time()
194 unsafe_put_user(version, &st->version, out); in kvm_update_stolen_time()
196 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_update_stolen_time()
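
The kvm_update_stolen_time() lines above (this listing appears to come from the LoongArch KVM vCPU code, arch/loongarch/kvm/vcpu.c) implement the writer side of a seqcount-style protocol: the version is bumped around the steal update, odd while the update is in flight and even once it is stable, so a guest can detect a torn read. A minimal guest-side reader sketch follows; the struct below is illustrative only (the real layout comes from the LoongArch kvm_para uapi headers, which are not part of this listing), and a real reader would also need read barriers between the loads.

/* Hypothetical guest-side reader for the steal/version protocol above. */
#include <stdint.h>

struct pv_steal_time {          /* illustrative, not the uapi kvm_steal_time */
	uint64_t steal;         /* stolen time, in nanoseconds */
	uint32_t version;       /* odd while the host is mid-update */
};

static uint64_t read_steal_time(volatile struct pv_steal_time *st)
{
	uint32_t v;
	uint64_t steal;

	do {
		v = st->version;
		while (v & 1)           /* host update in flight, wait for even */
			v = st->version;
		steal = st->steal;
	} while (st->version != v);     /* version moved underneath us: retry */

	return steal;
}
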
200 * kvm_check_requests - check and handle pending vCPU requests
202 * Return: RESUME_GUEST if we should enter the guest
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
226 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
228 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
236 * Return: RESUME_GUEST if we should enter the guest
246 * Check conditions before entering the guest in kvm_enter_guest_check()
260 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
274 * check vmid before the vCPU enters the guest in kvm_pre_enter_guest()
280 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_pre_enter_guest()
290 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
291 /* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */ in kvm_pre_enter_guest()
292 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
296 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); in kvm_pre_enter_guest()
298 ret = -EAGAIN; in kvm_pre_enter_guest()
306 * Return 1 to resume the guest and "<= 0" to resume the host.
311 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
315 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_handle_exit()
318 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_handle_exit()
331 ++vcpu->stat.int_exits; in kvm_handle_exit()
351 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
352 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
373 return -EINVAL; in kvm_arch_vcpu_ioctl_translate()
394 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
395 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
399 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
400 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
415 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
425 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
427 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
430 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
439 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) in kvm_arch_vcpu_ioctl_set_guest_debug()
440 return -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
442 if (dbg->control & KVM_GUESTDBG_ENABLE) in kvm_arch_vcpu_ioctl_set_guest_debug()
443 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
445 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
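
kvm_arch_vcpu_ioctl_set_guest_debug() above rejects control bits outside KVM_GUESTDBG_VALID_MASK and otherwise latches dbg->control into vcpu->guest_debug. Userspace reaches it through the generic KVM_SET_GUEST_DEBUG vCPU ioctl; a minimal sketch using only the standard KVM uapi (not code from this file):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable or disable guest debugging on an existing vCPU fd. */
static int set_guest_debug(int vcpu_fd, int enable)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	if (enable)
		dbg.control = KVM_GUESTDBG_ENABLE;  /* any extra flags must lie within the arch's valid mask */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
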
454 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid()
457 return -EINVAL; in kvm_set_cpuid()
459 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
462 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
463 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) { in kvm_set_cpuid()
466 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
474 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
475 return -EINVAL; in kvm_set_cpuid()
478 if (map->phys_map[val].enabled) { in kvm_set_cpuid()
480 if (vcpu == map->phys_map[val].vcpu) { in kvm_set_cpuid()
481 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
489 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
490 return -EINVAL; in kvm_set_cpuid()
494 map->phys_map[val].enabled = true; in kvm_set_cpuid()
495 map->phys_map[val].vcpu = vcpu; in kvm_set_cpuid()
496 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
505 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid()
507 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
513 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
514 if (map->phys_map[cpuid].enabled) { in kvm_drop_cpuid()
515 map->phys_map[cpuid].vcpu = NULL; in kvm_drop_cpuid()
516 map->phys_map[cpuid].enabled = false; in kvm_drop_cpuid()
519 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
529 map = kvm->arch.phyid_map; in kvm_get_vcpu_by_cpuid()
530 if (!map->phys_map[cpuid].enabled) in kvm_get_vcpu_by_cpuid()
533 return map->phys_map[cpuid].vcpu; in kvm_get_vcpu_by_cpuid()
539 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
542 return -EINVAL; in _kvm_getcsr()
552 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
574 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
577 return -EINVAL; in _kvm_setcsr()
617 return -EINVAL; in _kvm_get_cpucfg_mask()
688 return -EINVAL; in kvm_check_cpucfg()
694 return -EINVAL; in kvm_check_cpucfg()
697 return -EINVAL; in kvm_check_cpucfg()
700 return -EINVAL; in kvm_check_cpucfg()
703 return -EINVAL; in kvm_check_cpucfg()
709 return -EINVAL; in kvm_check_cpucfg()
711 return -EINVAL; in kvm_check_cpucfg()
713 return -EINVAL; in kvm_check_cpucfg()
718 * Values for the other CPUCFG IDs are not being further validated in kvm_check_cpucfg()
729 u64 type = reg->id & KVM_REG_LOONGARCH_MASK; in kvm_get_one_reg()
733 id = KVM_GET_IOC_CSR_IDX(reg->id); in kvm_get_one_reg()
737 id = KVM_GET_IOC_CPUCFG_IDX(reg->id); in kvm_get_one_reg()
739 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
741 ret = -EINVAL; in kvm_get_one_reg()
744 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
745 return -ENXIO; in kvm_get_one_reg()
747 switch (reg->id) { in kvm_get_one_reg()
749 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
752 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
755 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
758 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
761 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
764 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
767 ret = -EINVAL; in kvm_get_one_reg()
772 switch (reg->id) { in kvm_get_one_reg()
774 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
780 ret = -EINVAL; in kvm_get_one_reg()
785 ret = -EINVAL; in kvm_get_one_reg()
795 u64 v, size = reg->id & KVM_REG_SIZE_MASK; in kvm_get_reg()
802 ret = put_user(v, (u64 __user *)(long)reg->addr); in kvm_get_reg()
805 ret = -EINVAL; in kvm_get_reg()
816 u64 type = reg->id & KVM_REG_LOONGARCH_MASK; in kvm_set_one_reg()
820 id = KVM_GET_IOC_CSR_IDX(reg->id); in kvm_set_one_reg()
824 id = KVM_GET_IOC_CPUCFG_IDX(reg->id); in kvm_set_one_reg()
828 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
830 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
831 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
834 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
835 return -ENXIO; in kvm_set_one_reg()
837 switch (reg->id) { in kvm_set_one_reg()
839 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
842 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
845 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
848 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
851 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
854 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
857 ret = -EINVAL; in kvm_set_one_reg()
862 switch (reg->id) { in kvm_set_one_reg()
868 if (vcpu->vcpu_id == 0) in kvm_set_one_reg()
869 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
872 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
873 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
874 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
877 ret = -EINVAL; in kvm_set_one_reg()
882 ret = -EINVAL; in kvm_set_one_reg()
892 u64 v, size = reg->id & KVM_REG_SIZE_MASK; in kvm_set_reg()
896 ret = get_user(v, (u64 __user *)(long)reg->addr); in kvm_set_reg()
901 return -EINVAL; in kvm_set_reg()
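
kvm_get_reg()/kvm_set_reg() above copy a single u64 between userspace and the register selected by reg->id, with CSR, CPUCFG, LBT and KVM-special ids demultiplexed in kvm_get_one_reg()/kvm_set_one_reg(). A hedged userspace sketch of the matching KVM_GET_ONE_REG call; it assumes the LoongArch <asm/kvm.h> register id KVM_REG_LOONGARCH_COUNTER, which the listing's drdtime() + time_offset case serves:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>  /* struct kvm_one_reg, KVM_GET_ONE_REG */
#include <asm/kvm.h>    /* LoongArch register ids (assumed to define KVM_REG_LOONGARCH_COUNTER) */

/* Read the guest's view of the stable counter from a vCPU fd. */
static int get_guest_counter(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_LOONGARCH_COUNTER,
		.addr = (uint64_t)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
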
909 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_sregs()
914 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_sregs()
921 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
922 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
924 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
933 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
934 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
936 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
937 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
946 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
952 switch (attr->attr) { in kvm_loongarch_cpucfg_has_attr()
959 return -ENXIO; in kvm_loongarch_cpucfg_has_attr()
962 return -ENXIO; in kvm_loongarch_cpucfg_has_attr()
969 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_has_attr()
970 return -ENXIO; in kvm_loongarch_pvtime_has_attr()
978 int ret = -ENXIO; in kvm_loongarch_vcpu_has_attr()
980 switch (attr->group) { in kvm_loongarch_vcpu_has_attr()
999 uint64_t __user *uaddr = (uint64_t __user *)attr->addr; in kvm_loongarch_cpucfg_get_attr()
1001 switch (attr->attr) { in kvm_loongarch_cpucfg_get_attr()
1002 case 0 ... (KVM_MAX_CPUCFG_REGS - 1): in kvm_loongarch_cpucfg_get_attr()
1003 ret = _kvm_get_cpucfg_mask(attr->attr, &val); in kvm_loongarch_cpucfg_get_attr()
1008 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1011 return -ENXIO; in kvm_loongarch_cpucfg_get_attr()
1023 u64 __user *user = (u64 __user *)attr->addr; in kvm_loongarch_pvtime_get_attr()
1026 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_get_attr()
1027 return -ENXIO; in kvm_loongarch_pvtime_get_attr()
1029 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1031 return -EFAULT; in kvm_loongarch_pvtime_get_attr()
1039 int ret = -ENXIO; in kvm_loongarch_vcpu_get_attr()
1041 switch (attr->group) { in kvm_loongarch_vcpu_get_attr()
1059 u64 __user *user = (u64 __user *)attr->addr; in kvm_loongarch_cpucfg_set_attr()
1060 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_cpucfg_set_attr()
1062 switch (attr->attr) { in kvm_loongarch_cpucfg_set_attr()
1065 return -EFAULT; in kvm_loongarch_cpucfg_set_attr()
1069 return -EINVAL; in kvm_loongarch_cpucfg_set_attr()
1072 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) in kvm_loongarch_cpucfg_set_attr()
1073 && ((kvm->arch.pv_features & valid) != val)) in kvm_loongarch_cpucfg_set_attr()
1074 return -EINVAL; in kvm_loongarch_cpucfg_set_attr()
1075 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; in kvm_loongarch_cpucfg_set_attr()
1078 return -ENXIO; in kvm_loongarch_cpucfg_set_attr()
1086 u64 gpa, __user *user = (u64 __user *)attr->addr; in kvm_loongarch_pvtime_set_attr()
1087 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_pvtime_set_attr()
1090 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_set_attr()
1091 return -ENXIO; in kvm_loongarch_pvtime_set_attr()
1094 return -EFAULT; in kvm_loongarch_pvtime_set_attr()
1097 return -EINVAL; in kvm_loongarch_pvtime_set_attr()
1100 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1105 idx = srcu_read_lock(&kvm->srcu); in kvm_loongarch_pvtime_set_attr()
1107 ret = -EINVAL; in kvm_loongarch_pvtime_set_attr()
1108 srcu_read_unlock(&kvm->srcu, idx); in kvm_loongarch_pvtime_set_attr()
1111 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1112 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
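
kvm_loongarch_pvtime_set_attr() above receives the steal-time guest physical address through the vCPU device-attribute interface (group KVM_LOONGARCH_VCPU_PVTIME_CTRL, attribute KVM_LOONGARCH_VCPU_PVTIME_GPA) and validates it against the memslots. A minimal userspace sketch using the generic KVM_SET_DEVICE_ATTR vCPU ioctl; the two constants are assumed to come from the LoongArch <asm/kvm.h>:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>  /* struct kvm_device_attr, KVM_SET_DEVICE_ATTR */
#include <asm/kvm.h>    /* KVM_LOONGARCH_VCPU_PVTIME_CTRL / _GPA (assumed) */

/* Tell KVM where this vCPU's steal-time structure lives in guest memory. */
static int set_pvtime_gpa(int vcpu_fd, uint64_t gpa)
{
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr  = (uint64_t)(unsigned long)&gpa,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

KVM_HAS_DEVICE_ATTR with the same group/attribute pair can be issued first to probe for support, which is what kvm_loongarch_pvtime_has_attr() above answers.
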
1122 int ret = -ENXIO; in kvm_loongarch_vcpu_set_attr()
1124 switch (attr->group) { in kvm_loongarch_vcpu_set_attr()
1144 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1154 * be clear in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
1163 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1168 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1176 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1183 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1190 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1197 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1204 r = -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl()
1215 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1216 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1218 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1227 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1228 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1230 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
1238 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1239 return -EINVAL; in kvm_own_lbt()
1243 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1244 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1253 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1254 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1256 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1273 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1274 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1291 * Enable FPU for guest in kvm_own_fpu()
1292 * Set FR and FRE according to guest context in kvm_own_fpu()
1294 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1297 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1298 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1308 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1309 return -EINVAL; in kvm_own_lsx()
1313 /* Enable LSX for guest */ in kvm_own_lsx()
1314 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1316 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1319 * Guest FPU state already loaded, in kvm_own_lsx()
1322 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1328 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1333 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
1344 …if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcp… in kvm_own_lasx()
1345 return -EINVAL; in kvm_own_lasx()
1349 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1351 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1354 /* Guest LSX state already loaded, only restore upper LASX state */ in kvm_own_lasx()
1355 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1358 /* Guest FP state already loaded, only restore upper LSX & LASX state */ in kvm_own_lasx()
1359 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1360 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1364 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1369 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
1382 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1383 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1384 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1389 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1390 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1391 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1396 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1397 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1398 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1411 int intr = (int)irq->irq; in kvm_vcpu_ioctl_interrupt()
1416 kvm_dequeue_irq(vcpu, -intr); in kvm_vcpu_ioctl_interrupt()
1418 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); in kvm_vcpu_ioctl_interrupt()
1419 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
1431 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
1437 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
1439 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); in kvm_arch_vcpu_async_ioctl()
1444 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
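
kvm_vcpu_ioctl_interrupt() above treats a positive irq number as an injection and a negative one as clearing that pending line, and kvm_arch_vcpu_async_ioctl() routes the KVM_INTERRUPT ioctl to it. A small userspace sketch using the standard struct kvm_interrupt:

#include <sys/ioctl.h>
#include <linux/kvm.h>  /* struct kvm_interrupt, KVM_INTERRUPT */

/* A positive irq queues the interrupt; a negative value dequeues it again. */
static int set_irq_line(int vcpu_fd, int irq)
{
	struct kvm_interrupt intr = { .irq = irq };

	return ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
}
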
1457 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1458 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1460 hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); in kvm_arch_vcpu_create()
1461 vcpu->arch.swtimer.function = kvm_swtimer_wakeup; in kvm_arch_vcpu_create()
1463 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1464 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1465 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1466 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1467 return -ENOMEM; in kvm_arch_vcpu_create()
1470 * All kvm exceptions share one exception entry, and host <-> guest in kvm_arch_vcpu_create()
1473 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1476 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1479 * Initialize guest register state to valid architectural reset state. in kvm_arch_vcpu_create()
1484 /* Set initialization mode for the guest */ in kvm_arch_vcpu_create()
1485 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1489 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); in kvm_arch_vcpu_create()
1492 /* Start with no pending virtual guest interrupts */ in kvm_arch_vcpu_create()
1493 csr->csrs[LOONGARCH_CSR_GINTC] = 0; in kvm_arch_vcpu_create()
1507 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1508 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1510 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1517 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1518 if (context->last_vcpu == vcpu) in kvm_arch_vcpu_destroy()
1519 context->last_vcpu = NULL; in kvm_arch_vcpu_destroy()
1527 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
1531 * If so, any old guest TLB state may be stale. in _kvm_vcpu_load()
1533 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1537 * If not, any old guest state from this vCPU will have been clobbered. in _kvm_vcpu_load()
1539 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1540 if (migrated || (context->last_vcpu != vcpu)) in _kvm_vcpu_load()
1541 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1542 context->last_vcpu = vcpu; in _kvm_vcpu_load()
1547 /* Control guest page CCA attribute */ in _kvm_vcpu_load()
1555 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1558 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1560 /* Restore guest CSR registers */ in _kvm_vcpu_load()
1606 /* Restore Root.GINTC from unused Guest.GINTC register */ in _kvm_vcpu_load()
1607 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); in _kvm_vcpu_load()
1614 if (vcpu->kvm->created_vcpus > 1) in _kvm_vcpu_load()
1617 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1627 /* Restore guest state to registers */ in kvm_arch_vcpu_load()
1634 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1644 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1695 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1699 /* Save Root.GINTC into unused Guest.GINTC register */ in _kvm_vcpu_put()
1700 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); in _kvm_vcpu_put()
1712 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1714 /* Save guest state in registers */ in kvm_arch_vcpu_put()
1721 int r = -EINTR; in kvm_arch_vcpu_ioctl_run()
1722 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1724 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1725 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1727 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1730 if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { in kvm_arch_vcpu_ioctl_run()
1731 if (!run->iocsr_io.is_write) in kvm_arch_vcpu_ioctl_run()
1735 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1739 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
1750 r = kvm_loongarch_ops->enter_guest(run, vcpu); in kvm_arch_vcpu_ioctl_run()
1754 * Guest exit is already recorded at kvm_handle_exit() in kvm_arch_vcpu_ioctl_run()
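
kvm_arch_vcpu_ioctl_run() above first completes any outstanding MMIO or IOCSR read by writing the emulated data back into the guest register file, then re-enters the guest until the exit handler asks to return to userspace. The userspace half is the usual KVM_RUN loop; a minimal sketch that completes MMIO reads is below, assuming headers new enough to define KVM_EXIT_LOONGARCH_IOCSR (the iocsr_io case is completed analogously through run->iocsr_io):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Minimal vCPU run loop: fill in MMIO read data so that the kernel's
 * "if (!vcpu->mmio_is_write)" completion path above has something to load.
 */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (!run->mmio.is_write)
				memset(run->mmio.data, 0, run->mmio.len);  /* a device model would supply real data */
			break;
		case KVM_EXIT_LOONGARCH_IOCSR:
			/* IOCSR reads are completed the same way via run->iocsr_io */
			break;
		default:
			return 0;  /* hand any other exit reason back to the caller */
		}
	}
}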