Lines Matching +full:hard +full:- +full:wired
115 if (kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_vz_config5_guest_wrmask()
122 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_vz_config5_guest_wrmask()
140 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
158 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) in kvm_vz_config1_user_wrmask()
175 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_vz_config3_user_wrmask()
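The wrmask fragments above build the set of Config bits a guest may write, adding bits only for features the guest has actually been given. A minimal sketch of the pattern, using the Config5.MSAEn (bit 27) and Config5.FRE (bit 8) positions from the MIPS PRA but hypothetical predicate parameters in place of kvm_mips_guest_has_fpu()/kvm_mips_guest_has_msa():

#include <stdint.h>

#define CONF5_FRE	(1u << 8)	/* Config5.FRE (FPU Register Emulation) */
#define CONF5_MSAEN	(1u << 27)	/* Config5.MSAEn */

/* Feature-gated write mask: start empty, add bits the guest owns. */
static uint32_t config5_guest_wrmask(int has_fpu, int has_msa)
{
        uint32_t mask = 0;

        if (has_msa)
                mask |= CONF5_MSAEN;	/* guest may toggle MSA enable */
        if (has_fpu)
                mask |= CONF5_FRE;	/* guest may toggle FRE */
        return mask;
}

int main(void)
{
        return config5_guest_wrmask(1, 1) == (CONF5_MSAEN | CONF5_FRE) ? 0 : 1;
}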
205 set_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_queue_irq()
206 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_queue_irq()
211 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_dequeue_irq()
212 set_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_dequeue_irq()
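kvm_vz_queue_irq() and kvm_vz_dequeue_irq() keep the two bitmasks strictly complementary per priority: queueing sets the pending bit and clears the clear-request bit, dequeueing does the reverse, so the next guest entry knows for each priority whether to assert or withdraw the interrupt line. A standalone model of that pairing (names are stand-ins, and the kernel uses atomic bitops):

#include <stdio.h>

static unsigned long pending, pending_clr;

static void queue_irq(unsigned int priority)
{
        pending |= 1UL << priority;		/* assert on next guest entry */
        pending_clr &= ~(1UL << priority);	/* cancel any pending clear */
}

static void dequeue_irq(unsigned int priority)
{
        pending &= ~(1UL << priority);		/* cancel any pending assert */
        pending_clr |= 1UL << priority;		/* withdraw on next guest entry */
}

int main(void)
{
        queue_irq(3);
        dequeue_irq(3);
        printf("pending=%lx clr=%lx\n", pending, pending_clr);
        return 0;
}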
236 int intr = (int)irq->irq; in kvm_vz_queue_io_int_cb()
248 int intr = (int)irq->irq; in kvm_vz_dequeue_io_int_cb()
254 kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); in kvm_vz_dequeue_io_int_cb()
282 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_irq_deliver_cb()
324 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_irq_clear_cb()
333 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
346 if (mips_hpt_frequency != vcpu->arch.count_hz) in kvm_vz_should_use_htimer()
357 * _kvm_vz_restore_stimer() - Restore soft timer state.
362 * Restore VZ state relating to the soft timer. The hard timer can be enabled
372 write_c0_gtoffset(compare - read_c0_count()); in _kvm_vz_restore_stimer()
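The gtoffset write above relies on the VZ rule that the guest observes Guest.Count = Root.Count + GTOffset, so programming GTOffset = wanted_count - Root.Count makes the guest counter resume at the saved value. A small illustrative model, with register reads and writes replaced by plain variables:

#include <stdint.h>
#include <stdio.h>

static uint32_t root_count;	/* stand-in for read_c0_count() */
static uint32_t gtoffset;	/* stand-in for the GTOffset register */

/* Hardware computes Guest.Count = Root.Count + GTOffset (mod 2^32). */
static uint32_t guest_count(void)
{
        return root_count + gtoffset;
}

int main(void)
{
        uint32_t saved_guest_count = 0x1234;

        root_count = 0xdeadbeef;
        gtoffset = saved_guest_count - root_count;	/* as in the fragment */
        printf("guest sees 0x%x\n", (unsigned)guest_count());	/* 0x1234 */
        return 0;
}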
379 * _kvm_vz_restore_htimer() - Restore hard timer state.
384 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
394 * Freeze the soft-timer and sync the guest CP0_Count with it. We do in _kvm_vz_restore_htimer()
399 write_c0_gtoffset(start_count - read_c0_count()); in _kvm_vz_restore_htimer()
413 if (after_count - start_count > compare - start_count - 1) in _kvm_vz_restore_htimer()
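The comparison above is an overflow-safe check for whether the free-running counter passed CP0_Compare while the timer state was being moved: both distances are measured from start_count, so the test keeps working even if the 32-bit counter wrapped during the window. A hedged standalone version:

#include <stdint.h>
#include <stdio.h>

/* Nonzero if a free-running 32-bit counter passed `compare` while moving
 * from `start` to `after`, using the same modular test as the fragment. */
static int count_passed_compare(uint32_t start, uint32_t after, uint32_t compare)
{
        return (uint32_t)(after - start) > (uint32_t)(compare - start - 1);
}

int main(void)
{
        /* Wraparound case: start near the top, compare just past zero. */
        printf("%d\n", count_passed_compare(0xfffffff0u, 0x10u, 0x4u)); /* 1 */
        printf("%d\n", count_passed_compare(0xfffffff0u, 0x2u, 0x4u));  /* 0 */
        return 0;
}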
418 * kvm_vz_restore_timer() - Restore timer state.
425 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_restore_timer()
436 * kvm_vz_acquire_htimer() - Switch to hard timer state.
439 * Restore hard timer state on top of existing soft timer state if possible.
441 * Since hard timer won't remain active over preemption, preemption should be
450 /* enable guest access to hard timer */ in kvm_vz_acquire_htimer()
459 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
465 * timer. The hard timer must already be in use, so preemption should be
489 * Record a final CP0_Count which we will transfer to the soft-timer. in _kvm_vz_save_htimer()
501 if (end_count - before_count > compare - before_count - 1) in _kvm_vz_save_htimer()
505 * Restore soft-timer, ignoring a small amount of negative drift due to in _kvm_vz_save_htimer()
508 kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000); in _kvm_vz_save_htimer()
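kvm_mips_restore_hrtimer() is handed a -0x10000 tick tolerance above so that the small lag accumulated between freezing the hard timer and sampling end_count does not force a resync. A rough userspace model of that tolerance test, under assumed semantics and an assumed 100 MHz count rate (the real helper also re-programs the hrtimer):

#include <stdint.h>
#include <stdio.h>

#define COUNT_HZ 100000000ull	/* assumed 100 MHz count rate */

/* Count value the frozen soft timer implies after elapsed_ns. */
static uint32_t expected_count(uint32_t frozen_count, int64_t elapsed_ns)
{
        return frozen_count +
               (uint32_t)((uint64_t)elapsed_ns * COUNT_HZ / 1000000000ull);
}

/* Keep the timer if the observed count lags the expected one by no more
 * than min_drift ticks (e.g. -0x10000), as in the fragment above. */
static int drift_tolerated(uint32_t expected, uint32_t observed, int32_t min_drift)
{
        int32_t drift = (int32_t)(observed - expected);

        return drift <= 0 && drift >= min_drift;
}

int main(void)
{
        uint32_t exp = expected_count(1000, 50000);	/* 50 us later */

        printf("%d\n", drift_tolerated(exp, exp - 0x100, -0x10000)); /* 1 */
        return 0;
}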
512 * kvm_vz_save_timer() - Save guest timer state.
515 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
520 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_save_timer()
525 /* disable guest use of hard timer */ in kvm_vz_save_timer()
528 /* save hard timer state */ in kvm_vz_save_timer()
535 /* save timer-related state to VCPU context */ in kvm_vz_save_timer()
541 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
544 * Transfers the state of the hard guest timer to the soft guest timer, leaving
567 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
568 * @inst: 32-bit instruction encoding.
606 * is_eva_am_mapped() - Find whether an access mode is mapped.
608 * @am: 3-bit encoded access mode.
632 * - 6 110 0 0 in is_eva_am_mapped()
665 opc = (u32 *)vcpu->arch.pc; in is_eva_am_mapped()
666 if (vcpu->arch.host_cp0_cause & CAUSEF_BD) in is_eva_am_mapped()
678 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
687 * -errno on failure.
696 /* Handle canonical 32-bit virtual address */ in kvm_vz_gva_to_gpa()
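The canonical 32-bit case above covers the MIPS unmapped segments, which translate with no TLB at all. A minimal sketch of that case (the mapped KUSEG/KSEG2 segments, which the real function handles with a software walk of the guest TLB, are only stubbed out here):

#include <stdint.h>
#include <stdio.h>

/* KSEG0 (0x80000000, cached) and KSEG1 (0xa0000000, uncached) both map
 * linearly onto physical 0x0-0x1fffffff, so the GPA is the low 29 bits
 * of the GVA. */
static int gva_to_gpa_unmapped(uint32_t gva, uint32_t *gpa)
{
        if (gva >= 0x80000000u && gva < 0xc0000000u) {	/* KSEG0/KSEG1 */
                *gpa = gva & 0x1fffffffu;
                return 0;
        }
        return -1;	/* KUSEG/KSEG2: software TLB walk required */
}

int main(void)
{
        uint32_t gpa;

        if (!gva_to_gpa_unmapped(0x9fc00000u, &gpa))
                printf("gpa = 0x%x\n", (unsigned)gpa);	/* 0x1fc00000 */
        return 0;
}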
782 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
794 * -errno on failure.
799 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_vz_badvaddr_to_gpa()
811 return -EINVAL; in kvm_vz_badvaddr_to_gpa()
813 /* ... and we need to perform the GVA->GPA translation in software */ in kvm_vz_badvaddr_to_gpa()
819 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_no_handler()
820 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_no_handler()
822 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_no_handler()
836 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_no_handler()
866 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_write_maari()
870 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); in kvm_write_maari()
871 else if (val < ARRAY_SIZE(vcpu->arch.maar)) in kvm_write_maari()
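kvm_write_maari() above implements the architected MAARI write semantics: an all-ones index selects the highest implemented MAAR, an in-range value is accepted, and anything else is dropped. A sketch with stand-in constants:

#include <stdint.h>

#define NUM_MAARS	6	/* stand-in for ARRAY_SIZE(vcpu->arch.maar) */
#define MAARI_INDEX	0x3fu	/* stand-in for the architected index field */

static unsigned int maari;

static void write_maari(unsigned long val)
{
        val &= MAARI_INDEX;
        if (val == MAARI_INDEX)
                maari = NUM_MAARS - 1;	/* all-ones selects the last MAAR */
        else if (val < NUM_MAARS)
                maari = val;		/* in-range index is accepted */
        /* anything else is silently ignored */
}

int main(void)
{
        write_maari(~0ul);
        return maari == NUM_MAARS - 1 ? 0 : 1;
}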
879 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_gpsi_cop0()
889 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cop0()
911 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
932 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
933 val = vcpu->arch.maar[ in kvm_vz_gpsi_cop0()
951 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
955 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
966 vcpu->arch.gprs[rt] = val; in kvm_vz_gpsi_cop0()
977 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
979 val = vcpu->arch.gprs[rt]; in kvm_vz_gpsi_cop0()
987 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); in kvm_vz_gpsi_cop0()
991 vcpu->arch.gprs[rt], in kvm_vz_gpsi_cop0()
1011 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
1012 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = in kvm_vz_gpsi_cop0()
1021 cop0->reg[rd][sel] = (int)val; in kvm_vz_gpsi_cop0()
1068 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cop0()
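The curr_pc save at 889 and restore at 1068 bracket a pattern used throughout these emulation paths: advance the PC past the trapping instruction first, emulate, and roll the PC back on failure so the exception is re-taken at the original address. A simplified model (the kernel's update_pc() helper also resolves branch delay slots via CP0_Cause.BD, which this omits):

#include <stdint.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

static enum emulation_result emulate_gpsi(uint64_t *pc, int emulate_ok)
{
        uint64_t curr_pc = *pc;		/* remember the faulting PC */

        *pc = curr_pc + 4;		/* commit past the instruction first */
        if (!emulate_ok) {
                *pc = curr_pc;		/* roll back: fault is re-delivered */
                return EMULATE_FAIL;
        }
        return EMULATE_DONE;
}

int main(void)
{
        uint64_t pc = 0x1000;

        emulate_gpsi(&pc, 0);		/* failed emulation leaves pc intact */
        return pc == 0x1000 ? 0 : 1;
}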
1081 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_vz_gpsi_cache()
1088 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cache()
1102 va = arch->gprs[base] + offset; in kvm_vz_gpsi_cache()
1105 cache, op, base, arch->gprs[base], offset); in kvm_vz_gpsi_cache()
1134 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], in kvm_vz_gpsi_cache()
1137 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cache()
1156 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_lwc2()
1165 ++vcpu->stat.vz_cpucfg_exits; in kvm_vz_gpsi_lwc2()
1166 hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); in kvm_vz_gpsi_lwc2()
1168 switch (vcpu->arch.gprs[rs]) { in kvm_vz_gpsi_lwc2()
1170 vcpu->arch.gprs[rd] = 0x14c000; in kvm_vz_gpsi_lwc2()
1176 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1181 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1184 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1188 vcpu->arch.gprs[rd] = 0; in kvm_vz_gpsi_lwc2()
1195 inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); in kvm_vz_gpsi_lwc2()
1205 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_lwc2()
1216 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gpsi()
1263 arch->gprs[rt] = in kvm_trap_vz_handle_gpsi()
1273 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); in kvm_trap_vz_handle_gpsi()
1298 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gsfc()
1318 unsigned int val = arch->gprs[rt]; in kvm_trap_vz_handle_gsfc()
1326 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_trap_vz_handle_gsfc()
1356 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_trap_vz_handle_gsfc()
1395 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) in kvm_trap_vz_handle_gsfc()
1450 curr_pc = vcpu->arch.pc; in kvm_trap_vz_handle_hc()
1457 vcpu->arch.pc = curr_pc; in kvm_trap_vz_handle_hc()
1484 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_guest_exit()
1485 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_guest_exit()
1487 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_trap_vz_handle_guest_exit()
1494 ++vcpu->stat.vz_gpsi_exits; in kvm_trap_vz_handle_guest_exit()
1498 ++vcpu->stat.vz_gsfc_exits; in kvm_trap_vz_handle_guest_exit()
1502 ++vcpu->stat.vz_hc_exits; in kvm_trap_vz_handle_guest_exit()
1506 ++vcpu->stat.vz_grr_exits; in kvm_trap_vz_handle_guest_exit()
1511 ++vcpu->stat.vz_gva_exits; in kvm_trap_vz_handle_guest_exit()
1516 ++vcpu->stat.vz_ghfc_exits; in kvm_trap_vz_handle_guest_exit()
1520 ++vcpu->stat.vz_gpa_exits; in kvm_trap_vz_handle_guest_exit()
1525 ++vcpu->stat.vz_resvd_exits; in kvm_trap_vz_handle_guest_exit()
1537 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_guest_exit()
1544 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1555 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_cop_unusable()
1565 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || in kvm_trap_vz_handle_cop_unusable()
1566 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_trap_vz_handle_cop_unusable()
1582 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_cop_unusable()
1593 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1610 if (!kvm_mips_guest_has_msa(&vcpu->arch) || in kvm_trap_vz_handle_msa_disabled()
1613 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_trap_vz_handle_msa_disabled()
1614 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_msa_disabled()
1625 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_ld_miss()
1626 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_ld_miss()
1627 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_ld_miss()
1628 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_ld_miss()
1635 if (kvm_is_ifetch_fault(&vcpu->arch)) { in kvm_trap_vz_handle_tlb_ld_miss()
1636 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1645 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1654 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1661 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_ld_miss()
1664 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1672 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_st_miss()
1673 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_st_miss()
1674 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_st_miss()
1675 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1684 vcpu->arch.host_cp0_badvaddr = badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1692 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1701 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1708 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_st_miss()
1711 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1800 ret += 1 + ARRAY_SIZE(vcpu->arch.maar); in kvm_vz_num_regs()
1813 return -EFAULT; in kvm_vz_copy_reg_indices()
1819 return -EFAULT; in kvm_vz_copy_reg_indices()
1825 return -EFAULT; in kvm_vz_copy_reg_indices()
1831 return -EFAULT; in kvm_vz_copy_reg_indices()
1837 return -EFAULT; in kvm_vz_copy_reg_indices()
1843 return -EFAULT; in kvm_vz_copy_reg_indices()
1849 return -EFAULT; in kvm_vz_copy_reg_indices()
1853 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { in kvm_vz_copy_reg_indices()
1856 return -EFAULT; in kvm_vz_copy_reg_indices()
1862 return -EFAULT; in kvm_vz_copy_reg_indices()
1871 return -EFAULT; in kvm_vz_copy_reg_indices()
1884 * KVM API exposes 64-bit version of the register, so move the in entrylo_kvm_to_user()
1900 * KVM API exposes 64-bit version of the register, so move the in entrylo_user_to_kvm()
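Both entrylo helpers exist because a 32-bit kernel keeps the EntryLo RI/XI bits at bits 31:30, while the KVM user API always presents a 64-bit register with them at bits 63:62. A hedged sketch of the two-bit move (positions per the MIPS32 layout; names are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define ENTRYLO32_RI_XI (3u << 30)	/* RI/XI at bits 31:30 on 32-bit */

static uint64_t entrylo_32_to_api(uint32_t v)
{
        return (uint64_t)(v & ~ENTRYLO32_RI_XI) |
               ((uint64_t)(v & ENTRYLO32_RI_XI) << 32);
}

static uint32_t entrylo_api_to_32(uint64_t v)
{
        return (uint32_t)(v & ~ENTRYLO32_RI_XI) |
               (uint32_t)((v >> 32) & ENTRYLO32_RI_XI);
}

int main(void)
{
        uint32_t lo = 0x80000123u;	/* RI set plus some low PFN bits */
        uint64_t api = entrylo_32_to_api(lo);

        printf("%llx -> %x\n", (unsigned long long)api, entrylo_api_to_32(api));
        return 0;
}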
1914 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_get_one_reg()
1917 switch (reg->id) { in kvm_vz_get_one_reg()
1932 return -EINVAL; in kvm_vz_get_one_reg()
1937 return -EINVAL; in kvm_vz_get_one_reg()
1943 return -EINVAL; in kvm_vz_get_one_reg()
1955 return -EINVAL; in kvm_vz_get_one_reg()
1960 return -EINVAL; in kvm_vz_get_one_reg()
1965 return -EINVAL; in kvm_vz_get_one_reg()
1970 return -EINVAL; in kvm_vz_get_one_reg()
1975 return -EINVAL; in kvm_vz_get_one_reg()
1980 return -EINVAL; in kvm_vz_get_one_reg()
1988 return -EINVAL; in kvm_vz_get_one_reg()
1999 return -EINVAL; in kvm_vz_get_one_reg()
2004 return -EINVAL; in kvm_vz_get_one_reg()
2031 /* Octeon III has a read-only guest.PRid */ in kvm_vz_get_one_reg()
2047 return -EINVAL; in kvm_vz_get_one_reg()
2052 return -EINVAL; in kvm_vz_get_one_reg()
2057 return -EINVAL; in kvm_vz_get_one_reg()
2062 return -EINVAL; in kvm_vz_get_one_reg()
2067 return -EINVAL; in kvm_vz_get_one_reg()
2075 return -EINVAL; in kvm_vz_get_one_reg()
2076 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_get_one_reg()
2077 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_get_one_reg()
2078 return -EINVAL; in kvm_vz_get_one_reg()
2079 *v = vcpu->arch.maar[idx]; in kvm_vz_get_one_reg()
2083 return -EINVAL; in kvm_vz_get_one_reg()
2084 *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0); in kvm_vz_get_one_reg()
2095 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_get_one_reg()
2097 return -EINVAL; in kvm_vz_get_one_reg()
2120 *v = vcpu->arch.count_ctl; in kvm_vz_get_one_reg()
2123 *v = ktime_to_ns(vcpu->arch.count_resume); in kvm_vz_get_one_reg()
2126 *v = vcpu->arch.count_hz; in kvm_vz_get_one_reg()
2129 return -EINVAL; in kvm_vz_get_one_reg()
2138 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_set_one_reg()
2143 switch (reg->id) { in kvm_vz_set_one_reg()
2158 return -EINVAL; in kvm_vz_set_one_reg()
2163 return -EINVAL; in kvm_vz_set_one_reg()
2169 return -EINVAL; in kvm_vz_set_one_reg()
2181 return -EINVAL; in kvm_vz_set_one_reg()
2186 return -EINVAL; in kvm_vz_set_one_reg()
2191 return -EINVAL; in kvm_vz_set_one_reg()
2196 return -EINVAL; in kvm_vz_set_one_reg()
2201 return -EINVAL; in kvm_vz_set_one_reg()
2206 return -EINVAL; in kvm_vz_set_one_reg()
2214 return -EINVAL; in kvm_vz_set_one_reg()
2225 return -EINVAL; in kvm_vz_set_one_reg()
2230 return -EINVAL; in kvm_vz_set_one_reg()
2274 /* Octeon III has a guest.PRid, but it's read-only */ in kvm_vz_set_one_reg()
2352 return -EINVAL; in kvm_vz_set_one_reg()
2353 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_set_one_reg()
2354 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_set_one_reg()
2355 return -EINVAL; in kvm_vz_set_one_reg()
2356 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); in kvm_vz_set_one_reg()
2360 return -EINVAL; in kvm_vz_set_one_reg()
2372 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_set_one_reg()
2374 return -EINVAL; in kvm_vz_set_one_reg()
2406 return -EINVAL; in kvm_vz_set_one_reg()
2446 vcpu->arch.vzguestid[i] = 0; in kvm_vz_check_requests()
2463 unsigned int wired = read_gc0_wired(); in kvm_vz_vcpu_save_wired() local
2467 /* Expand the wired TLB array if necessary */ in kvm_vz_vcpu_save_wired()
2468 wired &= MIPSR6_WIRED_WIRED; in kvm_vz_vcpu_save_wired()
2469 if (wired > vcpu->arch.wired_tlb_limit) { in kvm_vz_vcpu_save_wired()
2470 tlbs = krealloc(vcpu->arch.wired_tlb, wired * in kvm_vz_vcpu_save_wired()
2471 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); in kvm_vz_vcpu_save_wired()
2474 wired = vcpu->arch.wired_tlb_limit; in kvm_vz_vcpu_save_wired()
2476 vcpu->arch.wired_tlb = tlbs; in kvm_vz_vcpu_save_wired()
2477 vcpu->arch.wired_tlb_limit = wired; in kvm_vz_vcpu_save_wired()
2481 if (wired) in kvm_vz_vcpu_save_wired()
2482 /* Save wired entries from the guest TLB */ in kvm_vz_vcpu_save_wired()
2483 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); in kvm_vz_vcpu_save_wired()
2485 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { in kvm_vz_vcpu_save_wired()
2486 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); in kvm_vz_vcpu_save_wired()
2487 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; in kvm_vz_vcpu_save_wired()
2488 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; in kvm_vz_vcpu_save_wired()
2489 vcpu->arch.wired_tlb[i].tlb_mask = 0; in kvm_vz_vcpu_save_wired()
2491 vcpu->arch.wired_tlb_used = wired; in kvm_vz_vcpu_save_wired()
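The save path above grows the wired-entry save area on demand and, if the atomic allocation fails, clamps the count so it still saves what fits rather than failing outright; slots used by a previous, larger save are then stamped with unique non-matching EntryHi values so stale entries can never hit. A userspace model of the grow-or-clamp step (types and names are stand-ins for the kernel's krealloc()-based code):

#include <stdint.h>
#include <stdlib.h>

struct tlb_entry { uint64_t hi, lo0, lo1, mask; };

struct wired_state {
        struct tlb_entry *tlb;	/* save area for wired guest entries */
        unsigned int limit;	/* entries currently allocated */
};

/* Grow the save area to hold `wired` entries; on allocation failure,
 * clamp to what already fits instead of failing the save. */
static unsigned int grow_wired(struct wired_state *s, unsigned int wired)
{
        if (wired > s->limit) {
                struct tlb_entry *tlbs =
                        realloc(s->tlb, wired * sizeof(*tlbs));

                if (!tlbs) {
                        wired = s->limit;	/* save what we can */
                } else {
                        s->tlb = tlbs;
                        s->limit = wired;
                }
        }
        return wired;
}

int main(void)
{
        struct wired_state s = { 0 };

        return grow_wired(&s, 4) == 4 ? 0 : 1;
}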
2496 /* Load wired entries into the guest TLB */ in kvm_vz_vcpu_load_wired()
2497 if (vcpu->arch.wired_tlb) in kvm_vz_vcpu_load_wired()
2498 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, in kvm_vz_vcpu_load_wired()
2499 vcpu->arch.wired_tlb_used); in kvm_vz_vcpu_load_wired()
2504 struct kvm *kvm = vcpu->kvm; in kvm_vz_vcpu_load_tlb()
2505 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; in kvm_vz_vcpu_load_tlb()
2512 migrated = (vcpu->arch.last_exec_cpu != cpu); in kvm_vz_vcpu_load_tlb()
2513 vcpu->arch.last_exec_cpu = cpu; in kvm_vz_vcpu_load_tlb()
2530 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & in kvm_vz_vcpu_load_tlb()
2533 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); in kvm_vz_vcpu_load_tlb()
2535 vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2539 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2556 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) in kvm_vz_vcpu_load_tlb()
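The XOR-and-mask test above treats the top bits of a GuestID as a generation number: a vCPU whose cached GuestID carries an old generation must take a fresh one, much like the MIPS ASID allocator. A self-contained model under assumed field widths:

#include <stdint.h>
#include <stdio.h>

#define GUESTID_MASK		0xffull
#define GUESTID_VERSION_MASK	(~GUESTID_MASK)

/* Generation starts at 1 so a zero-initialised vCPU GuestID always
 * compares as stale on first use. */
static uint64_t guestid_cache = 0x100;

static uint64_t get_guestid(uint64_t *vcpu_guestid)
{
        if ((*vcpu_guestid ^ guestid_cache) & GUESTID_VERSION_MASK) {
                if (!(++guestid_cache & GUESTID_MASK))
                        ++guestid_cache; /* wrapped: new generation; a real
                                          * implementation flushes the guest
                                          * TLB here and skips reserved id 0 */
                *vcpu_guestid = guestid_cache;
        }
        return *vcpu_guestid & GUESTID_MASK;
}

int main(void)
{
        uint64_t vcpu_guestid = 0;

        printf("id=%llu\n", (unsigned long long)get_guestid(&vcpu_guestid));
        return 0;
}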
2565 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_vcpu_load()
2572 migrated = (vcpu->arch.last_sched_cpu != cpu); in kvm_vz_vcpu_load()
2583 * restore wired guest TLB entries (while in guest context). in kvm_vz_vcpu_load()
2586 if (current->flags & PF_VCPU) { in kvm_vz_vcpu_load()
2692 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]); in kvm_vz_vcpu_load()
2699 if (vcpu->kvm->created_vcpus > 1) in kvm_vz_vcpu_load()
2707 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_vcpu_put()
2709 if (current->flags & PF_VCPU) in kvm_vz_vcpu_put()
2728 /* allow wired TLB entries to be overwritten */ in kvm_vz_vcpu_put()
2799 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = in kvm_vz_vcpu_put()
2806 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2810 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2819 /* Write MMUSize - 1 into guest Config registers */ in kvm_vz_resize_guest_vtlb()
2822 (size - 1) << MIPS_CONF1_TLBS_SHIFT); in kvm_vz_resize_guest_vtlb()
2828 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2833 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2840 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it in kvm_vz_resize_guest_vtlb()
2841 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write in kvm_vz_resize_guest_vtlb()
2847 if (size - 1 <= limit) in kvm_vz_resize_guest_vtlb()
2852 /* Read back MMUSize - 1 */ in kvm_vz_resize_guest_vtlb()
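The resize fragments above split MMUSize-1 across two registers: the low 6 bits go in Config1.MMUSize and the remainder in a Config4 extension field (whose exact position depends on Config4.MMUExtDef, which the sketch below leaves abstract). An illustrative encoding:

#include <stdint.h>
#include <stdio.h>

#define CONF1_TLBS_SHIFT 25	/* Config1.MMUSize field position */
#define CONF1_TLBS_SIZE  6	/* bits of MMUSize-1 that fit in Config1 */

/* Split MMUSize-1 across Config1 and the Config4 extension field. */
static void encode_mmu_size(unsigned int size, uint32_t *config1,
                            uint32_t *config4_ext)
{
        *config1 &= ~(0x3fu << CONF1_TLBS_SHIFT);
        *config1 |= ((size - 1) & 0x3f) << CONF1_TLBS_SHIFT;
        *config4_ext = (size - 1) >> CONF1_TLBS_SIZE;
}

int main(void)
{
        uint32_t c1 = 0, c4ext;

        encode_mmu_size(128, &c1, &c4ext);	/* MMUSize-1 = 127 */
        printf("Config1.MMUSize=%u ext=%u\n",
               (c1 >> CONF1_TLBS_SHIFT) & 0x3f, c4ext);
        return 0;
}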
2894 mmu_size -= guest_mmu_size; in kvm_vz_enable_virtualization_cpu()
2896 cvmvmconfig |= mmu_size - 1; in kvm_vz_enable_virtualization_cpu()
2910 * overlap of root wired and guest entries, the guest TLB may in kvm_vz_enable_virtualization_cpu()
2914 ftlb_size = current_cpu_data.tlbsize - mmu_size; in kvm_vz_enable_virtualization_cpu()
2922 * Reduce to make space for root wired entries and at least 2 in kvm_vz_enable_virtualization_cpu()
2923 * root non-wired entries. This does assume that long-term wired in kvm_vz_enable_virtualization_cpu()
2926 guest_mmu_size = mmu_size - num_wired_entries() - 2; in kvm_vz_enable_virtualization_cpu()
2934 * of wired entries. in kvm_vz_enable_virtualization_cpu()
2939 return -EINVAL; in kvm_vz_enable_virtualization_cpu()
3005 cvmvmconfig |= mmu_size - 1; in kvm_vz_disable_virtualization_cpu()
3035 /* We support 64-bit registers/operations and addresses */ in kvm_vz_check_extension()
3055 vcpu->arch.vzguestid[i] = 0; in kvm_vz_vcpu_init()
3079 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_vcpu_setup()
3097 /* Wired */ in kvm_vz_vcpu_setup()
3111 kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id); in kvm_vz_vcpu_setup()
3184 /* bits SEGBITS-13+3:4 set */ in kvm_vz_vcpu_setup()
3186 ((1ull << (cpu_vmbits - 13)) - 1) << 4); in kvm_vz_vcpu_setup()
3210 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0; in kvm_vz_vcpu_setup()
3213 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); in kvm_vz_vcpu_setup()
3230 cpumask_setall(&kvm->arch.asid_flush_mask); in kvm_vz_prepare_flush_shadow()
3263 r = vcpu->arch.vcpu_run(vcpu); in kvm_vz_vcpu_run()
3307 /* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
3313 return -ENODEV; in kvm_mips_emulation_init()
3319 if (WARN(pgd_reg == -1, in kvm_mips_emulation_init()
3321 return -ENODEV; in kvm_mips_emulation_init()