Lines Matching +full:guest +full:- +full:index +full:- +full:bits

44  * Number of guest VTLB entries to use, so we can catch inconsistency between
60 * First write with WG=1 to write upper bits, then write again in case in kvm_vz_write_gc0_ebase()
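A minimal sketch of the double-write pattern that comment describes, assuming the write_gc0_ebase_64() accessor and the MIPS_EBASE_WG bit from mipsregs.h (the surrounding fallback logic of kvm_vz_write_gc0_ebase() is not shown here):

        /* WG=1 makes the upper EBase bits writable ... */
        write_gc0_ebase_64(v | MIPS_EBASE_WG);
        /* ... then rewrite the value so WG lands back at its intended state */
        write_gc0_ebase_64(v);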
75 * These Config bits may be writable by the guest:
115 if (kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_vz_config5_guest_wrmask()
119 * Permit guest FPU mode changes if FPU is enabled and the relevant in kvm_vz_config5_guest_wrmask()
122 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_vz_config5_guest_wrmask()
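Taken together, the matched conditions suggest the mask is accumulated roughly as follows (a sketch, not the complete kvm_vz_config5_guest_wrmask(); MIPS_CONF5_MSAEN and MIPS_CONF5_FRE are the usual mipsregs.h field names):

        u32 mask = 0;

        /* Permit MSAEn changes only if the guest has MSA */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /* Permit FPU mode-change bits (e.g. FRE) only if the guest has FPU */
        if (kvm_mips_guest_has_fpu(&vcpu->arch))
                mask |= MIPS_CONF5_FRE;

        return mask;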
138 * VZ optionally allows these additional Config bits to be written by root:
140 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
158 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) in kvm_vz_config1_user_wrmask()
175 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_vz_config3_user_wrmask()
199 /* VZ guest has already converted gva to gpa */ in kvm_vz_gva_to_gpa_cb()
205 set_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_queue_irq()
206 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_queue_irq()
211 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_dequeue_irq()
212 set_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_dequeue_irq()
218 * timer expiry is asynchronous to vcpu execution, therefore defer guest in kvm_vz_queue_timer_int_cb()
227 * timer expiry is asynchronous to vcpu execution, therefore defer guest in kvm_vz_dequeue_timer_int_cb()

236 int intr = (int)irq->irq; in kvm_vz_queue_io_int_cb()
239 * interrupts are asynchronous to vcpu execution, therefore defer guest in kvm_vz_queue_io_int_cb()
248 int intr = (int)irq->irq; in kvm_vz_dequeue_io_int_cb()
251 * interrupts are asynchronous to vcpu execution, therefore defer guest in kvm_vz_dequeue_io_int_cb()
254 kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); in kvm_vz_dequeue_io_int_cb()
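Note the sign convention in the two callbacks above: KVM's MIPS interrupt ioctl uses a positive irq number to assert a line and, as these lines suggest, its negation to deassert it, so the dequeue path negates irq->irq before mapping it to a priority. A hedged usage sketch:

        struct kvm_mips_interrupt raise = { .irq = 2 };   /* assert line 2 */
        struct kvm_mips_interrupt lower = { .irq = -2 };  /* deassert line 2 */

        kvm_vz_queue_io_int_cb(vcpu, &raise);   /* priority from  irq->irq */
        kvm_vz_dequeue_io_int_cb(vcpu, &lower); /* priority from -irq->irq */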
282 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_irq_deliver_cb()
324 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_irq_clear_cb()
329 * VZ guest timer handling.
333 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
336 * Returns: true if the VZ GTOffset & real guest CP0_Count should be used
337 * instead of software emulation of guest timer.
346 if (mips_hpt_frequency != vcpu->arch.count_hz) in kvm_vz_should_use_htimer()
349 /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */ in kvm_vz_should_use_htimer()
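From the two matched checks, kvm_vz_should_use_htimer() plausibly reduces to something like this (a sketch; the gtoffset-width test is reconstructed from its comment and may differ in detail):

        /* The chosen guest timer frequency must match the real CP0_Count */
        if (mips_hpt_frequency != vcpu->arch.count_hz)
                return false;

        /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
        if (current_cpu_data.gtoffset_mask != 0xffffffff)
                return false;

        return true;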
357 * _kvm_vz_restore_stimer() - Restore soft timer state.
369 * Avoid spurious counter interrupts by setting Guest CP0_Count to just in _kvm_vz_restore_stimer()
370 * after Guest CP0_Compare. in _kvm_vz_restore_stimer()
372 write_c0_gtoffset(compare - read_c0_count()); in _kvm_vz_restore_stimer()
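The arithmetic behind that write: in VZ, Guest.CP0_Count reads as Root.CP0_Count + CP0_GTOffset, so this choice of offset pins the guest counter to Compare at the instant of the write, leaving almost a full count period before the next match:

        u32 root_count = read_c0_count();

        write_c0_gtoffset(compare - root_count);
        /* Guest.Count = Root.Count + GTOffset
         *             = root_count + (compare - root_count)
         *             = compare, i.e. just past the Compare match */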
379 * _kvm_vz_restore_htimer() - Restore hard timer state.
384 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
385 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
394 * Freeze the soft-timer and sync the guest CP0_Count with it. We do in _kvm_vz_restore_htimer()
399 write_c0_gtoffset(start_count - read_c0_count()); in _kvm_vz_restore_htimer()
402 /* restore guest CP0_Cause, as TI may already be set */ in _kvm_vz_restore_htimer()
413 if (after_count - start_count > compare - start_count - 1) in _kvm_vz_restore_htimer()
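The unsigned comparison above is the usual modular-arithmetic window test: it is true exactly when Compare fell inside the half-open interval (start_count, after_count], i.e. a timer match was skipped over while the hard timer was being restored, so a timer interrupt must be queued by hand. A self-contained illustration (hypothetical helper name):

        #include <stdint.h>
        #include <assert.h>

        /* True iff 'compare' lies in (start, after] under 32-bit wraparound */
        static int raced_past_match(uint32_t start, uint32_t after,
                                    uint32_t compare)
        {
                return after - start > compare - start - 1u;
        }

        int main(void)
        {
                assert(raced_past_match(10, 20, 15));       /* inside window */
                assert(!raced_past_match(10, 20, 25));      /* still ahead */
                assert(raced_past_match(0xfffffff0, 4, 2)); /* wraps zero */
                return 0;
        }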
418 * kvm_vz_restore_timer() - Restore timer state.
425 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_restore_timer()
436 * kvm_vz_acquire_htimer() - Switch to hard timer state.
450 /* enable guest access to hard timer */ in kvm_vz_acquire_htimer()
459 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
464 * Save VZ guest timer state and switch to software emulation of guest CP0
489 * Record a final CP0_Count which we will transfer to the soft-timer. in _kvm_vz_save_htimer()
501 if (end_count - before_count > compare - before_count - 1) in _kvm_vz_save_htimer()
505 * Restore soft-timer, ignoring a small amount of negative drift due to in _kvm_vz_save_htimer()
508 kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000); in _kvm_vz_save_htimer()
512 * kvm_vz_save_timer() - Save guest timer state.
515 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
520 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_save_timer()
525 /* disable guest use of hard timer */ in kvm_vz_save_timer()
535 /* save timer-related state to VCPU context */ in kvm_vz_save_timer()
541 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
544 * Transfers the state of the hard guest timer to the soft guest timer, leaving
545 * guest state intact so it can continue to be used with the soft timer.
554 /* disable guest use of timer */ in kvm_vz_lose_htimer()
567 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
568 * @inst: 32-bit instruction encoding.
606 * is_eva_am_mapped() - Find whether an access mode is mapped.
608 * @am: 3-bit encoded access mode.
624 * have been caught by the guest, leaving us with: in is_eva_am_mapped()
632 * - 6 110 0 0 in is_eva_am_mapped()
665 opc = (u32 *)vcpu->arch.pc; in is_eva_am_mapped()
666 if (vcpu->arch.host_cp0_cause & CAUSEF_BD) in is_eva_am_mapped()
678 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
680 * @gva: Guest virtual address to convert.
681 * @gpa: Output guest physical address.
683 * Convert a guest virtual address (GVA) which is valid according to the guest
684 * context, to a guest physical address (GPA).
687 * -errno on failure.
696 /* Handle canonical 32-bit virtual address */ in kvm_vz_gva_to_gpa()
739 /* Unmapped, find guest physical address */ in kvm_vz_gva_to_gpa()
768 * Bits 61:59 specify the CCA, which we can just mask off here. in kvm_vz_gva_to_gpa()
769 * Bits 58:PABITS should be zero, but we shouldn't have got here in kvm_vz_gva_to_gpa()
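For the XKPHYS case those comments describe, the conversion is plausibly a single mask, clearing the segment and CCA bits 63:59 to leave the physical address (a sketch under that assumption):

        /* XKPHYS is a direct physical window: strip bits 63:59 (R + CCA) */
        *gpa = gva & 0x07ffffffffffffffull;
        return 0;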
782 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
785 * @gpa: Output guest physical address.
787 * VZ implementations are permitted to report guest virtual addresses (GVA) in
788 * BadVAddr on a root exception during guest execution, instead of the more
789 * convenient guest physical addresses (GPA). When we get a GVA, this function
790 * converts it to a GPA, taking into account guest segmentation and guest TLB
794 * -errno on failure.
799 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_vz_badvaddr_to_gpa()
811 return -EINVAL; in kvm_vz_badvaddr_to_gpa()
813 /* ... and we need to perform the GVA->GPA translation in software */ in kvm_vz_badvaddr_to_gpa()
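Pieced together from the matched lines, the decision flow in kvm_vz_badvaddr_to_gpa() is plausibly: trust BadVAddr when the root exception reported a GPA, reject any other exception type, and fall back to the software walker for GVAs (a sketch; MIPS_GCTL0_GEXC_GPA/GVA are the GuestCtl0.GExcCode values from mipsregs.h):

        /* If BadVAddr already holds a GPA, use it directly */
        if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
                *gpa = badvaddr;
                return 0;
        }

        /* Otherwise it must be a GVA ... */
        if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
                 "Unexpected gexccode %#x\n", gexccode))
                return -EINVAL;

        /* ... and we need to perform the GVA->GPA translation in software */
        return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);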
819 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_no_handler()
820 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_no_handler()
822 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_no_handler()
836 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_no_handler()
842 /* Mask off unused bits */ in mips_process_maar()
866 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_write_maari()
870 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); in kvm_write_maari()
871 else if (val < ARRAY_SIZE(vcpu->arch.maar)) in kvm_write_maari()
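The two branches above implement the architectural clamping of MAARI writes: an all-ones index selects the highest implemented MAAR, in-range indices are taken literally, and anything else is silently dropped. Roughly (MIPS_MAARI_INDEX assumed to be the index field mask):

        val &= MIPS_MAARI_INDEX;
        if (val == MIPS_MAARI_INDEX)            /* all ones: clamp to top */
                kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
        else if (val < ARRAY_SIZE(vcpu->arch.maar))
                kvm_write_sw_gc0_maari(cop0, val);
        /* out-of-range writes are ignored */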
879 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_gpsi_cop0()
889 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cop0()
911 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
932 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
933 val = vcpu->arch.maar[ in kvm_vz_gpsi_cop0()
951 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
955 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
966 vcpu->arch.gprs[rt] = val; in kvm_vz_gpsi_cop0()
977 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
979 val = vcpu->arch.gprs[rt]; in kvm_vz_gpsi_cop0()
987 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); in kvm_vz_gpsi_cop0()
991 vcpu->arch.gprs[rt], in kvm_vz_gpsi_cop0()
996 * P5600 generates GPSI on guest MTC0 LLAddr. in kvm_vz_gpsi_cop0()
997 * Only allow the guest to clear LLB. in kvm_vz_gpsi_cop0()
1011 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
1012 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = in kvm_vz_gpsi_cop0()
1021 cop0->reg[rd][sel] = (int)val; in kvm_vz_gpsi_cop0()
1068 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cop0()
1081 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_vz_gpsi_cache()
1088 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cache()
1102 va = arch->gprs[base] + offset; in kvm_vz_gpsi_cache()
1105 cache, op, base, arch->gprs[base], offset); in kvm_vz_gpsi_cache()
1127 /* So far, other platforms support guest hit cache ops */ in kvm_vz_gpsi_cache()
1134 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], in kvm_vz_gpsi_cache()
1137 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cache()
1156 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_lwc2()
1165 ++vcpu->stat.vz_cpucfg_exits; in kvm_vz_gpsi_lwc2()
1166 hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); in kvm_vz_gpsi_lwc2()
1168 switch (vcpu->arch.gprs[rs]) { in kvm_vz_gpsi_lwc2()
1170 vcpu->arch.gprs[rd] = 0x14c000; in kvm_vz_gpsi_lwc2()
1176 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1181 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1184 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1187 /* Don't export any other advanced features to guest */ in kvm_vz_gpsi_lwc2()
1188 vcpu->arch.gprs[rd] = 0; in kvm_vz_gpsi_lwc2()
1195 inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); in kvm_vz_gpsi_lwc2()
1205 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_lwc2()
1216 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gpsi()
1263 arch->gprs[rt] = in kvm_trap_vz_handle_gpsi()
1273 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); in kvm_trap_vz_handle_gpsi()
1298 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gsfc()
1311 /* complete MTC0 on behalf of guest and advance EPC */ in kvm_trap_vz_handle_gsfc()
1318 unsigned int val = arch->gprs[rt]; in kvm_trap_vz_handle_gsfc()
1326 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_trap_vz_handle_gsfc()
1356 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_trap_vz_handle_gsfc()
1374 /* Only certain bits are RW to the guest */ in kvm_trap_vz_handle_gsfc()
1395 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) in kvm_trap_vz_handle_gsfc()
1424 * Presumably this is due to MC (guest mode change), so let's trace some in kvm_trap_vz_handle_ghfc()
1450 curr_pc = vcpu->arch.pc; in kvm_trap_vz_handle_hc()
1457 vcpu->arch.pc = curr_pc; in kvm_trap_vz_handle_hc()
1476 kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n", in kvm_trap_vz_no_handler_guest_exit()
1484 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_guest_exit()
1485 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_guest_exit()
1487 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_trap_vz_handle_guest_exit()
1494 ++vcpu->stat.vz_gpsi_exits; in kvm_trap_vz_handle_guest_exit()
1498 ++vcpu->stat.vz_gsfc_exits; in kvm_trap_vz_handle_guest_exit()
1502 ++vcpu->stat.vz_hc_exits; in kvm_trap_vz_handle_guest_exit()
1506 ++vcpu->stat.vz_grr_exits; in kvm_trap_vz_handle_guest_exit()
1511 ++vcpu->stat.vz_gva_exits; in kvm_trap_vz_handle_guest_exit()
1516 ++vcpu->stat.vz_ghfc_exits; in kvm_trap_vz_handle_guest_exit()
1520 ++vcpu->stat.vz_gpa_exits; in kvm_trap_vz_handle_guest_exit()
1525 ++vcpu->stat.vz_resvd_exits; in kvm_trap_vz_handle_guest_exit()
1537 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_guest_exit()
1544 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1547 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1550 * Return: value indicating whether to resume the host or the guest
1555 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_cop_unusable()
1561 * If guest FPU not present, the FPU operation should have been in kvm_trap_vz_handle_cop_unusable()
1565 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || in kvm_trap_vz_handle_cop_unusable()
1566 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_trap_vz_handle_cop_unusable()
1582 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_cop_unusable()
1593 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1596 * Handle when the guest attempts to use MSA when it is disabled in the root
1599 * Return: value indicating whether to resume the host or the guest
1605 * If MSA not present or not exposed to guest or FR=0, the MSA operation in kvm_trap_vz_handle_msa_disabled()
1610 if (!kvm_mips_guest_has_msa(&vcpu->arch) || in kvm_trap_vz_handle_msa_disabled()
1613 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_trap_vz_handle_msa_disabled()
1614 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_msa_disabled()
1625 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_ld_miss()
1626 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_ld_miss()
1627 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_ld_miss()
1628 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_ld_miss()
1635 if (kvm_is_ifetch_fault(&vcpu->arch)) { in kvm_trap_vz_handle_tlb_ld_miss()
1636 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1645 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1652 kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n", in kvm_trap_vz_handle_tlb_ld_miss()
1654 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1661 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_ld_miss()
1664 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1672 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_st_miss()
1673 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_st_miss()
1674 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_st_miss()
1675 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1684 vcpu->arch.host_cp0_badvaddr = badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1692 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1699 kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n", in kvm_trap_vz_handle_tlb_st_miss()
1701 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1708 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_st_miss()
1711 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1800 ret += 1 + ARRAY_SIZE(vcpu->arch.maar); in kvm_vz_num_regs()
1801 ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask); in kvm_vz_num_regs()
1808 u64 index; in kvm_vz_copy_reg_indices() local
1813 return -EFAULT; in kvm_vz_copy_reg_indices()
1817 index = KVM_REG_MIPS_CP0_USERLOCAL; in kvm_vz_copy_reg_indices()
1818 if (copy_to_user(indices, &index, sizeof(index))) in kvm_vz_copy_reg_indices()
1819 return -EFAULT; in kvm_vz_copy_reg_indices()
1823 index = KVM_REG_MIPS_CP0_BADINSTR; in kvm_vz_copy_reg_indices()
1824 if (copy_to_user(indices, &index, sizeof(index))) in kvm_vz_copy_reg_indices()
1825 return -EFAULT; in kvm_vz_copy_reg_indices()
1829 index = KVM_REG_MIPS_CP0_BADINSTRP; in kvm_vz_copy_reg_indices()
1830 if (copy_to_user(indices, &index, sizeof(index))) in kvm_vz_copy_reg_indices()
1831 return -EFAULT; in kvm_vz_copy_reg_indices()
1837 return -EFAULT; in kvm_vz_copy_reg_indices()
1843 return -EFAULT; in kvm_vz_copy_reg_indices()
1849 return -EFAULT; in kvm_vz_copy_reg_indices()
1853 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { in kvm_vz_copy_reg_indices()
1854 index = KVM_REG_MIPS_CP0_MAAR(i); in kvm_vz_copy_reg_indices()
1855 if (copy_to_user(indices, &index, sizeof(index))) in kvm_vz_copy_reg_indices()
1856 return -EFAULT; in kvm_vz_copy_reg_indices()
1860 index = KVM_REG_MIPS_CP0_MAARI; in kvm_vz_copy_reg_indices()
1861 if (copy_to_user(indices, &index, sizeof(index))) in kvm_vz_copy_reg_indices()
1862 return -EFAULT; in kvm_vz_copy_reg_indices()
1871 return -EFAULT; in kvm_vz_copy_reg_indices()
1884 * KVM API exposes 64-bit version of the register, so move the in entrylo_kvm_to_user()
1885 * RI/XI bits up into place. in entrylo_kvm_to_user()
1900 * KVM API exposes 64-bit version of the register, so move the in entrylo_user_to_kvm()
1901 * RI/XI bits down into place. in entrylo_user_to_kvm()
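On a 32-bit host the RI/XI bits sit at the top of the 32-bit EntryLo value, while the KVM register API always presents them at bits 63:62 of a 64-bit view; the helper pair above presumably shifts them by 32 in opposite directions. A sketch of the kvm-to-user direction under that assumption:

        u64 mask, ret = v;

        if (BITS_PER_LONG == 32) {
                /* move RI/XI from bits 31:30 up to bits 63:62 */
                mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
                ret = (ret & ~mask) | ((ret & mask) << 32);
        }
        return ret;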
1914 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_get_one_reg()
1917 switch (reg->id) { in kvm_vz_get_one_reg()
1932 return -EINVAL; in kvm_vz_get_one_reg()
1937 return -EINVAL; in kvm_vz_get_one_reg()
1943 return -EINVAL; in kvm_vz_get_one_reg()
1955 return -EINVAL; in kvm_vz_get_one_reg()
1960 return -EINVAL; in kvm_vz_get_one_reg()
1965 return -EINVAL; in kvm_vz_get_one_reg()
1970 return -EINVAL; in kvm_vz_get_one_reg()
1975 return -EINVAL; in kvm_vz_get_one_reg()
1980 return -EINVAL; in kvm_vz_get_one_reg()
1988 return -EINVAL; in kvm_vz_get_one_reg()
1999 return -EINVAL; in kvm_vz_get_one_reg()
2004 return -EINVAL; in kvm_vz_get_one_reg()
2031 /* Octeon III has a read-only guest.PRid */ in kvm_vz_get_one_reg()
2047 return -EINVAL; in kvm_vz_get_one_reg()
2052 return -EINVAL; in kvm_vz_get_one_reg()
2057 return -EINVAL; in kvm_vz_get_one_reg()
2062 return -EINVAL; in kvm_vz_get_one_reg()
2067 return -EINVAL; in kvm_vz_get_one_reg()
2075 return -EINVAL; in kvm_vz_get_one_reg()
2076 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_get_one_reg()
2077 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_get_one_reg()
2078 return -EINVAL; in kvm_vz_get_one_reg()
2079 *v = vcpu->arch.maar[idx]; in kvm_vz_get_one_reg()
2083 return -EINVAL; in kvm_vz_get_one_reg()
2084 *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0); in kvm_vz_get_one_reg()
2095 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_get_one_reg()
2097 return -EINVAL; in kvm_vz_get_one_reg()
2120 *v = vcpu->arch.count_ctl; in kvm_vz_get_one_reg()
2123 *v = ktime_to_ns(vcpu->arch.count_resume); in kvm_vz_get_one_reg()
2126 *v = vcpu->arch.count_hz; in kvm_vz_get_one_reg()
2129 return -EINVAL; in kvm_vz_get_one_reg()
2138 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_set_one_reg()
2143 switch (reg->id) { in kvm_vz_set_one_reg()
2158 return -EINVAL; in kvm_vz_set_one_reg()
2163 return -EINVAL; in kvm_vz_set_one_reg()
2169 return -EINVAL; in kvm_vz_set_one_reg()
2181 return -EINVAL; in kvm_vz_set_one_reg()
2186 return -EINVAL; in kvm_vz_set_one_reg()
2191 return -EINVAL; in kvm_vz_set_one_reg()
2196 return -EINVAL; in kvm_vz_set_one_reg()
2201 return -EINVAL; in kvm_vz_set_one_reg()
2206 return -EINVAL; in kvm_vz_set_one_reg()
2214 return -EINVAL; in kvm_vz_set_one_reg()
2225 return -EINVAL; in kvm_vz_set_one_reg()
2230 return -EINVAL; in kvm_vz_set_one_reg()
2274 /* Octeon III has a guest.PRid, but it's read-only */ in kvm_vz_set_one_reg()
2352 return -EINVAL; in kvm_vz_set_one_reg()
2353 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_set_one_reg()
2354 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_set_one_reg()
2355 return -EINVAL; in kvm_vz_set_one_reg()
2356 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); in kvm_vz_set_one_reg()
2360 return -EINVAL; in kvm_vz_set_one_reg()
2372 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_set_one_reg()
2374 return -EINVAL; in kvm_vz_set_one_reg()
2406 return -EINVAL; in kvm_vz_set_one_reg()
2433 /* Returns 1 if the guest TLB may be clobbered */
2446 vcpu->arch.vzguestid[i] = 0; in kvm_vz_check_requests()
2447 /* This will clobber guest TLB contents too */ in kvm_vz_check_requests()
2469 if (wired > vcpu->arch.wired_tlb_limit) { in kvm_vz_vcpu_save_wired()
2470 tlbs = krealloc(vcpu->arch.wired_tlb, wired * in kvm_vz_vcpu_save_wired()
2471 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); in kvm_vz_vcpu_save_wired()
2474 wired = vcpu->arch.wired_tlb_limit; in kvm_vz_vcpu_save_wired()
2476 vcpu->arch.wired_tlb = tlbs; in kvm_vz_vcpu_save_wired()
2477 vcpu->arch.wired_tlb_limit = wired; in kvm_vz_vcpu_save_wired()
2482 /* Save wired entries from the guest TLB */ in kvm_vz_vcpu_save_wired()
2483 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); in kvm_vz_vcpu_save_wired()
2485 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { in kvm_vz_vcpu_save_wired()
2486 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); in kvm_vz_vcpu_save_wired()
2487 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; in kvm_vz_vcpu_save_wired()
2488 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; in kvm_vz_vcpu_save_wired()
2489 vcpu->arch.wired_tlb[i].tlb_mask = 0; in kvm_vz_vcpu_save_wired()
2491 vcpu->arch.wired_tlb_used = wired; in kvm_vz_vcpu_save_wired()
2496 /* Load wired entries into the guest TLB */ in kvm_vz_vcpu_load_wired()
2497 if (vcpu->arch.wired_tlb) in kvm_vz_vcpu_load_wired()
2498 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, in kvm_vz_vcpu_load_wired()
2499 vcpu->arch.wired_tlb_used); in kvm_vz_vcpu_load_wired()
2504 struct kvm *kvm = vcpu->kvm; in kvm_vz_vcpu_load_tlb()
2505 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; in kvm_vz_vcpu_load_tlb()
2509 * Are we entering guest context on a different CPU to last time? in kvm_vz_vcpu_load_tlb()
2510 * If so, the VCPU's guest TLB state on this CPU may be stale. in kvm_vz_vcpu_load_tlb()
2512 migrated = (vcpu->arch.last_exec_cpu != cpu); in kvm_vz_vcpu_load_tlb()
2513 vcpu->arch.last_exec_cpu = cpu; in kvm_vz_vcpu_load_tlb()
2519 * manipulating guest tlb entries. in kvm_vz_vcpu_load_tlb()
2526 * another CPU, as the guest mappings may have changed without in kvm_vz_vcpu_load_tlb()
2530 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & in kvm_vz_vcpu_load_tlb()
2533 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); in kvm_vz_vcpu_load_tlb()
2535 vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2539 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
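The XOR against guestid_cache(cpu) is the standard ASID-style versioning test: if the version bits of this VCPU's GuestID no longer match the per-CPU cache, the ID is stale and a new one must be allocated before it is written into GuestCtl1. Schematically (the allocator name here is hypothetical):

        if (migrated ||
            (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
                                                GUESTID_VERSION_MASK) {
                kvm_vz_get_new_guestid(cpu, vcpu);      /* hypothetical name */
                vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
        }

        /* Restore this vcpu's GuestID into GuestCtl1.ID */
        change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);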
2542 * The Guest TLB only stores a single guest's TLB state, so in kvm_vz_vcpu_load_tlb()
2545 * We also flush if we've executed on another CPU, as the guest in kvm_vz_vcpu_load_tlb()
2553 * Root ASID dealiases guest GPA mappings in the root TLB. in kvm_vz_vcpu_load_tlb()
2556 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) in kvm_vz_vcpu_load_tlb()
2565 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_vcpu_load()
2570 * If so, any old guest TLB state may be stale. in kvm_vz_vcpu_load()
2572 migrated = (vcpu->arch.last_sched_cpu != cpu); in kvm_vz_vcpu_load()
2576 * If not, any old guest state from this VCPU will have been clobbered. in kvm_vz_vcpu_load()
2583 * restore wired guest TLB entries (while in guest context). in kvm_vz_vcpu_load()
2586 if (current->flags & PF_VCPU) { in kvm_vz_vcpu_load()
2598 /* Set MC bit if we want to trace guest mode changes */ in kvm_vz_vcpu_load()
2610 * writes to other registers when the corresponding feature bits aren't in kvm_vz_vcpu_load()
2654 /* restore KScratch registers if enabled in guest */ in kvm_vz_vcpu_load()
2689 /* restore Root.GuestCtl2 from unused Guest guestctl2 register */ in kvm_vz_vcpu_load()
2692 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]); in kvm_vz_vcpu_load()
2699 if (vcpu->kvm->created_vcpus > 1) in kvm_vz_vcpu_load()
2707 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_vcpu_put()
2709 if (current->flags & PF_VCPU) in kvm_vz_vcpu_put()
2759 /* save KScratch registers if enabled in guest */ in kvm_vz_vcpu_put()
2786 /* save HTW registers if enabled in guest */ in kvm_vz_vcpu_put()
2797 /* save Root.GuestCtl2 in unused Guest guestctl2 register */ in kvm_vz_vcpu_put()
2799 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = in kvm_vz_vcpu_put()
2806 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2807 * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
2809 * Attempt to resize the guest VTLB by writing guest Config registers. This is
2810 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2813 * Returns: The resulting guest VTLB size.
2819 /* Write MMUSize - 1 into guest Config registers */ in kvm_vz_resize_guest_vtlb()
2822 (size - 1) << MIPS_CONF1_TLBS_SHIFT); in kvm_vz_resize_guest_vtlb()
2828 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2833 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2840 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it in kvm_vz_resize_guest_vtlb()
2841 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write in kvm_vz_resize_guest_vtlb()
2847 if (size - 1 <= limit) in kvm_vz_resize_guest_vtlb()
2852 /* Read back MMUSize - 1 */ in kvm_vz_resize_guest_vtlb()
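Since a core need not honour the requested size, the value is read back after the writes; the reassembly is plausibly the inverse of the split performed above, ignoring the MMUSizeExt variant also visible there (a sketch using the same mipsregs.h field macros):

        /* Read back MMUSize - 1: low bits from Config1.TLBS ... */
        config1 = read_gc0_config1();
        size = 1 + ((config1 & MIPS_CONF1_TLBS) >> MIPS_CONF1_TLBS_SHIFT);

        /* ... upper bits from the Config4 VTLB size extension, if present */
        if (cpu_guest_has_conf4) {
                config4 = read_gc0_config4();
                size += ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
                         MIPS_CONF4_VTLBSIZEEXT_SHIFT) << MIPS_CONF1_TLBS_SIZE;
        }
        return size;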
2879 /* Set up guest timer/perfcount IRQ lines */ in kvm_vz_enable_virtualization_cpu()
2894 mmu_size -= guest_mmu_size; in kvm_vz_enable_virtualization_cpu()
2896 cvmvmconfig |= mmu_size - 1; in kvm_vz_enable_virtualization_cpu()
2902 current_cpu_data.guest.tlbsize = guest_mmu_size; in kvm_vz_enable_virtualization_cpu()
2904 /* Flush moved entries in new (guest) context */ in kvm_vz_enable_virtualization_cpu()
2909 * ImgTec cores tend to use a shared root/guest TLB. To avoid in kvm_vz_enable_virtualization_cpu()
2910 * overlap of root wired and guest entries, the guest TLB may in kvm_vz_enable_virtualization_cpu()
2914 ftlb_size = current_cpu_data.tlbsize - mmu_size; in kvm_vz_enable_virtualization_cpu()
2916 /* Try switching to maximum guest VTLB size for flush */ in kvm_vz_enable_virtualization_cpu()
2918 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; in kvm_vz_enable_virtualization_cpu()
2923 * root non-wired entries. This does assume that long-term wired in kvm_vz_enable_virtualization_cpu()
2926 guest_mmu_size = mmu_size - num_wired_entries() - 2; in kvm_vz_enable_virtualization_cpu()
2928 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; in kvm_vz_enable_virtualization_cpu()
2933 * guest. If this ever happens it suggests an asymmetric number in kvm_vz_enable_virtualization_cpu()
2938 "Available guest VTLB size mismatch")) in kvm_vz_enable_virtualization_cpu()
2939 return -EINVAL; in kvm_vz_enable_virtualization_cpu()
2944 * Enable virtualization features granting guest direct control of in kvm_vz_enable_virtualization_cpu()
2946 * CP0=1: Guest coprocessor 0 context. in kvm_vz_enable_virtualization_cpu()
2947 * AT=Guest: Guest MMU. in kvm_vz_enable_virtualization_cpu()
2949 * CF=1: Guest Config registers. in kvm_vz_enable_virtualization_cpu()
2973 /* clear any pending injected virtual guest interrupts */ in kvm_vz_enable_virtualization_cpu()
2978 /* Control guest CCA attribute */ in kvm_vz_enable_virtualization_cpu()
2991 /* Flush any remaining guest TLB entries */ in kvm_vz_disable_virtualization_cpu()
2997 * Allocate whole TLB for root. Existing guest TLB entries will in kvm_vz_disable_virtualization_cpu()
2999 * they've already been flushed above while in guest TLB. in kvm_vz_disable_virtualization_cpu()
3005 cvmvmconfig |= mmu_size - 1; in kvm_vz_disable_virtualization_cpu()
3011 current_cpu_data.guest.tlbsize = 0; in kvm_vz_disable_virtualization_cpu()
3035 /* We support 64-bit registers/operations and addresses */ in kvm_vz_check_extension()
3055 vcpu->arch.vzguestid[i] = 0; in kvm_vz_vcpu_init()
3079 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_vz_vcpu_setup()
3091 * Initialize guest register state to valid architectural reset state. in kvm_vz_vcpu_setup()
3111 kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id); in kvm_vz_vcpu_setup()
3114 /* architecturally writable (e.g. from guest) */ in kvm_vz_vcpu_setup()
3140 /* architecturally writable (e.g. from guest) */ in kvm_vz_vcpu_setup()
3167 /* architecturally writable (e.g. from guest) */ in kvm_vz_vcpu_setup()
3184 /* bits SEGBITS-13+3:4 set */ in kvm_vz_vcpu_setup()
3186 ((1ull << (cpu_vmbits - 13)) - 1) << 4); in kvm_vz_vcpu_setup()
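As a quick check of that mask arithmetic: with cpu_vmbits = 40, ((1ull << (40 - 13)) - 1) << 4 sets bits 30:4, and SEGBITS - 13 + 3 = 30, matching the comment on the preceding line.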
3208 /* start with no pending virtual guest interrupts */ in kvm_vz_vcpu_setup()
3210 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0; in kvm_vz_vcpu_setup()
3213 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); in kvm_vz_vcpu_setup()
3230 cpumask_setall(&kvm->arch.asid_flush_mask); in kvm_vz_prepare_flush_shadow()
3263 r = vcpu->arch.vcpu_run(vcpu); in kvm_vz_vcpu_run()
3307 /* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
3313 return -ENODEV; in kvm_mips_emulation_init()
3319 if (WARN(pgd_reg == -1, in kvm_mips_emulation_init()
3321 return -ENODEV; in kvm_mips_emulation_init()