Lines Matching +full:num +full:- +full:guest +full:- +full:ids
1 /* SPDX-License-Identifier: GPL-2.0-only */
15 #include <asm/guest-state-buffer.h>
37 #define SID_MAP_MASK (SID_MAP_NUM - 1)
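The mask at line 37 is the usual power-of-two idiom: SID_MAP_NUM is defined just above it as (1 << SID_MAP_BITS), so ANDing with SID_MAP_MASK reduces a segment-ID hash to a valid slot of the shadow segment map without a division. A minimal sketch of how such a mask is used (sid_map_slot is a hypothetical name, not a helper from this header):

        /* With SID_MAP_NUM a power of two,
         * "h & SID_MAP_MASK" is equivalent to "h % SID_MAP_NUM". */
        static inline int sid_map_slot(u64 gvsid)
        {
                return gvsid & SID_MAP_MASK;
        }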
88 u64 tb_offset; /* guest timebase - host timebase */
389 return vcpu->arch.book3s; in to_book3s()
401 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) in kvmppc_set_gpr() argument
403 vcpu->arch.regs.gpr[num] = val; in kvmppc_set_gpr()
404 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num)); in kvmppc_set_gpr()
407 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) in kvmppc_get_gpr() argument
409 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0); in kvmppc_get_gpr()
410 return vcpu->arch.regs.gpr[num]; in kvmppc_get_gpr()
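The pair at lines 401-410 sets the pattern the rest of these accessors follow for the nested-v2 guest state buffer API: setters update the local shadow copy and mark the matching guest-state ID dirty so it is written back to the host, while getters first reload the cached value in case it is stale. A hedged usage sketch (emulate_addi is a hypothetical helper, not part of this header):

        /* Hypothetical: emulate "addi rT,rA,SI" against the vcpu's registers. */
        static void emulate_addi(struct kvm_vcpu *vcpu, int rt, int ra, s16 si)
        {
                /* The read reloads KVMPPC_GSID_GPR(ra) if the cached copy is
                 * stale; rA == 0 means the literal 0 in addi's semantics. */
                ulong a = ra ? kvmppc_get_gpr(vcpu, ra) : 0;

                /* The write marks KVMPPC_GSID_GPR(rt) dirty so the new value
                 * reaches the guest state buffer before the next guest entry. */
                kvmppc_set_gpr(vcpu, rt, a + si);
        }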
415 vcpu->arch.regs.ccr = val; in kvmppc_set_cr()
422 return vcpu->arch.regs.ccr; in kvmppc_get_cr()
427 vcpu->arch.regs.xer = val; in kvmppc_set_xer()
434 return vcpu->arch.regs.xer; in kvmppc_get_xer()
439 vcpu->arch.regs.ctr = val; in kvmppc_set_ctr()
446 return vcpu->arch.regs.ctr; in kvmppc_get_ctr()
451 vcpu->arch.regs.link = val; in kvmppc_set_lr()
458 return vcpu->arch.regs.link; in kvmppc_get_lr()
463 vcpu->arch.regs.nip = val; in kvmppc_set_pc()
470 return vcpu->arch.regs.nip; in kvmppc_get_pc()
481 return vcpu->arch.fault_dar; in kvmppc_get_fault_dar()
487 return vcpu->arch.fp.fpr[i][TS_FPROFFSET]; in kvmppc_get_fpr()
492 vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val; in kvmppc_set_fpr()
499 return vcpu->arch.fp.fpscr; in kvmppc_get_fpscr()
504 vcpu->arch.fp.fpscr = val; in kvmppc_set_fpscr()
512 return vcpu->arch.fp.fpr[i][j]; in kvmppc_get_vsx_fpr()
518 vcpu->arch.fp.fpr[i][j] = val; in kvmppc_set_vsx_fpr()
526 *v = vcpu->arch.vr.vr[i]; in kvmppc_get_vsx_vr()
532 vcpu->arch.vr.vr[i] = *val; in kvmppc_set_vsx_vr()
539 return vcpu->arch.vr.vscr.u[3]; in kvmppc_get_vscr()
544 vcpu->arch.vr.vscr.u[3] = val; in kvmppc_set_vscr()
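Lines 487-544 reflect the POWER register file layout inside the vcpu: each FP register is element TS_FPROFFSET of its fp.fpr[i] row, with VSX the row's two doublewords addressed as fpr[i][j] form the low half of the VSX file, vr.vr[] holds the vector registers, and the 32-bit VSCR lives in word 3 of a vector. A sketch reading both halves of a low VSX register through these accessors (read_vsx_low is a hypothetical name; TS_VSRLOWOFFSET is TS_FPROFFSET's companion from asm/processor.h):

        /* Hypothetical: fetch both 64-bit doublewords of VSX register i
         * (i < 32). One doubleword aliases FP register i, which is why
         * kvmppc_get_fpr() above indexes fpr[i][TS_FPROFFSET]. */
        static void read_vsx_low(struct kvm_vcpu *vcpu, int i,
                                 u64 *fp_dw, u64 *other_dw)
        {
                *fp_dw    = kvmppc_get_vsx_fpr(vcpu, i, TS_FPROFFSET);
                *other_dw = kvmppc_get_vsx_fpr(vcpu, i, TS_VSRLOWOFFSET);
        }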
553 vcpu->arch.reg = val; \
561 return vcpu->arch.reg; \
580 vcpu->arch.vcore->reg = val; \
588 return vcpu->arch.vcore->reg; \
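The backslash-continued bodies at lines 553-588 are not hand-written functions but the expansion bodies of accessor-generating macros: one family covers per-vcpu fields in vcpu->arch, the other covers fields shared by all threads of a virtual core in vcpu->arch.vcore. A simplified reconstruction of that shape (the macro names are illustrative; the in-tree macros additionally thread through the nested-v2 dirty/reload calls shown with the GPR accessors above):

        /* Illustrative: generate a get/set pair for a field of vcpu->arch. */
        #define VCPU_ACCESSOR(reg, size)                                        \
        static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
        {                                                                       \
                vcpu->arch.reg = val;                                           \
        }                                                                       \
        static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
        {                                                                       \
                return vcpu->arch.reg;                                          \
        }

        /* Same shape for per-vcore state: note the extra ->vcore indirection. */
        #define VCORE_ACCESSOR(reg, size)                                       \
        static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
        {                                                                       \
                vcpu->arch.vcore->reg = val;                                    \
        }                                                                       \
        static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
        {                                                                       \
                return vcpu->arch.vcore->reg;                                   \
        }

An invocation such as VCORE_ACCESSOR(vtb, 64) would then expand to a kvmppc_set_vtb()/kvmppc_get_vtb() pair over vcpu->arch.vcore->vtb.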
604 return vcpu->arch.vcore->tb_offset; in kvmppc_get_tb_offset()
610 return vcpu->arch.dec_expires; in kvmppc_get_dec_expires()
615 vcpu->arch.dec_expires = val; in kvmppc_set_dec_expires()
622 return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu); in kvmppc_dec_expires_host_tb()
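Reading lines 88, 604-615 and 622 together: tb_offset is guest timebase minus host timebase, so subtracting it converts a guest-timebase timestamp such as dec_expires into host-timebase units, which is exactly what kvmppc_dec_expires_host_tb() at line 622 computes. A hedged sketch of a typical use (guest_dec_expired is a hypothetical name; mftb() is the standard powerpc helper that reads the host timebase):

        /* Hypothetical: has the guest's decrementer deadline already passed?
         * dec_expires is kept in guest TB; mftb() returns host TB, so compare
         * in host units via host_tb = guest_tb - tb_offset. */
        static inline bool guest_dec_expired(struct kvm_vcpu *vcpu)
        {
                return kvmppc_dec_expires_host_tb(vcpu) <= mftb();
        }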
634 return !is_kvmppc_hv_enabled(vcpu->kvm); in kvmppc_supports_magic_page()
654 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
658 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
659 * 0) unchanged: if the guest is filling each VCORE completely then it will be
660 * using consecutive IDs and it will fill the space without any packing.
662 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
666 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
667 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
671 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
672 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
673 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
675 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
680 * block number if the stride is 8. For cases where the guest's stride is less
681 * than 8, we can re-use the block_offsets array by multiplying the block
687 int stride = kvm->arch.emul_smt_mode; in kvmppc_pack_vcpu_id()
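Lines 654-681 are the matched parts of the comment above kvmppc_pack_vcpu_id(), and line 687 is the first statement of its body. A sketch reconstructing the computation the comment describes, assuming the per-block offsets form a 3-bit bit-reversal table (block 1 maps to stride/2 = 4, blocks 2 and 3 to 2 and 6, and the remaining blocks to the odd offsets); the in-tree function differs mainly in its sanity checks:

        static inline u32 pack_vcpu_id(struct kvm *kvm, u32 id)
        {
                /* Offsets are the 3-bit bit-reversals of the block numbers:
                 * blocks 0..7 -> 0, 4, 2, 6, 1, 5, 3, 7. */
                const int block_offsets[MAX_SMT_THREADS] = { 0, 4, 2, 6, 1, 5, 3, 7 };
                int stride = kvm->arch.emul_smt_mode;
                /* Scale the block number so strides below 8 reuse the same
                 * table (lines 680-681 above). */
                int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);

                /* Block 0 is left unchanged (block_offsets[0] == 0); higher
                 * blocks are masked down and offset into the guaranteed-empty
                 * part of each VCORE. */
                return (id % KVM_MAX_VCPUS) + block_offsets[block];
        }

For example, with a stride of 8 an ID of KVM_MAX_VCPUS + 2 falls in block 1 and packs to 2 + block_offsets[1] = 6: the upper half of the first VCORE, which the comment's block-1 argument guarantees the guest has left empty.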