arch/x86/kvm/kvm_cache_regs.h: KVM x86 register cache helpers (lines matching full:vcpu)

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
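
/*
 * The header instantiates this accessor pair for every GPR; a representative
 * subset is shown here (the full file covers RAX through R15):
 */
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)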

/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
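
/*
 * Illustrative sketch, not part of this header: roughly how a vendor module
 * consumes the dirty bits on VM-entry (VMX-flavored; the helper name is
 * assumed, while vmcs_writel() and the GUEST_* fields are real VMX names).
 */
static inline void example_flush_dirty_regs(struct kvm_vcpu *vcpu)
{
	/* Flush cached values written by the emulator back to hardware state. */
	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

	vcpu->arch.regs_dirty = 0;
}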

/* Uses a raw arch bitop so that noinstr code can also cache registers. */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
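
/*
 * Illustrative sketch: the test-and-set form supports a read-once pattern,
 * e.g. caching a VMCS field on first access after a VM-exit (modeled on
 * VMX's exit-qualification caching; the helper name here is assumed).
 */
static inline unsigned long example_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Only the first read after an exit touches the VMCS. */
	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	return vmx->exit_qualification;
}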

/*
 * The "raw" register helpers read/write the full 64 bits of a register
 * irrespective of current vCPU mode.  In other words, odds are good you
 * shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
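
/*
 * Illustrative sketch (helper name assumed): PAE paging carries four PDPTEs,
 * so a consumer typically faults the cache in once and then reads all four.
 */
static inline bool example_pdptrs_present(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < 4; i++) {
		/* Bit 0 of a PDPTE is its present bit. */
		if (!(kvm_pdptr_read(vcpu, i) & 1))
			return false;
	}
	return true;
}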

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}
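
/*
 * Illustrative usage (helper names assumed): KVM's mode checks are thin
 * wrappers over these bit tests, e.g. paging and PAE checks:
 */
static inline bool example_is_paging(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PG);
}

static inline bool example_is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}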

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u) |
	       ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
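
/*
 * Illustrative counterpart (helper name assumed; KVM open-codes this split
 * in its MSR/TSC emulation): scatter a 64-bit result back into EDX:EAX.
 */
static inline void example_write_edx_eax(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_rax_write(vcpu, (u32)val);
	kvm_rdx_write(vcpu, val >> 32);
}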

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}
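
/*
 * Illustrative pairing (simplified, helper name assumed): nested VM-entry
 * emulation brackets the L2 run with these helpers.
 */
static inline void example_nested_transition(struct kvm_vcpu *vcpu)
{
	enter_guest_mode(vcpu);		/* emulated VMRUN/VMLAUNCH: L2 active */
	/* ... run L2, reflect or handle exits ... */
	leave_guest_mode(vcpu);		/* nested VM-exit: back to L1 */
}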