Lines Matching full:vcpu
146 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
147 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
148 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
149 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
150 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
151 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
152 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
153 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
155 extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
156 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
157 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
158 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
159 extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
163 extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
166 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
167 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
169 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
170 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
171 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
177 extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
182 extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
184 extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
186 extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
189 extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
192 extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
203 extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
227 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
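A note on the kvmppc_ld() prototype above: the effective address is passed by pointer (so the callee can update it) along with a size, a destination buffer, and a data-vs-instruction-fetch flag. A minimal hypothetical caller, not part of this header, is sketched below; the status codes the function returns are not visible in this listing, so the result is simply passed through.

static int example_read_guest_u32(struct kvm_vcpu *vcpu, ulong eaddr, u32 *val)
{
	/* Hypothetical helper, for illustration only: fetch a 32-bit
	 * guest word at 'eaddr' via kvmppc_ld(). A local copy is used
	 * because the callee takes the address by pointer and may
	 * rewrite it. */
	ulong ea = eaddr;

	return kvmppc_ld(vcpu, &ea, sizeof(*val), val, true /* data access */);
}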
228 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
229 extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
231 extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
232 extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
233 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
235 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
236 extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
237 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
266 extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
268 extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
269 extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
270 extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);
274 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
275 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
276 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
280 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
281 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
284 void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
285 void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
288 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
289 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
290 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
291 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
293 static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {} in kvmppc_save_tm_pr() argument
294 static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {} in kvmppc_restore_tm_pr() argument
295 static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} in kvmppc_save_tm_sprs() argument
296 static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} in kvmppc_restore_tm_sprs() argument
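The kvmppc_{save,restore}_tm_pr() and kvmppc_{save,restore}_tm_sprs() lines above show the usual Kconfig-guarded pairing: real prototypes when transactional-memory support is built in, empty static inline stubs otherwise so callers need no #ifdefs. A sketch of that shape (the guard symbol CONFIG_PPC_TRANSACTIONAL_MEM is assumed from context rather than shown by the matched lines):

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* real implementations exist when TM support is configured */
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
#else
/* no-op stubs keep callers free of #ifdefs when TM is not built */
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
#endif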
303 long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
304 long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
308 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
309 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
310 long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
313 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
315 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
316 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
318 long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
320 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
351 int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
352 int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
353 int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
354 int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);
356 static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, in kvmhv_nestedv2_reload_ptregs() argument
360 return __kvmhv_nestedv2_reload_ptregs(vcpu, regs); in kvmhv_nestedv2_reload_ptregs()
363 static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, in kvmhv_nestedv2_mark_dirty_ptregs() argument
367 return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs); in kvmhv_nestedv2_mark_dirty_ptregs()
371 static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden) in kvmhv_nestedv2_mark_dirty() argument
374 return __kvmhv_nestedv2_mark_dirty(vcpu, iden); in kvmhv_nestedv2_mark_dirty()
378 static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden) in kvmhv_nestedv2_cached_reload() argument
381 return __kvmhv_nestedv2_cached_reload(vcpu, iden); in kvmhv_nestedv2_cached_reload()
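Only the declarations and the tail calls of these kvmhv_nestedv2_*() wrappers are matched above; the condition they test before calling the __kvmhv_nestedv2_*() workers is not shown. Their likely shape is sketched below, with the guard (assumed here to be kvmhv_is_nestedv2()) and the early-return value marked as assumptions.

static inline int kvmhv_nestedv2_mark_dirty_sketch(struct kvm_vcpu *vcpu, u16 iden)
{
	/* Illustrative reconstruction only: skip the guest-state-buffer
	 * bookkeeping unless running as a nested-v2 (PAPR) guest
	 * hypervisor. The guard and the 0 return are assumptions, not
	 * matched text. */
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
	return 0;
}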
387 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) in to_book3s() argument
389 return vcpu->arch.book3s; in to_book3s()
401 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) in kvmppc_set_gpr() argument
403 vcpu->arch.regs.gpr[num] = val; in kvmppc_set_gpr()
404 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num)); in kvmppc_set_gpr()
407 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) in kvmppc_get_gpr() argument
409 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0); in kvmppc_get_gpr()
410 return vcpu->arch.regs.gpr[num]; in kvmppc_get_gpr()
413 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) in kvmppc_set_cr() argument
415 vcpu->arch.regs.ccr = val; in kvmppc_set_cr()
416 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR); in kvmppc_set_cr()
419 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) in kvmppc_get_cr() argument
421 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0); in kvmppc_get_cr()
422 return vcpu->arch.regs.ccr; in kvmppc_get_cr()
425 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_xer() argument
427 vcpu->arch.regs.xer = val; in kvmppc_set_xer()
428 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER); in kvmppc_set_xer()
431 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) in kvmppc_get_xer() argument
433 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0); in kvmppc_get_xer()
434 return vcpu->arch.regs.xer; in kvmppc_get_xer()
437 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_ctr() argument
439 vcpu->arch.regs.ctr = val; in kvmppc_set_ctr()
440 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR); in kvmppc_set_ctr()
443 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) in kvmppc_get_ctr() argument
445 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0); in kvmppc_get_ctr()
446 return vcpu->arch.regs.ctr; in kvmppc_get_ctr()
449 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_lr() argument
451 vcpu->arch.regs.link = val; in kvmppc_set_lr()
452 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR); in kvmppc_set_lr()
455 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) in kvmppc_get_lr() argument
457 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0); in kvmppc_get_lr()
458 return vcpu->arch.regs.link; in kvmppc_get_lr()
461 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_pc() argument
463 vcpu->arch.regs.nip = val; in kvmppc_set_pc()
464 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA); in kvmppc_set_pc()
467 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) in kvmppc_get_pc() argument
469 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0); in kvmppc_get_pc()
470 return vcpu->arch.regs.nip; in kvmppc_get_pc()
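The accessors above follow one pattern: every setter stores into vcpu->arch.regs and marks the matching guest-state ID dirty (so a nested-v2 host knows to push it back to the L0), and every getter first attempts a cached reload. A hypothetical emulation helper using them, purely for illustration:

static inline void example_emulate_mr(struct kvm_vcpu *vcpu, int rt, int rs)
{
	/* Illustrative only: emulate a register-to-register move and
	 * step the guest PC past the 4-byte instruction, going through
	 * the accessors so dirty-tracking and cached reloads happen. */
	kvmppc_set_gpr(vcpu, rt, kvmppc_get_gpr(vcpu, rs));
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
}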
473 static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
474 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) in kvmppc_need_byteswap() argument
476 return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); in kvmppc_need_byteswap()
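kvmppc_need_byteswap() is true exactly when the guest's MSR_LE disagrees with the endianness the kernel was built with, i.e. when guest data must be swapped before the host interprets it. A hypothetical helper applying the test (swab32() is the kernel's standard 32-bit swap):

static inline u32 example_guest32_to_host(struct kvm_vcpu *vcpu, u32 raw)
{
	/* Illustrative only: convert a raw 32-bit value read from the
	 * guest into host byte order when the endiannesses differ. */
	return kvmppc_need_byteswap(vcpu) ? swab32(raw) : raw;
}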
479 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) in kvmppc_get_fault_dar() argument
481 return vcpu->arch.fault_dar; in kvmppc_get_fault_dar()
484 static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i) in kvmppc_get_fpr() argument
486 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0); in kvmppc_get_fpr()
487 return vcpu->arch.fp.fpr[i][TS_FPROFFSET]; in kvmppc_get_fpr()
490 static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val) in kvmppc_set_fpr() argument
492 vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val; in kvmppc_set_fpr()
493 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i)); in kvmppc_set_fpr()
496 static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu) in kvmppc_get_fpscr() argument
498 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0); in kvmppc_get_fpscr()
499 return vcpu->arch.fp.fpscr; in kvmppc_get_fpscr()
502 static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val) in kvmppc_set_fpscr() argument
504 vcpu->arch.fp.fpscr = val; in kvmppc_set_fpscr()
505 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR); in kvmppc_set_fpscr()
509 static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j) in kvmppc_get_vsx_fpr() argument
511 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0); in kvmppc_get_vsx_fpr()
512 return vcpu->arch.fp.fpr[i][j]; in kvmppc_get_vsx_fpr()
515 static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, in kvmppc_set_vsx_fpr() argument
518 vcpu->arch.fp.fpr[i][j] = val; in kvmppc_set_vsx_fpr()
519 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i)); in kvmppc_set_vsx_fpr()
523 static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v) in kvmppc_get_vsx_vr() argument
525 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0); in kvmppc_get_vsx_vr()
526 *v = vcpu->arch.vr.vr[i]; in kvmppc_get_vsx_vr()
529 static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, in kvmppc_set_vsx_vr() argument
532 vcpu->arch.vr.vr[i] = *val; in kvmppc_set_vsx_vr()
533 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i)); in kvmppc_set_vsx_vr()
536 static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu) in kvmppc_get_vscr() argument
538 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0); in kvmppc_get_vscr()
539 return vcpu->arch.vr.vscr.u[3]; in kvmppc_get_vscr()
542 static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val) in kvmppc_set_vscr() argument
544 vcpu->arch.vr.vscr.u[3] = val; in kvmppc_set_vscr()
545 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR); in kvmppc_set_vscr()
550 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
553 vcpu->arch.reg = val; \
554 kvmhv_nestedv2_mark_dirty(vcpu, iden); \
558 static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
560 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
561 return vcpu->arch.reg; \
578 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
580 vcpu->arch.vcore->reg = val; \
581 kvmhv_nestedv2_mark_dirty(vcpu, iden); \
585 static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
587 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
588 return vcpu->arch.vcore->reg; \
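The fragments at 550-561 and 578-588 come from accessor-generating macros: one variant reads and writes a field of vcpu->arch, the other a field of vcpu->arch.vcore, and both pair the write with a mark-dirty and the read with a cached reload, exactly like the hand-written accessors above. The real macro names and the registers they are instantiated for are not part of the matched lines; the sketch below uses an illustrative name.

#define EXAMPLE_VCPU_ACCESSOR(reg, size, iden)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)\
{									\
	vcpu->arch.reg = val;						\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}									\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.reg;						\
}

A hypothetical invocation such as EXAMPLE_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR) would stamp out a kvmppc_set_pid()/kvmppc_get_pid() pair; the field and guest-state ID in that example are illustrative.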
602 static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu) in kvmppc_get_tb_offset() argument
604 return vcpu->arch.vcore->tb_offset; in kvmppc_get_tb_offset()
607 static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu) in kvmppc_get_dec_expires() argument
609 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0); in kvmppc_get_dec_expires()
610 return vcpu->arch.dec_expires; in kvmppc_get_dec_expires()
613 static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val) in kvmppc_set_dec_expires() argument
615 vcpu->arch.dec_expires = val; in kvmppc_set_dec_expires()
616 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB); in kvmppc_set_dec_expires()
619 /* Expiry time of vcpu DEC relative to host TB */
620 static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu) in kvmppc_dec_expires_host_tb() argument
622 return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu); in kvmppc_dec_expires_host_tb()
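Worked example of that conversion (values invented for illustration): if the host timebase reads 1000 and the vcore's tb_offset is 100, the guest observes TB = 1100; a DEC expiry stored as 1150 in guest timebase is therefore 1150 - 100 = 1050 in host timebase, which is what kvmppc_dec_expires_host_tb() returns.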
631 static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu) in kvmppc_supports_magic_page() argument
634 return !is_kvmppc_hv_enabled(vcpu->kvm); in kvmppc_supports_magic_page()
637 extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
638 extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
653 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
658 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
662 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
666 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
671 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
675 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
691 if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack")) in kvmppc_pack_vcpu_id()
694 if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed")) in kvmppc_pack_vcpu_id()
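The comment block and the two WARN_ONCE() guards above describe the packing scheme: IDs below KVM_MAX_VCPUS pass through unchanged, higher blocks are folded back into that range at offsets the guest's core stride is known to leave empty, and IDs that cannot be packed are rejected. A standalone model of the idea follows; the block computation and the offset table are illustrative assumptions, not this header's actual code.

static inline u32 example_pack_vcpu_id(u32 id, u32 max_vcpus, u32 threads_per_core)
{
	/* Illustrative offsets into the thread slots a sparse guest
	 * leaves empty; the real table and its ordering are not shown
	 * in the matched lines. */
	static const u32 block_offsets[8] = { 0, 4, 2, 6, 1, 5, 3, 7 };
	/* Which max_vcpus-sized block the ID falls in, scaled by how
	 * sparsely the guest populates each core (1/2/4/8 threads). */
	u32 block = (id / max_vcpus) * (8 / threads_per_core);
	u32 packed;

	if (block >= ARRAY_SIZE(block_offsets))
		return 0;	/* mirrors "VCPU ID too large to pack" */
	packed = (id % max_vcpus) + block_offsets[block];
	if (packed >= max_vcpus)
		return 0;	/* mirrors "VCPU ID packing failed" */
	return packed;
}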