Lines Matching full:vcpu (arch/powerpc/kvm/book3s_pr.c)
53 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
67 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) in kvmppc_is_split_real() argument
69 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_is_split_real()
73 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_fixup_split_real() argument
75 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_fixup_split_real()
76 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_fixup_split_real()
83 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) in kvmppc_fixup_split_real()
90 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_fixup_split_real()
91 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); in kvmppc_fixup_split_real()
94 static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_unfixup_split_real() argument
96 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { in kvmppc_unfixup_split_real()
97 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_unfixup_split_real()
98 ulong lr = kvmppc_get_lr(vcpu); in kvmppc_unfixup_split_real()
100 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); in kvmppc_unfixup_split_real()
102 kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); in kvmppc_unfixup_split_real()
103 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_unfixup_split_real()
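The two helpers above form a round trip: kvmppc_fixup_split_real() relocates a low-memory guest PC into a high "split hack" window while the guest runs with MSR[DR] but not MSR[IR], and kvmppc_unfixup_split_real() strips those bits off PC and LR again on exit. A minimal user-space model of that round trip, with mask/offset values assumed here (the real constants live in the kernel's book3s headers):

    #include <assert.h>

    #define SPLIT_HACK_MASK  0xff000000UL   /* assumed value for illustration */
    #define SPLIT_HACK_OFFS  0xfb000000UL   /* assumed value for illustration */

    int main(void)
    {
        unsigned long pc = 0x100UL;     /* low-memory real-mode vector */

        if (!(pc & SPLIT_HACK_MASK)) {  /* only fixupable addresses qualify */
            unsigned long fixed = pc | SPLIT_HACK_OFFS;         /* fixup */
            unsigned long restored = fixed & ~SPLIT_HACK_MASK;  /* unfixup */
            assert(restored == pc);
        }
        return 0;
    }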
107 static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) in kvmppc_inject_interrupt_pr() argument
111 kvmppc_unfixup_split_real(vcpu); in kvmppc_inject_interrupt_pr()
113 msr = kvmppc_get_msr(vcpu); in kvmppc_inject_interrupt_pr()
114 pc = kvmppc_get_pc(vcpu); in kvmppc_inject_interrupt_pr()
115 new_msr = vcpu->arch.intr_msr; in kvmppc_inject_interrupt_pr()
116 new_pc = to_book3s(vcpu)->hior + vec; in kvmppc_inject_interrupt_pr()
126 kvmppc_set_srr0(vcpu, pc); in kvmppc_inject_interrupt_pr()
127 kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); in kvmppc_inject_interrupt_pr()
128 kvmppc_set_pc(vcpu, new_pc); in kvmppc_inject_interrupt_pr()
129 kvmppc_set_msr(vcpu, new_msr); in kvmppc_inject_interrupt_pr()
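Taken together, the matches from kvmppc_inject_interrupt_pr() add up to the standard Book3S delivery sequence: the old PC and masked MSR are parked in SRR0/SRR1 along with the cause flags, then the vcpu resumes at hior + vector running with arch.intr_msr. A self-contained sketch with toy types (the struct and mask values here are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy vcpu holding only the state the function touches. */
    struct toy_vcpu {
        uint64_t pc, msr, srr0, srr1;
        uint64_t intr_msr;   /* MSR the handler runs with */
        uint64_t hior;       /* interrupt vector base */
    };

    #define SRR1_MSR_BITS 0xffffffffull  /* placeholder mask */

    static void inject_interrupt(struct toy_vcpu *v, int vec, uint64_t srr1_flags)
    {
        v->srr0 = v->pc;                                  /* return address */
        v->srr1 = (v->msr & SRR1_MSR_BITS) | srr1_flags;  /* old MSR + cause */
        v->pc = v->hior + vec;
        v->msr = v->intr_msr;
    }

    int main(void)
    {
        struct toy_vcpu v = { .pc = 0x4000, .msr = 0x9000,
                              .intr_msr = 0x1000, .hior = 0xfff00000 };

        inject_interrupt(&v, 0x700, 0);  /* e.g. a program interrupt */
        printf("pc=%#llx srr0=%#llx\n",
               (unsigned long long)v.pc, (unsigned long long)v.srr0);
        return 0;
    }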
132 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_pr() argument
135 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_core_vcpu_load_pr()
136 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); in kvmppc_core_vcpu_load_pr()
137 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; in kvmppc_core_vcpu_load_pr()
150 vcpu->cpu = smp_processor_id(); in kvmppc_core_vcpu_load_pr()
152 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; in kvmppc_core_vcpu_load_pr()
155 if (kvmppc_is_split_real(vcpu)) in kvmppc_core_vcpu_load_pr()
156 kvmppc_fixup_split_real(vcpu); in kvmppc_core_vcpu_load_pr()
158 kvmppc_restore_tm_pr(vcpu); in kvmppc_core_vcpu_load_pr()
161 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_put_pr() argument
164 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_core_vcpu_put_pr()
166 kvmppc_copy_from_svcpu(vcpu); in kvmppc_core_vcpu_put_pr()
168 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); in kvmppc_core_vcpu_put_pr()
169 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; in kvmppc_core_vcpu_put_pr()
181 if (kvmppc_is_split_real(vcpu)) in kvmppc_core_vcpu_put_pr()
182 kvmppc_unfixup_split_real(vcpu); in kvmppc_core_vcpu_put_pr()
184 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); in kvmppc_core_vcpu_put_pr()
185 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_core_vcpu_put_pr()
186 kvmppc_save_tm_pr(vcpu); in kvmppc_core_vcpu_put_pr()
188 vcpu->cpu = -1; in kvmppc_core_vcpu_put_pr()
191 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
192 void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) in kvmppc_copy_to_svcpu() argument
194 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_copy_to_svcpu()
196 svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; in kvmppc_copy_to_svcpu()
197 svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; in kvmppc_copy_to_svcpu()
198 svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; in kvmppc_copy_to_svcpu()
199 svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; in kvmppc_copy_to_svcpu()
200 svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; in kvmppc_copy_to_svcpu()
201 svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; in kvmppc_copy_to_svcpu()
202 svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; in kvmppc_copy_to_svcpu()
203 svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; in kvmppc_copy_to_svcpu()
204 svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; in kvmppc_copy_to_svcpu()
205 svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; in kvmppc_copy_to_svcpu()
206 svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; in kvmppc_copy_to_svcpu()
207 svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; in kvmppc_copy_to_svcpu()
208 svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; in kvmppc_copy_to_svcpu()
209 svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; in kvmppc_copy_to_svcpu()
210 svcpu->cr = vcpu->arch.regs.ccr; in kvmppc_copy_to_svcpu()
211 svcpu->xer = vcpu->arch.regs.xer; in kvmppc_copy_to_svcpu()
212 svcpu->ctr = vcpu->arch.regs.ctr; in kvmppc_copy_to_svcpu()
213 svcpu->lr = vcpu->arch.regs.link; in kvmppc_copy_to_svcpu()
214 svcpu->pc = vcpu->arch.regs.nip; in kvmppc_copy_to_svcpu()
216 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; in kvmppc_copy_to_svcpu()
222 vcpu->arch.entry_tb = get_tb(); in kvmppc_copy_to_svcpu()
223 vcpu->arch.entry_vtb = get_vtb(); in kvmppc_copy_to_svcpu()
225 vcpu->arch.entry_ic = mfspr(SPRN_IC); in kvmppc_copy_to_svcpu()
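The fourteen unrolled assignments in kvmppc_copy_to_svcpu() (mirrored by kvmppc_copy_from_svcpu() further down) are an element-wise copy of GPRs 0-13 plus CR/XER/CTR/LR/PC, the state the real-mode entry and exit code works with. The same thing expressed as a loop, over hypothetical reduced structures:

    /* Hypothetical structures; only the copied fields are modelled. */
    struct toy_regs  { unsigned long gpr[32], ccr, xer, ctr, link, nip; };
    struct toy_svcpu { unsigned long gpr[14], cr, xer, ctr, lr, pc; };

    static void copy_to_svcpu(struct toy_svcpu *svcpu, const struct toy_regs *r)
    {
        int i;

        for (i = 0; i < 14; i++)  /* GPRs 0-13 only */
            svcpu->gpr[i] = r->gpr[i];
        svcpu->cr  = r->ccr;
        svcpu->xer = r->xer;
        svcpu->ctr = r->ctr;
        svcpu->lr  = r->link;
        svcpu->pc  = r->nip;
    }

    int main(void)
    {
        struct toy_regs r = { .gpr = { [0] = 1, [13] = 13 }, .nip = 0x700 };
        struct toy_svcpu s;

        copy_to_svcpu(&s, &r);
        return (s.gpr[13] == 13 && s.pc == 0x700) ? 0 : 1;
    }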
231 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) in kvmppc_recalc_shadow_msr() argument
233 ulong guest_msr = kvmppc_get_msr(vcpu); in kvmppc_recalc_shadow_msr()
246 smsr |= (guest_msr & vcpu->arch.guest_owned_ext); in kvmppc_recalc_shadow_msr()
260 vcpu->arch.shadow_msr = smsr; in kvmppc_recalc_shadow_msr()
263 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
264 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) in kvmppc_copy_from_svcpu() argument
266 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_copy_from_svcpu()
278 vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; in kvmppc_copy_from_svcpu()
279 vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; in kvmppc_copy_from_svcpu()
280 vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; in kvmppc_copy_from_svcpu()
281 vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; in kvmppc_copy_from_svcpu()
282 vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; in kvmppc_copy_from_svcpu()
283 vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; in kvmppc_copy_from_svcpu()
284 vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; in kvmppc_copy_from_svcpu()
285 vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; in kvmppc_copy_from_svcpu()
286 vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; in kvmppc_copy_from_svcpu()
287 vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; in kvmppc_copy_from_svcpu()
288 vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; in kvmppc_copy_from_svcpu()
289 vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; in kvmppc_copy_from_svcpu()
290 vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; in kvmppc_copy_from_svcpu()
291 vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; in kvmppc_copy_from_svcpu()
292 vcpu->arch.regs.ccr = svcpu->cr; in kvmppc_copy_from_svcpu()
293 vcpu->arch.regs.xer = svcpu->xer; in kvmppc_copy_from_svcpu()
294 vcpu->arch.regs.ctr = svcpu->ctr; in kvmppc_copy_from_svcpu()
295 vcpu->arch.regs.link = svcpu->lr; in kvmppc_copy_from_svcpu()
296 vcpu->arch.regs.nip = svcpu->pc; in kvmppc_copy_from_svcpu()
297 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; in kvmppc_copy_from_svcpu()
298 vcpu->arch.fault_dar = svcpu->fault_dar; in kvmppc_copy_from_svcpu()
299 vcpu->arch.fault_dsisr = svcpu->fault_dsisr; in kvmppc_copy_from_svcpu()
300 vcpu->arch.last_inst = svcpu->last_inst; in kvmppc_copy_from_svcpu()
302 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; in kvmppc_copy_from_svcpu()
307 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; in kvmppc_copy_from_svcpu()
308 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; in kvmppc_copy_from_svcpu()
309 to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; in kvmppc_copy_from_svcpu()
311 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; in kvmppc_copy_from_svcpu()
325 old_msr = kvmppc_get_msr(vcpu); in kvmppc_copy_from_svcpu()
327 (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != in kvmppc_copy_from_svcpu()
330 old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); in kvmppc_copy_from_svcpu()
331 kvmppc_set_msr_fast(vcpu, old_msr); in kvmppc_copy_from_svcpu()
332 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_copy_from_svcpu()
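Note the bookkeeping pairing across the two copy routines: kvmppc_copy_to_svcpu() snapshots the timebase and VTB at guest entry (entry_tb/entry_vtb above), and kvmppc_copy_from_svcpu() charges the deltas to PURR, SPURR and VTB on the way back. The accounting pattern in isolation, as a simplified model:

    #include <stdint.h>

    struct toy_acct { uint64_t purr, spurr, entry_tb; };

    static uint64_t fake_tb;                      /* stand-in for get_tb() */
    static uint64_t get_tb(void) { return fake_tb; }

    static void guest_entry(struct toy_acct *a)
    {
        a->entry_tb = get_tb();                   /* snapshot at entry */
    }

    static void guest_exit(struct toy_acct *a)
    {
        uint64_t delta = get_tb() - a->entry_tb;

        a->purr += delta;    /* both utilisation registers advance at */
        a->spurr += delta;   /* timebase rate in this simple model */
    }

    int main(void)
    {
        struct toy_acct a = { 0 };

        guest_entry(&a);
        fake_tb += 1000;     /* pretend 1000 timebase ticks in the guest */
        guest_exit(&a);
        return a.purr == 1000 ? 0 : 1;
    }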
343 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) in kvmppc_save_tm_sprs() argument
346 vcpu->arch.tfhar = mfspr(SPRN_TFHAR); in kvmppc_save_tm_sprs()
347 vcpu->arch.texasr = mfspr(SPRN_TEXASR); in kvmppc_save_tm_sprs()
348 vcpu->arch.tfiar = mfspr(SPRN_TFIAR); in kvmppc_save_tm_sprs()
352 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) in kvmppc_restore_tm_sprs() argument
355 mtspr(SPRN_TFHAR, vcpu->arch.tfhar); in kvmppc_restore_tm_sprs()
356 mtspr(SPRN_TEXASR, vcpu->arch.texasr); in kvmppc_restore_tm_sprs()
357 mtspr(SPRN_TFIAR, vcpu->arch.tfiar); in kvmppc_restore_tm_sprs()
364 static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) in kvmppc_handle_lost_math_exts() argument
367 ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & in kvmppc_handle_lost_math_exts()
380 kvmppc_handle_ext(vcpu, exit_nr, ext_diff); in kvmppc_handle_lost_math_exts()
383 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) in kvmppc_save_tm_pr() argument
385 if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { in kvmppc_save_tm_pr()
386 kvmppc_save_tm_sprs(vcpu); in kvmppc_save_tm_pr()
390 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_save_tm_pr()
391 kvmppc_giveup_ext(vcpu, MSR_VSX); in kvmppc_save_tm_pr()
394 _kvmppc_save_tm_pr(vcpu, mfmsr()); in kvmppc_save_tm_pr()
398 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) in kvmppc_restore_tm_pr() argument
400 if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { in kvmppc_restore_tm_pr()
401 kvmppc_restore_tm_sprs(vcpu); in kvmppc_restore_tm_pr()
402 if (kvmppc_get_msr(vcpu) & MSR_TM) { in kvmppc_restore_tm_pr()
403 kvmppc_handle_lost_math_exts(vcpu); in kvmppc_restore_tm_pr()
404 if (vcpu->arch.fscr & FSCR_TAR) in kvmppc_restore_tm_pr()
405 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); in kvmppc_restore_tm_pr()
411 _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); in kvmppc_restore_tm_pr()
414 if (kvmppc_get_msr(vcpu) & MSR_TM) { in kvmppc_restore_tm_pr()
415 kvmppc_handle_lost_math_exts(vcpu); in kvmppc_restore_tm_pr()
416 if (vcpu->arch.fscr & FSCR_TAR) in kvmppc_restore_tm_pr()
417 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); in kvmppc_restore_tm_pr()
422 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) in kvmppc_core_check_requests_pr() argument
428 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvmppc_core_check_requests_pr()
429 kvmppc_mmu_pte_flush(vcpu, 0, 0); in kvmppc_core_check_requests_pr()
438 struct kvm_vcpu *vcpu; in do_kvm_unmap_gfn() local
440 kvm_for_each_vcpu(i, vcpu, kvm) in do_kvm_unmap_gfn()
441 kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT, in do_kvm_unmap_gfn()
466 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) in kvmppc_set_msr_pr() argument
471 if (vcpu->arch.papr_enabled) in kvmppc_set_msr_pr()
484 kvmppc_emulate_tabort(vcpu, in kvmppc_set_msr_pr()
488 old_msr = kvmppc_get_msr(vcpu); in kvmppc_set_msr_pr()
489 msr &= to_book3s(vcpu)->msr_mask; in kvmppc_set_msr_pr()
490 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_set_msr_pr()
491 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_set_msr_pr()
494 if (!vcpu->arch.pending_exceptions) { in kvmppc_set_msr_pr()
495 kvm_vcpu_halt(vcpu); in kvmppc_set_msr_pr()
496 vcpu->stat.generic.halt_wakeup++; in kvmppc_set_msr_pr()
500 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_set_msr_pr()
504 if (kvmppc_is_split_real(vcpu)) in kvmppc_set_msr_pr()
505 kvmppc_fixup_split_real(vcpu); in kvmppc_set_msr_pr()
507 kvmppc_unfixup_split_real(vcpu); in kvmppc_set_msr_pr()
509 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != in kvmppc_set_msr_pr()
511 kvmppc_mmu_flush_segments(vcpu); in kvmppc_set_msr_pr()
512 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_set_msr_pr()
515 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { in kvmppc_set_msr_pr()
516 struct kvm_vcpu_arch *a = &vcpu->arch; in kvmppc_set_msr_pr()
519 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); in kvmppc_set_msr_pr()
521 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); in kvmppc_set_msr_pr()
533 if (vcpu->arch.magic_page_pa && in kvmppc_set_msr_pr()
536 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, in kvmppc_set_msr_pr()
541 if (kvmppc_get_msr(vcpu) & MSR_FP) in kvmppc_set_msr_pr()
542 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); in kvmppc_set_msr_pr()
545 if (kvmppc_get_msr(vcpu) & MSR_TM) in kvmppc_set_msr_pr()
546 kvmppc_handle_lost_math_exts(vcpu); in kvmppc_set_msr_pr()
550 static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) in kvmppc_set_pvr_pr() argument
554 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; in kvmppc_set_pvr_pr()
555 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_pr()
558 kvmppc_mmu_book3s_64_init(vcpu); in kvmppc_set_pvr_pr()
559 if (!to_book3s(vcpu)->hior_explicit) in kvmppc_set_pvr_pr()
560 to_book3s(vcpu)->hior = 0xfff00000; in kvmppc_set_pvr_pr()
561 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; in kvmppc_set_pvr_pr()
562 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_set_pvr_pr()
566 kvmppc_mmu_book3s_32_init(vcpu); in kvmppc_set_pvr_pr()
567 if (!to_book3s(vcpu)->hior_explicit) in kvmppc_set_pvr_pr()
568 to_book3s(vcpu)->hior = 0; in kvmppc_set_pvr_pr()
569 to_book3s(vcpu)->msr_mask = 0xffffffffULL; in kvmppc_set_pvr_pr()
570 vcpu->arch.cpu_type = KVM_CPU_3S_32; in kvmppc_set_pvr_pr()
573 kvmppc_sanity_check(vcpu); in kvmppc_set_pvr_pr()
577 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
578 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && in kvmppc_set_pvr_pr()
580 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
585 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); in kvmppc_set_pvr_pr()
603 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | in kvmppc_set_pvr_pr()
610 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
625 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; in kvmppc_set_pvr_pr()
640 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) in kvmppc_patch_dcbz() argument
647 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); in kvmppc_patch_dcbz()
667 static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) in kvmppc_visible_gpa() argument
669 ulong mp_pa = vcpu->arch.magic_page_pa; in kvmppc_visible_gpa()
671 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) in kvmppc_visible_gpa()
679 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); in kvmppc_visible_gpa()
682 static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, in kvmppc_handle_pagefault() argument
691 bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; in kvmppc_handle_pagefault()
692 bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; in kvmppc_handle_pagefault()
696 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) in kvmppc_handle_pagefault()
701 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); in kvmppc_handle_pagefault()
713 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { in kvmppc_handle_pagefault()
719 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && in kvmppc_handle_pagefault()
724 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_handle_pagefault()
726 if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) in kvmppc_handle_pagefault()
737 if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault()
738 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { in kvmppc_handle_pagefault()
756 flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; in kvmppc_handle_pagefault()
757 kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags); in kvmppc_handle_pagefault()
759 kvmppc_core_queue_inst_storage(vcpu, flags); in kvmppc_handle_pagefault()
763 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_pagefault()
764 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); in kvmppc_handle_pagefault()
765 } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) { in kvmppc_handle_pagefault()
766 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { in kvmppc_handle_pagefault()
772 kvmppc_mmu_unmap_page(vcpu, &pte); in kvmppc_handle_pagefault()
775 if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { in kvmppc_handle_pagefault()
777 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_handle_pagefault()
781 vcpu->stat.sp_storage++; in kvmppc_handle_pagefault()
782 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault()
783 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) in kvmppc_handle_pagefault()
784 kvmppc_patch_dcbz(vcpu, &pte); in kvmppc_handle_pagefault()
787 vcpu->stat.mmio_exits++; in kvmppc_handle_pagefault()
788 vcpu->arch.paddr_accessed = pte.raddr; in kvmppc_handle_pagefault()
789 vcpu->arch.vaddr_accessed = pte.eaddr; in kvmppc_handle_pagefault()
790 r = kvmppc_emulate_mmio(vcpu); in kvmppc_handle_pagefault()
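Read top to bottom, kvmppc_handle_pagefault() is a three-way dispatch on the result of the guest MMU xlate: a translation miss is reflected back into the guest as a storage interrupt, a translatable address backed by a visible gfn is mapped via kvmppc_mmu_map_page(), and anything else becomes an MMIO exit to user space. A hypothetical skeleton of just that decision:

    enum pf_action {
        INJECT_GUEST_FAULT,  /* queue DSI/ISI back into the guest */
        MAP_HOST_PAGE,       /* kvmppc_mmu_map_page() path */
        EMULATE_MMIO,        /* kvmppc_emulate_mmio() path */
    };

    static enum pf_action classify_fault(int xlate_ret, int gpa_visible)
    {
        if (xlate_ret < 0)   /* guest translation failed */
            return INJECT_GUEST_FAULT;
        if (gpa_visible)     /* gfn is backed by a memslot */
            return MAP_HOST_PAGE;
        return EMULATE_MMIO;
    }

    int main(void)
    {
        return classify_fault(-1, 0) == INJECT_GUEST_FAULT ? 0 : 1;
    }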
799 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) in kvmppc_giveup_ext() argument
810 msr &= vcpu->arch.guest_owned_ext; in kvmppc_giveup_ext()
837 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); in kvmppc_giveup_ext()
838 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_giveup_ext()
842 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_giveup_fac() argument
845 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { in kvmppc_giveup_fac()
852 vcpu->arch.tar = mfspr(SPRN_TAR); in kvmppc_giveup_fac()
854 vcpu->arch.shadow_fscr &= ~FSCR_TAR; in kvmppc_giveup_fac()
861 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, in kvmppc_handle_ext() argument
867 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) in kvmppc_handle_ext()
870 if (!(kvmppc_get_msr(vcpu) & msr)) { in kvmppc_handle_ext()
871 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_ext()
881 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_ext()
893 msr &= ~vcpu->arch.guest_owned_ext; in kvmppc_handle_ext()
904 load_fp_state(&vcpu->arch.fp); in kvmppc_handle_ext()
906 t->fp_save_area = &vcpu->arch.fp; in kvmppc_handle_ext()
914 load_vr_state(&vcpu->arch.vr); in kvmppc_handle_ext()
916 t->vr_save_area = &vcpu->arch.vr; in kvmppc_handle_ext()
922 vcpu->arch.guest_owned_ext |= msr; in kvmppc_handle_ext()
923 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_handle_ext()
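kvmppc_giveup_ext() and kvmppc_handle_ext() together maintain guest_owned_ext as a bitmask of math facilities currently loaded for the guest: handle_ext claims the missing bits, giveup_ext drops them again, and VSX is always dropped together with FP/VEC (the "msr | MSR_VSX" at line 837 above). The ownership logic modelled on its own:

    #include <assert.h>
    #include <stdint.h>

    #define MSR_FP  (1u << 13)   /* Book3S bit positions */
    #define MSR_VSX (1u << 23)
    #define MSR_VEC (1u << 25)

    static uint32_t owned;       /* models vcpu->arch.guest_owned_ext */

    static void handle_ext(uint32_t msr)
    {
        msr &= ~owned;           /* nothing to load if already owned */
        owned |= msr;
    }

    static void giveup_ext(uint32_t msr)
    {
        msr &= owned;                 /* only give up what we hold */
        owned &= ~(msr | MSR_VSX);    /* VSX never outlives FP/VEC */
    }

    int main(void)
    {
        handle_ext(MSR_FP | MSR_VSX);
        giveup_ext(MSR_FP);      /* dropping FP takes VSX with it */
        assert(owned == 0);
        return 0;
    }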
932 static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) in kvmppc_handle_lost_ext() argument
936 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; in kvmppc_handle_lost_ext()
943 load_fp_state(&vcpu->arch.fp); in kvmppc_handle_lost_ext()
951 load_vr_state(&vcpu->arch.vr); in kvmppc_handle_lost_ext()
961 void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_trigger_fac_interrupt() argument
964 vcpu->arch.fscr &= ~(0xffULL << 56); in kvmppc_trigger_fac_interrupt()
965 vcpu->arch.fscr |= (fac << 56); in kvmppc_trigger_fac_interrupt()
966 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); in kvmppc_trigger_fac_interrupt()
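kvmppc_trigger_fac_interrupt() records the failing facility in the top byte of FSCR (the interruption-cause field, bits 56-63) before queueing BOOK3S_INTERRUPT_FAC_UNAVAIL; the exit handler later recovers it with shadow_fscr >> 56 (line 1402 below). The bit manipulation on its own:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t fscr = 0x123;
        uint64_t fac = 8;             /* FSCR_TAR_LG, for example */

        fscr &= ~(0xffULL << 56);     /* clear the previous cause */
        fscr |= (fac << 56);          /* record the failing facility */

        assert((fscr >> 56) == fac);  /* what the FAC_UNAVAIL exit reads */
        return 0;
    }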
969 static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_emulate_fac() argument
973 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) in kvmppc_emulate_fac()
974 er = kvmppc_emulate_instruction(vcpu); in kvmppc_emulate_fac()
978 kvmppc_trigger_fac_interrupt(vcpu, fac); in kvmppc_emulate_fac()
983 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_handle_fac() argument
995 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); in kvmppc_handle_fac()
998 guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; in kvmppc_handle_fac()
1007 kvmppc_trigger_fac_interrupt(vcpu, fac); in kvmppc_handle_fac()
1015 mtspr(SPRN_TAR, vcpu->arch.tar); in kvmppc_handle_fac()
1016 vcpu->arch.shadow_fscr |= FSCR_TAR; in kvmppc_handle_fac()
1019 kvmppc_emulate_fac(vcpu, fac); in kvmppc_handle_fac()
1031 if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) in kvmppc_handle_fac()
1038 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) in kvmppc_set_fscr() argument
1044 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { in kvmppc_set_fscr()
1046 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_set_fscr()
1047 } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { in kvmppc_set_fscr()
1048 vcpu->arch.fscr = fscr; in kvmppc_set_fscr()
1049 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); in kvmppc_set_fscr()
1053 vcpu->arch.fscr = fscr; in kvmppc_set_fscr()
1057 static void kvmppc_setup_debug(struct kvm_vcpu *vcpu) in kvmppc_setup_debug() argument
1059 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvmppc_setup_debug()
1060 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_setup_debug()
1062 kvmppc_set_msr(vcpu, msr | MSR_SE); in kvmppc_setup_debug()
1066 static void kvmppc_clear_debug(struct kvm_vcpu *vcpu) in kvmppc_clear_debug() argument
1068 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvmppc_clear_debug()
1069 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_clear_debug()
1071 kvmppc_set_msr(vcpu, msr & ~MSR_SE); in kvmppc_clear_debug()
1075 static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) in kvmppc_exit_pr_progint() argument
1089 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; in kvmppc_exit_pr_progint()
1093 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_exit_pr_progint()
1097 if (kvmppc_get_msr(vcpu) & MSR_PR) { in kvmppc_exit_pr_progint()
1100 kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); in kvmppc_exit_pr_progint()
1103 kvmppc_core_queue_program(vcpu, flags); in kvmppc_exit_pr_progint()
1108 vcpu->stat.emulated_inst_exits++; in kvmppc_exit_pr_progint()
1109 er = kvmppc_emulate_instruction(vcpu); in kvmppc_exit_pr_progint()
1119 __func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); in kvmppc_exit_pr_progint()
1120 kvmppc_core_queue_program(vcpu, flags); in kvmppc_exit_pr_progint()
1124 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_exit_pr_progint()
1137 int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) in kvmppc_handle_exit_pr() argument
1139 struct kvm_run *run = vcpu->run; in kvmppc_handle_exit_pr()
1143 vcpu->stat.sum_exits++; in kvmppc_handle_exit_pr()
1150 trace_kvm_exit(exit_nr, vcpu); in kvmppc_handle_exit_pr()
1156 ulong shadow_srr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_exit_pr()
1157 vcpu->stat.pf_instruc++; in kvmppc_handle_exit_pr()
1159 if (kvmppc_is_split_real(vcpu)) in kvmppc_handle_exit_pr()
1160 kvmppc_fixup_split_real(vcpu); in kvmppc_handle_exit_pr()
1169 svcpu = svcpu_get(vcpu); in kvmppc_handle_exit_pr()
1170 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; in kvmppc_handle_exit_pr()
1173 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_handle_exit_pr()
1182 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_exit_pr()
1183 r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); in kvmppc_handle_exit_pr()
1184 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_exit_pr()
1185 vcpu->stat.sp_instruc++; in kvmppc_handle_exit_pr()
1186 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_exit_pr()
1187 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { in kvmppc_handle_exit_pr()
1193 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); in kvmppc_handle_exit_pr()
1196 kvmppc_core_queue_inst_storage(vcpu, in kvmppc_handle_exit_pr()
1204 ulong dar = kvmppc_get_fault_dar(vcpu); in kvmppc_handle_exit_pr()
1205 u32 fault_dsisr = vcpu->arch.fault_dsisr; in kvmppc_handle_exit_pr()
1206 vcpu->stat.pf_storage++; in kvmppc_handle_exit_pr()
1215 svcpu = svcpu_get(vcpu); in kvmppc_handle_exit_pr()
1219 kvmppc_mmu_map_segment(vcpu, dar); in kvmppc_handle_exit_pr()
1232 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_exit_pr()
1233 r = kvmppc_handle_pagefault(vcpu, dar, exit_nr); in kvmppc_handle_exit_pr()
1234 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_exit_pr()
1236 kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr); in kvmppc_handle_exit_pr()
1242 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { in kvmppc_handle_exit_pr()
1243 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_exit_pr()
1244 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_pr()
1250 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { in kvmppc_handle_exit_pr()
1251 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_pr()
1261 vcpu->stat.dec_exits++; in kvmppc_handle_exit_pr()
1267 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_pr()
1277 r = kvmppc_exit_pr_progint(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1285 if (vcpu->arch.papr_enabled) { in kvmppc_handle_exit_pr()
1287 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); in kvmppc_handle_exit_pr()
1289 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); in kvmppc_handle_exit_pr()
1295 if (vcpu->arch.papr_enabled && in kvmppc_handle_exit_pr()
1297 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_handle_exit_pr()
1299 ulong cmd = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_pr()
1303 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { in kvmppc_handle_exit_pr()
1311 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_pr()
1315 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_pr()
1317 } else if (vcpu->arch.osi_enabled && in kvmppc_handle_exit_pr()
1318 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && in kvmppc_handle_exit_pr()
1319 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { in kvmppc_handle_exit_pr()
1326 gprs[i] = kvmppc_get_gpr(vcpu, i); in kvmppc_handle_exit_pr()
1327 vcpu->arch.osi_needed = 1; in kvmppc_handle_exit_pr()
1329 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && in kvmppc_handle_exit_pr()
1330 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { in kvmppc_handle_exit_pr()
1332 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); in kvmppc_handle_exit_pr()
1336 vcpu->stat.syscall_exits++; in kvmppc_handle_exit_pr()
1337 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1350 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { in kvmppc_handle_exit_pr()
1352 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, in kvmppc_handle_exit_pr()
1355 r = kvmppc_exit_pr_progint(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1377 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); in kvmppc_handle_exit_pr()
1383 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_handle_exit_pr()
1389 dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst)); in kvmppc_handle_exit_pr()
1390 dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst)); in kvmppc_handle_exit_pr()
1392 kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_handle_exit_pr()
1393 kvmppc_set_dar(vcpu, dar); in kvmppc_handle_exit_pr()
1395 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1402 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); in kvmppc_handle_exit_pr()
1406 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1410 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvmppc_handle_exit_pr()
1414 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1420 ulong shadow_srr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_exit_pr()
1423 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); in kvmppc_handle_exit_pr()
1441 s = kvmppc_prepare_to_enter(vcpu); in kvmppc_handle_exit_pr()
1449 kvmppc_handle_lost_ext(vcpu); in kvmppc_handle_exit_pr()
1452 trace_kvm_book3s_reenter(r, vcpu); in kvmppc_handle_exit_pr()
1457 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs_pr() argument
1460 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvm_arch_vcpu_ioctl_get_sregs_pr()
1463 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1465 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1466 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { in kvm_arch_vcpu_ioctl_get_sregs_pr()
1468 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1469 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1473 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); in kvm_arch_vcpu_ioctl_get_sregs_pr()
1484 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs_pr() argument
1487 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1490 kvmppc_set_pvr_pr(vcpu, sregs->pvr); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1494 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { in kvm_arch_vcpu_ioctl_set_sregs_pr()
1496 vcpu->arch.mmu.slbmte(vcpu, 0, 0); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1497 vcpu->arch.mmu.slbia(vcpu); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1504 vcpu->arch.mmu.slbmte(vcpu, rs, rb); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1510 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1513 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1515 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1517 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1519 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1525 kvmppc_mmu_pte_flush(vcpu, 0, 0); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1530 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, in kvmppc_get_one_reg_pr() argument
1540 *val = get_reg_val(id, to_book3s(vcpu)->hior); in kvmppc_get_one_reg_pr()
1543 *val = get_reg_val(id, to_book3s(vcpu)->vtb); in kvmppc_get_one_reg_pr()
1550 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_get_one_reg_pr()
1557 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_pr()
1560 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_pr()
1563 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_pr()
1567 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); in kvmppc_get_one_reg_pr()
1576 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_pr()
1579 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_pr()
1586 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_pr()
1589 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_pr()
1592 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_pr()
1595 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_pr()
1598 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_pr()
1601 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_pr()
1604 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_pr()
1607 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_pr()
1611 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_pr()
1616 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_pr()
1619 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_pr()
1630 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) in kvmppc_set_lpcr_pr() argument
1633 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr_pr()
1635 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr_pr()
1638 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, in kvmppc_set_one_reg_pr() argument
1645 to_book3s(vcpu)->hior = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1646 to_book3s(vcpu)->hior_explicit = true; in kvmppc_set_one_reg_pr()
1649 to_book3s(vcpu)->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1653 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_pr()
1657 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1660 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1663 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1666 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = in kvmppc_set_one_reg_pr()
1676 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_pr()
1679 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_pr()
1685 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1688 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1691 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1694 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1697 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1700 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1703 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1706 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1710 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1715 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1718 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1729 static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_create_pr() argument
1740 vcpu->arch.book3s = vcpu_book3s; in kvmppc_core_vcpu_create_pr()
1743 vcpu->arch.shadow_vcpu = in kvmppc_core_vcpu_create_pr()
1744 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); in kvmppc_core_vcpu_create_pr()
1745 if (!vcpu->arch.shadow_vcpu) in kvmppc_core_vcpu_create_pr()
1752 vcpu->arch.shared = (void *)p; in kvmppc_core_vcpu_create_pr()
1756 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_pr()
1758 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_pr()
1766 vcpu->arch.pvr = 0x3C0301; in kvmppc_core_vcpu_create_pr()
1768 vcpu->arch.pvr = mfspr(SPRN_PVR); in kvmppc_core_vcpu_create_pr()
1769 vcpu->arch.intr_msr = MSR_SF; in kvmppc_core_vcpu_create_pr()
1772 vcpu->arch.pvr = 0x84202; in kvmppc_core_vcpu_create_pr()
1773 vcpu->arch.intr_msr = 0; in kvmppc_core_vcpu_create_pr()
1775 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); in kvmppc_core_vcpu_create_pr()
1776 vcpu->arch.slb_nr = 64; in kvmppc_core_vcpu_create_pr()
1778 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; in kvmppc_core_vcpu_create_pr()
1780 err = kvmppc_mmu_init_pr(vcpu); in kvmppc_core_vcpu_create_pr()
1787 free_page((unsigned long)vcpu->arch.shared); in kvmppc_core_vcpu_create_pr()
1790 kfree(vcpu->arch.shadow_vcpu); in kvmppc_core_vcpu_create_pr()
1798 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_free_pr() argument
1800 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in kvmppc_core_vcpu_free_pr()
1802 kvmppc_mmu_destroy_pr(vcpu); in kvmppc_core_vcpu_free_pr()
1803 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); in kvmppc_core_vcpu_free_pr()
1805 kfree(vcpu->arch.shadow_vcpu); in kvmppc_core_vcpu_free_pr()
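kvmppc_core_vcpu_create_pr() allocates the book3s struct, the shadow vcpu and the shared page in order and unwinds in reverse on failure; kvmppc_core_vcpu_free_pr() performs the same teardown for a fully constructed vcpu. A condensed user-space model of the goto-unwind pattern (all names hypothetical):

    #include <errno.h>
    #include <stdlib.h>

    struct toy_vcpu { void *book3s, *shadow_vcpu, *shared; };

    static int toy_create(struct toy_vcpu *v)
    {
        v->book3s = calloc(1, 64);
        if (!v->book3s)
            return -ENOMEM;
        v->shadow_vcpu = calloc(1, 64);
        if (!v->shadow_vcpu)
            goto free_book3s;
        v->shared = calloc(1, 4096);  /* the page shared with the guest */
        if (!v->shared)
            goto free_shadow;
        return 0;

    free_shadow:
        free(v->shadow_vcpu);
    free_book3s:
        free(v->book3s);
        return -ENOMEM;
    }

    static void toy_free(struct toy_vcpu *v)  /* reverse order of create */
    {
        free(v->shared);
        free(v->shadow_vcpu);
        free(v->book3s);
    }

    int main(void)
    {
        struct toy_vcpu v;

        if (toy_create(&v))
            return 1;
        toy_free(&v);
        return 0;
    }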
1810 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) in kvmppc_vcpu_run_pr() argument
1814 /* Check if we can run the vcpu at all */ in kvmppc_vcpu_run_pr()
1815 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_pr()
1816 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_vcpu_run_pr()
1821 kvmppc_setup_debug(vcpu); in kvmppc_vcpu_run_pr()
1829 ret = kvmppc_prepare_to_enter(vcpu); in kvmppc_vcpu_run_pr()
1838 if (kvmppc_get_msr(vcpu) & MSR_FP) in kvmppc_vcpu_run_pr()
1839 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); in kvmppc_vcpu_run_pr()
1843 ret = __kvmppc_vcpu_run(vcpu); in kvmppc_vcpu_run_pr()
1845 kvmppc_clear_debug(vcpu); in kvmppc_vcpu_run_pr()
1851 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); in kvmppc_vcpu_run_pr()
1854 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_vcpu_run_pr()
1858 vcpu->mode = OUTSIDE_GUEST_MODE; in kvmppc_vcpu_run_pr()
1869 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_dirty_log_pr() local
1886 kvm_for_each_vcpu(n, vcpu, kvm) in kvm_vm_ioctl_get_dirty_log_pr()
1887 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); in kvm_vm_ioctl_get_dirty_log_pr()
1931 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_smmu_info_pr() local
1947 * support it, but unfortunately we don't have a vcpu easily in kvm_vm_ioctl_get_smmu_info_pr()
1948 * to hand here to test. Just pick the first vcpu, and if in kvm_vm_ioctl_get_smmu_info_pr()
1954 vcpu = kvm_get_vcpu(kvm, 0); in kvm_vm_ioctl_get_smmu_info_pr()
1955 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { in kvm_vm_ioctl_get_smmu_info_pr()