Lines matching the search query +full:guest +full:-side (PowerPC KVM-HV built-in real-mode helpers). Each entry shows the original source line number, the matched line, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
53 return -EINVAL; in early_parse_kvm_cma_resv()
60 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); in kvm_alloc_hpt_cma()
74 * kvm_cma_reserve() - reserve area for kvm hash pagetable
98 KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma", in kvm_cma_reserve()
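The lines above come from the boot-time CMA reservation path (kvm_cma_reserve()/kvm_alloc_hpt_cma()), which sets aside a contiguous region for guest hashed page tables and hands it out in chunks of KVM_CMA_CHUNK_ORDER granularity. Below is a minimal user-space sketch of only the sizing/alignment arithmetic; the chunk order, the 5% reservation default, and the memory size are illustrative values, not the kernel's.

    /* Sketch of the CMA sizing arithmetic; all constants are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define KVM_CMA_CHUNK_ORDER 18          /* hypothetical: one chunk = 256 KiB */
    #define CMA_RESV_PERCENT    5           /* hypothetical default: 5% of RAM */

    /* Round a byte count up to a whole number of CMA chunks. */
    static uint64_t align_to_chunk(uint64_t bytes)
    {
        uint64_t chunk = 1ULL << KVM_CMA_CHUNK_ORDER;

        return (bytes + chunk - 1) & ~(chunk - 1);
    }

    int main(void)
    {
        uint64_t ram_bytes = 64ULL << 30;   /* pretend the machine has 64 GiB */
        uint64_t resv = align_to_chunk(ram_bytes / 100 * CMA_RESV_PERCENT);

        printf("reserve %llu MiB (%llu chunks) for guest hash page tables\n",
               (unsigned long long)(resv >> 20),
               (unsigned long long)(resv >> KVM_CMA_CHUNK_ORDER));
        return 0;
    }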
104 * Real-mode H_CONFER implementation.
106 * still running in the guest and not ceded. If so, we pop up
107 * to the virtual-mode implementation; if not, just return to
108 * the guest.
113 struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; in kvmppc_rm_h_confer()
114 int ptid = local_paca->kvm_hstate.ptid; in kvmppc_rm_h_confer()
121 set_bit(ptid, &vc->conferring_threads); in kvmppc_rm_h_confer()
124 threads_ceded = vc->napping_threads; in kvmppc_rm_h_confer()
125 threads_conferring = vc->conferring_threads; in kvmppc_rm_h_confer()
131 clear_bit(ptid, &vc->conferring_threads); in kvmppc_rm_h_confer()
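The H_CONFER fragment above sets this thread's bit in conferring_threads, samples napping_threads and conferring_threads, and clears its bit again when done. The following is a simplified user-space model of that pattern, assuming a vcore tracked as three per-thread bitmasks and a bounded spin; it is not the kernel's data layout or timing.

    /* Simplified model of the confer loop: thread ptid yields while some
     * sibling in the same vcore is still active (neither napping nor
     * conferring).  The vcore layout and the spin bound are assumptions. */
    #include <stdatomic.h>
    #include <stdbool.h>

    struct vcore_model {
        atomic_uint running_threads;    /* bit per thread currently in the guest */
        atomic_uint napping_threads;    /* bit per thread that has ceded */
        atomic_uint conferring_threads; /* bit per thread executing H_CONFER */
    };

    /* True when every other active sibling has ceded or is also conferring,
     * i.e. there is nobody left to usefully confer cycles to. */
    static bool confer_done(struct vcore_model *vc, int ptid)
    {
        unsigned int running = atomic_load(&vc->running_threads);
        unsigned int idle    = atomic_load(&vc->napping_threads) |
                               atomic_load(&vc->conferring_threads);

        return (running & ~idle & ~(1u << ptid)) == 0;
    }

    static void confer(struct vcore_model *vc, int ptid)
    {
        int spins = 0;

        atomic_fetch_or(&vc->conferring_threads, 1u << ptid);
        while (!confer_done(vc, ptid) && spins++ < 1000)
            ;   /* busy-wait briefly; the real code bounds this differently */
        atomic_fetch_and(&vc->conferring_threads, ~(1u << ptid));
    }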
170 if (cmd < hcall_real_table_end - hcall_real_table && in kvmppc_hcall_impl_hv_realmode()
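The single line above bounds-checks an hcall number against the real-mode handler table before declaring it implemented. A sketch of that shape of check follows, assuming hcall numbers are multiples of 4 that index the table at cmd/4 and that empty slots are left NULL; the slot number and stub here are purely illustrative, and the real table encoding may differ.

    /* Sketch of an "is this hcall handled in real mode?" lookup. */
    #include <stdbool.h>
    #include <stddef.h>

    typedef long (*hcall_fn)(void);

    static long h_confer_stub(void) { return 0; }

    /* Illustrative table: one slot per hcall number divided by 4. */
    static const hcall_fn hcall_real_table[] = {
        [4] = h_confer_stub,    /* made-up slot, not the real numbering */
    };

    static bool hcall_impl_realmode(unsigned int cmd)
    {
        unsigned int slot = cmd / 4;

        return slot < sizeof(hcall_real_table) / sizeof(hcall_real_table[0]) &&
               hcall_real_table[slot] != NULL;
    }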
228 xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys; in kvmhv_rm_send_ipi()
241 int cpu = vc->pcpu; in kvmhv_interrupt_vcore()
252 struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; in kvmhv_commence_exit()
253 int ptid = local_paca->kvm_hstate.ptid; in kvmhv_commence_exit()
254 struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode; in kvmhv_commence_exit()
257 /* Set our bit in the threads-exiting-guest map in the 0xff00 in kvmhv_commence_exit()
258 bits of vcore->entry_exit_map */ in kvmhv_commence_exit()
261 ee = vc->entry_exit_map; in kvmhv_commence_exit()
262 } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee); in kvmhv_commence_exit()
269 * Trigger the other threads in this vcore to exit the guest. in kvmhv_commence_exit()
271 * will be already on their way out of the guest. in kvmhv_commence_exit()
277 * If we are doing dynamic micro-threading, interrupt the other in kvmhv_commence_exit()
284 vc = sip->vc[i]; in kvmhv_commence_exit()
288 ee = vc->entry_exit_map; in kvmhv_commence_exit()
292 } while (cmpxchg(&vc->entry_exit_map, ee, in kvmhv_commence_exit()
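kvmhv_commence_exit() above publishes "this thread is leaving the guest" by OR-ing its bit into the 0xff00 half of vcore->entry_exit_map with a cmpxchg retry loop. Below is a compilable C11 model of just that update; the word layout follows the comment quoted above, everything else (types, scope) is simplified.

    /* Lock-free "mark this thread as exiting the guest" update: the vcore
     * keeps one word whose 0xff00 bits form a per-thread exit map, and each
     * thread merges its bit in with a compare-and-swap retry loop. */
    #include <stdatomic.h>

    static void mark_exiting(atomic_uint *entry_exit_map, int ptid)
    {
        unsigned int ee, me = 0x100u << ptid;   /* our bit in the 0xff00 range */

        ee = atomic_load(entry_exit_map);
        /* Retry until our bit is merged in without losing concurrent updates;
         * on failure ee is reloaded with the current value of the map. */
        while (!atomic_compare_exchange_weak(entry_exit_map, &ee, ee | me))
            ;
    }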
318 for (i = 0; i < pimap->n_mapped; i++) { in get_irqmap()
319 if (xisr == pimap->mapped[i].r_hwirq) { in get_irqmap()
325 return &pimap->mapped[i]; in get_irqmap()
336 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
349 vcpu = local_paca->kvm_hstate.kvm_vcpu; in kvmppc_check_passthru()
352 pimap = kvmppc_get_passthru_irqmap(vcpu->kvm); in kvmppc_check_passthru()
360 local_paca->kvm_hstate.saved_xirr = 0; in kvmppc_check_passthru()
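get_irqmap() and kvmppc_check_passthru() above translate the hardware interrupt source found in the XISR into a guest interrupt by scanning a small per-VM map. The sketch below shows that lookup with made-up structure names and a fixed-size array; the real kvmppc_passthru_irqmap layout may differ.

    /* Sketch of the passthrough IRQ map lookup: a small array maps host
     * hardware IRQ numbers (as seen in the XISR) to guest IRQ numbers,
     * searched linearly. */
    #include <stddef.h>

    struct irq_map_entry {
        unsigned int r_hwirq;   /* host-side hardware IRQ source */
        unsigned int v_hwirq;   /* corresponding guest-visible IRQ */
    };

    struct passthru_irqmap {
        int n_mapped;
        struct irq_map_entry mapped[16];
    };

    static const struct irq_map_entry *
    lookup_irqmap(const struct passthru_irqmap *pimap, unsigned int xisr)
    {
        for (int i = 0; i < pimap->n_mapped; i++) {
            if (pimap->mapped[i].r_hwirq == xisr)
                return &pimap->mapped[i];
        }
        return NULL;    /* not a passed-through device interrupt */
    }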
378 * -1 if there was a guest wakeup IPI (which has now been cleared)
379 * -2 if there is PCI passthrough external interrupt that was handled
414 host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi); in kvmppc_read_one_intr()
419 xics_phys = local_paca->kvm_hstate.xics_phys; in kvmppc_read_one_intr()
435 local_paca->kvm_hstate.saved_xirr = h_xirr; in kvmppc_read_one_intr()
438 * Ensure that the store/load complete to guarantee all side in kvmppc_read_one_intr()
464 * Need to ensure side effects of above stores in kvmppc_read_one_intr()
470 * We need to re-check host IPI now in case it got set in the in kvmppc_read_one_intr()
472 * guest in kvmppc_read_one_intr()
474 host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi); in kvmppc_read_one_intr()
485 /* Let side effects complete */ in kvmppc_read_one_intr()
491 local_paca->kvm_hstate.saved_xirr = 0; in kvmppc_read_one_intr()
492 return -1; in kvmppc_read_one_intr()
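The return-code and barrier comments above describe a consume-then-recheck race: an IPI that looks like a guest wakeup must be handed back to the host if host_ipi turns out to have been set concurrently. Below is a user-space model of that control flow, with C11 atomics and a seq_cst fence standing in for the real-mode barriers and XICS accesses; the field names only mirror the kernel's, the XICS EOI/MFRR handling is omitted.

    #include <stdatomic.h>

    struct host_state {
        atomic_int  host_ipi;    /* set by the sender before it raises the IPI */
        atomic_uint saved_xirr;  /* interrupt we are holding on to for later EOI */
    };

    /* Returns 1 for "host must handle an interrupt", -1 for "guest wakeup only",
     * mirroring the return codes documented above. */
    static int read_one_ipi(struct host_state *hs, unsigned int xirr)
    {
        if (atomic_load(&hs->host_ipi))
            return 1;                           /* clearly a host IPI, pass it up */

        atomic_store(&hs->saved_xirr, xirr);    /* tentatively keep it for later */
        atomic_thread_fence(memory_order_seq_cst);

        /* Re-check: the sender may have set host_ipi after our first look. */
        if (atomic_load(&hs->host_ipi)) {
            atomic_store(&hs->saved_xirr, 0);
            return 1;                           /* hand the IPI back to the host path */
        }
        return -1;                              /* it was only a guest wakeup IPI */
    }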
500 vcpu->arch.ceded = 0; in kvmppc_end_cede()
501 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
502 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
503 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
509 /* Guest must always run with ME enabled, HV disabled. */ in kvmppc_set_msr_hv()
529 new_msr = vcpu->arch.intr_msr; in inject_interrupt()
543 * delivered to the guest), and does not apply if IR=0 or DR=0. in inject_interrupt()
547 (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 && in inject_interrupt()
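The two comments above capture the invariants enforced on the guest MSR and on interrupt delivery: ME forced on, HV forced off, and the relocated (AIL) vectors used only when the LPCR AIL field is 3 and the interrupted context had both IR and DR set. A small sketch of those two checks; the bit positions follow the usual Book3S MSR layout, and the functions are models, not the kernel code.

    #include <stdint.h>

    #define MSR_ME  (1ULL << 12)    /* machine check enable */
    #define MSR_HV  (1ULL << 60)    /* hypervisor state */
    #define MSR_IR  (1ULL << 5)     /* instruction relocation */
    #define MSR_DR  (1ULL << 4)     /* data relocation */

    /* Whatever MSR the guest asks for, ME must stay set and HV must stay clear. */
    static uint64_t sanitize_guest_msr(uint64_t msr)
    {
        return (msr | MSR_ME) & ~MSR_HV;
    }

    /* Interrupts go to the relocated (AIL) vectors only when the LPCR AIL
     * field is 3 and the interrupted context had both IR and DR enabled. */
    static int use_relocated_vectors(uint64_t msr, unsigned int lpcr_ail)
    {
        return lpcr_ail == 3 && (msr & (MSR_IR | MSR_DR)) == (MSR_IR | MSR_DR);
    }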
567 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
578 ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1; in kvmppc_guest_entry_inject_int()
584 if (vcpu->arch.shregs.msr & MSR_EE) { in kvmppc_guest_entry_inject_int()
597 if (vcpu->arch.doorbell_request) { in kvmppc_guest_entry_inject_int()
599 vcpu->arch.vcore->dpdes = 1; in kvmppc_guest_entry_inject_int()
601 vcpu->arch.doorbell_request = 0; in kvmppc_guest_entry_inject_int()
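kvmppc_guest_entry_inject_int() above injects a pending external interrupt only when the guest currently has MSR[EE] set, and turns a software doorbell_request into a set DPDES bit so the core delivers the doorbell itself. The following is a simplified model of that decision; the priority index, the structure layout, and the vcpu-local dpdes field are illustrative (in the kernel, dpdes lives in the vcore).

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_EE              (1ULL << 15)    /* external interrupt enable */
    #define IRQPRIO_EXTERNAL    3               /* illustrative priority index only */

    struct vcpu_model {
        uint64_t      msr;
        unsigned long pending_exceptions;       /* one bit per interrupt priority */
        bool          doorbell_request;
        uint64_t      dpdes;                    /* doorbell pending state image */
    };

    static void guest_entry_inject(struct vcpu_model *v)
    {
        bool ext = (v->pending_exceptions >> IRQPRIO_EXTERNAL) & 1;

        if (ext && (v->msr & MSR_EE)) {
            /* inject the external interrupt; modelled as clearing the pending bit */
            v->pending_exceptions &= ~(1UL << IRQPRIO_EXTERNAL);
        }
        if (v->doorbell_request) {
            v->dpdes = 1;               /* let the core deliver the doorbell itself */
            v->doorbell_request = false;
        }
    }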
610 for (set = 0; set < kvm->arch.tlb_sets; ++set) { in flush_guest_tlb()
622 if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) { in kvmppc_check_need_tlb_flush()
626 cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush); in kvmppc_check_need_tlb_flush()
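flush_guest_tlb()/kvmppc_check_need_tlb_flush() above implement "flush only where needed": a per-guest CPU mask marks processors whose TLB may hold stale guest translations, and each CPU flushes every TLB set and clears its bit on the way into the guest. Below is a user-space sketch of that bookkeeping, with an atomic 64-bit mask standing in for the kernel cpumask and a stub in place of the tlbiel loop.

    #include <stdatomic.h>

    struct guest_model {
        int           tlb_sets;         /* number of congruence classes to flush */
        atomic_ullong need_tlb_flush;   /* one bit per physical CPU */
    };

    static void flush_one_set(int set) { (void)set; /* stands in for tlbiel */ }

    static void check_need_tlb_flush(struct guest_model *g, int pcpu)
    {
        unsigned long long me = 1ULL << pcpu;

        if (atomic_load(&g->need_tlb_flush) & me) {
            for (int set = 0; set < g->tlb_sets; set++)
                flush_one_set(set);
            /* Clear our bit only after the flush is complete. */
            atomic_fetch_and(&g->need_tlb_flush, ~me);
        }
    }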