Lines Matching +full:interrupt +full:- +full:less

// SPDX-License-Identifier: GPL-2.0-only
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>

/* -- ICS routines -- */

in ics_rm_check_resend():
	struct ics_irq_state *state = &ics->irq_state[i];
	if (state->resend)
		icp_rm_deliver_irq(xics, icp, state->number, true);

/* -- ICP routines -- */

in icp_send_hcore_msg():
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;

 * Returns -1 if no CPU could be found in the host

in grab_next_hostcore():
	return -1;

in find_available_hostcore():
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;
	...
	if (core == -1)

in icp_rm_set_vcpu_irq():
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	...
	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	...
	cpu = vcpu->arch.thread_cpu;
	...
	hcore = -1;
	...
	if (hcore != -1) {
	...
	this_icp->rm_action |= XICS_RM_KICK_VCPU;
	this_icp->rm_kick_target = vcpu;
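
The tail of this excerpt is the general real-mode escape hatch: if the wakeup IPI cannot be sent from real mode (no free host core, hcore == -1), the work is queued in rm_action and check_too_hard() below returns H_TOO_HARD, so the host finishes the kick in virtual mode. A compact model of that "do it now or queue it for the host" shape; the flag value, the find_core() stub, and the helper names are illustrative, not the kernel's:

	#include <stdint.h>

	#define XICS_RM_KICK_VCPU 0x1	/* illustrative flag value */

	struct vcpu;
	struct icp_model {
		uint32_t rm_action;	/* work deferred to virtual mode */
		struct vcpu *rm_kick_target;
	};

	/* Stubbed: a real implementation scans for an idle host core. */
	static int find_core(void) { return -1; }
	static void msg_send(int hcore, struct vcpu *v) { (void)hcore; (void)v; }

	static void set_vcpu_irq(struct icp_model *this_icp, struct vcpu *target)
	{
		int hcore = find_core();

		if (hcore != -1) {
			msg_send(hcore, target);	/* fast path: IPI now */
		} else {
			/* Slow path: record the kick; the hcall returns
			 * H_TOO_HARD and virtual mode completes it. */
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = target;
		}
	}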

in icp_rm_clr_vcpu_irq():
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);

in icp_rm_try_update():
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	...
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	...
	 * the state already. This is why we never clear the interrupt output
	...
	 * for that is that we opportunistically remove the pending interrupt
	...
	 * interrupt is still pending.
	...
	icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);
	...
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;
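
The cmpxchg64() above is the core trick of this file: the whole ICP (presentation controller) state fits in one 64-bit word, so hcalls snapshot it, compute a new value, and publish it with a single compare-and-swap instead of taking a lock. A minimal portable model of the pattern, using C11 stdatomic in place of the kernel's cmpxchg64(); the field layout is illustrative (the real union also carries pending_pri and more):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	union icp_state {
		uint64_t raw;
		struct {
			uint8_t  cppr;		/* current priority ceiling */
			uint8_t  mfrr;		/* IPI request register */
			uint32_t xisr;		/* pending source, 0 = none */
			bool     need_resend;	/* a reject is outstanding */
		};
	};

	static void icp_set_cppr(_Atomic uint64_t *state, uint8_t new_cppr)
	{
		union icp_state old, new;

		old.raw = atomic_load(state);
		do {
			new = old;
			new.cppr = new_cppr;
			/* On failure the CAS refreshes old.raw with the
			 * current value and the loop recomputes from it. */
		} while (!atomic_compare_exchange_weak(state, &old.raw, new.raw));
	}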

in check_too_hard():
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;

in icp_rm_check_resend():
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];
	...
	if (!test_and_clear_bit(icsid, icp->resend_map))
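
Note the double check above: for_each_set_bit() finds candidate ICS ids, but the bit is only consumed via test_and_clear_bit(), so two concurrent scanners cannot both resend the same ICS. A standalone model of that claim-by-atomic-clear walk (hand-rolled stand-ins for the kernel's bitmap helpers; assumes a 64-bit unsigned long):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define MAX_ICSID 64
	static _Atomic unsigned long resend_map[(MAX_ICSID + 63) / 64];

	/* Atomically clear one bit, returning its previous value. */
	static bool test_and_clear(_Atomic unsigned long *map, int bit)
	{
		unsigned long mask = 1UL << (bit % 64);
		return atomic_fetch_and(&map[bit / 64], ~mask) & mask;
	}

	static void check_resend(void (*resend_ics)(int icsid))
	{
		for (int icsid = 0; icsid < MAX_ICSID; icsid++) {
			/* Cheap skip if the bit is clear... */
			if (!(atomic_load(&resend_map[icsid / 64]) &
			      (1UL << (icsid % 64))))
				continue;
			/* ...then claim it, so only one walker resends. */
			if (test_and_clear(resend_map, icsid))
				resend_ics(icsid);
		}
	}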

in icp_rm_try_to_deliver():
	old_state = new_state = READ_ONCE(icp->state);

in icp_rm_deliver_irq():
	 * This is used both for initial delivery of an interrupt and
	...
	 * so potentially the ICP can already accept the interrupt again.
	...
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	...
	xics->err_noics++;
	...
	state = &ics->irq_state[src];
	...
	arch_spin_lock(&ics->lock);
	...
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
	...
	xics->err_noicp++;
	...
	if (!state->resend)
	...
	/* Clear the resend bit of that interrupt */
	state->resend = 0;
	...
	 * interrupt that was rejected and isn't consistent with
	...
	if (state->priority == MASKED) {
		state->masked_pending = 1;
	...
	 * rejected an interrupt that was "delivered" before we took the
	...
	 * new guy. We cannot assume that the rejected interrupt is less
	...
	 * the rejected interrupt might actually be already acceptable.
	...
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
	...
	arch_spin_unlock(&ics->lock);
	icp->n_reject++;
	...
	 * We failed to deliver the interrupt, so we need to set the
	...
	state->resend = 1;
	...
	set_bit(ics->icsid, icp->resend_map);
	...
	if (!icp->state.need_resend) {
		state->resend = 0;
		arch_spin_unlock(&ics->lock);
	...
	arch_spin_unlock(&ics->lock);
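
Taken together, icp_rm_deliver_irq() is a retry loop: look up the source, and either park it (masked), push it into the ICP, chase an interrupt that got rejected in its place, or set the resend bookkeeping for a later pass. A control-flow sketch of that loop; the lookup/try_to_deliver stubs and the single-table layout are simplifications, not the kernel's structures:

	#include <stdbool.h>
	#include <stdint.h>

	#define MASKED 0xff	/* XICS priority 0xff = source masked */

	struct src_state { uint8_t priority; bool resend, masked_pending; };

	static struct src_state srcs[1024];
	static struct src_state *lookup(uint32_t irq) { return &srcs[irq % 1024]; }
	static void mark_ics_resend(uint32_t irq) { (void)irq; }
	/* Stub: accept everything; a real ICP may hand back a reject. */
	static bool try_to_deliver(uint32_t irq, uint8_t pri, uint32_t *reject)
	{
		(void)irq; (void)pri; *reject = 0; return true;
	}

	static void deliver_irq(uint32_t new_irq)
	{
		struct src_state *state;
		uint32_t reject;

	again:
		state = lookup(new_irq);
		reject = 0;

		if (state->priority == MASKED) {
			state->masked_pending = true;	/* replay on unmask */
			return;
		}
		if (try_to_deliver(new_irq, state->priority, &reject)) {
			if (reject) {
				/* We displaced a less-favored interrupt;
				 * it is now ours to deliver in turn. */
				new_irq = reject;
				goto again;
			}
		} else {
			/* ICP ceiling too high: queue for a later resend. */
			state->resend = true;
			mark_ics_resend(new_irq);
		}
	}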

in icp_rm_down_cppr():
	old_state = new_state = READ_ONCE(icp->state);
	...
	 * The logic is that we cannot have a pending interrupt
	...
	 * know that either the pending interrupt is already an
	...
	icp->n_check_resend++;
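
The invariant behind icp_rm_down_cppr() is the XICS favored-ness rule: numerically smaller priorities are more favored, and the ICP only presents a source that beats the CPPR ceiling. Lowering the ceiling can never force a rejection, but it can make previously rejected interrupts deliverable again, hence the n_check_resend bump. The rule as a predicate (hypothetical helper name):

	#include <stdbool.h>
	#include <stdint.h>

	/* XICS: 0 is most favored, 0xff least (MASKED). A source at
	 * priority 'pri' may be presented only while it beats the CPPR. */
	static bool can_present(uint8_t pri, uint8_t cppr)
	{
		return pri < cppr;
	}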

in xics_rm_h_xirr():
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	...
	if (!xics || !xics->real_mode)
	...
	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);
	...
	 * Return the pending interrupt (if any) along with the
	...
	old_state = new_state = READ_ONCE(icp->state);
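
H_XIRR's return value packs two fields into 32 bits: the source number in the low 24 bits and, in the top byte, the CPPR at acceptance (accepting an interrupt raises CPPR to that interrupt's priority, and the snapshot above consumes XISR). A sketch of the packing, with a hypothetical helper name:

	#include <stdint.h>

	/* XIRR layout: [31:24] = CPPR at acceptance, [23:0] = XISR source. */
	static uint32_t make_xirr(uint8_t cppr, uint32_t xisr)
	{
		return ((uint32_t)cppr << 24) | (xisr & 0x00ffffff);
	}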

in xics_rm_h_ipi():
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	...
	if (!xics || !xics->real_mode)
	...
	local = this_icp->server_num == server;
	...
	icp = kvmppc_xics_find_server(vcpu->kvm, server);
	...
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	...
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	...
	 * where the pending interrupt is the one that was rejected. But
	...
	 * whenever the MFRR is made less favored.
	...
	old_state = new_state = READ_ONCE(icp->state);
	...
	/* Reject a pending interrupt if not an IPI */
	...
	this_icp->n_reject++;
	...
	this_icp->n_check_resend++;
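
H_IPI writes the target ICP's MFRR; the IPI (source XICS_IPI) is presented only if the MFRR beats both the CPPR ceiling and whatever is already pending, and a displaced external interrupt must be rejected back to its ICS, which is why both n_reject and n_check_resend appear above. A sketch of that decision on a plain snapshot (the kernel runs this inside its cmpxchg retry loop; names are illustrative):

	#include <stdint.h>

	#define XICS_IPI 2	/* the IPI's fixed source number in this code */

	struct icp_snapshot {
		uint8_t  cppr, mfrr, pending_pri;
		uint32_t xisr;	/* currently pending source, 0 = none */
	};

	/* Returns a source that must be rejected back to its ICS, or 0. */
	static uint32_t set_mfrr(struct icp_snapshot *s, uint8_t mfrr)
	{
		uint32_t reject = 0;

		s->mfrr = mfrr;
		/* The IPI wins only if more favored than ceiling and rival. */
		if (mfrr < s->cppr && mfrr < s->pending_pri) {
			if (s->xisr && s->xisr != XICS_IPI)
				reject = s->xisr;	/* displaced irq */
			s->xisr = XICS_IPI;
			s->pending_pri = mfrr;
		}
		return reject;
	}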

in xics_rm_h_cppr():
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	...
	if (!xics || !xics->real_mode)
	...
	if (cppr > icp->state.cppr) {
	...
	} else if (cppr == icp->state.cppr)
	...
	 * in a rejection of a pending interrupt:
	...
	icp_rm_clr_vcpu_irq(icp->vcpu);
	...
	old_state = new_state = READ_ONCE(icp->state);
	...
	icp->n_reject++;

in ics_rm_eoi():
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	...
	state = &ics->irq_state[src];
	...
	if (state->lsi)
		pq_new = state->pq_state;
	...
	pq_old = state->pq_state;
	...
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
	...
	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	...
	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
	...
	++vcpu->stat.pthru_host;
	if (state->intr_cpu != pcpu) {
		++vcpu->stat.pthru_bad_aff;
		xics_opal_set_server(state->host_irq, pcpu);
	...
	state->intr_cpu = -1;
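
The cmpxchg loop above implements the P/Q ("presented"/"queued") discipline: EOI of an edge (MSI) source retires P, and an occurrence that arrived while the source was presented sits in Q and must be re-delivered; an LSI instead stays pending while the line is asserted. A portable model of the MSI EOI step; the bit values and the shift encoding (a right shift retires P and promotes Q in one step) are choices made for this sketch, not quoted from the kernel headers:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define PQ_PRESENTED 0x1	/* assumed: P in bit 0 */
	#define PQ_QUEUED    0x2	/* assumed: Q one bit above P */

	/* Clear P on EOI; report whether a queued occurrence re-presents. */
	static bool eoi_msi(_Atomic uint8_t *pq_state)
	{
		uint8_t pq_old, pq_new;

		pq_old = atomic_load(pq_state);
		do {
			pq_new = pq_old >> 1;	/* retire P, promote Q */
		} while (!atomic_compare_exchange_weak(pq_state, &pq_old, pq_new));

		/* True means: deliver the source again, mirroring the
		 * icp_rm_deliver_irq() call on the resend path. */
		return pq_new & PQ_PRESENTED;
	}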

in xics_rm_h_eoi():
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	...
	if (!xics || !xics->real_mode)
	...
	 * a pending interrupt, this is a SW error and PAPR specifies

in icp_eoi():
	xics_phys = local_paca->kvm_hstate.xics_phys;

 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real-mode. Handles vmalloc'ed addresses
...
 * - state flags represent internal IRQ state and are not expected to be
...
 * - more importantly, these are useful for edge triggered interrupts,
...
 * However, we do update irq_stats - we somewhat duplicate the code in
...
 * The only difference is that desc->kstat_irqs is an allocated per CPU
...
 * per CPU variable and it should be accessible by real-mode KVM.
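
The comment block above motivates this_cpu_inc_rm(): real-mode code cannot use the regular per-CPU accessors because the counter may live at a vmalloc'ed address that must first be translated before it can be dereferenced with translation off. The kernel does that translation in place; a deliberately simplified stand-in that keeps only the "one private slot per CPU, plain increment from the owning CPU" idea:

	#include <stdint.h>

	#define NR_CPUS_MODEL 2048	/* illustrative sizing */

	static uint32_t irq_stats[NR_CPUS_MODEL];

	/* No atomics needed: each CPU only ever touches its own slot. */
	static void this_cpu_inc_model(uint32_t *stats, int my_cpu)
	{
		stats[my_cpu]++;
	}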

in kvmppc_rm_handle_irq_desc():
	this_cpu_inc_rm(&desc->kstat_irqs->cnt);

in kvmppc_deliver_irq_passthru():
	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;
	...
	kvmppc_rm_handle_irq_desc(irq_map->desc);
	...
	state = &ics->irq_state[src];
	...
	pq_old = state->pq_state;
	...
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
	...
	/* EOI the interrupt */
	icp_eoi(irq_desc_get_irq_data(irq_map->desc), irq_map->r_hwirq, xirr, again);
	...
	return -2;
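
On the delivery side of passthrough, the P/Q update mirrors the EOI path: presenting a source sets P, and a second occurrence that lands while P is set is coalesced into Q rather than injected twice. A model of that presentation step, using the same assumed bit values as the EOI sketch above (the kernel's exact update differs in detail):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define PQ_PRESENTED 0x1	/* assumed, as in the EOI sketch */
	#define PQ_QUEUED    0x2

	/* True: inject now. False: coalesced behind the presented one. */
	static bool present_source(_Atomic uint8_t *pq_state)
	{
		uint8_t pq_old, pq_new;

		pq_old = atomic_load(pq_state);
		do {
			if (pq_old & PQ_PRESENTED)
				pq_new = pq_old | PQ_QUEUED;
			else
				pq_new = pq_old | PQ_PRESENTED;
		} while (!atomic_compare_exchange_weak(pq_state, &pq_old, pq_new));

		return !(pq_old & PQ_PRESENTED);
	}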

/* --- Non-real mode XICS-related built-in routines --- */

in rm_host_ipi_action():
	kvmppc_host_rm_ops_hv->vcpu_kick(data);

in kvmppc_xics_ipi_action():
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];
	...
	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);
	...
	rm_corep->rm_data = NULL;
	...
	rm_corep->rm_state.rm_action = 0;
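
kvmppc_xics_ipi_action() is the host half of the rm_core handoff: the real-mode sender published its argument in rm_data and its request in rm_action; the host consumes rm_data, clears it, and only then releases the slot by zeroing rm_action, so the slot cannot be reclaimed while an action is still in flight. A sketch of that consume-then-release ordering, with C11 atomics standing in for the kernel's barriers:

	#include <stdatomic.h>
	#include <stddef.h>

	struct rm_core_slot {
		_Atomic int rm_action;		/* request from real mode */
		_Atomic(void *) rm_data;	/* argument, e.g. target vcpu */
	};

	static void host_ipi_action(struct rm_core_slot *slot,
				    void (*vcpu_kick)(void *))
	{
		void *data = atomic_load(&slot->rm_data);

		if (data) {
			vcpu_kick(data);		/* perform the action */
			atomic_store(&slot->rm_data, NULL);
			/* Release last: a new sender may claim the slot
			 * only after the previous action is fully done. */
			atomic_store(&slot->rm_action, 0);
		}
	}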