Lines matching references to xc — the per-vCPU XIVE context pointer (struct kvmppc_xive_vcpu *xc) in the KVM PPC XIVE interrupt controller code. Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" marks whether xc is a function parameter or a local variable at that site.

39 static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc)  in xive_vm_ack_pending()  argument
65 xc->pending |= 1 << cppr; in xive_vm_ack_pending()
68 if (cppr >= xc->hw_cppr) in xive_vm_ack_pending()
70 smp_processor_id(), cppr, xc->hw_cppr); in xive_vm_ack_pending()
77 xc->hw_cppr = cppr; in xive_vm_ack_pending()
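
The fragments above show only the xc dereferences; the surrounding logic folds a hardware acknowledge into software state: record the acked priority in the pending bitmap and adopt the CPPR the hardware now holds. A minimal user-space model of that bookkeeping (the struct layout and names below are inferred from the fragments, not the kernel's definitions):

#include <stdint.h>

/* Toy model of the per-vCPU state visible in the fragments. */
struct xc_model {
	uint8_t pending;	/* bitmap, one bit per priority 0..7 */
	uint8_t cppr;		/* software current processor priority */
	uint8_t hw_cppr;	/* last CPPR value seen in / pushed to HW */
};

static void ack_pending(struct xc_model *xc, uint8_t acked_cppr)
{
	/* Mirrors "xc->pending |= 1 << cppr": something is now queued
	 * at this priority. */
	xc->pending |= 1 << acked_cppr;

	/* The kernel warns when cppr >= xc->hw_cppr (line 68): an ack
	 * should only move CPPR to a more favored, i.e. numerically
	 * lower, value. Either way the new value is adopted. */
	xc->hw_cppr = acked_cppr;
}
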
133 static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc, in xive_vm_scan_interrupts() argument
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in xive_vm_scan_interrupts()
152 if (prio >= xc->cppr || prio > 7) { in xive_vm_scan_interrupts()
153 if (xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
154 prio = xc->mfrr; in xive_vm_scan_interrupts()
161 q = &xc->queues[prio]; in xive_vm_scan_interrupts()
196 xive_vm_source_eoi(xc->vp_ipi, in xive_vm_scan_interrupts()
197 &xc->vp_ipi_data); in xive_vm_scan_interrupts()
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
235 prio = xc->mfrr; in xive_vm_scan_interrupts()
252 xc->pending = pending; in xive_vm_scan_interrupts()
272 xc->cppr = prio; in xive_vm_scan_interrupts()
279 if (xc->cppr != xc->hw_cppr) { in xive_vm_scan_interrupts()
280 xc->hw_cppr = xc->cppr; in xive_vm_scan_interrupts()
281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_scan_interrupts()
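
The scan picks the most favored (numerically lowest) pending priority, lets a pending IPI (mfrr) win ties or beat the queues outright (lines 152-154 and 234-235), and delivers only what is more favored than the current CPPR; a successful fetch then raises CPPR to the delivered priority and syncs the hardware copy (lines 272-281). A simplified single-pass model, with each per-priority event queue reduced to one cached head entry (all names illustrative):

#include <stdint.h>

#define XICS_IPI 2	/* the XICS IPI source number */

struct xc_model {
	uint8_t pending;	/* bitmap of priorities 0..7 with queued irqs */
	uint8_t cppr;		/* lower number = more favored */
	uint8_t hw_cppr;
	uint8_t mfrr;		/* pending IPI priority, 0xff = none */
	uint32_t queue_head[8];	/* head irq of each priority queue */
};

static uint32_t scan_interrupts(struct xc_model *xc)
{
	uint8_t prio;
	uint32_t hirq;

	/* Most favored queued priority, 8 if nothing is pending. */
	for (prio = 0; prio < 8; prio++)
		if (xc->pending & (1 << prio))
			break;

	if (prio > 7 || prio >= xc->cppr) {
		/* No queued irq beats CPPR; maybe the IPI does. */
		if (xc->mfrr >= xc->cppr)
			return 0;
		prio = xc->mfrr;
		hirq = XICS_IPI;
	} else if (xc->mfrr <= prio) {
		/* A queued irq qualifies but the IPI is at least as
		 * favored, so the IPI goes first (lines 234-235). */
		prio = xc->mfrr;
		hirq = XICS_IPI;
	} else {
		hirq = xc->queue_head[prio];
		xc->pending &= ~(1 << prio);	/* queue consumed */
	}

	/* Fetching raises the priority; the real code then writes the
	 * new CPPR to the TIMA at TM_QW1_OS + TM_CPPR (line 281). */
	xc->cppr = prio;
	xc->hw_cppr = xc->cppr;
	return hirq;
}

The real loop keeps rescanning while anything stays pending, refills the pending bits from the queue pages, and EOIs the internal IPI (lines 196-197) once the mfrr no longer needs it; none of that is modeled here.
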
289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_xirr() local
295 xc->stat_vm_h_xirr++; in xive_vm_h_xirr()
298 xive_vm_ack_pending(xc); in xive_vm_h_xirr()
301 xc->pending, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
304 old_cppr = xive_prio_to_guest(xc->cppr); in xive_vm_h_xirr()
307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch); in xive_vm_h_xirr()
310 hirq, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll() local
339 u8 pending = xc->pending; in xive_vm_h_ipoll()
344 xc->stat_vm_h_ipoll++; in xive_vm_h_ipoll()
347 if (xc->server_num != server) { in xive_vm_h_ipoll()
351 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll()
364 hirq = xive_vm_scan_interrupts(xc, pending, scan_poll); in xive_vm_h_ipoll()
367 kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24)); in xive_vm_h_ipoll()
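
Both hypercalls hand their result back in GPR4 using the XICS XIRR layout: interrupt source in the low 24 bits, a CPPR value in the top byte. H_XIRR reports the CPPR from before the fetch (captured at line 304, since the fetch itself raises CPPR), while H_IPOLL scans non-destructively (scan_poll) and reports the current CPPR, exactly the packing visible at line 367. A sketch:

#include <stdint.h>

/* XIRR layout: | cppr (8 bits) | interrupt source (24 bits) | */
static inline uint32_t make_xirr(uint8_t cppr, uint32_t hirq)
{
	return ((uint32_t)cppr << 24) | (hirq & 0x00ffffff);
}

So "hirq | (xc->cppr << 24)" at line 367 is make_xirr(xc->cppr, hirq); the H_XIRR path packs the saved old_cppr instead (its return line is not part of this listing).
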
372 static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc) in xive_vm_push_pending_to_hw() argument
376 pending = xc->pending; in xive_vm_push_pending_to_hw()
377 if (xc->mfrr != 0xff) { in xive_vm_push_pending_to_hw()
378 if (xc->mfrr < 8) in xive_vm_push_pending_to_hw()
379 pending |= 1 << xc->mfrr; in xive_vm_push_pending_to_hw()
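
push_pending_to_hw folds the buffered IPI into the same pending byte before presenting it to the hardware: 0xff means no IPI, and only priorities 0..7 have a bit to set. A sketch of the fold (what the kernel then does with the byte is not visible in these fragments):

#include <stdint.h>

static uint8_t fold_mfrr_into_pending(uint8_t pending, uint8_t mfrr)
{
	if (mfrr != 0xff) {	/* 0xff = no IPI buffered (line 377) */
		if (mfrr < 8)	/* only 8 priority bits exist (line 378) */
			pending |= 1 << mfrr;
	}
	return pending;
}
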
391 struct kvmppc_xive_vcpu *xc) in xive_vm_scan_for_rerouted_irqs() argument
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { in xive_vm_scan_for_rerouted_irqs()
397 struct xive_q *q = &xc->queues[prio]; in xive_vm_scan_for_rerouted_irqs()
429 if (xc->server_num == state->act_server) in xive_vm_scan_for_rerouted_irqs()
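
The rerouted-irq scan walks every queue from the new CPPR upward (line 396) looking for entries whose interrupt has since been retargeted to a different server (line 429). XIVE event queues are rings of 32-bit big-endian entries whose top bit is a generation bit that flips on each wrap: an entry is live while that bit differs from the reader's toggle. A user-space model of the ring walk, byte-swapping omitted and the reroute test stubbed:

#include <stdint.h>
#include <stdbool.h>

/* Model of one event queue: power-of-2 ring of 32-bit entries,
 * bit 31 = generation, bits 0-30 = irq number. */
struct eq_model {
	uint32_t *qpage;
	uint32_t msk;		/* ring size - 1 */
	uint32_t idx;		/* next entry to read */
	uint32_t toggle;	/* expected generation polarity */
};

static bool rerouted(uint32_t irq) { (void)irq; return false; }	/* stub */

static void scan_queue(struct eq_model *q)
{
	uint32_t idx = q->idx, toggle = q->toggle;

	for (;;) {
		uint32_t entry = q->qpage[idx];

		if ((entry >> 31) == toggle)
			break;		/* generation matches: no more live data */
		if (rerouted(entry & 0x7fffffff)) {
			/* the kernel swaps in a dummy entry here and
			 * retriggers the source at its new target */
		}
		idx = (idx + 1) & q->msk;
		if (idx == q->idx)	/* full lap */
			break;
		toggle ^= (idx == 0);	/* polarity flips on wrap */
	}
}
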
458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_cppr() local
464 xc->stat_vm_h_cppr++; in xive_vm_h_cppr()
470 old_cppr = xc->cppr; in xive_vm_h_cppr()
471 xc->cppr = cppr; in xive_vm_h_cppr()
486 xive_vm_push_pending_to_hw(xc); in xive_vm_h_cppr()
505 xive_vm_scan_for_rerouted_irqs(xive, xc); in xive_vm_h_cppr()
509 xc->hw_cppr = cppr; in xive_vm_h_cppr()
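
H_CPPR distinguishes the two directions the priority can move. Raising the value masks less, so anything buffered (including a pending mfrr) must be pushed to the hardware or it would never fire (line 486); lowering it masks more, so the queues must be purged of interrupts rerouted to another vCPU while masked here (line 505). A model of that dispatch, with the branch condition as in the upstream function:

#include <stdint.h>

struct xc_model {
	uint8_t cppr;
	uint8_t hw_cppr;
};

static void push_pending_to_hw(struct xc_model *xc) { (void)xc; }	/* stub */
static void scan_for_rerouted_irqs(struct xc_model *xc) { (void)xc; }	/* stub */

static void h_cppr(struct xc_model *xc, uint8_t new_cppr)
{
	uint8_t old_cppr = xc->cppr;

	xc->cppr = new_cppr;
	/* (the kernel has a memory barrier here, ordering this store
	 * against the mfrr read inside push_pending_to_hw) */

	if (new_cppr > old_cppr)
		push_pending_to_hw(xc);		/* masking less: deliver backlog */
	else
		scan_for_rerouted_irqs(xc);	/* masking more: evict moved irqs */

	xc->hw_cppr = new_cppr;			/* apply to the TIMA last (509) */
}
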
520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_eoi() local
529 xc->stat_vm_h_eoi++; in xive_vm_h_eoi()
531 xc->cppr = xive_prio_from_guest(new_cppr); in xive_vm_h_eoi()
609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi); in xive_vm_h_eoi()
610 xive_vm_push_pending_to_hw(xc); in xive_vm_h_eoi()
611 pr_devel(" after scan pending=%02x\n", xc->pending); in xive_vm_h_eoi()
614 xc->hw_cppr = xc->cppr; in xive_vm_h_eoi()
615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_h_eoi()
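
H_EOI ends the guest's current interrupt: the new CPPR arrives with the hypercall (line 531), the source is EOIed, and the same scan/push machinery re-evaluates what to present next before the final CPPR reaches the TIMA (lines 609-615). The ordering, reduced to its xc-visible steps (the EOI of the source itself is not part of this listing; stubs throughout):

#include <stdint.h>

struct xc_model { uint8_t cppr, hw_cppr; };

static uint8_t prio_from_guest(uint8_t p) { return p; }	/* identity stub */
static void eoi_source(void) { }			/* stub */
static void scan_interrupts(struct xc_model *xc) { (void)xc; }
static void push_pending_to_hw(struct xc_model *xc) { (void)xc; }

static void h_eoi(struct xc_model *xc, uint8_t guest_cppr)
{
	xc->cppr = prio_from_guest(guest_cppr);	/* guest -> HW numbering (531) */
	eoi_source();				/* assumed step, see above */
	scan_interrupts(xc);			/* scan_eoi pass (609) */
	push_pending_to_hw(xc);			/* re-arm the backlog (610) */
	xc->hw_cppr = xc->cppr;			/* one TIMA write at the end (614-615) */
}
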
623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi() local
627 xc->stat_vm_h_ipi++; in xive_vm_h_ipi()
633 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi()
636 xc->mfrr = mfrr; in xive_vm_h_ipi()
651 if (mfrr < xc->cppr) in xive_vm_h_ipi()
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in xive_vm_h_ipi()
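
H_IPI stores the requested priority into the *target* vCPU's mfrr (xc is re-fetched for the target at line 633) and pokes the hardware, via a store to the internal IPI's trigger page, only when the new mfrr is more favored than the target's CPPR (lines 651-652); otherwise the buffered mfrr is picked up later by the scan or by push_pending_to_hw. A model:

#include <stdint.h>
#include <stdio.h>

struct xc_model {
	uint8_t cppr;
	uint8_t mfrr;
};

/* Stands in for "__raw_writeq(0, trigger_page)": any store to a XIVE
 * trigger page fires the interrupt. */
static void trigger_ipi(struct xc_model *xc)
{
	(void)xc;
	puts("IPI triggered");
}

static void h_ipi(struct xc_model *target, uint8_t mfrr)
{
	target->mfrr = mfrr;
	/* (the kernel orders this store against the cppr read below) */
	if (mfrr < target->cppr)
		trigger_ipi(target);
}
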
665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_vcpu_has_save_restore() local
668 return xc->vp_cam & TM_QW1W2_HO; in kvmppc_xive_vcpu_has_save_restore()
673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_check_save_restore() local
674 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_check_save_restore()
869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_attach_escalation() local
870 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
875 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
880 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
882 prio, xc->server_num); in kvmppc_xive_attach_escalation()
888 vcpu->kvm->arch.lpid, xc->server_num); in kvmppc_xive_attach_escalation()
891 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
894 prio, xc->server_num); in kvmppc_xive_attach_escalation()
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
905 prio, xc->server_num); in kvmppc_xive_attach_escalation()
908 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
930 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
931 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
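
Attaching an escalation is an acquire-and-unwind sequence: bail if already attached (line 875), create the Linux mapping for the escalation hwirq (879), request the handler (901), and on failure dispose of what was acquired and zero the cached virq so later cleanup skips it (930-931). The shape of the pattern, with the kernel calls stubbed:

static unsigned int stub_create_mapping(unsigned int hwirq) { return hwirq + 1; }
static int stub_request_irq(unsigned int virq) { (void)virq; return 0; }
static void stub_dispose_mapping(unsigned int virq) { (void)virq; }

static int attach_escalation(unsigned int *esc_virq, unsigned int hwirq)
{
	int rc;

	if (*esc_virq)			/* already attached (line 875) */
		return 0;

	*esc_virq = stub_create_mapping(hwirq);
	if (!*esc_virq)
		return -1;		/* an errno in the kernel */

	rc = stub_request_irq(*esc_virq);
	if (rc)
		goto error;

	return 0;
error:
	stub_dispose_mapping(*esc_virq);
	*esc_virq = 0;			/* so cleanup won't free it twice */
	return rc;
}
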
938 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue() local
939 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
940 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
951 prio, xc->server_num); in xive_provision_queue()
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
967 prio, xc->server_num); in xive_provision_queue()
1008 struct kvmppc_xive_vcpu *xc; in xive_inc_q_pending() local
1017 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
1018 if (WARN_ON(!xc)) in xive_inc_q_pending()
1021 q = &xc->queues[prio]; in xive_inc_q_pending()
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue() local
1031 if (WARN_ON(!xc)) in xive_try_pick_queue()
1033 if (!xc->valid) in xive_try_pick_queue()
1036 q = &xc->queues[prio]; in xive_try_pick_queue()
1481 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp() local
1483 if (!xc) in kvmppc_xive_get_icp()
1487 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
1488 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
1494 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp() local
1499 if (!xc || !xive) in kvmppc_xive_set_icp()
1509 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
1521 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
1529 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
1531 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
1543 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
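
get_icp/set_icp serialize the emulated XICS presenter into a single u64 for the KVM ONE_REG interface, using the KVM_REG_PPC_ICP_* shifts referenced at lines 1487-1488: CPPR in bits 56-63, XISR in bits 32-55, MFRR in bits 24-31 (values as in the powerpc KVM UAPI header). A pack/unpack sketch:

#include <stdint.h>

/* Shift/mask values mirroring the powerpc KVM UAPI constants. */
#define ICP_CPPR_SHIFT	56
#define ICP_XISR_SHIFT	32
#define ICP_XISR_MASK	0xffffffu
#define ICP_MFRR_SHIFT	24

static uint64_t pack_icp(uint8_t cppr, uint32_t xisr, uint8_t mfrr)
{
	return (uint64_t)cppr << ICP_CPPR_SHIFT |
	       (uint64_t)(xisr & ICP_XISR_MASK) << ICP_XISR_SHIFT |
	       (uint64_t)mfrr << ICP_MFRR_SHIFT;
}

static void unpack_icp(uint64_t v, uint8_t *cppr, uint32_t *xisr, uint8_t *mfrr)
{
	*cppr = v >> ICP_CPPR_SHIFT;
	*xisr = (v >> ICP_XISR_SHIFT) & ICP_XISR_MASK;
	*mfrr = v >> ICP_MFRR_SHIFT;
}

On restore, a pending mfrr retriggers the internal IPI (line 1531), and a pending xisr is parked in delayed_irq (line 1543) to be re-presented once the sources exist.
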
1731 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts() local
1748 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1806 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu() local
1813 if (!xc) in kvmppc_xive_cleanup_vcpu()
1816 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1819 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1823 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1827 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1828 if (kvmppc_xive_has_single_escalation(xc->xive)) in kvmppc_xive_cleanup_vcpu()
1829 xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1830 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1831 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1832 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1837 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1844 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1846 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1855 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1856 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1857 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1860 kfree(xc); in kvmppc_xive_cleanup_vcpu()
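
The teardown fragments imply a strict order: invalidate the context so nothing new is delivered, mask the internal IPI (ESB set to PQ=01), release the escalation interrupts, take the VP down, then free the queues and the IPI, and free xc last. As a stubbed sketch of that sequence:

#include <stdlib.h>

struct xc_model { int valid; };

static void stub_mask_ipi(struct xc_model *xc) { (void)xc; }
static void stub_free_escalation(struct xc_model *xc, int i) { (void)xc; (void)i; }
static void stub_disable_vp(struct xc_model *xc) { (void)xc; }
static void stub_free_queue(struct xc_model *xc, int i) { (void)xc; (void)i; }
static void stub_free_ipi(struct xc_model *xc) { (void)xc; }

static void cleanup_vcpu(struct xc_model *xc)
{
	if (!xc)			/* nothing attached (line 1813) */
		return;
	xc->valid = 0;			/* stop deliveries (1819) */
	stub_mask_ipi(xc);		/* ESB PQ=01 masks the source (1823) */
	for (int i = 0; i < 8; i++)
		stub_free_escalation(xc, i);	/* free_irq + dispose (1827-1832) */
	stub_disable_vp(xc);		/* VP down before its queues (1837) */
	for (int i = 0; i < 8; i++)
		stub_free_queue(xc, i);		/* (1844-1846) */
	stub_free_ipi(xc);		/* cleanup data, free irq (1855-1857) */
	free(xc);			/* (1860) */
}
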
1907 struct kvmppc_xive_vcpu *xc; in kvmppc_xive_connect_vcpu() local
1929 xc = kzalloc(sizeof(*xc), GFP_KERNEL); in kvmppc_xive_connect_vcpu()
1930 if (!xc) { in kvmppc_xive_connect_vcpu()
1935 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1936 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1937 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1938 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1939 xc->vp_id = vp_id; in kvmppc_xive_connect_vcpu()
1940 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1941 xc->valid = true; in kvmppc_xive_connect_vcpu()
1943 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1955 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1958 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1959 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1964 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1966 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1974 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
1988 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
2003 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
2019 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
2021 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
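
connect_vcpu is the mirror image: allocate and initialize xc (mfrr starts at 0xff, no IPI buffered), query the VP for its CAM line, allocate and populate the internal IPI source, enable the VP, configure the queues, then route the IPI at priority 0 as XICS_IPI and unmask it by setting its ESB to PQ=00. A stubbed sketch of the order (error unwinding collapsed to one label; the real function also distinguishes pre-provisioned queues from empty ones):

#include <stdint.h>
#include <stdlib.h>

struct xc_model {
	uint8_t mfrr;
	int valid;
	uint32_t vp_ipi;
};

static int stub_get_vp_info(struct xc_model *xc) { (void)xc; return 0; }
static uint32_t stub_alloc_irq(void) { return 1; }
static int stub_enable_vp(struct xc_model *xc) { (void)xc; return 0; }
static int stub_setup_queue(struct xc_model *xc, int i) { (void)xc; (void)i; return 0; }
static int stub_route_ipi(struct xc_model *xc) { (void)xc; return 0; }
static void stub_unmask_ipi(struct xc_model *xc) { (void)xc; }	/* ESB PQ=00 */

static struct xc_model *connect_vcpu(void)
{
	struct xc_model *xc = calloc(1, sizeof(*xc));	/* kzalloc (1929) */

	if (!xc)
		return NULL;
	xc->mfrr = 0xff;		/* no IPI pending (1940) */
	xc->valid = 1;			/* (1941) */
	if (stub_get_vp_info(xc))	/* CAM word for the thread context (1943) */
		goto err;
	xc->vp_ipi = stub_alloc_irq();	/* internal IPI source (1958) */
	if (!xc->vp_ipi)
		goto err;
	if (stub_enable_vp(xc))		/* VP up before its queues (1974) */
		goto err;
	for (int i = 0; i < 8; i++)
		if (stub_setup_queue(xc, i))	/* (1988-2003) */
			goto err;
	if (stub_route_ipi(xc))		/* prio 0, XICS_IPI (2019) */
		goto err;
	stub_unmask_ipi(xc);		/* PQ=00: ready to fire (2021) */
	return xc;
err:
	free(xc);
	return NULL;
}
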
2144 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan() local
2145 if (!xc) in xive_pre_save_scan()
2148 if (xc->queues[j].qpage) in xive_pre_save_scan()
2149 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
2312 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq() local
2314 if (!xc) in xive_check_delayed_irq()
2317 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
2318 xc->delayed_irq = 0; in xive_check_delayed_irq()
2810 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_debug_show_queues() local
2814 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_debug_show_queues()
2817 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2829 if (xc->esc_virq[i]) { in kvmppc_xive_debug_show_queues()
2830 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2836 xc->esc_virq[i], in kvmppc_xive_debug_show_queues()
2906 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show() local
2908 if (!xc) in xive_debug_show()
2913 xc->server_num, xc->vp_id, xc->vp_chip_id, in xive_debug_show()
2914 xc->cppr, xc->hw_cppr, in xive_debug_show()
2915 xc->mfrr, xc->pending, in xive_debug_show()
2916 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
2920 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
2921 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
2922 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
2923 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
2924 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
2925 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
2926 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
2927 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
2928 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
2929 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()