Lines Matching "num-guest-ids"
1 // SPDX-License-Identifier: GPL-2.0-or-later
31 #include <asm/xive-regs.h>
34 #include "xive-internal.h"
40 #define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \
73 * Use early_cpu_to_node() for hot-plugged CPUs
85 #define XIVE_INVALID_TARGET (-1)
94 return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi; in xive_is_store_eoi()
107 if (!q->qpage) in xive_read_eq()
109 cur = be32_to_cpup(q->qpage + q->idx); in xive_read_eq()
112 if ((cur >> 31) == q->toggle) in xive_read_eq()
118 q->idx = (q->idx + 1) & q->msk; in xive_read_eq()
121 if (q->idx == 0) in xive_read_eq()
122 q->toggle ^= 1; in xive_read_eq()
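The xive_read_eq() lines above are the consumer side of a XIVE event queue: a power-of-two ring of 32-bit big-endian entries whose top bit is a generation marker, so an entry is only valid while that bit differs from the consumer's toggle, and the toggle flips every time the index wraps. A minimal userspace sketch of the same scheme, with an invented producer (eq_push) standing in for the hardware and the byte-swapping omitted:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_ORDER 3                       /* 8 entries */
#define Q_MSK   ((1u << Q_ORDER) - 1)

struct eq {
	uint32_t page[1u << Q_ORDER];   /* entries, bit 31 = generation */
	uint32_t prod, cons;            /* producer / consumer index */
	uint32_t prod_gen, toggle;      /* producer generation, consumer toggle */
};

/* Producer side: roughly what the hardware does when an event fires. */
static void eq_push(struct eq *q, uint32_t irq)
{
	q->page[q->prod] = (q->prod_gen << 31) | (irq & 0x7fffffff);
	q->prod = (q->prod + 1) & Q_MSK;
	if (q->prod == 0)
		q->prod_gen ^= 1;
}

/* Consumer side: mirrors the xive_read_eq() logic in the listing. */
static uint32_t eq_pop(struct eq *q, bool just_peek)
{
	uint32_t cur = q->page[q->cons];

	if ((cur >> 31) == q->toggle)   /* generation matches toggle: stale */
		return 0;
	if (!just_peek) {
		q->cons = (q->cons + 1) & Q_MSK;
		if (q->cons == 0)       /* wrapped: flip expected generation */
			q->toggle ^= 1;
	}
	return cur & 0x7fffffff;
}

int main(void)
{
	struct eq q = { .prod_gen = 1 }; /* first lap is written with bit 31 set */
	uint32_t irq;

	for (uint32_t i = 1; i <= 6; i++)
		eq_push(&q, i);
	while ((irq = eq_pop(&q, false)))
		printf("got irq %u\n", irq);
	for (uint32_t i = 7; i <= 12; i++)      /* crosses the wrap point */
		eq_push(&q, i);
	while ((irq = eq_pop(&q, false)))
		printf("got irq %u\n", irq);
	return 0;
}
```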
157 while (xc->pending_prio != 0) { in xive_scan_interrupts()
160 prio = ffs(xc->pending_prio) - 1; in xive_scan_interrupts()
164 irq = xive_read_eq(&xc->queue[prio], just_peek); in xive_scan_interrupts()
182 xc->pending_prio &= ~(1 << prio); in xive_scan_interrupts()
189 q = &xc->queue[prio]; in xive_scan_interrupts()
190 if (atomic_read(&q->pending_count)) { in xive_scan_interrupts()
191 int p = atomic_xchg(&q->pending_count, 0); in xive_scan_interrupts()
193 WARN_ON(p > atomic_read(&q->count)); in xive_scan_interrupts()
194 atomic_sub(p, &q->count); in xive_scan_interrupts()
204 if (prio != xc->cppr) { in xive_scan_interrupts()
206 xc->cppr = prio; in xive_scan_interrupts()
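xive_scan_interrupts() walks a per-CPU bitmap of pending priorities (bit 0 = priority 0 = most favored), pops the event queue of the best pending priority, clears the bit once a queue scans empty, and finally adjusts the CPPR (current processor priority) so the hardware only interrupts again for something more favored. A rough userspace sketch of that loop, with the event queues faked as plain arrays (scan_interrupts and fake_eq_* are invented names for the sketch):

```c
#include <stdint.h>
#include <stdio.h>
#include <strings.h>            /* ffs() */

#define NR_PRIO 8

static uint32_t fake_eq[NR_PRIO][4];    /* stand-ins for the real event queues */
static int fake_eq_len[NR_PRIO];

static uint32_t fake_eq_pop(int prio)
{
	if (!fake_eq_len[prio])
		return 0;               /* queue empty */
	return fake_eq[prio][--fake_eq_len[prio]];
}

/* Returns the next IRQ to handle, or 0 when nothing is pending. */
static uint32_t scan_interrupts(uint8_t *pending_prio, uint8_t *cppr)
{
	uint32_t irq = 0;
	int prio = 0;

	while (*pending_prio) {
		prio = ffs(*pending_prio) - 1;   /* most favored pending priority */
		irq = fake_eq_pop(prio);
		if (irq)                         /* bit stays set: re-checked at EOI */
			break;
		*pending_prio &= ~(1u << prio);  /* queue drained: no longer pending */
	}
	if (!irq)
		prio = 0xff;                     /* nothing left: fully open the CPPR */
	if (prio != *cppr)
		*cppr = (uint8_t)prio;           /* the real driver also pushes this to HW */
	return irq;
}

int main(void)
{
	uint8_t pending = (1u << 6) | (1u << 2); /* priorities 2 and 6 pending */
	uint8_t cppr = 0xff;
	uint32_t irq;

	fake_eq[2][fake_eq_len[2]++] = 100;
	fake_eq[6][fake_eq_len[6]++] = 200;

	while ((irq = scan_interrupts(&pending, &cppr)))
		printf("irq %u, cppr now %#x\n", irq, cppr);
	return 0;
}
```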
215 * described in xive-regs.h
224 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_read()
225 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); in xive_esb_read()
227 val = in_be64(xd->eoi_mmio + offset); in xive_esb_read()
234 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_write()
235 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); in xive_esb_write()
237 out_be64(xd->eoi_mmio + offset, data); in xive_esb_write()
247 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', in xive_irq_data_dump()
248 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', in xive_irq_data_dump()
249 val & XIVE_ESB_VAL_P ? 'P' : '-', in xive_irq_data_dump()
250 val & XIVE_ESB_VAL_Q ? 'Q' : '-', in xive_irq_data_dump()
251 xd->trig_page, xd->eoi_page); in xive_irq_data_dump()
260 if (!q->qpage) in xive_dump_eq()
262 idx = q->idx; in xive_dump_eq()
263 i0 = be32_to_cpup(q->qpage + idx); in xive_dump_eq()
264 idx = (idx + 1) & q->msk; in xive_dump_eq()
265 i1 = be32_to_cpup(q->qpage + idx); in xive_dump_eq()
267 q->idx, q->toggle, i0, i1); in xive_dump_eq()
276 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); in xmon_xive_do_dump()
282 xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); in xmon_xive_do_dump()
283 xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer); in xmon_xive_do_dump()
286 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); in xmon_xive_do_dump()
305 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); in xmon_xive_get_irq_config()
360 * we could skip this on replays unless the soft-mask tells us in xive_get_irq()
363 xive_ops->update_pending(xc); in xive_get_irq()
365 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); in xive_get_irq()
371 irq, xc->pending_prio); in xive_get_irq()
380 * After EOI'ing an interrupt, we need to re-check the queue
392 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
405 xd->stale_p = false; in xive_do_source_eoi()
415 * PQ bits, as they are automatically re-triggered in HW when in xive_do_source_eoi()
418 if (xd->flags & XIVE_IRQ_FLAG_LSI) { in xive_do_source_eoi()
426 * do a re-trigger if Q was set rather than synthesizing an in xive_do_source_eoi()
432 /* Re-trigger if needed */ in xive_do_source_eoi()
433 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) in xive_do_source_eoi()
434 out_be64(xd->trig_mmio, 0); in xive_do_source_eoi()
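xive_do_source_eoi() above handles end-of-interrupt for a source. With the store-EOI facility it is a single store; otherwise an MSI is EOIed with a load that resets the source's PQ state to 00, and since that load returns the previous state, a set Q bit means another event arrived while this one was in flight, so software re-triggers it by writing to the source's trigger page. LSIs skip the PQ handling because a still-asserted level source re-fires on its own. A toy model of the MSI path (invented names; the 0xc00-0xf00 "set PQ" offsets encode the new PQ value in bits 9:8, matching the XIVE_ESB_SET_PQ_* constants):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESB_SET_PQ_00 0xc00     /* 0xd00/0xe00/0xf00 set PQ=01/10/11 */
#define ESB_VAL_P     0x2
#define ESB_VAL_Q     0x1

struct source {
	uint8_t pq;             /* 2-bit ESB state */
	bool lsi;               /* level-triggered source */
	bool retriggered;       /* stands in for a store to the trigger page */
};

/* Toy ESB load: return the old PQ, install the value encoded in the offset. */
static uint8_t esb_load(struct source *s, unsigned int offset)
{
	uint8_t old = s->pq;

	s->pq = (offset >> 8) & 0x3;
	return old;
}

static void do_source_eoi(struct source *s)
{
	uint8_t old;

	if (s->lsi) {
		/* Real driver: a load at XIVE_ESB_LOAD_EOI, no PQ bookkeeping. */
		return;
	}
	old = esb_load(s, ESB_SET_PQ_00);
	if (old & ESB_VAL_Q)
		s->retriggered = true;  /* real code: out_be64(xd->trig_mmio, 0) */
}

int main(void)
{
	/* P and Q both set: one event delivered, another one coalesced. */
	struct source msi = { .pq = ESB_VAL_P | ESB_VAL_Q, .lsi = false };

	do_source_eoi(&msi);
	printf("pq=%#x retriggered=%d\n", msi.pq, msi.retriggered);
	return 0;
}
```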
444 d->irq, irqd_to_hwirq(d), xc->pending_prio); in xive_irq_eoi()
448 * been passed-through to a KVM guest in xive_irq_eoi()
451 !(xd->flags & XIVE_IRQ_FLAG_NO_EOI)) in xive_irq_eoi()
454 xd->stale_p = true; in xive_irq_eoi()
460 xd->saved_p = false; in xive_irq_eoi()
474 pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un"); in xive_do_source_set_mask()
479 * We need to make sure we don't re-enable it until it in xive_do_source_set_mask()
486 if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) in xive_do_source_set_mask()
487 xd->saved_p = true; in xive_do_source_set_mask()
488 xd->stale_p = false; in xive_do_source_set_mask()
489 } else if (xd->saved_p) { in xive_do_source_set_mask()
491 xd->saved_p = false; in xive_do_source_set_mask()
494 xd->stale_p = false; in xive_do_source_set_mask()
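xive_do_source_set_mask() masks a source by setting PQ to 01; if the old state had P set (and it is not known to be stale), an occurrence of this interrupt is still sitting in some queue, so saved_p records that and a later unmask restores PQ to 10 rather than 00, keeping P set so the queued occurrence is not duplicated by a fresh event. This bookkeeping matters mostly when the interrupt is passed through to a KVM guest, as the later fragments show. A toy model of it (invented names, PQ kept in a plain struct instead of ESB MMIO):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PQ_P 0x2
#define PQ_Q 0x1

struct source {
	uint8_t pq;        /* 2-bit ESB state */
	bool saved_p;      /* P was set when we masked */
	bool stale_p;      /* P is known to be stale */
};

/* Model of an ESB "set PQ" load: returns the old value, installs the new. */
static uint8_t esb_set_pq(struct source *s, uint8_t new_pq)
{
	uint8_t old = s->pq;

	s->pq = new_pq;
	return old;
}

static void source_set_mask(struct source *s, bool mask)
{
	if (mask) {
		uint8_t old = esb_set_pq(s, 0x1 /* PQ=01: masked */);

		if (!s->stale_p && (old & PQ_P))
			s->saved_p = true;
		s->stale_p = false;
	} else if (s->saved_p) {
		esb_set_pq(s, 0x2 /* PQ=10: an event is still queued */);
		s->saved_p = false;
	} else {
		esb_set_pq(s, 0x0 /* PQ=00: fully re-enable */);
		s->stale_p = false;
	}
}

int main(void)
{
	struct source s = { .pq = 0x0 };

	s.pq = PQ_P;                 /* pretend an event fired while enabled */
	source_set_mask(&s, true);   /* mask: remembers P in saved_p */
	printf("masked:   pq=%#x saved_p=%d\n", s.pq, s.saved_p);
	source_set_mask(&s, false);  /* unmask: restores PQ=10, not 00 */
	printf("unmasked: pq=%#x saved_p=%d\n", s.pq, s.saved_p);
	return 0;
}
```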
506 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_try_pick_target()
514 max = (q->msk + 1) - 1; in xive_try_pick_target()
515 return !!atomic_add_unless(&q->count, 1, max); in xive_try_pick_target()
519 * Un-account an interrupt for a target CPU. We don't directly
520 * decrement q->count since the interrupt might still be present
530 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_dec_target_count()
544 atomic_inc(&q->pending_count); in xive_dec_target_count()
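xive_try_pick_target() and xive_dec_target_count() implement a small accounting protocol for the per-CPU event queues: routing an interrupt to a CPU reserves a slot by incrementing q->count (refused when the queue would fill up), but un-routing cannot decrement immediately because an event may still sit in that queue, so the decrement is parked in q->pending_count and folded back in by the scan loop once the queue is seen empty (the atomic_sub() earlier in the listing). A compact sketch of the same idea with C11 atomics (names invented for the sketch):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_acct {
	atomic_int count;          /* interrupts currently routed here */
	atomic_int pending_count;  /* deferred decrements */
	int max;                   /* queue capacity */
};

static bool try_pick_target(struct queue_acct *q)
{
	int old = atomic_load(&q->count);

	/* Equivalent of atomic_add_unless(&q->count, 1, max). */
	while (old < q->max) {
		if (atomic_compare_exchange_weak(&q->count, &old, old + 1))
			return true;
	}
	return false;
}

static void dec_target_count(struct queue_acct *q)
{
	atomic_fetch_add(&q->pending_count, 1);
}

/* Called once the queue has been scanned empty (cf. xive_scan_interrupts). */
static void reconcile(struct queue_acct *q)
{
	int p = atomic_exchange(&q->pending_count, 0);

	atomic_fetch_sub(&q->count, p);
}

int main(void)
{
	struct queue_acct q = { .max = 2 };

	printf("pick #1: %d\n", try_pick_target(&q));   /* 1 */
	printf("pick #2: %d\n", try_pick_target(&q));   /* 1 */
	printf("pick #3: %d\n", try_pick_target(&q));   /* 0: queue full */
	dec_target_count(&q);                           /* one interrupt moved away */
	reconcile(&q);                                  /* old queue seen empty */
	printf("pick #4: %d\n", try_pick_target(&q));   /* 1 again */
	return 0;
}
```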
551 int cpu, first, num, i; in xive_find_target_in_mask() local
554 num = min_t(int, cpumask_weight(mask), nr_cpu_ids); in xive_find_target_in_mask()
555 first = fuzz % num; in xive_find_target_in_mask()
566 /* Remember first one to handle wrap-around */ in xive_find_target_in_mask()
575 * We re-check online as the fallback case passes us in xive_find_target_in_mask()
586 return -1; in xive_find_target_in_mask()
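xive_find_target_in_mask() spreads interrupts across an affinity mask: a rotating fuzz value selects the (fuzz % num)-th CPU of the mask as a starting point, and the search then walks the mask with wrap-around until it finds a CPU that is online (the real code additionally requires xive_try_pick_target() to succeed, i.e. room in that CPU's queue). A simplified sketch with the cpumask replaced by a bool array and the online state simulated (all names invented):

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Simulated online map: CPUs 2 and 6 are offline. */
static const bool cpu_online_sim[NR_CPUS] = { 1, 1, 0, 1, 1, 1, 0, 1 };

static int find_target_in_mask(const bool *mask, unsigned int fuzz)
{
	int num = 0, start = 0, cpu, skip;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		num += mask[cpu];
	if (!num)
		return -1;

	/* Start at the (fuzz % num)-th CPU of the mask. */
	skip = fuzz % num;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mask[cpu] && skip-- == 0) {
			start = cpu;
			break;
		}
	}

	/*
	 * Walk from there with wrap-around until an online CPU in the
	 * mask is found (the real code also checks queue capacity).
	 */
	for (int i = 0; i < NR_CPUS; i++) {
		cpu = (start + i) % NR_CPUS;
		if (mask[cpu] && cpu_online_sim[cpu])
			return cpu;
	}
	return -1;
}

int main(void)
{
	const bool mask[NR_CPUS] = { 0, 1, 1, 0, 1, 0, 0, 1 };

	/* A rotating fuzz spreads successive interrupts across the mask. */
	for (unsigned int fuzz = 0; fuzz < 4; fuzz++)
		printf("fuzz %u -> cpu %d\n", fuzz, find_target_in_mask(mask, fuzz));
	return 0;
}
```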
600 int cpu = -1; in xive_pick_irq_target()
603 * If we have chip IDs, first we try to build a mask of in xive_pick_irq_target()
606 if (xd->src_chip != XIVE_INVALID_CHIP_ID && in xive_pick_irq_target()
608 /* Build a mask of matching chip IDs */ in xive_pick_irq_target()
611 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
616 cpu = -1; in xive_pick_irq_target()
622 fuzz--; in xive_pick_irq_target()
625 /* No chip IDs, fallback to using the affinity mask */ in xive_pick_irq_target()
635 xd->saved_p = false; in xive_irq_startup()
636 xd->stale_p = false; in xive_irq_startup()
638 pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); in xive_irq_startup()
646 return -ENXIO; in xive_irq_startup()
647 pr_warn("irq %d started with broken affinity\n", d->irq); in xive_irq_startup()
655 xd->target = target; in xive_irq_startup()
661 rc = xive_ops->configure_irq(hw_irq, in xive_irq_startup()
663 xive_irq_priority, d->irq); in xive_irq_startup()
679 pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); in xive_irq_shutdown()
681 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) in xive_irq_shutdown()
691 xive_ops->configure_irq(hw_irq, in xive_irq_shutdown()
692 get_hard_smp_processor_id(xd->target), in xive_irq_shutdown()
695 xive_dec_target_count(xd->target); in xive_irq_shutdown()
696 xd->target = XIVE_INVALID_TARGET; in xive_irq_shutdown()
703 pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); in xive_irq_unmask()
712 pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); in xive_irq_mask()
726 pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq); in xive_irq_set_affinity()
730 return -EINVAL; in xive_irq_set_affinity()
736 if (xd->target != XIVE_INVALID_TARGET && in xive_irq_set_affinity()
737 cpu_online(xd->target) && in xive_irq_set_affinity()
738 cpumask_test_cpu(xd->target, cpumask)) in xive_irq_set_affinity()
746 return -ENXIO; in xive_irq_set_affinity()
752 old_target = xd->target; in xive_irq_set_affinity()
755 * Only configure the irq if it's not currently passed-through to in xive_irq_set_affinity()
756 * a KVM guest in xive_irq_set_affinity()
759 rc = xive_ops->configure_irq(hw_irq, in xive_irq_set_affinity()
761 xive_irq_priority, d->irq); in xive_irq_set_affinity()
763 pr_err("Error %d reconfiguring irq %d\n", rc, d->irq); in xive_irq_set_affinity()
768 xd->target = target; in xive_irq_set_affinity()
784 * affect the resend function when re-enabling an edge interrupt. in xive_irq_set_type()
793 return -EINVAL; in xive_irq_set_type()
801 * the LSI vs MSI information apart from the device-tree so in xive_irq_set_type()
806 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { in xive_irq_set_type()
808 d->irq, (u32)irqd_to_hwirq(d), in xive_irq_set_type()
810 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); in xive_irq_set_type()
821 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_retrigger()
846 * This is called by KVM with state non-NULL for enabling in xive_irq_set_vcpu_affinity()
847 * pass-through or NULL for disabling it in xive_irq_set_vcpu_affinity()
854 if (!xd->stale_p) { in xive_irq_set_vcpu_affinity()
855 xd->saved_p = !!(pq & XIVE_ESB_VAL_P); in xive_irq_set_vcpu_affinity()
856 xd->stale_p = !xd->saved_p; in xive_irq_set_vcpu_affinity()
860 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
865 WARN_ON(xd->saved_p); in xive_irq_set_vcpu_affinity()
873 * the guest. Also remember the value of P. in xive_irq_set_vcpu_affinity()
879 * as this is the case, we must not hard-unmask it when in xive_irq_set_vcpu_affinity()
885 if (xd->saved_p) { in xive_irq_set_vcpu_affinity()
891 * target to the guest. That should guarantee us in xive_irq_set_vcpu_affinity()
895 * to the guest queue. in xive_irq_set_vcpu_affinity()
897 if (xive_ops->sync_source) in xive_irq_set_vcpu_affinity()
898 xive_ops->sync_source(hw_irq); in xive_irq_set_vcpu_affinity()
904 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
914 if (xive_ops->sync_source) in xive_irq_set_vcpu_affinity()
915 xive_ops->sync_source(hw_irq); in xive_irq_set_vcpu_affinity()
925 rc = xive_ops->configure_irq(hw_irq, in xive_irq_set_vcpu_affinity()
926 get_hard_smp_processor_id(xd->target), in xive_irq_set_vcpu_affinity()
927 xive_irq_priority, d->irq); in xive_irq_set_vcpu_affinity()
932 * Then if saved_p is not set, effectively re-enable the in xive_irq_set_vcpu_affinity()
941 * while masked, the generic code will re-mask it anyway. in xive_irq_set_vcpu_affinity()
943 if (!xd->saved_p) in xive_irq_set_vcpu_affinity()
968 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && in xive_get_irqchip_state()
969 (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) && in xive_get_irqchip_state()
973 return -EINVAL; in xive_get_irqchip_state()
978 .name = "XIVE-IRQ",
999 pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq); in xive_cleanup_irq_data()
1001 if (xd->eoi_mmio) { in xive_cleanup_irq_data()
1002 iounmap(xd->eoi_mmio); in xive_cleanup_irq_data()
1003 if (xd->eoi_mmio == xd->trig_mmio) in xive_cleanup_irq_data()
1004 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
1005 xd->eoi_mmio = NULL; in xive_cleanup_irq_data()
1007 if (xd->trig_mmio) { in xive_cleanup_irq_data()
1008 iounmap(xd->trig_mmio); in xive_cleanup_irq_data()
1009 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
1021 return -ENOMEM; in xive_irq_alloc_data()
1022 rc = xive_ops->populate_irq_data(hw, xd); in xive_irq_alloc_data()
1027 xd->target = XIVE_INVALID_TARGET; in xive_irq_alloc_data()
1063 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", in xive_cause_ipi()
1064 smp_processor_id(), cpu, xc->hw_ipi); in xive_cause_ipi()
1066 xd = &xc->ipi_data; in xive_cause_ipi()
1067 if (WARN_ON(!xd->trig_mmio)) in xive_cause_ipi()
1069 out_be64(xd->trig_mmio, 0); in xive_cause_ipi()
1086 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); in xive_ipi_eoi()
1088 xive_do_source_eoi(&xc->ipi_data); in xive_ipi_eoi()
1101 .name = "XIVE-IPI",
1108 * IPIs are marked per-cpu. We use separate HW interrupts under the
1122 irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip, in xive_ipi_irq_domain_alloc()
1123 domain->host_data, handle_percpu_irq, in xive_ipi_irq_domain_alloc()
1138 int ret = -ENOMEM; in xive_init_ipis()
1140 fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI"); in xive_init_ipis()
1165 xid->irq = ret; in xive_init_ipis()
1167 snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); in xive_init_ipis()
1187 if (atomic_inc_return(&xid->started) > 1) in xive_request_ipi()
1190 ret = request_irq(xid->irq, xive_muxed_ipi_action, in xive_request_ipi()
1192 xid->name, NULL); in xive_request_ipi()
1194 WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); in xive_request_ipi()
1209 if (xc->hw_ipi != XIVE_BAD_IRQ) in xive_setup_cpu_ipi()
1215 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ in xive_setup_cpu_ipi()
1216 if (xive_ops->get_ipi(cpu, xc)) in xive_setup_cpu_ipi()
1217 return -EIO; in xive_setup_cpu_ipi()
1223 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); in xive_setup_cpu_ipi()
1226 return -EIO; in xive_setup_cpu_ipi()
1228 rc = xive_ops->configure_irq(xc->hw_ipi, in xive_setup_cpu_ipi()
1233 return -EIO; in xive_setup_cpu_ipi()
1236 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); in xive_setup_cpu_ipi()
1239 xive_do_source_set_mask(&xc->ipi_data, false); in xive_setup_cpu_ipi()
1251 if (xc->hw_ipi == XIVE_BAD_IRQ) in xive_cleanup_cpu_ipi()
1257 xive_do_source_set_mask(&xc->ipi_data, true); in xive_cleanup_cpu_ipi()
1266 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), in xive_cleanup_cpu_ipi()
1270 xive_ops->put_ipi(cpu, xc); in xive_cleanup_cpu_ipi()
1275 smp_ops->cause_ipi = xive_cause_ipi; in xive_smp_probe()
1336 return xive_ops->match(node); in xive_irq_domain_match()
1377 seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "", in xive_irq_domain_debug_show()
1378 xd->saved_p ? "saved" : ""); in xive_irq_domain_debug_show()
1379 seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target); in xive_irq_domain_debug_show()
1380 seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip); in xive_irq_domain_debug_show()
1381 seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page); in xive_irq_domain_debug_show()
1382 seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page); in xive_irq_domain_debug_show()
1383 seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags); in xive_irq_domain_debug_show()
1385 if (xd->flags & xive_irq_flags[i].mask) in xive_irq_domain_debug_show()
1397 return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode), in xive_irq_domain_translate()
1398 fwspec->param, fwspec->param_count, in xive_irq_domain_translate()
1431 &xive_irq_chip, domain->host_data); in xive_irq_domain_alloc()
1475 if (xc->queue[xive_irq_priority].qpage) in xive_cleanup_cpu_queues()
1476 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); in xive_cleanup_cpu_queues()
1484 if (!xc->queue[xive_irq_priority].qpage) in xive_setup_cpu_queues()
1485 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); in xive_setup_cpu_queues()
1499 return -ENOMEM; in xive_prepare_cpu()
1500 xc->hw_ipi = XIVE_BAD_IRQ; in xive_prepare_cpu()
1501 xc->chip_id = XIVE_INVALID_CHIP_ID; in xive_prepare_cpu()
1502 if (xive_ops->prepare_cpu) in xive_prepare_cpu()
1503 xive_ops->prepare_cpu(cpu, xc); in xive_prepare_cpu()
1517 if (xive_ops->setup_cpu) in xive_setup_cpu()
1518 xive_ops->setup_cpu(smp_processor_id(), xc); in xive_setup_cpu()
1521 xc->cppr = 0xff; in xive_setup_cpu()
1540 /* Allocate per-CPU data and queues */ in xive_smp_prepare_cpu()
1560 * We need to re-route that interrupt to its new destination. in xive_flush_cpu_queue()
1571 if (d->domain != xive_irq_domain) in xive_flush_cpu_queue()
1575 * The IRQ should have already been re-routed, it's just a in xive_flush_cpu_queue()
1576 * stale in the old queue, so re-trigger it in order to make in xive_flush_cpu_queue()
1580 pr_info("CPU %d: Got irq %d while offline, re-sending...\n", in xive_flush_cpu_queue()
1583 raw_spin_lock(&desc->lock); in xive_flush_cpu_queue()
1589 xd->saved_p = false; in xive_flush_cpu_queue()
1595 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_flush_cpu_queue()
1600 raw_spin_unlock(&desc->lock); in xive_flush_cpu_queue()
1613 xc->cppr = 0; in xive_smp_disable_cpu()
1619 /* Re-enable CPPR */ in xive_smp_disable_cpu()
1620 xc->cppr = 0xff; in xive_smp_disable_cpu()
1643 xc->cppr = 0; in xive_teardown_cpu()
1646 if (xive_ops->teardown_cpu) in xive_teardown_cpu()
1647 xive_ops->teardown_cpu(cpu, xc); in xive_teardown_cpu()
1660 xive_ops->shutdown(); in xive_shutdown()
1679 /* Allocate per-CPU data and queues */ in xive_core_init()
1686 xive_ops->name); in xive_core_init()
1701 return ERR_PTR(-ENOMEM); in xive_queue_page_alloc()
1726 __setup("xive.store-eoi=", xive_store_eoi_cmdline);
1735 seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); in xive_debug_show_ipi()
1741 xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); in xive_debug_show_ipi()
1742 seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer); in xive_debug_show_ipi()
1758 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); in xive_debug_show_irq()
1791 if (xive_ops->debug_show) in xive_ipi_debug_show()
1792 xive_ops->debug_show(m, private); in xive_ipi_debug_show()
1804 seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle); in xive_eq_debug_show_one()
1805 if (q->qpage) { in xive_eq_debug_show_one()
1806 for (i = 0; i < q->msk + 1; i++) { in xive_eq_debug_show_one()
1809 seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i), in xive_eq_debug_show_one()
1818 int cpu = (long)m->private; in xive_eq_debug_show()
1822 xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority], in xive_eq_debug_show()
1849 debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi); in xive_core_debugfs_create()
1851 if (xive_ops->debug_create) in xive_core_debugfs_create()
1852 xive_ops->debug_create(xive_dir); in xive_core_debugfs_create()