Lines Matching +full:riscv-aia (arch/riscv/kvm/aia.c)
1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/irqchip/riscv-imsic.h>
38 raw_spin_lock_irqsave(&hgctrl->lock, flags); in aia_find_hgei()
40 hgei = -1; in aia_find_hgei()
42 if (hgctrl->owners[i] == owner) { in aia_find_hgei()
48 raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in aia_find_hgei()
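The scan above walks slots 1..kvm_riscv_aia_nr_hgei under a raw spinlock, with slot 0 never handed out and -1 meaning this owner holds no guest external interrupt line on the current CPU. A minimal userspace sketch of the same pattern; slot_table, find_slot and NR_SLOTS are illustrative names, and a pthread spinlock stands in for the kernel's raw_spinlock_t:

#include <pthread.h>

#define NR_SLOTS 64

struct slot_table {
	pthread_spinlock_t lock;
	void *owners[NR_SLOTS];
	int nr;                        /* usable slots are 1..nr */
};

static int find_slot(struct slot_table *t, void *owner)
{
	int i, found = -1;             /* -1: owner holds no slot here */

	pthread_spin_lock(&t->lock);
	for (i = 1; i <= t->nr; i++) { /* slot 0 is reserved */
		if (t->owners[i] == owner) {
			found = i;
			break;
		}
	}
	pthread_spin_unlock(&t->lock);

	return found;
}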
71 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_flush_interrupts()
77 if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) { in kvm_riscv_vcpu_aia_flush_interrupts()
78 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0); in kvm_riscv_vcpu_aia_flush_interrupts()
79 val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask; in kvm_riscv_vcpu_aia_flush_interrupts()
81 csr->hviph &= ~mask; in kvm_riscv_vcpu_aia_flush_interrupts()
82 csr->hviph |= val; in kvm_riscv_vcpu_aia_flush_interrupts()
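The flush above claims all currently-pending bits with one atomic exchange, so each bit is consumed exactly once even if new interrupts are posted concurrently; the claimed values are then folded into the shadow hviph. A runnable C11 model of that grab-and-clear step (flush_pending and the variable names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long pending_mask; /* bits posted by producers */
static unsigned long pending_val;          /* latched values for those bits */
static unsigned long shadow;               /* stands in for csr->hviph */

static void flush_pending(void)
{
	unsigned long mask, val;

	if (!atomic_load_explicit(&pending_mask, memory_order_relaxed))
		return;

	/* Claim every pending bit in a single atomic step. */
	mask = atomic_exchange_explicit(&pending_mask, 0, memory_order_acquire);
	val = pending_val & mask;

	shadow &= ~mask;                   /* clear the claimed positions */
	shadow |= val;                     /* then apply the latched values */
}

int main(void)
{
	atomic_store(&pending_mask, 0x5UL);
	pending_val = 0x4UL;
	flush_pending();
	printf("shadow = %#lx\n", shadow); /* 0x4 */
	return 0;
}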
88 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_sync_interrupts()
91 csr->vsieh = csr_read(CSR_VSIEH); in kvm_riscv_vcpu_aia_sync_interrupts()
104 if (READ_ONCE(vcpu->arch.irqs_pending[1]) & in kvm_riscv_vcpu_aia_has_interrupts()
105 (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask))) in kvm_riscv_vcpu_aia_has_interrupts()
109 seip = vcpu->arch.guest_csr.vsie; in kvm_riscv_vcpu_aia_has_interrupts()
113 if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip) in kvm_riscv_vcpu_aia_has_interrupts()
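On a 32-bit host the 64-bit per-vCPU interrupt state is split across two unsigned longs, which is why word [1] is tested against the top half of the 64-bit mask. A small self-contained sketch of that split, written to match the usual upper_32_bits()/lower_32_bits() semantics:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t mask = 0x0000000300000001ULL; /* bits 32, 33 and bit 0 */
	uint32_t pending_hi = 0x2;             /* irqs_pending[1] analogue */
	uint32_t vsieh = 0x3;                  /* high-word enable bits */

	/* Mirrors: irqs_pending[1] & (vsieh & upper_32_bits(mask)) */
	printf("high-word hit: %s\n",
	       (pending_hi & (vsieh & upper_32_bits(mask))) ? "yes" : "no");
	return 0;
}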
125 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_aia_update_hvip()
131 csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph); in kvm_riscv_vcpu_aia_update_hvip()
133 aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT))); in kvm_riscv_vcpu_aia_update_hvip()
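aia_set_hvictl() builds the HVICTL value from a fixed interrupt identity (the S-mode external interrupt, number 9) plus a pending flag in the low bits. A sketch of that composition; the field constants follow the AIA spec layout (IID at bits 27:16) but are spelled out here purely for illustration:

#include <stdio.h>

#define IRQ_S_EXT        9                 /* S-mode external interrupt */
#define HVICTL_IID_SHIFT 16
#define HVICTL_IID       (0xfffUL << HVICTL_IID_SHIFT)

static unsigned long mk_hvictl(int ext_irq_pending)
{
	unsigned long v;

	v = ((unsigned long)IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	return v | !!ext_irq_pending;      /* normalize flag to 0 or 1 */
}

int main(void)
{
	printf("%#lx %#lx\n", mk_hvictl(0), mk_hvictl(1)); /* 0x90000 0x90001 */
	return 0;
}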
138 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_load()
143 csr_write(CSR_VSISELECT, csr->vsiselect); in kvm_riscv_vcpu_aia_load()
144 csr_write(CSR_HVIPRIO1, csr->hviprio1); in kvm_riscv_vcpu_aia_load()
145 csr_write(CSR_HVIPRIO2, csr->hviprio2); in kvm_riscv_vcpu_aia_load()
147 csr_write(CSR_VSIEH, csr->vsieh); in kvm_riscv_vcpu_aia_load()
148 csr_write(CSR_HVIPH, csr->hviph); in kvm_riscv_vcpu_aia_load()
149 csr_write(CSR_HVIPRIO1H, csr->hviprio1h); in kvm_riscv_vcpu_aia_load()
150 csr_write(CSR_HVIPRIO2H, csr->hviprio2h); in kvm_riscv_vcpu_aia_load()
156 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_put()
161 csr->vsiselect = csr_read(CSR_VSISELECT); in kvm_riscv_vcpu_aia_put()
162 csr->hviprio1 = csr_read(CSR_HVIPRIO1); in kvm_riscv_vcpu_aia_put()
163 csr->hviprio2 = csr_read(CSR_HVIPRIO2); in kvm_riscv_vcpu_aia_put()
165 csr->vsieh = csr_read(CSR_VSIEH); in kvm_riscv_vcpu_aia_put()
166 csr->hviph = csr_read(CSR_HVIPH); in kvm_riscv_vcpu_aia_put()
167 csr->hviprio1h = csr_read(CSR_HVIPRIO1H); in kvm_riscv_vcpu_aia_put()
168 csr->hviprio2h = csr_read(CSR_HVIPRIO2H); in kvm_riscv_vcpu_aia_put()
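The load/put pair is deliberately symmetric: every CSR written by _load() is read back by _put(), so the guest_csr shadow struct is the single source of truth whenever the vCPU is off the hart. A userspace model of that swap, with a plain array standing in for the hardware CSR file (all names illustrative):

#include <string.h>

enum { VSISELECT, HVIPRIO1, HVIPRIO2, NR_REGS };

static unsigned long hw_csr[NR_REGS];        /* pretend hardware registers */

struct shadow_csr { unsigned long r[NR_REGS]; };

static void vcpu_load(const struct shadow_csr *s)
{
	memcpy(hw_csr, s->r, sizeof(hw_csr));  /* one csr_write() per field */
}

static void vcpu_put(struct shadow_csr *s)
{
	memcpy(s->r, hw_csr, sizeof(hw_csr));  /* one csr_read() per field */
}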
176 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_get_csr()
179 return -ENOENT; in kvm_riscv_vcpu_aia_get_csr()
192 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_set_csr()
195 return -ENOENT; in kvm_riscv_vcpu_aia_set_csr()
202 WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0); in kvm_riscv_vcpu_aia_set_csr()
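Both accessors treat the shadow-CSR struct as a flat array of unsigned longs and reject an out-of-range register number with -ENOENT before touching any state. The shape of that bounds check, with an illustrative reg_file type:

#include <errno.h>
#include <stddef.h>

struct reg_file { unsigned long regs[8]; };

static int reg_get(const struct reg_file *f, size_t reg_num, unsigned long *out)
{
	/* Reject anything past the end of the register array. */
	if (reg_num >= sizeof(f->regs) / sizeof(f->regs[0]))
		return -ENOENT;

	*out = f->regs[reg_num];
	return 0;
}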
215 /* If AIA not available then redirect trap */ in kvm_riscv_vcpu_aia_rmw_topei()
219 /* If AIA not initialized then forward to user space */ in kvm_riscv_vcpu_aia_rmw_topei()
220 if (!kvm_riscv_aia_initialized(vcpu->kvm)) in kvm_riscv_vcpu_aia_rmw_topei()
228  * External IRQ priority always read-only zero. This means default
229  * priority order is always preferred for external IRQs unless
230  * HVICTL.IID == 9 and HVICTL.IPRIO != 0
231  */
232 static int aia_irq2bitpos[] = {
233 0, 8, -1, -1, 16, 24, -1, -1, /* 0 - 7 */
234 32, -1, -1, -1, -1, 40, 48, 56, /* 8 - 15 */
235 64, 72, 80, 88, 96, 104, 112, 120, /* 16 - 23 */
236 -1, -1, -1, -1, -1, -1, -1, -1, /* 24 - 31 */
237 -1, -1, -1, -1, -1, -1, -1, -1, /* 32 - 39 */
238 -1, -1, -1, -1, -1, -1, -1, -1, /* 40 - 47 */
239 -1, -1, -1, -1, -1, -1, -1, -1, /* 48 - 55 */
240 -1, -1, -1, -1, -1, -1, -1, -1, /* 56 - 63 */
241 };
346 first_irq = (isel - ISELECT_IPRIO0) * 4; in aia_rmw_iprio()
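Each IPRIO window in the iselect space covers four IRQ priority slots, and the table above maps an IRQ number to the bit offset of its 8-bit priority within the HVIPRIO register file (-1 means the priority is hardwired to zero). A worked example of both computations; ISELECT_IPRIO0 = 0x30 follows the AIA iselect map but is written out here only for illustration:

#include <stdio.h>

#define ISELECT_IPRIO0 0x30 /* first iprio register in the iselect space */
#define XLEN 64

int main(void)
{
	unsigned int isel = ISELECT_IPRIO0 + 2;
	int bitpos_irq5 = 24;   /* from the table above: IRQ 5 -> bit 24 */

	printf("window %#x starts at IRQ %u\n",
	       isel, (isel - ISELECT_IPRIO0) * 4);       /* IRQ 8 */
	printf("IRQ 5: HVIPRIO word %d, shift %d\n",
	       bitpos_irq5 / XLEN, bitpos_irq5 % XLEN);  /* word 0, shift 24 */
	return 0;
}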
375 /* If AIA not available then redirect trap */ in kvm_riscv_vcpu_aia_rmw_ireg()
384 kvm_riscv_aia_initialized(vcpu->kvm)) in kvm_riscv_vcpu_aia_rmw_ireg()
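These comments mark a three-way dispatch that recurs throughout the file: without the AIA extension the CSR access must look like an illegal instruction to the guest, with AIA present but the VM's IMSIC not yet configured the access is forwarded to the VMM, and only otherwise is it emulated in the kernel. A sketch of that ladder, with illustrative names and result codes:

enum emu_result { EMU_OK, EMU_ILLEGAL_TRAP, EMU_EXIT_TO_USER };

static enum emu_result dispatch(int hw_available, int vm_initialized)
{
	if (!hw_available)
		return EMU_ILLEGAL_TRAP;  /* guest takes an illegal-insn trap */
	if (!vm_initialized)
		return EMU_EXIT_TO_USER;  /* user-space VMM completes it */
	return EMU_OK;                    /* emulate in kernel */
}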
395 int ret = -ENOENT; in kvm_riscv_aia_alloc_hgei()
402 return -ENODEV; in kvm_riscv_aia_alloc_hgei()
404 raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_alloc_hgei()
406 if (hgctrl->free_bitmap) { in kvm_riscv_aia_alloc_hgei()
407 ret = __ffs(hgctrl->free_bitmap); in kvm_riscv_aia_alloc_hgei()
408 hgctrl->free_bitmap &= ~BIT(ret); in kvm_riscv_aia_alloc_hgei()
409 hgctrl->owners[ret] = owner; in kvm_riscv_aia_alloc_hgei()
412 raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_alloc_hgei()
415 lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL; in kvm_riscv_aia_alloc_hgei()
418 *hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ); in kvm_riscv_aia_alloc_hgei()
420 *hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ); in kvm_riscv_aia_alloc_hgei()
434 raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_free_hgei()
437 if (!(hgctrl->free_bitmap & BIT(hgei))) { in kvm_riscv_aia_free_hgei()
438 hgctrl->free_bitmap |= BIT(hgei); in kvm_riscv_aia_free_hgei()
439 hgctrl->owners[hgei] = NULL; in kvm_riscv_aia_free_hgei()
443 raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_free_hgei()
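Allocation takes the lowest set bit of free_bitmap (__ffs) and records the owner; the matching free only re-sets the bit when it is currently clear, so a stray double-free is harmless. The returned slot number also selects the guest-interrupt file's MMIO page, one IMSIC_MMIO_PAGE_SZ page per slot above msi_va/msi_pa. A runnable userspace model of the bitmap half (locking omitted, names illustrative):

#include <stdio.h>

#define BIT(n) (1UL << (n))

static unsigned long free_bitmap = 0xeUL;  /* slots 1..3 free, 0 reserved */

static int alloc_slot(void)
{
	int slot;

	if (!free_bitmap)
		return -1;
	slot = __builtin_ctzl(free_bitmap);    /* __ffs analogue */
	free_bitmap &= ~BIT(slot);
	return slot;
}

static void free_slot(int slot)
{
	if (!(free_bitmap & BIT(slot)))        /* only if really allocated */
		free_bitmap |= BIT(slot);
}

int main(void)
{
	int a = alloc_slot(), b = alloc_slot();

	printf("got %d then %d\n", a, b);      /* 1 then 2 */
	free_slot(a);
	printf("next: %d\n", alloc_slot());    /* 1 again */
	return 0;
}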
471 raw_spin_lock_irqsave(&hgctrl->lock, flags); in hgei_interrupt()
474 if (hgctrl->owners[i]) in hgei_interrupt()
475 kvm_vcpu_kick(hgctrl->owners[i]); in hgei_interrupt()
478 raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in hgei_interrupt()
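The handler kicks every vCPU that owns a pending guest external interrupt line. A portable analogue of the kernel's for_each_set_bit() loop, built on the count-trailing-zeros builtin (kick_owners and kick are illustrative names):

#include <stdio.h>

static void kick(int slot)
{
	printf("kick vcpu owning slot %d\n", slot);
}

static void kick_owners(unsigned long mask, void (*fn)(int))
{
	while (mask) {
		fn(__builtin_ctzl(mask));  /* lowest pending slot */
		mask &= mask - 1;          /* clear that bit, continue */
	}
}

int main(void)
{
	kick_owners(0x15UL, kick);         /* slots 0, 2 and 4 */
	return 0;
}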
490 /* Initialize per-CPU guest external interrupt line management */ in aia_hgei_init()
493 raw_spin_lock_init(&hgctrl->lock); in aia_hgei_init()
495 hgctrl->free_bitmap = in aia_hgei_init()
496 BIT(kvm_riscv_aia_nr_hgei + 1) - 1; in aia_hgei_init()
497 hgctrl->free_bitmap &= ~BIT(0); in aia_hgei_init()
499 hgctrl->free_bitmap = 0; in aia_hgei_init()
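The initialization arithmetic is worth unpacking: BIT(n + 1) - 1 sets bits 0..n, and clearing bit 0 then reserves guest-interrupt file 0, whose HGEIE bit is read-only zero under the hypervisor extension. For example, with two usable lines:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int nr_hgei = 2;
	unsigned long map = (BIT(nr_hgei + 1) - 1) & ~BIT(0);

	printf("%#lx\n", map);   /* 0x6: slots 1 and 2 marked free */
	return 0;
}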
507 return -ENOENT; in aia_hgei_init()
510 /* Map per-CPU SGEI interrupt from INTC domain */ in aia_hgei_init()
514 return -ENOMEM; in aia_hgei_init()
517 /* Request per-CPU SGEI interrupt */ in aia_hgei_init()
519 "riscv-kvm", &aia_hgei); in aia_hgei_init()
530 /* Free per-CPU SGEI interrupt */ in aia_hgei_exit()
549 /* Enable per-CPU SGEI interrupt */ in kvm_riscv_aia_enable()
571 /* Disable per-CPU SGEI interrupt */ in kvm_riscv_aia_disable()
577 raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
580 vcpu = hgctrl->owners[i]; in kvm_riscv_aia_disable()
585 * We release hgctrl->lock before notifying IMSIC in kvm_riscv_aia_disable()
586 * so that we don't have lock ordering issues. in kvm_riscv_aia_disable()
587 */ in kvm_riscv_aia_disable()
588 raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
602 raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
605 raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
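Note the shape of this teardown: hgctrl->lock is dropped before calling into the IMSIC code, which may take its own locks, and re-taken afterwards, trading a re-scan for freedom from lock-ordering problems. A generic pthread sketch of the pattern (drain and notify_elsewhere are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void notify_elsewhere(void *obj)    /* may take its own locks */
{
	(void)obj;
}

static void drain(void *objs[], int n)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < n; i++) {
		void *obj = objs[i];

		if (!obj)
			continue;
		objs[i] = NULL;                 /* detach while locked */
		pthread_mutex_unlock(&lock);    /* drop before calling out */
		notify_elsewhere(obj);
		pthread_mutex_lock(&lock);      /* re-take, continue scan */
	}
	pthread_mutex_unlock(&lock);
}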
616 return -ENODEV; in kvm_riscv_aia_init()
619 /* Figure-out number of bits in HGEIE */ in kvm_riscv_aia_init()
620 csr_write(CSR_HGEIE, -1UL); in kvm_riscv_aia_init()
624 kvm_riscv_aia_nr_hgei--; in kvm_riscv_aia_init()
627 * Number of usable HGEI lines should be minimum of per-HART in kvm_riscv_aia_init()
628 * HGEIE bits and number of bits in per-HART IMSIC address map in kvm_riscv_aia_init()
632 BIT(gc->guest_index_bits) - 1); in kvm_riscv_aia_init()
639 kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1; in kvm_riscv_aia_init()
654 /* Enable KVM AIA support */ in kvm_riscv_aia_init()
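Initialization sizes the facility empirically: write all-ones to HGEIE, count how many bits stick, subtract one because bit 0 never sticks, then clamp to the number of guest files the per-hart IMSIC address map can encode (BIT(guest_index_bits) - 1). A pure-arithmetic model of that sizing step, assuming 64-bit longs:

#include <stdio.h>

#define BIT(n) (1UL << (n))

static unsigned int fls_long(unsigned long v)  /* 1-based highest set bit */
{
	return v ? 64 - __builtin_clzl(v) : 0;
}

int main(void)
{
	unsigned long hgeie_readback = 0x3eUL; /* pretend bits 1..5 stuck */
	unsigned int guest_index_bits = 2;     /* IMSIC guest index width */
	unsigned int nr_hgei = fls_long(hgeie_readback);

	if (nr_hgei)
		nr_hgei--;                     /* bit 0 is never usable */

	/* usable lines = min(HGEIE width, IMSIC guest files per hart) */
	if (nr_hgei > BIT(guest_index_bits) - 1)
		nr_hgei = BIT(guest_index_bits) - 1;

	printf("nr_hgei = %u\n", nr_hgei);     /* min(5, 3) = 3 */
	return 0;
}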