Lines matching +full:interrupt +full:- +full:src in arch/powerpc/kvm/mpic.c, KVM's in-kernel emulation of the Freescale MPIC (OpenPIC) interrupt controller. Matches are grouped by the function or structure they occur in; "..." marks elided non-matching lines.
#define OPENPIC_CPU_REG_SIZE  (0x100 + ((MAX_CPU - 1) * 0x1000))

In get_current_cpu():
    struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
    return vcpu ? vcpu->arch.irq_cpu_id : -1;
    ...
    return -1;

In enum irq_type:
    IRQ_TYPE_FSLINT,      /* FSL internal interrupt -- level only */
    IRQ_TYPE_FSLSPECIAL,  /* FSL timer/IPI interrupt, edge, no polarity */

In struct irq_source:
    bool level:1;         /* level-triggered */

#define IVPR_VECTOR(opp, _ivprr_)  ((_ivprr_) & (opp)->vector_mask)
#define IDR_CI                     0x40000000  /* critical interrupt */

In struct irq_dest:
    /* Count of IRQ sources asserting on non-INT outputs */
    uint32_t outputs_active[NUM_OUTPUTS];

In struct openpic:
    struct irq_source src[MAX_IRQ];
    ...
    uint32_t msir;        /* Shared Message Signaled Interrupt Register */
In mpic_irq_raise():
    if (!dst->vcpu) {
        pr_debug("%s: destination cpu %d does not exist\n",
                 __func__, (int)(dst - &opp->dst[0]));
        return;
    }
    pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
             output);
    ...
    kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);

In mpic_irq_lower():
    if (!dst->vcpu) {
        pr_debug("%s: destination cpu %d does not exist\n",
                 __func__, (int)(dst - &opp->dst[0]));
        return;
    }
    pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
             output);
    ...
    kvmppc_core_dequeue_external(dst->vcpu);
In IRQ_setbit():
    set_bit(n_IRQ, q->queue);

In IRQ_resetbit():
    clear_bit(n_IRQ, q->queue);

In IRQ_check():
    int irq = -1;
    int next = -1;
    int priority = -1;

    for (;;) {
        irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
        if (irq == opp->max_irq)
            break;
        pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
                 irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
        if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
            next = irq;
            priority = IVPR_PRIORITY(opp->src[irq].ivpr);
        }
    }

    q->next = next;
    q->priority = priority;

In IRQ_get_next():
    return q->next;
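For intuition, the scan above can be modeled in a few lines of standalone C (a sketch with hypothetical types; the kernel walks a real bitmap with find_next_bit()): every raised source is visited, and the highest IVPR priority wins, with ties going to the lowest-numbered IRQ.

    /* Hypothetical standalone model of the IRQ_check() scan. */
    #include <stdio.h>

    #define N_SRC 8

    struct queue_model {
        int raised[N_SRC];    /* 1 if the source is queued */
        int prio[N_SRC];      /* stands in for IVPR_PRIORITY(ivpr) */
        int next;             /* winner, -1 if the queue is empty */
        int priority;         /* winner's priority, -1 if empty */
    };

    static void irq_check_model(struct queue_model *q)
    {
        q->next = -1;
        q->priority = -1;
        for (int irq = 0; irq < N_SRC; irq++) {
            /* strict '>' keeps the first (lowest-numbered) IRQ on ties */
            if (q->raised[irq] && q->prio[irq] > q->priority) {
                q->next = irq;
                q->priority = q->prio[irq];
            }
        }
    }

    int main(void)
    {
        struct queue_model q = { .raised = { [2] = 1, [5] = 1 },
                                 .prio   = { [2] = 3, [5] = 7 } };
        irq_check_model(&q);
        printf("next=%d priority=%d\n", q.next, q.priority); /* next=5 priority=7 */
        return 0;
    }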
In IRQ_local_pipe():
    struct irq_source *src;
    ...
    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];
    ...
    if (src->output != ILR_INTTGT_INT) {
        pr_debug("%s: output %d irq %d active %d was %d count %d\n",
                 __func__, src->output, n_IRQ, active, was_active,
                 dst->outputs_active[src->output]);
        ...
        if (active && !was_active &&
            dst->outputs_active[src->output]++ == 0) {
            pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
                     __func__, src->output, n_CPU, n_IRQ);
            mpic_irq_raise(opp, dst, src->output);
        }
        if (!active && was_active &&
            --dst->outputs_active[src->output] == 0) {
            pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
                     __func__, src->output, n_CPU, n_IRQ);
            mpic_irq_lower(opp, dst, src->output);
        }
        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /* Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    if (active)
        IRQ_setbit(&dst->raised, n_IRQ);
    else
        IRQ_resetbit(&dst->raised, n_IRQ);

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
                 __func__, n_IRQ, priority, dst->ctpr, n_CPU);
        active = 0;
    }

    if (active) {
        if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
            priority <= dst->servicing.priority) {
            pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
                     __func__, n_IRQ, dst->servicing.next, n_CPU);
        } else {
            pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
                     __func__, n_CPU, n_IRQ, dst->raised.next);
            mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
        }
    } else {
        IRQ_get_next(opp, &dst->servicing);
        if (dst->raised.priority > dst->ctpr &&
            dst->raised.priority > dst->servicing.priority) {
            pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
                     __func__, n_IRQ, dst->raised.next,
                     dst->raised.priority, dst->ctpr,
                     dst->servicing.priority, n_CPU);
            /* IRQ line stays high */
        } else {
            pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
                     __func__, n_IRQ, dst->ctpr,
                     dst->servicing.priority, n_CPU);
            mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
        }
    }
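Stripped of bookkeeping, the INT-path decision above reduces to two gates. A sketch, with plain ints standing in for the kernel structures:

    /* Hypothetical condensation of IRQ_local_pipe()'s delivery test. */
    static int should_raise_int(int prio, int ctpr, int servicing_prio)
    {
        if (prio <= ctpr)
            return 0;  /* below the CPU's current task priority: held */
        if (prio <= servicing_prio)
            return 0;  /* hidden behind an in-service interrupt: held */
        return 1;      /* raise the INT output to the vcpu */
    }

A held interrupt stays queued in dst->raised, so lowering ctpr or issuing an EOI later can still deliver it.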
In openpic_update_irq():
    struct irq_source *src;
    ...
    src = &opp->src[n_IRQ];
    active = src->pending;

    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
        /* Interrupt source is disabled */
        active = false;
    }

    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

    /*
     * We don't have a similar check for already-active because
     * ctpr may have changed and we need to withdraw the interrupt.
     */
    if (!active && !was_active)
        return;

    if (active)
        src->ivpr |= IVPR_ACTIVITY_MASK;
    else
        src->ivpr &= ~IVPR_ACTIVITY_MASK;

    if (src->destmask == 0)
        return;    /* no target */

    if (src->destmask == (1 << src->last_cpu)) {
        /* Only one CPU is allowed to receive this IRQ */
        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
    } else if (!(src->ivpr & IVPR_MODE_MASK)) {
        /* Directed delivery mode */
        for (i = 0; i < opp->nb_cpus; i++) {
            if (src->destmask & (1 << i))
                IRQ_local_pipe(opp, i, n_IRQ, active,
                               was_active);
        }
    } else {
        /* Distributed delivery mode */
        for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
            if (i == opp->nb_cpus)
                i = 0;
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active,
                               was_active);
                src->last_cpu = i;
                break;
            }
        }
    }
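The distributed-delivery loop above rotates fairly through destmask, starting one past last_cpu and wrapping at nb_cpus. A standalone model (hypothetical helper; like the kernel loop, it relies on destmask targeting some CPU other than last_cpu, which the earlier checks guarantee):

    /* Hypothetical model of the distributed-delivery rotation. */
    #include <stdio.h>

    static int pick_cpu(unsigned destmask, int last_cpu, int nb_cpus)
    {
        for (int i = last_cpu + 1; i != last_cpu; i++) {
            if (i == nb_cpus)
                i = 0;            /* wrap around, as in the kernel loop */
            if (destmask & (1u << i))
                return i;         /* becomes the new last_cpu */
        }
        return last_cpu;          /* single-CPU masks are handled earlier */
    }

    int main(void)
    {
        /* CPUs 0 and 2 targeted; after CPU 0 is served, CPU 2 is next */
        printf("%d\n", pick_cpu(0x5, 0, 4));  /* prints 2 */
        return 0;
    }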
In openpic_set_irq():
    struct irq_source *src;
    ...
    src = &opp->src[n_IRQ];
    pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
             n_IRQ, level, src->ivpr);
    if (src->level) {
        /* level-sensitive irq */
        src->pending = level;
        openpic_update_irq(opp, n_IRQ);
    } else {
        /* edge-sensitive irq */
        if (level) {
            src->pending = 1;
            openpic_update_irq(opp, n_IRQ);
        }
        if (src->output != ILR_INTTGT_INT) {
            /* Edge-triggered interrupts shouldn't be used
             * with non-INT delivery, but just in case,
             * try to make it do something sane rather than
             * cause an interrupt storm.  This is close to
             * what you'd probably see happen in real hardware.
             */
            src->pending = 0;
            openpic_update_irq(opp, n_IRQ);
        }
    }
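The level/edge split boils down to a small latch; a hypothetical standalone rendering:

    /* Hypothetical model of openpic_set_irq()'s pending latch. */
    static void set_irq_model(int level_sensitive, int line, int *pending)
    {
        if (level_sensitive)
            *pending = line;  /* pending mirrors the input line */
        else if (line)
            *pending = 1;     /* edge: latch the rising edge only; the
                               * falling edge is ignored until the
                               * interrupt is acknowledged (IACK) */
    }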
In openpic_reset():
    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
        (opp->vid << FRR_VID_SHIFT);
    opp->pir = 0;
    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;

        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            opp->src[i].level =
                !!(opp->ivpr_reset & IVPR_SENSE_MASK);
            break;
        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;
            break;
        case IRQ_TYPE_FSLSPECIAL:
            break;
        }

        write_IRQreg_idr(opp, i, opp->idr_reset);
    }
    /* Initialise IRQ destinations */
    for (i = 0; i < MAX_CPU; i++) {
        opp->dst[i].ctpr = 15;
        memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
        opp->dst[i].raised.next = -1;
        memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
        opp->dst[i].servicing.next = -1;
    }
    /* Initialise timers */
    for (i = 0; i < MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
    }
    /* Go out of RESET state */
    opp->gcr = 0;
In read_IRQreg_idr():
    return opp->src[n_IRQ].idr;

In read_IRQreg_ilr():
    if (opp->flags & OPENPIC_FLAG_ILR)
        return opp->src[n_IRQ].output;
    return 0;

In read_IRQreg_ivpr():
    return opp->src[n_IRQ].ivpr;
In write_IRQreg_idr():
    struct irq_source *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
    int i;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        mask |= crit_mask | IDR_EP;
    }

    src->idr = val & mask;
    pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                pr_debug("%s: IRQ configured for multiple output types, using critical\n",
                         __func__);
            }

            src->output = ILR_INTTGT_CINT;
            src->nomask = true;
            src->destmask = 0;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci))
                    src->destmask |= 1UL << i;
            }
        } else {
            src->output = ILR_INTTGT_INT;
            src->nomask = false;
            src->destmask = src->idr & normal_mask;
        }
    } else {
        src->destmask = src->idr;
    }
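A worked example of the mask arithmetic above, assuming nb_cpus = 2 and the FSL bit layout the code implies (external-proxy bit EP at bit 31, CI0 at bit 30):

    /* Standalone check of write_IRQreg_idr()'s mask computation. */
    #include <stdio.h>

    #define IDR_EP_SHIFT  31
    #define IDR_CI0_SHIFT 30

    int main(void)
    {
        int nb_cpus = 2;
        unsigned normal_mask = (1u << nb_cpus) - 1;      /* 0x00000003 */
        int crit_shift = IDR_EP_SHIFT - nb_cpus;         /* 29 */
        unsigned crit_mask = normal_mask << crit_shift;  /* 0x60000000 */

        unsigned idr = 1u << (IDR_CI0_SHIFT - 1);        /* CI bit for CPU 1 */

        printf("critical delivery: %d\n", !!(idr & crit_mask));  /* 1 */
        for (int i = 0; i < nb_cpus; i++)
            if (idr & (1u << (IDR_CI0_SHIFT - i)))
                printf("destmask |= 0x%x\n", 1u << i);   /* CPU 1 -> 0x2 */
        return 0;
    }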
In write_IRQreg_ilr():
    if (opp->flags & OPENPIC_FLAG_ILR) {
        struct irq_source *src = &opp->src[n_IRQ];

        src->output = val & ILR_INTTGT_MASK;
        pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
                 src->output);

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
    }
In write_IRQreg_ivpr():
    /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
        IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /* For FSL internal interrupts, the sense bit is reserved and zero,
     * and the interrupt is always level-triggered.  Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level =
            !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
        break;
    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
        break;
    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
        break;
    }

    openpic_update_irq(opp, n_IRQ);
    pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
             opp->src[n_IRQ].ivpr);
In openpic_gcr_write():
    opp->gcr &= ~opp->mpic_mode_mask;
    opp->gcr |= val & opp->mpic_mode_mask;
In openpic_gbl_write():
    /* PIR:
     * This register is used to reset a CPU core --
     * that is not supported here, so the write fails.
     */
    err = -ENXIO;
    ...
    idx = (addr - 0x10A0) >> 4;    /* IPI vector/priority registers */
    write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
    ...
    opp->spve = val & opp->vector_mask;    /* spurious vector register */
In openpic_gbl_read():
    retval = opp->frr;
    retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
    ...
    retval = opp->gcr;
    ...
    retval = opp->vir;
    ...
    retval = opp->brr1;
    ...
    idx = (addr - 0x10A0) >> 4;
    retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
    ...
    retval = opp->spve;
In openpic_tmr_write():
    opp->tfrr = val;    /* timer frequency reporting register */
    ...
    /* TBCR: clear the toggle bit when count inhibit goes 1 -> 0 */
    if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
        (val & TBCR_CI) == 0 &&
        (opp->timers[idx].tbcr & TBCR_CI) != 0)
        opp->timers[idx].tccr &= ~TCCR_TOG;

    opp->timers[idx].tbcr = val;
    ...
    write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);    /* TVPR */
    ...
    write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);     /* TDR */
In openpic_tmr_read():
    uint32_t retval = -1;
    ...
    retval = opp->tfrr;
    ...
    retval = opp->timers[idx].tccr;    /* TCCR */
    ...
    retval = opp->timers[idx].tbcr;    /* TBCR */
    ...
    retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);    /* TVPR */
    ...
    retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);     /* TDR */
In openpic_msi_write():
    int idx = opp->irq_msi;
    ...
    opp->msi[srs].msir |= 1 << ibs;    /* latch, then raise irq_msi + srs */
    ...
    /* most registers are read-only, thus ignored */
In openpic_msi_read():
    if (addr & 0xF)
        return -ENXIO;
    ...
    /* MSIR: destructive read, clears the bank and lowers the source */
    r = opp->msi[srs].msir;
    opp->msi[srs].msir = 0;
    openpic_set_irq(opp, opp->irq_msi + srs, 0);
    ...
    /* MSISR: summary bit per pending bank */
    r |= (opp->msi[i].msir ? 1 : 0) << i;
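Put together, the write and read paths implement a latch-and-clear protocol per MSI bank. A toy model (hypothetical; it assumes the MSIIR layout the driver uses, with the bank select in the top three bits and the bit select below it):

    /* Toy model of the MSI bank: MSIIR write latches, MSIR read clears. */
    #include <stdio.h>

    #define SRS_SHIFT 29   /* bank select, assumed layout */
    #define IBS_SHIFT 24   /* bit select, assumed layout */

    static unsigned msir[8];

    static void msiir_write(unsigned val)
    {
        int srs = val >> SRS_SHIFT;
        int ibs = (val >> IBS_SHIFT) & 0x1f;
        msir[srs] |= 1u << ibs;    /* and raise source irq_msi + srs */
    }

    static unsigned msir_read(int srs)
    {
        unsigned r = msir[srs];
        msir[srs] = 0;             /* destructive read lowers the source */
        return r;
    }

    int main(void)
    {
        msiir_write((1u << SRS_SHIFT) | (3u << IBS_SHIFT));
        printf("0x%x\n", msir_read(1));   /* prints 0x8 */
        return 0;
    }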
In openpic_cpu_write_internal():
    struct irq_source *src;
    ...
    dst = &opp->dst[idx];
    ...
    case 0x40 ... 0x70:    /* IPIDR */
        idx = (addr - 0x40) >> 4;
        /* we use IDE as mask which CPUs to deliver the IPI to still. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
        break;
    case 0x80:    /* CTPR */
        dst->ctpr = val & 0x0000000F;
        pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
                 __func__, idx, dst->ctpr, dst->raised.priority,
                 dst->servicing.priority);
        if (dst->raised.priority <= dst->ctpr) {
            mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
        } else if (dst->raised.priority > dst->servicing.priority) {
            pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
                     __func__, idx, dst->raised.next);
            mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
        }
        break;
    case 0x90:    /* WHOAMI */
        /* Read-only register */
        break;
    case 0xA0:    /* IACK */
        /* Read-only register */
        break;
    case 0xB0:    /* EOI */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        if (s_IRQ < 0) {
            pr_debug("%s: EOI with no interrupt in service\n",
                     __func__);
            break;
        }

        IRQ_resetbit(&dst->servicing, s_IRQ);
        notify_eoi = s_IRQ;
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        src = &opp->src[n_IRQ];
        if (n_IRQ != -1 &&
            (s_IRQ == -1 ||
             IVPR_PRIORITY(src->ivpr) > dst->servicing.priority))
            mpic_irq_raise(opp, dst, ILR_INTTGT_INT);

        spin_unlock(&opp->lock);
        kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
        spin_lock(&opp->lock);
        break;
In openpic_iack():
    struct irq_source *src;
    ...
    /* the INT output is lowered before the acknowledge is computed */
    irq = IRQ_get_next(opp, &dst->raised);
    ...
    if (irq == -1)
        /* No more interrupt pending */
        return opp->spve;

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
        !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
               __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);
        retval = opp->spve;
    } else {
        /* IRQ enter servicing state */
        IRQ_setbit(&dst->servicing, irq);
        retval = IVPR_VECTOR(opp, src->ivpr);
    }

    if (!src->level) {
        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        src->pending = 0;
        IRQ_resetbit(&dst->raised, irq);
    }

    if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* edge-sensitive IRQ, still pending on other CPUs */
            src->ivpr |= IVPR_ACTIVITY_MASK;
        }
    }
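Together with the EOI handling shown earlier, an interrupt's life cycle through the two queues can be modeled with plain integers (a sketch; the kernel versions operate on opp/dst and real bitmaps):

    /* Hypothetical model of IACK/EOI against the raised/servicing queues. */
    struct dst_model { int ctpr, raised_prio, servicing_prio; };

    static int iack_model(struct dst_model *d, int vector, int spurious)
    {
        if (d->raised_prio <= d->ctpr)
            return spurious;              /* nothing deliverable: spurious */
        d->servicing_prio = d->raised_prio; /* IRQ enters servicing state */
        d->raised_prio = -1;
        return vector;                    /* guest gets the IVPR vector */
    }

    static void eoi_model(struct dst_model *d)
    {
        d->servicing_prio = -1;  /* retire the in-service IRQ; the raised
                                  * queue is then re-checked and a held
                                  * interrupt may be raised again */
    }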
In kvmppc_mpic_set_epr():
    struct openpic *opp = vcpu->arch.mpic;
    int cpu = vcpu->arch.irq_cpu_id;
    unsigned long flags;

    spin_lock_irqsave(&opp->lock, flags);

    if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
        kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));

    spin_unlock_irqrestore(&opp->lock, flags);
In openpic_cpu_read_internal():
    dst = &opp->dst[idx];
    ...
    retval = dst->ctpr;    /* CTPR */
In add_mmio_region():
    if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
        ...
        return;
    }
    opp->mmio_regions[opp->num_mmio_regions++] = mr;
In fsl_common_init():
    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = MAX_IRQ;

    opp->irq_ipi0 = virq;
    virq += MAX_IPI;
    opp->irq_tim0 = virq;
    virq += MAX_TMR;
    ...
    opp->irq_msi = 224;

    for (i = 0; i < opp->fsl->max_ext; i++)
        opp->src[i].level = false;

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }
In kvm_mpic_read_internal():
    for (i = 0; i < opp->num_mmio_regions; i++) {
        const struct mem_reg *mr = opp->mmio_regions[i];

        if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
            continue;

        return mr->read(opp, addr - mr->start_addr, ptr);
    }
    return -ENXIO;

In kvm_mpic_write_internal():
    for (i = 0; i < opp->num_mmio_regions; i++) {
        const struct mem_reg *mr = opp->mmio_regions[i];

        if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
            continue;

        return mr->write(opp, addr - mr->start_addr, val);
    }
    return -ENXIO;
In kvm_mpic_read():
    if (addr & (len - 1)) {
        ...
        return -EINVAL;
    }

    spin_lock_irq(&opp->lock);
    ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
    spin_unlock_irq(&opp->lock);

    /*
     * Technically only 32-bit accesses are allowed, but be nice to
     * people dumping registers a byte at a time -- it works in real
     * hardware (reads only, not writes).
     */
    ...
    return -EINVAL;
In kvm_mpic_write():
    if (len != 4)
        return -EOPNOTSUPP;
    if (addr & 3)
        return -EOPNOTSUPP;

    spin_lock_irq(&opp->lock);
    ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
                                  *(const u32 *)ptr);
    spin_unlock_irq(&opp->lock);
In map_mmio():
    kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);

    return kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
                                   opp->reg_base, OPENPIC_REG_SIZE,
                                   &opp->mmio);

In unmap_mmio():
    kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
In set_base_addr():
    if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
        return -EFAULT;
    ...
    if (base & 0x3ffff)
        return -EINVAL;    /* must be aligned to the register window */

    if (base == opp->reg_base)
        return 0;

    mutex_lock(&opp->kvm->slots_lock);
    ...
    opp->reg_base = base;
    ...
    mutex_unlock(&opp->kvm->slots_lock);
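From userspace this path is driven with KVM_SET_DEVICE_ATTR on the device fd. A sketch for a powerpc tree, using the constants from the kvm-mpic uapi; per the device documentation, a base of zero disables the mapping:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Move (or, with base == 0, unmap) the MPIC register window. */
    static int mpic_set_base(int mpic_fd, uint64_t base)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_MPIC_GRP_MISC,
            .attr  = KVM_DEV_MPIC_BASE_ADDR,
            .addr  = (uint64_t)(unsigned long)&base,
        };
        return ioctl(mpic_fd, KVM_SET_DEVICE_ATTR, &attr);
    }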
In access_reg():
    if (addr & 3)
        return -ENXIO;
    ...
    spin_lock_irq(&opp->lock);
    /* ATTR_SET -> kvm_mpic_write_internal(), ATTR_GET -> kvm_mpic_read_internal() */
    spin_unlock_irq(&opp->lock);
In mpic_set_attr():
    struct openpic *opp = dev->private;
    u32 attr32;

    switch (attr->group) {
    case KVM_DEV_MPIC_GRP_MISC:
        switch (attr->attr) {
        case KVM_DEV_MPIC_BASE_ADDR:
            return set_base_addr(opp, attr);
        }
        break;

    case KVM_DEV_MPIC_GRP_REGISTER:
        if (get_user(attr32, (u32 __user *)(long)attr->addr))
            return -EFAULT;

        return access_reg(opp, attr->attr, &attr32, ATTR_SET);

    case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
        if (attr->attr > MAX_SRC)
            return -EINVAL;

        if (get_user(attr32, (u32 __user *)(long)attr->addr))
            return -EFAULT;

        if (attr32 != 0 && attr32 != 1)
            return -EINVAL;

        spin_lock_irq(&opp->lock);
        openpic_set_irq(opp, attr->attr, attr32);
        spin_unlock_irq(&opp->lock);
        return 0;
    }

    return -ENXIO;
In mpic_get_attr():
    struct openpic *opp = dev->private;

    switch (attr->group) {
    case KVM_DEV_MPIC_GRP_MISC:
        switch (attr->attr) {
        case KVM_DEV_MPIC_BASE_ADDR:
            mutex_lock(&opp->kvm->slots_lock);
            attr64 = opp->reg_base;
            mutex_unlock(&opp->kvm->slots_lock);

            if (copy_to_user((u64 __user *)(long)attr->addr,
                             &attr64, sizeof(u64)))
                return -EFAULT;
            return 0;
        }
        break;

    case KVM_DEV_MPIC_GRP_REGISTER:
        ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
        if (ret)
            return ret;

        if (put_user(attr32, (u32 __user *)(long)attr->addr))
            return -EFAULT;
        return 0;

    case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
        if (attr->attr > MAX_SRC)
            return -EINVAL;

        spin_lock_irq(&opp->lock);
        attr32 = opp->src[attr->attr].pending;
        spin_unlock_irq(&opp->lock);

        if (put_user(attr32, (u32 __user *)(long)attr->addr))
            return -EFAULT;
        return 0;
    }

    return -ENXIO;
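The register group gives userspace 32-bit access to any emulated register, addressed by its offset into the register bank. A sketch of a read (the write side uses KVM_SET_DEVICE_ATTR and the ATTR_SET path above):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int mpic_read_reg(int mpic_fd, uint32_t offset, uint32_t *val)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_MPIC_GRP_REGISTER,
            .attr  = offset,                       /* 4-byte aligned */
            .addr  = (uint64_t)(unsigned long)val,
        };
        return ioctl(mpic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }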
In mpic_has_attr():
    switch (attr->group) {
    case KVM_DEV_MPIC_GRP_MISC:
        switch (attr->attr) {
        ...
        }
        break;
    case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
        if (attr->attr > MAX_SRC)
            return -EINVAL;
        return 0;
    }
    return -ENXIO;
In mpic_destroy():
    struct openpic *opp = dev->private;
    dev->kvm->arch.mpic = NULL;
    ...

In mpic_set_default_irq_routing():
    if (!routing)
        return -ENOMEM;
    kvm_set_irq_routing(opp->kvm, routing, 0, 0);
In mpic_create():
    if (dev->kvm->arch.mpic)
        return -EINVAL;    /* only one MPIC per VM */

    opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
    if (!opp)
        return -ENOMEM;

    dev->private = opp;
    opp->kvm = dev->kvm;
    opp->dev = dev;
    opp->model = type;
    spin_lock_init(&opp->lock);
    ...
    switch (opp->model) {
    case KVM_DEV_TYPE_FSL_MPIC_20:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        break;

    case KVM_DEV_TYPE_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        break;

    default:
        ret = -ENODEV;
        goto err;
    }
    ...
    dev->kvm->arch.mpic = opp;
    return 0;
In struct kvm_device_ops kvm_mpic_ops:
    .name = "kvm-mpic",
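mpic_create() runs when userspace instantiates this device type. A sketch of that call for the MPIC 4.2 model (KVM_DEV_TYPE_FSL_MPIC_20 selects the 2.0 model instead):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int create_mpic(int vm_fd)
    {
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FSL_MPIC_42 };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
            return -1;
        return cd.fd;   /* device fd for the *_DEVICE_ATTR ioctls above */
    }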
In kvmppc_mpic_connect_vcpu():
    struct openpic *opp = dev->private;
    int ret = 0;

    if (dev->ops != &kvm_mpic_ops)
        return -EPERM;
    if (opp->kvm != vcpu->kvm)
        return -EPERM;
    if (cpu < 0 || cpu >= MAX_CPU)
        return -EPERM;

    spin_lock_irq(&opp->lock);

    if (opp->dst[cpu].vcpu) {
        ret = -EEXIST;
        goto out;
    }
    if (vcpu->arch.irq_type) {
        ret = -EBUSY;
        goto out;
    }

    opp->dst[cpu].vcpu = vcpu;
    opp->nb_cpus = max(opp->nb_cpus, cpu + 1);

    vcpu->arch.mpic = opp;
    vcpu->arch.irq_cpu_id = cpu;
    vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;

    /* This might need to be changed if GCR gets extended */
    if (opp->mpic_mode_mask == GCR_MODE_PROXY)
        vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;

out:
    spin_unlock_irq(&opp->lock);
    return ret;
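This function is reached from the vcpu KVM_ENABLE_CAP path. A userspace sketch, with args[0] carrying the device fd and args[1] the CPU number that becomes irq_cpu_id:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int connect_vcpu_to_mpic(int vcpu_fd, int mpic_fd, int cpu)
    {
        struct kvm_enable_cap cap = {
            .cap  = KVM_CAP_IRQ_MPIC,
            .args = { (__u64)mpic_fd, (__u64)cpu },
        };
        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }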
In kvmppc_mpic_disconnect_vcpu():
    BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);

    opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
In mpic_set_irq() (irq routing callback):
    u32 irq = e->irqchip.pin;
    struct openpic *opp = kvm->arch.mpic;
    unsigned long flags;

    spin_lock_irqsave(&opp->lock, flags);
    openpic_set_irq(opp, irq, level);
    spin_unlock_irqrestore(&opp->lock, flags);
In kvm_set_msi():
    struct openpic *opp = kvm->arch.mpic;
    unsigned long flags;

    spin_lock_irqsave(&opp->lock, flags);
    openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
    spin_unlock_irqrestore(&opp->lock, flags);
In kvm_set_routing_entry():
    int r = -EINVAL;

    switch (ue->type) {
    case KVM_IRQ_ROUTING_IRQCHIP:
        e->set = mpic_set_irq;
        e->irqchip.irqchip = ue->u.irqchip.irqchip;
        e->irqchip.pin = ue->u.irqchip.pin;
        if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
            goto out;
        break;
    case KVM_IRQ_ROUTING_MSI:
        e->set = kvm_set_msi;
        e->msi.address_lo = ue->u.msi.address_lo;
        e->msi.address_hi = ue->u.msi.address_hi;
        e->msi.data = ue->u.msi.data;
        break;
    default:
        goto out;
    }
    r = 0;
out:
    return r;
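A sketch of the userspace side: a two-entry GSI routing table (one irqchip pin, one MSI entry) installed with KVM_SET_GSI_ROUTING. Pin numbers are validated against KVM_IRQCHIP_NUM_PINS as above, and the MSI data word ends up in MSIIR via kvm_set_msi():

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Wrapper gives the flexible entries[] array storage (hypothetical). */
    struct mpic_routes {
        struct kvm_irq_routing        hdr;
        struct kvm_irq_routing_entry  e[2];
    };

    static int set_routes(int vm_fd)
    {
        struct mpic_routes r;

        memset(&r, 0, sizeof(r));
        r.hdr.nr = 2;

        r.e[0].gsi  = 0;
        r.e[0].type = KVM_IRQ_ROUTING_IRQCHIP;
        r.e[0].u.irqchip.pin = 16;          /* < KVM_IRQCHIP_NUM_PINS */

        r.e[1].gsi  = 1;
        r.e[1].type = KVM_IRQ_ROUTING_MSI;
        r.e[1].u.msi.data = 0;              /* written to MSIIR on delivery */

        return ioctl(vm_fd, KVM_SET_GSI_ROUTING, &r.hdr);
    }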