Lines Matching full:imsic
12 #include <linux/irqchip/riscv-imsic.h>
33 struct imsic { struct
44 * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
45 * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
48 /* IMSIC VS-file */
55 /* IMSIC SW-file */
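For orientation, here is a minimal sketch of the per-VCPU IMSIC context implied by the fields used in the matches below; the field names come from this listing, but the types are assumptions inferred from usage, not the authoritative definition in the source file.

        /* Sketch only: field names taken from the matches, types assumed. */
        struct imsic {
                struct kvm_io_device iodev;     /* MMIO device, see kvm_iodevice_init() at line 1057 */

                u32 nr_msis;                    /* kvm->arch.aia.nr_ids + 1 (line 1039) */
                u32 nr_eix;                     /* BITS_TO_U64(nr_msis) (line 1041) */
                u32 nr_hw_eix;                  /* BITS_TO_U64(kvm_riscv_aia_max_ids) (line 1042) */

                /* IMSIC VS-file (hardware backing, in use while vsfile_cpu >= 0) */
                rwlock_t vsfile_lock;
                int vsfile_cpu;
                int vsfile_hgei;
                void __iomem *vsfile_va;
                phys_addr_t vsfile_pa;

                /* IMSIC SW-file (software backing, used while vsfile_cpu < 0) */
                struct imsic_mrif *swfile;
                phys_addr_t swfile_pa;
                raw_spinlock_t swfile_extirq_lock;
        };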
426 /* We can only read and clear if we have an IMSIC VS-file */ in imsic_vsfile_read()
496 /* We can only access registers if we have an IMSIC VS-file */ in imsic_vsfile_rw()
500 /* Check IMSIC register iselect */ in imsic_vsfile_rw()
524 /* We can only zero out if we have an IMSIC VS-file */ in imsic_vsfile_local_clear()
556 /* We can only update if we have a HW IMSIC context */ in imsic_vsfile_local_update()
588 static void imsic_vsfile_cleanup(struct imsic *imsic) in imsic_vsfile_cleanup() argument
599 write_lock_irqsave(&imsic->vsfile_lock, flags); in imsic_vsfile_cleanup()
600 old_vsfile_hgei = imsic->vsfile_hgei; in imsic_vsfile_cleanup()
601 old_vsfile_cpu = imsic->vsfile_cpu; in imsic_vsfile_cleanup()
602 imsic->vsfile_cpu = imsic->vsfile_hgei = -1; in imsic_vsfile_cleanup()
603 imsic->vsfile_va = NULL; in imsic_vsfile_cleanup()
604 imsic->vsfile_pa = 0; in imsic_vsfile_cleanup()
605 write_unlock_irqrestore(&imsic->vsfile_lock, flags); in imsic_vsfile_cleanup()
607 memset(imsic->swfile, 0, sizeof(*imsic->swfile)); in imsic_vsfile_cleanup()
615 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in imsic_swfile_extirq_update() local
616 struct imsic_mrif *mrif = imsic->swfile; in imsic_swfile_extirq_update()
625 raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); in imsic_swfile_extirq_update()
628 imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis)) in imsic_swfile_extirq_update()
633 raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); in imsic_swfile_extirq_update()
639 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in imsic_swfile_read() local
648 memcpy(mrif, imsic->swfile, sizeof(*mrif)); in imsic_swfile_read()
650 memset(imsic->swfile, 0, sizeof(*imsic->swfile)); in imsic_swfile_read()
660 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in imsic_swfile_update() local
661 struct imsic_mrif *smrif = imsic->swfile; in imsic_swfile_update()
665 for (i = 0; i < imsic->nr_eix; i++) { in imsic_swfile_update()
684 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_vcpu_aia_imsic_release() local
686 /* Read and clear IMSIC VS-file details */ in kvm_riscv_vcpu_aia_imsic_release()
687 write_lock_irqsave(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_release()
688 old_vsfile_hgei = imsic->vsfile_hgei; in kvm_riscv_vcpu_aia_imsic_release()
689 old_vsfile_cpu = imsic->vsfile_cpu; in kvm_riscv_vcpu_aia_imsic_release()
690 imsic->vsfile_cpu = imsic->vsfile_hgei = -1; in kvm_riscv_vcpu_aia_imsic_release()
691 imsic->vsfile_va = NULL; in kvm_riscv_vcpu_aia_imsic_release()
692 imsic->vsfile_pa = 0; in kvm_riscv_vcpu_aia_imsic_release()
693 write_unlock_irqrestore(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_release()
695 /* Do nothing if there is no IMSIC VS-file to release */ in kvm_riscv_vcpu_aia_imsic_release()
701 * the old IMSIC VS-file so we first re-direct all interrupt in kvm_riscv_vcpu_aia_imsic_release()
714 * to somewhere else so we move register state from the old IMSIC in kvm_riscv_vcpu_aia_imsic_release()
715 * VS-file to the IMSIC SW-file. in kvm_riscv_vcpu_aia_imsic_release()
718 /* Read and clear register state from old IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_release()
720 imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix, in kvm_riscv_vcpu_aia_imsic_release()
723 /* Update register state in IMSIC SW-file */ in kvm_riscv_vcpu_aia_imsic_release()
726 /* Free up the old IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_release()
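Because only the matching lines appear above, the release path reads in fragments; pieced together it amounts to roughly the following. This is a hedged reconstruction from those fragments: the interrupt-producer re-direction and the VS-file free calls are not visible in the matches and are left as comments, and the items marked "assumed" are not confirmed by the listing.

        /* Hedged reconstruction of kvm_riscv_vcpu_aia_imsic_release() */
        write_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
        imsic->vsfile_va = NULL;
        imsic->vsfile_pa = 0;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        if (old_vsfile_cpu < 0)                 /* nothing to release (check assumed) */
                return;

        /* 1) Re-direct interrupt producers away from the old VS-file
         *    (call not visible in the matching lines). */

        /* 2) Move register state from the old VS-file into the SW-file. */
        imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
                          true, &tmrif);        /* trailing arguments as at line 814 */
        imsic_swfile_update(vcpu, &tmrif);      /* signature assumed */

        /* 3) Free up the old VS-file (call not visible in the matches). */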
739 struct imsic *imsic = vaia->imsic_state; in kvm_riscv_vcpu_aia_imsic_update() local
746 /* Read old IMSIC VS-file details */ in kvm_riscv_vcpu_aia_imsic_update()
747 read_lock_irqsave(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_update()
748 old_vsfile_hgei = imsic->vsfile_hgei; in kvm_riscv_vcpu_aia_imsic_update()
749 old_vsfile_cpu = imsic->vsfile_cpu; in kvm_riscv_vcpu_aia_imsic_update()
750 read_unlock_irqrestore(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_update()
756 /* Allocate new IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
769 /* Release old IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
780 * to the old IMSIC VS-file so we first move all interrupt in kvm_riscv_vcpu_aia_imsic_update()
781 * producers to the new IMSIC VS-file. in kvm_riscv_vcpu_aia_imsic_update()
784 /* Zero out the new IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
785 imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix); in kvm_riscv_vcpu_aia_imsic_update()
787 /* Update G-stage mapping for the new IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
796 /* Update new IMSIC VS-file details in IMSIC context */ in kvm_riscv_vcpu_aia_imsic_update()
797 write_lock_irqsave(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_update()
798 imsic->vsfile_hgei = new_vsfile_hgei; in kvm_riscv_vcpu_aia_imsic_update()
799 imsic->vsfile_cpu = vcpu->cpu; in kvm_riscv_vcpu_aia_imsic_update()
800 imsic->vsfile_va = new_vsfile_va; in kvm_riscv_vcpu_aia_imsic_update()
801 imsic->vsfile_pa = new_vsfile_pa; in kvm_riscv_vcpu_aia_imsic_update()
802 write_unlock_irqrestore(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_update()
806 * to the new IMSIC VS-file so we move register state from in kvm_riscv_vcpu_aia_imsic_update()
807 * the old IMSIC VS/SW-file to the new IMSIC VS-file. in kvm_riscv_vcpu_aia_imsic_update()
812 /* Read and clear register state from old IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
814 imsic->nr_hw_eix, true, &tmrif); in kvm_riscv_vcpu_aia_imsic_update()
816 /* Free up the old IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
819 /* Read and clear register state from IMSIC SW-file */ in kvm_riscv_vcpu_aia_imsic_update()
823 /* Restore register state in the new IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
824 imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif); in kvm_riscv_vcpu_aia_imsic_update()
827 /* Set VCPU HSTATUS.VGEIN to new IMSIC VS-file */ in kvm_riscv_vcpu_aia_imsic_update()
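The update path is longer; a hedged reconstruction of its fragments above follows. The new VS-file allocation, the G-stage mapping change, the old-file release, and the final HSTATUS.VGEIN write are not visible in the matching lines and appear only as comments; conditions and signatures marked below are assumptions.

        /* Hedged reconstruction of kvm_riscv_vcpu_aia_imsic_update() */
        read_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /* Allocate a new VS-file; on failure, release the old one and keep
         * running on the SW-file (calls not visible in the matches). */

        /* Interrupt producers may still target the old VS-file, so move them
         * first: zero the new VS-file, then switch the G-stage mapping. */
        imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
        /* ... G-stage mapping update for the new VS-file ... */

        /* Publish the new VS-file details in the IMSIC context. */
        write_lock_irqsave(&imsic->vsfile_lock, flags);
        imsic->vsfile_hgei = new_vsfile_hgei;
        imsic->vsfile_cpu = vcpu->cpu;
        imsic->vsfile_va = new_vsfile_va;
        imsic->vsfile_pa = new_vsfile_pa;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /* Consumers still see state in the old VS/SW-file, so migrate it. */
        if (old_vsfile_cpu >= 0) {              /* condition assumed */
                imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
                                  imsic->nr_hw_eix, true, &tmrif);
                /* ... free up the old VS-file ... */
        } else {
                imsic_swfile_read(vcpu, true, &tmrif);  /* signature assumed */
        }
        imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);

        /* Finally, point HSTATUS.VGEIN at the new VS-file. */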
848 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_vcpu_aia_imsic_rmw() local
852 topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix, in kvm_riscv_vcpu_aia_imsic_rmw()
853 imsic->nr_msis); in kvm_riscv_vcpu_aia_imsic_rmw()
861 eix = &imsic->swfile->eix[topei / in kvm_riscv_vcpu_aia_imsic_rmw()
868 r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel, in kvm_riscv_vcpu_aia_imsic_rmw()
870 /* Forward unknown IMSIC register to user-space */ in kvm_riscv_vcpu_aia_imsic_rmw()
886 struct imsic *imsic; in kvm_riscv_aia_imsic_rw_attr() local
899 imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_aia_imsic_rw_attr()
901 read_lock_irqsave(&imsic->vsfile_lock, flags); in kvm_riscv_aia_imsic_rw_attr()
904 vsfile_hgei = imsic->vsfile_hgei; in kvm_riscv_aia_imsic_rw_attr()
905 vsfile_cpu = imsic->vsfile_cpu; in kvm_riscv_aia_imsic_rw_attr()
908 rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, in kvm_riscv_aia_imsic_rw_attr()
912 rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, in kvm_riscv_aia_imsic_rw_attr()
916 read_unlock_irqrestore(&imsic->vsfile_lock, flags); in kvm_riscv_aia_imsic_rw_attr()
919 rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix, in kvm_riscv_aia_imsic_rw_attr()
928 struct imsic *imsic; in kvm_riscv_aia_imsic_has_attr() local
940 imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_aia_imsic_has_attr()
941 return imsic_mrif_isel_check(imsic->nr_eix, isel); in kvm_riscv_aia_imsic_has_attr()
946 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_vcpu_aia_imsic_reset() local
948 if (!imsic) in kvm_riscv_vcpu_aia_imsic_reset()
953 memset(imsic->swfile, 0, sizeof(*imsic->swfile)); in kvm_riscv_vcpu_aia_imsic_reset()
961 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_vcpu_aia_imsic_inject() local
963 /* We only emulate one IMSIC MMIO page for each Guest VCPU */ in kvm_riscv_vcpu_aia_imsic_inject()
964 if (!imsic || !iid || guest_index || in kvm_riscv_vcpu_aia_imsic_inject()
970 if (imsic->nr_msis <= iid) in kvm_riscv_vcpu_aia_imsic_inject()
973 read_lock_irqsave(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_inject()
975 if (imsic->vsfile_cpu >= 0) { in kvm_riscv_vcpu_aia_imsic_inject()
976 writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE); in kvm_riscv_vcpu_aia_imsic_inject()
979 eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)]; in kvm_riscv_vcpu_aia_imsic_inject()
984 read_unlock_irqrestore(&imsic->vsfile_lock, flags); in kvm_riscv_vcpu_aia_imsic_inject()
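The injection matches show the resulting split between the two backings directly; spelled out, with the SW-file pending-bit update (not part of the matching lines) left as a comment:

        read_lock_irqsave(&imsic->vsfile_lock, flags);
        if (imsic->vsfile_cpu >= 0) {
                /* Hardware backing: post the MSI straight into the guest's
                 * VS-file via its IMSIC_MMIO_SETIPNUM_LE register. */
                writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
        } else {
                /* Software backing: locate the EIx entry covering iid ... */
                eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
                /* ... set the pending bit for iid and re-evaluate the VCPU
                 * external interrupt line (details not in the matches). */
        }
        read_unlock_irqrestore(&imsic->vsfile_lock, flags);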
1024 struct imsic *imsic; in kvm_riscv_vcpu_aia_imsic_init() local
1032 /* Allocate IMSIC context */ in kvm_riscv_vcpu_aia_imsic_init()
1033 imsic = kzalloc(sizeof(*imsic), GFP_KERNEL); in kvm_riscv_vcpu_aia_imsic_init()
1034 if (!imsic) in kvm_riscv_vcpu_aia_imsic_init()
1036 vcpu->arch.aia_context.imsic_state = imsic; in kvm_riscv_vcpu_aia_imsic_init()
1038 /* Setup IMSIC context */ in kvm_riscv_vcpu_aia_imsic_init()
1039 imsic->nr_msis = kvm->arch.aia.nr_ids + 1; in kvm_riscv_vcpu_aia_imsic_init()
1040 rwlock_init(&imsic->vsfile_lock); in kvm_riscv_vcpu_aia_imsic_init()
1041 imsic->nr_eix = BITS_TO_U64(imsic->nr_msis); in kvm_riscv_vcpu_aia_imsic_init()
1042 imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids); in kvm_riscv_vcpu_aia_imsic_init()
1043 imsic->vsfile_hgei = imsic->vsfile_cpu = -1; in kvm_riscv_vcpu_aia_imsic_init()
1045 /* Setup IMSIC SW-file */ in kvm_riscv_vcpu_aia_imsic_init()
1047 get_order(sizeof(*imsic->swfile))); in kvm_riscv_vcpu_aia_imsic_init()
1052 imsic->swfile = page_to_virt(swfile_page); in kvm_riscv_vcpu_aia_imsic_init()
1053 imsic->swfile_pa = page_to_phys(swfile_page); in kvm_riscv_vcpu_aia_imsic_init()
1054 raw_spin_lock_init(&imsic->swfile_extirq_lock); in kvm_riscv_vcpu_aia_imsic_init()
1057 kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops); in kvm_riscv_vcpu_aia_imsic_init()
1062 &imsic->iodev); in kvm_riscv_vcpu_aia_imsic_init()
1070 free_pages((unsigned long)imsic->swfile, in kvm_riscv_vcpu_aia_imsic_init()
1071 get_order(sizeof(*imsic->swfile))); in kvm_riscv_vcpu_aia_imsic_init()
1074 kfree(imsic); in kvm_riscv_vcpu_aia_imsic_init()
1081 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; in kvm_riscv_vcpu_aia_imsic_cleanup() local
1083 if (!imsic) in kvm_riscv_vcpu_aia_imsic_cleanup()
1086 imsic_vsfile_cleanup(imsic); in kvm_riscv_vcpu_aia_imsic_cleanup()
1089 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev); in kvm_riscv_vcpu_aia_imsic_cleanup()
1092 free_pages((unsigned long)imsic->swfile, in kvm_riscv_vcpu_aia_imsic_cleanup()
1093 get_order(sizeof(*imsic->swfile))); in kvm_riscv_vcpu_aia_imsic_cleanup()
1096 kfree(imsic); in kvm_riscv_vcpu_aia_imsic_cleanup()