Lines matching full:vpes: identifier search hits in drivers/irqchip/irq-gic-v3-its.c (each entry shows the file line number, the matching source line, and the enclosing function or kind)

175 	struct its_vpe		**vpes;  member
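
The member hit at 175 is the slot array of the GICv4 doorbell proxy, used on hardware that cannot signal vPE doorbells directly. A sketch of the bookkeeping structure it plausibly sits in, reconstructed from the other hits (vpes and next_victim are confirmed by the lines below; the lock and dev fields are assumptions consistent with the proxy-device usage at 5035-5044):

	static struct {
		raw_spinlock_t		lock;		/* assumed: serialises proxy map/unmap */
		struct its_device	*dev;		/* proxy ITS device backing the doorbells */
		struct its_vpe		**vpes;		/* one slot per proxy event, NULL if free */
		int			next_victim;	/* round-robin eviction cursor */
	} vpe_proxy;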
1772 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1776 * and we're better off mapping all VPEs always
1778 * If neither (a) nor (b) is true, then we map vPEs on demand.
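
The three hits at 1772-1778 are fragments of a single policy comment: map every vPE up front when either (a) the GIC is v4.1, which requires vPEs to stay mapped at all times, or (b) no ITS list map is in use, which makes VMOVP cheap; only when neither holds are vPEs mapped on demand. A hedged sketch of the predicate such a comment would sit above (the helper name gic_requires_eager_mapping() and the its_list_map/has_rvpeid flags are reconstructions, not part of the hits):

	static bool gic_requires_eager_mapping(void)
	{
		/* (b): no ITS list map, so VMOVP is cheap: map everything */
		/* (a): GICv4.1 (RVPEID): vPEs must be mapped at all times */
		if (!its_list_map || gic_rdists->has_rvpeid)
			return true;

		return false;
	}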
1797 * If the VM wasn't mapped yet, iterate over the vpes and get [...] in its_map_vm()
1806 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm()
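
The hits at 1797 and 1806 belong to the on-demand path: the first VLPI mapped for a VM on a given ITS triggers a VMAPP of every vPE of that VM. A hedged reconstruction of the loop around 1806 (the vmapp_lock name, the per-ITS vlpi_count refcount, the VINVALL, and the continuation of the truncated 1797 comment are assumptions based on typical ITS bring-up):

	static void its_map_vm(struct its_node *its, struct its_vm *vm)
	{
		if (gic_requires_eager_mapping())
			return;		/* everything was mapped at creation time */

		guard(raw_spinlock_irqsave)(&vm->vmapp_lock);	/* lock name assumed */

		/*
		 * If the VM wasn't mapped yet, iterate over the vpes and get
		 * them mapped now.
		 */
		vm->vlpi_count[its->list_nr]++;
		if (vm->vlpi_count[its->list_nr] == 1) {
			int i;

			for (i = 0; i < vm->nr_vpes; i++) {
				struct its_vpe *vpe = vm->vpes[i];

				guard(raw_spinlock)(&vpe->vpe_lock);
				its_send_vmapp(its, vpe, true);
				its_send_vinvall(its, vpe);
			}
		}
	}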
1828 guard(raw_spinlock)(&vm->vpes[i]->vpe_lock); in its_unmap_vm()
1829 its_send_vmapp(its, vm->vpes[i], false); in its_unmap_vm()
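
Lines 1828-1829 are the mirror image: when the last VLPI of the VM on this ITS goes away, each vPE is unmapped under its own vpe_lock, exactly as the two hits show. A sketch of the surrounding teardown (the refcount and the outer lock are assumptions matching the map side above):

	static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
	{
		/* With eager mapping, vPEs stay mapped for the VM's lifetime */
		if (gic_requires_eager_mapping())
			return;

		guard(raw_spinlock_irqsave)(&vm->vmapp_lock);	/* lock name assumed */

		if (!--vm->vlpi_count[its->list_nr]) {
			int i;

			for (i = 0; i < vm->nr_vpes; i++) {
				guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
				its_send_vmapp(its, vm->vpes[i], false);
			}
		}
	}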
1863 /* Ensure all the VPEs are mapped on this ITS */ in its_vlpi_map()
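
The 1863 comment marks the call site that drives the on-demand mapping: before a VLPI is established, its_vlpi_map() makes sure the whole VM is mapped on the target ITS. Roughly (the its_dev and map variable names are assumed from context):

	/* Ensure all the VPEs are mapped on this ITS */
	its_map_vm(its_dev->its, map->vm);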
3717 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3724 * effect... Let's just hope VPEs don't migrate too often. in its_vpe_db_proxy_unmap_locked()
3726 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_unmap_locked()
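
The hits at 3717-3726 come from the doorbell proxy path used when the redistributors lack DirectLPI: each resident vPE borrows an event on the shared proxy device, and slots are reclaimed with a deliberately simple round-robin victim scheme, which is what the "Let's just hope VPEs don't migrate too often" fragment refers to. A reconstruction of the unmap side (the DISCARD of the proxy event and the early-out are assumptions consistent with ITS semantics):

	static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
	{
		/* Already unmapped? */
		if (vpe->vpe_proxy_event == -1)
			return;

		its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
		vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;

		/*
		 * If the cursor still points at an occupied slot, retarget
		 * it at the one we just freed. Not obviously a big win in
		 * effect... Let's just hope VPEs don't migrate too often.
		 */
		if (vpe_proxy.vpes[vpe_proxy.next_victim])
			vpe_proxy.next_victim = vpe->vpe_proxy_event;

		vpe->vpe_proxy_event = -1;
	}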
3758 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_map_locked()
3759 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); in its_vpe_db_proxy_map_locked()
3762 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
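
Lines 3758-3762 are the map side: if the slot under the cursor is occupied, its current owner is evicted first, then the new vPE takes the slot and the cursor advances. A sketch (the cursor wrap-around over the proxy device's ITEs and the final MAPTI of the doorbell LPI are assumptions):

	static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
	{
		/* Already mapped? */
		if (vpe->vpe_proxy_event != -1)
			return;

		/* The slot is taken: kick the current occupant out first */
		if (vpe_proxy.vpes[vpe_proxy.next_victim])
			its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

		/* Map the new vPE into the freed slot */
		vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
		vpe->vpe_proxy_event = vpe_proxy.next_victim;
		vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;

		its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
	}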
3818 * If we lazily map the VPEs, this isn't an error and [...] in its_vpe_set_affinity()
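
The 3818 comment deals with moving a vPE whose VM is not currently mapped on any ITS: under eager mapping that would be a bug, but with lazy mapping there is simply no VMOVP to issue yet. A hedged sketch of how its_vpe_set_affinity() might early-out in that case (the vmapp_count check is an assumption):

	/* Is this vPE mapped on any ITS at the moment? */
	if (!atomic_read(&vpe->vmapp_count)) {
		if (gic_requires_eager_mapping())
			return -EINVAL;		/* must not happen when mapping eagerly */

		/*
		 * If we lazily map the VPEs, this isn't an error and we
		 * can just record the new affinity and exit cleanly.
		 */
		cpu = cpumask_first(mask_val);
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
		return IRQ_SET_MASK_OK_DONE;
	}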
4558 vm->vpes[i]->vpe_db_lpi = base + i; in its_vpe_irq_domain_alloc()
4559 err = its_vpe_init(vm->vpes[i]); in its_vpe_irq_domain_alloc()
4563 vm->vpes[i]->vpe_db_lpi); in its_vpe_irq_domain_alloc()
4567 irqchip, vm->vpes[i]); in its_vpe_irq_domain_alloc()
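
Lines 4558-4567 are the per-vPE setup loop of the vPE irq domain: each vPE gets a doorbell LPI carved out of a contiguous block starting at base, is initialised, has a Linux interrupt allocated for that doorbell, and is installed as the chip data of its virq. A reconstruction of the loop (the bitmap bookkeeping and error unwinding are elided, and the variable names outside the hits are assumed):

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;

		err = its_vpe_init(vm->vpes[i]);
		if (err)
			break;

		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;

		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      irqchip, vm->vpes[i]);
	}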
5035 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), in its_init_vpe_domain()
5037 if (!vpe_proxy.vpes) in its_init_vpe_domain()
5044 kfree(vpe_proxy.vpes); in its_init_vpe_domain()
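
Finally, 5035-5044 size and allocate the proxy slot array at init time and free it again if the proxy device cannot be created. A sketch of the surrounding setup (the power-of-two sizing over nr_cpu_ids, the DevID choice, and the error message are assumptions):

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes)
		return -ENOMEM;

	/* Assumed: use the last possible DevID for the proxy device */
	devid = GENMASK(device_ids(its) - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}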