1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 #include <linux/entry-kvm.h>
85 * This functions as an allow-list of protected VM capabilities.
112 int r = -EINVAL; in kvm_vm_ioctl_enable_cap()
114 if (cap->flags) in kvm_vm_ioctl_enable_cap()
115 return -EINVAL; in kvm_vm_ioctl_enable_cap()
117 if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap)) in kvm_vm_ioctl_enable_cap()
118 return -EINVAL; in kvm_vm_ioctl_enable_cap()
120 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
124 &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
127 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
128 if (system_supports_mte() && !kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
130 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
132 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
136 set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
139 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
145 u64 new_cap = cap->args[0]; in kvm_vm_ioctl_enable_cap()
149 kvm->arch.mmu.split_page_chunk_size = new_cap; in kvm_vm_ioctl_enable_cap()
152 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
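
As an illustration of the allow-list and vCPU-count checks above, here is a minimal userspace sketch (hypothetical helper name, assuming an already-created VM fd) that enables MTE through KVM_ENABLE_CAP before any vCPU exists:

/* Hypothetical sketch: enable KVM_CAP_ARM_MTE on a VM before creating vCPUs. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_mte(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_MTE,		/* flags/args must stay zero */
	};

	/* Issued after KVM_CREATE_VCPU, this would hit the -EINVAL path above. */
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0) {
		perror("KVM_ENABLE_CAP(KVM_CAP_ARM_MTE)");
		return -1;
	}
	return 0;
}
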
167 * kvm_arch_init_vm - initializes a VM data structure
175 mutex_init(&kvm->arch.config_lock); in kvm_arch_init_vm()
178 /* Clue in lockdep that the config_lock must be taken inside kvm->lock */ in kvm_arch_init_vm()
179 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
180 mutex_lock(&kvm->arch.config_lock); in kvm_arch_init_vm()
181 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_init_vm()
182 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
195 if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) { in kvm_arch_init_vm()
196 ret = -ENOMEM; in kvm_arch_init_vm()
199 cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); in kvm_arch_init_vm()
201 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type); in kvm_arch_init_vm()
210 kvm->max_vcpus = kvm_arm_default_max_vcpus(); in kvm_arch_init_vm()
214 bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); in kvm_arch_init_vm()
219 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_init_vm()
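
The type argument that kvm_arch_init_vm() passes to kvm_init_stage2_mmu() comes from the KVM_CREATE_VM machine type. A hedged userspace sketch (hypothetical helper, kvm_fd assumed to be an open /dev/kvm) requesting an explicit IPA width:

/* Hypothetical sketch: create a VM with an explicit IPA size. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vm(int kvm_fd, int ipa_bits)
{
	unsigned long type = 0;

	if (ipa_bits) {
		/* KVM_CAP_ARM_VM_IPA_SIZE reports the largest supported width. */
		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE) < ipa_bits)
			return -1;
		type = KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits);
	}

	return ioctl(kvm_fd, KVM_CREATE_VM, type);	/* returns the VM fd */
}
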
240 mutex_lock(&kvm->arch.config_lock); in kvm_destroy_mpidr_data()
242 data = rcu_dereference_protected(kvm->arch.mpidr_data, in kvm_destroy_mpidr_data()
243 lockdep_is_held(&kvm->arch.config_lock)); in kvm_destroy_mpidr_data()
245 rcu_assign_pointer(kvm->arch.mpidr_data, NULL); in kvm_destroy_mpidr_data()
250 mutex_unlock(&kvm->arch.config_lock); in kvm_destroy_mpidr_data()
254 * kvm_arch_destroy_vm - destroy the VM data structure
259 bitmap_free(kvm->arch.pmu_filter); in kvm_arch_destroy_vm()
260 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_destroy_vm()
269 kfree(kvm->arch.sysreg_masks); in kvm_arch_destroy_vm()
285 * - both Address and Generic auth are implemented for a given in kvm_has_full_ptr_auth()
287 * - only a single algorithm is implemented. in kvm_has_full_ptr_auth()
362 r = kvm->max_vcpus; in kvm_vm_ioctl_check_extension()
368 r = -EINVAL; in kvm_vm_ioctl_check_extension()
370 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
412 r = kvm->arch.mmu.split_page_chunk_size; in kvm_vm_ioctl_check_extension()
432 return -EINVAL; in kvm_arch_dev_ioctl()
448 return -EBUSY; in kvm_arch_vcpu_precreate()
450 if (id >= kvm->max_vcpus) in kvm_arch_vcpu_precreate()
451 return -EINVAL; in kvm_arch_vcpu_precreate()
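
kvm_arch_vcpu_precreate() rejects vCPU IDs at or above kvm->max_vcpus; from userspace that limit is visible via KVM_CHECK_EXTENSION. A minimal sketch (hypothetical helper name):

/* Hypothetical sketch: create vCPUs without exceeding the reported maximum. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vcpus(int vm_fd, int nr_vcpus, int *vcpu_fds)
{
	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);

	if (max > 0 && nr_vcpus > max)
		return -1;	/* would hit the -EINVAL path in kvm_arch_vcpu_precreate() */

	for (int i = 0; i < nr_vcpus; i++) {
		vcpu_fds[i] = ioctl(vm_fd, KVM_CREATE_VCPU, (unsigned long)i);
		if (vcpu_fds[i] < 0)
			return -1;
	}
	return 0;
}
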
460 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
463 /* Inform lockdep that the config_lock is acquired after vcpu->mutex */ in kvm_arch_vcpu_create()
464 mutex_lock(&vcpu->mutex); in kvm_arch_vcpu_create()
465 mutex_lock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
466 mutex_unlock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
467 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_create()
473 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
482 kvm_arm_pvtime_vcpu_init(&vcpu->arch); in kvm_arch_vcpu_create()
484 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
488 * Throw out the pre-computed mappings if that is the case, which forces in kvm_arch_vcpu_create()
491 kvm_destroy_mpidr_data(vcpu->kvm); in kvm_arch_vcpu_create()
506 if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_destroy()
509 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
538 vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); in vcpu_set_pauth_traps()
539 vcpu->arch.hcr_el2 |= val; in vcpu_set_pauth_traps()
541 vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); in vcpu_set_pauth_traps()
549 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) { in vcpu_set_pauth_traps()
564 (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) || in kvm_vcpu_should_clear_twi()
565 vcpu->kvm->arch.vgic.nassgireq); in kvm_vcpu_should_clear_twi()
584 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
585 last_ran = this_cpu_ptr(mmu->last_vcpu_ran); in kvm_arch_vcpu_load()
588 * We guarantee that both TLBs and I-cache are private to each in kvm_arch_vcpu_load()
594 * over-invalidation doesn't affect correctness. in kvm_arch_vcpu_load()
596 if (*last_ran != vcpu->vcpu_idx) { in kvm_arch_vcpu_load()
598 *last_ran = vcpu->vcpu_idx; in kvm_arch_vcpu_load()
601 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
609 if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) in kvm_arch_vcpu_load()
613 vcpu->arch.hcr_el2 &= ~HCR_TWE; in kvm_arch_vcpu_load()
615 vcpu->arch.hcr_el2 |= HCR_TWE; in kvm_arch_vcpu_load()
618 vcpu->arch.hcr_el2 &= ~HCR_TWI; in kvm_arch_vcpu_load()
620 vcpu->arch.hcr_el2 |= HCR_TWI; in kvm_arch_vcpu_load()
626 if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) in kvm_arch_vcpu_load()
644 vcpu->cpu = -1; in kvm_arch_vcpu_put()
649 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_arm_vcpu_power_off()
656 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
658 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
663 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_arm_vcpu_stopped()
668 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED); in kvm_arm_vcpu_suspend()
675 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED; in kvm_arm_vcpu_suspended()
681 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
691 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
693 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
695 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
704 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
707 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
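
The MP-state accessors above back the KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls; a brief userspace sketch (hypothetical helpers):

/* Hypothetical sketch: stop a vCPU, or query whether it is stopped. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_set_stopped(int vcpu_fd, int stopped)
{
	struct kvm_mp_state mp = {
		.mp_state = stopped ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}

static int vcpu_is_stopped(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;
	return mp.mp_state == KVM_MP_STATE_STOPPED;
}
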
713 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
723 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); in kvm_arch_vcpu_runnable()
745 mutex_lock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
747 if (rcu_access_pointer(kvm->arch.mpidr_data) || in kvm_init_mpidr_data()
748 atomic_read(&kvm->online_vcpus) == 1) in kvm_init_mpidr_data()
776 data->mpidr_mask = mask; in kvm_init_mpidr_data()
780 u16 index = kvm_mpidr_index(data, aff); in kvm_init_mpidr_data()
782 data->cmpidr_to_idx[index] = c; in kvm_init_mpidr_data()
785 rcu_assign_pointer(kvm->arch.mpidr_data, data); in kvm_init_mpidr_data()
787 mutex_unlock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
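
kvm_init_mpidr_data() builds a mask of the affinity bits that differ between vCPUs and uses kvm_mpidr_index() to turn an MPIDR into a slot in cmpidr_to_idx[]. A simplified, self-contained illustration of that bit-gathering idea (not the kernel helper itself):

/* Simplified illustration: pack the MPIDR bits selected by the mask, LSB first. */
#include <stdint.h>

static uint16_t mpidr_to_index(uint64_t aff, uint64_t mpidr_mask)
{
	uint16_t index = 0;
	int out = 0;

	for (int bit = 0; bit < 64; bit++) {
		if (!(mpidr_mask & (1ULL << bit)))
			continue;
		if (aff & (1ULL << bit))
			index |= (uint16_t)(1U << out);
		out++;
	}
	return index;	/* dense index, so the lookup table stays small */
}
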
797 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_run_pid_change()
801 return -ENOEXEC; in kvm_arch_vcpu_run_pid_change()
804 return -EPERM; in kvm_arch_vcpu_run_pid_change()
867 mutex_lock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
868 set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); in kvm_arch_vcpu_run_pid_change()
869 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
885 vcpu->arch.pause = true; in kvm_arm_halt_guest()
895 vcpu->arch.pause = false; in kvm_arm_resume_guest()
905 (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_vcpu_sleep()
908 if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_vcpu_sleep()
922 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
934 * kvm_arch_vcpu_runnable has up-to-date data to decide whether in kvm_vcpu_wfi()
974 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); in kvm_vcpu_suspend()
975 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP; in kvm_vcpu_suspend()
976 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in kvm_vcpu_suspend()
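
kvm_vcpu_suspend() reports the wakeup back to userspace as a system event; a VMM's exit handler might recognise it as in this hedged sketch (hypothetical helper):

/* Hypothetical sketch: resume a suspended vCPU on KVM_SYSTEM_EVENT_WAKEUP. */
#include <linux/kvm.h>

static int handle_system_event(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return -1;

	if (run->system_event.type == KVM_SYSTEM_EVENT_WAKEUP)
		return 0;	/* nothing to do: just call KVM_RUN again */

	return -1;		/* shutdown/reset/crash: policy decision for the VMM */
}
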
989 * check_vcpu_requests - check and handle pending vCPU requests
1001 return -EIO; in check_vcpu_requests()
1056 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
1067 * for pending work and re-enter), return true without writing to ret.
1071 struct kvm_run *run = vcpu->run; in kvm_vcpu_exit_request()
1083 *ret = -EINTR; in kvm_vcpu_exit_request()
1084 run->exit_reason = KVM_EXIT_INTR; in kvm_vcpu_exit_request()
1090 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvm_vcpu_exit_request()
1091 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED; in kvm_vcpu_exit_request()
1092 run->fail_entry.cpu = smp_processor_id(); in kvm_vcpu_exit_request()
1120 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1131 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1134 if (run->exit_reason == KVM_EXIT_MMIO) { in kvm_arch_vcpu_ioctl_run()
1142 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
1143 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
1150 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
1151 run->flags = 0; in kvm_arch_vcpu_ioctl_run()
1166 * non-preemptible context. in kvm_arch_vcpu_ioctl_run()
1173 * preserved on VMID roll-over if the task was preempted, in kvm_arch_vcpu_ioctl_run()
1175 * kvm_arm_vmid_update() in non-preemptible context. in kvm_arch_vcpu_ioctl_run()
1177 if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && in kvm_arch_vcpu_ioctl_run()
1179 __load_stage2(vcpu->arch.hw_mmu, in kvm_arch_vcpu_ioctl_run()
1180 vcpu->arch.hw_mmu->arch); in kvm_arch_vcpu_ioctl_run()
1194 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
1196 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
1199 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1221 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1222 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
1290 * As we have caught the guest red-handed, decide that in kvm_arch_vcpu_ioctl_run()
1302 /* Tell userspace about in-kernel device output levels */ in kvm_arch_vcpu_ioctl_run()
1303 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
1316 * being preempt-safe on VHE. in kvm_arch_vcpu_ioctl_run()
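
From userspace, the exits produced by kvm_arch_vcpu_ioctl_run() (KVM_EXIT_INTR, KVM_EXIT_MMIO, ...) are consumed through the mmap'ed kvm_run structure. A minimal run-loop sketch (hypothetical helper, error handling trimmed):

/* Hypothetical sketch: minimal vCPU run loop. */
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int run_vcpu(int kvm_fd, int vcpu_fd)
{
	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (sz < 0)
		return -1;
	run = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_INTR:	/* signal delivery, as set up in the run loop above */
			continue;
		case KVM_EXIT_MMIO:
			/* emulate the access described in run->mmio, then re-enter */
			continue;
		default:
			return -1;
		}
	}
}
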
1351 * trigger a world-switch round on the running physical CPU to set the in vcpu_interrupt_line()
1352 * virtual IRQ/FIQ fields in the HCR appropriately. in vcpu_interrupt_line()
1363 u32 irq = irq_level->irq; in kvm_vm_ioctl_irq_line()
1366 bool level = irq_level->level; in kvm_vm_ioctl_irq_line()
1373 trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level); in kvm_vm_ioctl_irq_line()
1378 return -ENXIO; in kvm_vm_ioctl_irq_line()
1382 return -EINVAL; in kvm_vm_ioctl_irq_line()
1385 return -EINVAL; in kvm_vm_ioctl_irq_line()
1390 return -ENXIO; in kvm_vm_ioctl_irq_line()
1394 return -EINVAL; in kvm_vm_ioctl_irq_line()
1397 return -EINVAL; in kvm_vm_ioctl_irq_line()
1402 return -ENXIO; in kvm_vm_ioctl_irq_line()
1405 return -EINVAL; in kvm_vm_ioctl_irq_line()
1410 return -EINVAL; in kvm_vm_ioctl_irq_line()
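
For the KVM_ARM_IRQ_TYPE_CPU case handled above (no in-kernel GIC), userspace packs the target vCPU and the IRQ/FIQ selector into kvm_irq_level.irq. A hedged sketch (hypothetical helper):

/* Hypothetical sketch: raise/lower the virtual IRQ or FIQ line of one vCPU. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_cpu_irq_line(int vm_fd, unsigned int vcpu_idx, int fiq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
		       ((vcpu_idx & KVM_ARM_IRQ_VCPU_MASK) << KVM_ARM_IRQ_VCPU_SHIFT) |
		       (fiq ? KVM_ARM_IRQ_CPU_FIQ : KVM_ARM_IRQ_CPU_IRQ),
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}
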
1440 unsigned long features = init->features[0]; in kvm_vcpu_init_check_features()
1444 return -ENOENT; in kvm_vcpu_init_check_features()
1446 for (i = 1; i < ARRAY_SIZE(init->features); i++) { in kvm_vcpu_init_check_features()
1447 if (init->features[i]) in kvm_vcpu_init_check_features()
1448 return -ENOENT; in kvm_vcpu_init_check_features()
1452 return -EINVAL; in kvm_vcpu_init_check_features()
1460 return -EINVAL; in kvm_vcpu_init_check_features()
1466 if (kvm_has_mte(vcpu->kvm)) in kvm_vcpu_init_check_features()
1467 return -EINVAL; in kvm_vcpu_init_check_features()
1471 return -EINVAL; in kvm_vcpu_init_check_features()
1479 unsigned long features = init->features[0]; in kvm_vcpu_init_changed()
1481 return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, in kvm_vcpu_init_changed()
1487 struct kvm *kvm = vcpu->kvm; in kvm_setup_vcpu()
1494 if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) in kvm_setup_vcpu()
1507 unsigned long features = init->features[0]; in __kvm_vcpu_set_target()
1508 struct kvm *kvm = vcpu->kvm; in __kvm_vcpu_set_target()
1509 int ret = -EINVAL; in __kvm_vcpu_set_target()
1511 mutex_lock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1513 if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && in __kvm_vcpu_set_target()
1517 bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); in __kvm_vcpu_set_target()
1526 set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); in __kvm_vcpu_set_target()
1530 mutex_unlock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1539 if (init->target != KVM_ARM_TARGET_GENERIC_V8 && in kvm_vcpu_set_target()
1540 init->target != kvm_target_cpu()) in kvm_vcpu_set_target()
1541 return -EINVAL; in kvm_vcpu_set_target()
1551 return -EINVAL; in kvm_vcpu_set_target()
1564 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid in kvm_arch_vcpu_ioctl_vcpu_init()
1568 if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { in kvm_arch_vcpu_ioctl_vcpu_init()
1569 init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); in kvm_arch_vcpu_ioctl_vcpu_init()
1583 * need to invalidate the I-cache though, as FWB does *not* in kvm_arch_vcpu_ioctl_vcpu_init()
1588 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
1594 vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); in kvm_arch_vcpu_ioctl_vcpu_init()
1597 * Handle the "start in power-off" case. in kvm_arch_vcpu_ioctl_vcpu_init()
1599 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
1604 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in kvm_arch_vcpu_ioctl_vcpu_init()
1606 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
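
kvm_arch_vcpu_ioctl_vcpu_init() treats KVM_ARM_VCPU_POWER_OFF as ephemeral and handles the "start in power-off" case. On the userspace side, a hedged sketch of the usual KVM_ARM_PREFERRED_TARGET + KVM_ARM_VCPU_INIT sequence (hypothetical helper):

/* Hypothetical sketch: initialise a vCPU, optionally starting it powered off. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vm_fd, int vcpu_fd, int start_powered_off)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	init.features[0] |= 1U << KVM_ARM_VCPU_PSCI_0_2;
	if (start_powered_off)
		init.features[0] |= 1U << KVM_ARM_VCPU_POWER_OFF;

	/* Feature bits must be consistent across vCPUs and repeated init calls. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
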
1614 int ret = -ENXIO; in kvm_arm_vcpu_set_attr()
1616 switch (attr->group) { in kvm_arm_vcpu_set_attr()
1628 int ret = -ENXIO; in kvm_arm_vcpu_get_attr()
1630 switch (attr->group) { in kvm_arm_vcpu_get_attr()
1642 int ret = -ENXIO; in kvm_arm_vcpu_has_attr()
1644 switch (attr->group) { in kvm_arm_vcpu_has_attr()
1667 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1668 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1669 return -EINVAL; in kvm_arm_vcpu_set_events()
1672 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1673 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1674 return -EINVAL; in kvm_arm_vcpu_set_events()
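
The reserved/pad checks above are why a caller must zero the whole structure first; for example, a hedged sketch that pends a virtual SError via KVM_SET_VCPU_EVENTS (hypothetical helper):

/* Hypothetical sketch: pend an SError; non-zero padding would return -EINVAL. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_serror(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.serror_pending = 1;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
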
1682 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1691 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1702 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1706 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1729 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1733 r = -EPERM; in kvm_arch_vcpu_ioctl()
1737 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1744 r = -E2BIG; in kvm_arch_vcpu_ioctl()
1747 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1751 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1758 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1765 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1775 return -EINVAL; in kvm_arch_vcpu_ioctl()
1778 return -EFAULT; in kvm_arch_vcpu_ioctl()
1786 return -EFAULT; in kvm_arch_vcpu_ioctl()
1794 return -ENOEXEC; in kvm_arch_vcpu_ioctl()
1797 return -EFAULT; in kvm_arch_vcpu_ioctl()
1802 r = -EINVAL; in kvm_arch_vcpu_ioctl()
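
The -E2BIG path of KVM_GET_REG_LIST above is how userspace discovers the register count: probe with n = 0, then retry with a large-enough buffer (the vCPU must already be initialized and finalized, per the -ENOEXEC/-EPERM checks). A hedged sketch (hypothetical helper):

/* Hypothetical sketch: two-step KVM_GET_REG_LIST. */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call fails with E2BIG but writes the real count back into n. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return NULL;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(list->reg[0]));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;	/* list->reg[0..n-1] now holds the register IDs */
}
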
1816 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { in kvm_vm_ioctl_set_device_addr()
1819 return -ENXIO; in kvm_vm_ioctl_set_device_addr()
1822 return -ENODEV; in kvm_vm_ioctl_set_device_addr()
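
KVM_ARM_SET_DEVICE_ADDR encodes the device ID and address type in the id field; newer VMMs typically use the device control API instead, but a hedged sketch of the legacy VGICv2 distributor placement (hypothetical helper) looks like:

/* Hypothetical sketch: place the VGICv2 distributor at a guest physical base. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vgic_v2_dist(int vm_fd, __u64 dist_base)
{
	struct kvm_arm_device_addr dev = {
		.id = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
		      KVM_VGIC_V2_ADDR_TYPE_DIST,
		.addr = dist_base,
	};

	return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev);
}
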
1828 switch (attr->group) { in kvm_vm_has_attr()
1832 return -ENXIO; in kvm_vm_has_attr()
1838 switch (attr->group) { in kvm_vm_set_attr()
1842 return -ENXIO; in kvm_vm_set_attr()
1848 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1856 return -ENXIO; in kvm_arch_vm_ioctl()
1857 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1859 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1866 return -EFAULT; in kvm_arch_vm_ioctl()
1875 return -EFAULT; in kvm_arch_vm_ioctl()
1883 return -EFAULT; in kvm_arch_vm_ioctl()
1890 return -EFAULT; in kvm_arch_vm_ioctl()
1895 return -EFAULT; in kvm_arch_vm_ioctl()
1901 return -EFAULT; in kvm_arch_vm_ioctl()
1909 return -EFAULT; in kvm_arch_vm_ioctl()
1913 return -EINVAL; in kvm_arch_vm_ioctl()
1922 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { in unlock_vcpus()
1924 mutex_unlock(&tmp_vcpu->mutex); in unlock_vcpus()
1930 lockdep_assert_held(&kvm->lock); in unlock_all_vcpus()
1932 unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); in unlock_all_vcpus()
1941 lockdep_assert_held(&kvm->lock); in lock_all_vcpus()
1945 * core KVM code tries to grab the vcpu->mutex. in lock_all_vcpus()
1947 * By grabbing the vcpu->mutex of all VCPUs we ensure that no in lock_all_vcpus()
1951 if (!mutex_trylock(&tmp_vcpu->mutex)) { in lock_all_vcpus()
1952 unlock_vcpus(kvm, c - 1); in lock_all_vcpus()
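
lock_all_vcpus() relies on trylock so it can back out cleanly if any vCPU mutex is contended. A self-contained userspace illustration of the same acquire-all-or-unwind pattern (pthread mutexes standing in for vcpu->mutex):

/* Illustration only: try-lock every mutex in order, unwind on failure. */
#include <pthread.h>

static int lock_all(pthread_mutex_t *locks, int n)
{
	for (int c = 0; c < n; c++) {
		if (pthread_mutex_trylock(&locks[c]) != 0) {
			while (--c >= 0)
				pthread_mutex_unlock(&locks[c]);
			return -1;
		}
	}
	return 0;
}
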
1962 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - in nvhe_percpu_size()
2017 * Calculate the raw per-cpu offset without a translation from the in cpu_prepare_hyp_mode()
2019 * so that we can use adr_l to access per-cpu variables in EL2. in cpu_prepare_hyp_mode()
2022 params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - in cpu_prepare_hyp_mode()
2025 params->mair_el2 = read_sysreg(mair_el1); in cpu_prepare_hyp_mode()
2040 params->tcr_el2 = tcr; in cpu_prepare_hyp_mode()
2042 params->pgd_pa = kvm_mmu_get_httbr(); in cpu_prepare_hyp_mode()
2044 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; in cpu_prepare_hyp_mode()
2046 params->hcr_el2 = HCR_HOST_NVHE_FLAGS; in cpu_prepare_hyp_mode()
2048 params->hcr_el2 |= HCR_E2H; in cpu_prepare_hyp_mode()
2049 params->vttbr = params->vtcr = 0; in cpu_prepare_hyp_mode()
2083 * Disabling SSBD on a non-VHE system requires us to enable SSBS in cpu_init_hyp_mode()
2102 * - If the CPU is affected by Spectre-v2, the hardening sequence is
2106 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2110 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2115 * VHE, as we don't have hypervisor-specific mappings. If the system
2121 void *vector = hyp_spectre_vector_selector[data->slot]; in cpu_set_hyp_vector()
2126 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); in cpu_set_hyp_vector()
2208 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should in hyp_init_cpu_pm_notifier()
2209 * re-enable hyp. in hyp_init_cpu_pm_notifier()
2216 * so that the hyp will be re-enabled in hyp_init_cpu_pm_notifier()
2263 * Copy the MPIDR <-> logical CPU ID mapping to hyp. in init_cpu_logical_map()
2309 * Register CPU lower-power notifier in init_subsystems()
2321 case -ENODEV: in init_subsystems()
2322 case -ENXIO: in init_subsystems()
2368 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; in teardown_hyp_mode()
2388 * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu(). in do_pkvm_init()
2401 * Although this is per-CPU, we make it global for simplicity, e.g., not in get_hyp_id_aa64pfr0_el1()
2404 * Unlike for non-protected VMs, userspace cannot override this for in get_hyp_id_aa64pfr0_el1()
2465 return -ENOMEM; in init_pkvm_host_sve_state()
2467 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page); in init_pkvm_host_sve_state()
2490 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; in finalize_init_hyp_mode()
2491 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = in finalize_init_hyp_mode()
2498 fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs; in finalize_init_hyp_mode()
2499 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state = in finalize_init_hyp_mode()
2512 hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2513 hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2514 hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2515 hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2516 hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2517 hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2518 hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2519 hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2520 hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2521 hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2525 /* Inits Hyp-mode on all online CPUs */
2530 int err = -ENOMEM; in init_hyp_mode()
2533 * The protected Hyp-mode cannot be initialized if the memory pool in init_hyp_mode()
2547 * Allocate stack pages for Hypervisor-mode in init_hyp_mode()
2554 err = -ENOMEM; in init_hyp_mode()
2562 * Allocate and initialize pages for Hypervisor-mode percpu regions. in init_hyp_mode()
2570 err = -ENOMEM; in init_hyp_mode()
2580 * Map the Hyp-code called directly from the host in init_hyp_mode()
2585 kvm_err("Cannot map world-switch code\n"); in init_hyp_mode()
2629 err = create_hyp_stack(__pa(stack_page), ¶ms->stack_hyp_va); in init_hyp_mode()
2641 params->stack_pa = __pa(stack_page); in init_hyp_mode()
2669 err = -ENODEV; in init_hyp_mode()
2701 data = rcu_dereference(kvm->arch.mpidr_data); in kvm_mpidr_to_vcpu()
2706 vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]); in kvm_mpidr_to_vcpu()
2739 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
2740 &irqfd->irq_entry); in kvm_arch_irq_bypass_add_producer()
2748 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
2749 &irqfd->irq_entry); in kvm_arch_irq_bypass_del_producer()
2757 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
2765 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()
2768 /* Initialize Hyp-mode and memory mappings on all CPUs */
2776 return -ENODEV; in kvm_arm_init()
2781 return -ENODEV; in kvm_arm_init()
2865 return -EINVAL; in early_kvm_mode_cfg()
2873 pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n"); in early_kvm_mode_cfg()
2896 return -EINVAL; in early_kvm_mode_cfg()
2898 early_param("kvm-arm.mode", early_kvm_mode_cfg);
2903 return -EINVAL; in early_kvm_wfx_trap_policy_cfg()
2915 return -EINVAL; in early_kvm_wfx_trap_policy_cfg()
2922 early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
2928 early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);
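
For reference, these early parameters are set on the kernel command line; assuming the values documented in Documentation/admin-guide/kernel-parameters.txt ("protected" for the mode, "trap"/"notrap" for the WFx policies), a boot line selecting protected mode and disabling WFI traps could look like:

    kvm-arm.mode=protected kvm-arm.wfi_trap_policy=notrap
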