/linux-6.12.1/tools/testing/selftests/kvm/x86_64/ |
D | state_test.c |
      39  GUEST_ASSERT(svm->vmcb_gpa);  in svm_l1_guest_code()
      46  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  in svm_l1_guest_code()
      50  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  in svm_l1_guest_code()
      62  GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);  in vmx_l2_guest_code()
      64  GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);  in vmx_l2_guest_code()
      65  GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));  in vmx_l2_guest_code()
      67  GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);  in vmx_l2_guest_code()
      68  GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));  in vmx_l2_guest_code()
      79  GUEST_ASSERT(vmx_pages->vmcs_gpa);  in vmx_l1_guest_code()
      80  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in vmx_l1_guest_code()
      [all …]
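
Most of the VMX entries in this directory (state_test.c here, and the vmx_*_test.c files further down) assert the same L1 skeleton: the vmx_pages allocation, the VMXON/VMCS setup and the VMLAUNCH must all succeed, and the first VM-exit must be the L2 VMCALL. A minimal sketch of that skeleton, assuming the KVM selftest headers ("vmx.h", "ucall_common.h"); l2_guest_code() and the stack size are illustrative, not taken from any one test:

#include "vmx.h"          /* struct vmx_pages, prepare_for_vmx_operation(), ... */
#include "ucall_common.h" /* GUEST_ASSERT(), GUEST_DONE() */

#define L2_GUEST_STACK_SIZE 64

/* Illustrative L2 payload: exit straight back to L1 with VMCALL. */
static void l2_guest_code(void)
{
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	/* Allocation and VMXON/VMPTRLD must all succeed before launching L2. */
	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* vmlaunch() returns 0 once the first VM-exit brings us back to L1;
	 * that exit is expected to be L2's VMCALL. */
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}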
|
D | hyperv_evmcs.c |
      95  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in guest_code()
      97  GUEST_ASSERT(load_evmcs(hv_pages));  in guest_code()
      98  GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);  in guest_code()
     101  GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);  in guest_code()
     107  GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);  in guest_code()
     109  GUEST_ASSERT(vmlaunch());  in guest_code()
     124  GUEST_ASSERT(!vmlaunch());  in guest_code()
     127  GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);  in guest_code()
     134  GUEST_ASSERT(!vmresume());  in guest_code()
     138  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in guest_code()
      [all …]
|
D | amx_test.c |
      89  GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));  in check_xtile_info()
      90  GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);  in check_xtile_info()
      93  GUEST_ASSERT(xtile.xsave_offset == 2816);  in check_xtile_info()
      95  GUEST_ASSERT(xtile.xsave_size == 8192);  in check_xtile_info()
      96  GUEST_ASSERT(sizeof(struct tile_data) >= xtile.xsave_size);  in check_xtile_info()
      98  GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_MAX_PALETTE_TABLES));  in check_xtile_info()
      99  GUEST_ASSERT(this_cpu_property(X86_PROPERTY_AMX_MAX_PALETTE_TABLES) >=  in check_xtile_info()
     102  GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS));  in check_xtile_info()
     104  GUEST_ASSERT(xtile.max_names == 8);  in check_xtile_info()
     106  GUEST_ASSERT(xtile.bytes_per_tile == 1024);  in check_xtile_info()
      [all …]
|
D | hyperv_ipi.c |
     105  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);  in sender_guest_code()
     106  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);  in sender_guest_code()
     112  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);  in sender_guest_code()
     113  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);  in sender_guest_code()
     125  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);  in sender_guest_code()
     126  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);  in sender_guest_code()
     134  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);  in sender_guest_code()
     135  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);  in sender_guest_code()
     147  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);  in sender_guest_code()
     148  GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);  in sender_guest_code()
      [all …]
|
D | hyperv_clock.c |
      57  GUEST_ASSERT(tsc_freq > 0);  in check_tsc_msr_rdtsc()
      68  GUEST_ASSERT(r2 > r1 && t2 > t1);  in check_tsc_msr_rdtsc()
      76  GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);  in check_tsc_msr_rdtsc()
      93  GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);  in check_tsc_msr_tsc_page()
      98  GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);  in check_tsc_msr_tsc_page()
     116  GUEST_ASSERT(tsc_page->tsc_sequence == 0);  in guest_main()
     117  GUEST_ASSERT(tsc_page->tsc_scale == 0);  in guest_main()
     118  GUEST_ASSERT(tsc_page->tsc_offset == 0);  in guest_main()
     124  GUEST_ASSERT(tsc_page->tsc_sequence != 0);  in guest_main()
     137  GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);  in guest_main()
      [all …]
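
The check_tsc_msr_tsc_page() and get_tscpage_ts() references above follow the Hyper-V reference-TSC page protocol: reads are retried while tsc_sequence changes, and reference time (100ns units) is ((tsc * tsc_scale) >> 64) + tsc_offset. A hedged, self-contained sketch of that read; the struct layout only shows the fields named in the listing, and the helper name and retry loop are illustrative, not the test's actual code:

#include <stdint.h>

/* Reference-TSC page as published by Hyper-V; only the fields used in the
 * listing (tsc_sequence, tsc_scale, tsc_offset) are shown. */
struct hv_ref_tsc_page_sketch {
	volatile uint32_t tsc_sequence;
	uint32_t reserved;
	volatile uint64_t tsc_scale;
	volatile int64_t tsc_offset;
};

static inline uint64_t rdtsc_sketch(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Convert the current TSC to reference time, retrying if the hypervisor
 * updates the page mid-read. */
static uint64_t get_tscpage_ts_sketch(const struct hv_ref_tsc_page_sketch *p)
{
	uint64_t tsc, scale;
	int64_t offset;
	uint32_t seq;

	do {
		seq = p->tsc_sequence;
		tsc = rdtsc_sketch();
		scale = p->tsc_scale;
		offset = p->tsc_offset;
	} while (p->tsc_sequence != seq);

	/* 64.64 fixed-point multiply, then the signed offset. */
	return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}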
|
D | userspace_msr_exit_test.c |
     220  GUEST_ASSERT(data == 0);  in guest_code_filter_allow()
     221  GUEST_ASSERT(guest_exception_count == 0);  in guest_code_filter_allow()
     224  GUEST_ASSERT(guest_exception_count == 0);  in guest_code_filter_allow()
     227  GUEST_ASSERT(guest_exception_count == 1);  in guest_code_filter_allow()
     236  GUEST_ASSERT(guest_exception_count == 1);  in guest_code_filter_allow()
     239  GUEST_ASSERT(guest_exception_count == 1);  in guest_code_filter_allow()
     242  GUEST_ASSERT(guest_exception_count == 0);  in guest_code_filter_allow()
     251  GUEST_ASSERT(guest_exception_count == 0);  in guest_code_filter_allow()
     254  GUEST_ASSERT(data == 2);  in guest_code_filter_allow()
     255  GUEST_ASSERT(guest_exception_count == 0);  in guest_code_filter_allow()
      [all …]
|
D | vmx_preemption_timer_test.c |
      75  GUEST_ASSERT(vmx_pages->vmcs_gpa);  in l1_guest_code()
      76  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in l1_guest_code()
      77  GUEST_ASSERT(load_vmcs(vmx_pages));  in l1_guest_code()
      78  GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);  in l1_guest_code()
      96  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
      97  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in l1_guest_code()
     103  GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,  in l1_guest_code()
     107  GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,  in l1_guest_code()
     116  GUEST_ASSERT(!vmresume());  in l1_guest_code()
     124  GUEST_ASSERT(l2_save_restore_done);  in l1_guest_code()
      [all …]
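
The truncated matches at lines 103 and 107 above belong to the re-arm step between the two L2 entries: OR the preemption-timer enable bit into the pin-based controls, program the countdown, then vmresume(). A sketch of just that step, assuming the selftest "vmx.h" definitions; the actual arguments in the test are not shown in the listing, so the enable bit and tick count below are assumptions:

/* Sketch only: re-arm the VMX preemption timer before resuming L2.  The
 * vmwrite()/vmreadz()/vmresume() helpers and the VMCS field names are the
 * selftest ones seen above; PIN_BASED_VMX_PREEMPTION_TIMER and the tick
 * count are assumptions. */
static void arm_preemption_timer_sketch(uint32_t ticks)
{
	uint32_t pin_ctls = vmreadz(PIN_BASED_VM_EXEC_CONTROL);

	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      pin_ctls | PIN_BASED_VMX_PREEMPTION_TIMER));
	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE, ticks));

	/* As with vmlaunch(), a zero return means the VMRESUME succeeded and
	 * we are back in L1 after the next VM-exit. */
	GUEST_ASSERT(!vmresume());
}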
|
D | vmx_tsc_adjust_test.c |
      61  GUEST_ASSERT(adjust <= max);  in check_ia32_tsc_adjust()
      82  GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);  in l1_guest_code()
      86  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in l1_guest_code()
      87  GUEST_ASSERT(load_vmcs(vmx_pages));  in l1_guest_code()
     100  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
     101  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==  in l1_guest_code()
     106  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
     107  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in l1_guest_code()
|
D | hyperv_svm_test.c |
      82  GUEST_ASSERT(svm->vmcb_gpa);  in guest_code()
      97  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  in guest_code()
     105  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);  in guest_code()
     111  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);  in guest_code()
     120  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  in guest_code()
     126  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);  in guest_code()
     135  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);  in guest_code()
     140  GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);  in guest_code()
     141  GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);  in guest_code()
     144  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  in guest_code()
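
The SVM-flavoured entries (here and the svm_l1_guest_code() matches in state_test.c above) assert an analogous L1 skeleton: a valid VMCB GPA, an entry into L2, and a VMMCALL intercept as the exit code. A minimal sketch, assuming the selftest "svm_util.h" helpers (generic_svm_setup(), run_guest()); the L2 payload and stack size are illustrative:

#include "svm_util.h"     /* struct svm_test_data, generic_svm_setup(), run_guest() */
#include "ucall_common.h" /* GUEST_ASSERT(), GUEST_DONE() */

#define L2_GUEST_STACK_SIZE 64

/* Illustrative L2 payload: immediately exit back to L1 with VMMCALL. */
static void l2_guest_code(void)
{
	__asm__ __volatile__("vmmcall");
}

static void l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	GUEST_ASSERT(svm->vmcb_gpa);

	/* Point the VMCB at the L2 entry point and stack, then enter L2. */
	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	run_guest(vmcb, svm->vmcb_gpa);

	/* L2's VMMCALL is intercepted and shows up as the exit code. */
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);

	GUEST_DONE();
}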
|
D | vmx_apic_access_test.c |
      43  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in l1_guest_code()
      44  GUEST_ASSERT(load_vmcs(vmx_pages));  in l1_guest_code()
      59  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
      60  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in l1_guest_code()
      66  GUEST_ASSERT(!vmresume());  in l1_guest_code()
      67  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in l1_guest_code()
|
D | triple_fault_event_test.c |
      30  GUEST_ASSERT(vmx->vmcs_gpa);  in l1_guest_code_vmx()
      31  GUEST_ASSERT(prepare_for_vmx_operation(vmx));  in l1_guest_code_vmx()
      32  GUEST_ASSERT(load_vmcs(vmx));  in l1_guest_code_vmx()
      37  GUEST_ASSERT(!vmlaunch());  in l1_guest_code_vmx()
      39  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);  in l1_guest_code_vmx()
      56  GUEST_ASSERT(0);  in l1_guest_code_svm()
|
D | vmx_invalid_nested_guest_state.c |
      31  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in l1_guest_code()
      32  GUEST_ASSERT(load_vmcs(vmx_pages));  in l1_guest_code()
      43  GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||  in l1_guest_code()
      46  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
      49  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);  in l1_guest_code()
|
D | vmx_close_while_nested_test.c |
      37  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in l1_guest_code()
      38  GUEST_ASSERT(load_vmcs(vmx_pages));  in l1_guest_code()
      44  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
      45  GUEST_ASSERT(0);  in l1_guest_code()
|
D | fix_hypercall_test.c |
      59  GUEST_ASSERT(0);  in guest_main()
      75  GUEST_ASSERT(ret == (uint64_t)-EFAULT);  in guest_main()
      76  GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,  in guest_main()
      79  GUEST_ASSERT(!ret);  in guest_main()
      80  GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,  in guest_main()
|
D | vmx_dirty_log_test.c |
      62  GUEST_ASSERT(vmx->vmcs_gpa);  in l1_guest_code()
      63  GUEST_ASSERT(prepare_for_vmx_operation(vmx));  in l1_guest_code()
      64  GUEST_ASSERT(load_vmcs(vmx));  in l1_guest_code()
      74  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
      76  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in l1_guest_code()
|
D | svm_int_ctl_test.c |
      50  GUEST_ASSERT(vintr_irq_called);  in l2_guest_code()
      51  GUEST_ASSERT(intr_irq_called);  in l2_guest_code()
      81  GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  in l1_guest_code()
|
D | vmx_nested_tsc_scaling_test.c |
      90  GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));  in l1_guest_code()
      91  GUEST_ASSERT(load_vmcs(vmx_pages));  in l1_guest_code()
     110  GUEST_ASSERT(!vmlaunch());  in l1_guest_code()
     111  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in l1_guest_code()
|
D | sev_smoke_test.c |
      22  GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);  in guest_sev_es_code()
      23  GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);  in guest_sev_es_code()
      35  GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));  in guest_sev_code()
      36  GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);  in guest_sev_code()
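
The sev_smoke_test.c checks reduce to one idea: the guest reads MSR_AMD64_SEV and verifies the enable bits matching the VM type it was launched as. A hedged fragment combining the two variants shown above (plain SEV vs. SEV-ES); the helper name and the `es` parameter are illustrative, and rdmsr()/this_cpu_has() are the selftest helpers seen in the listing:

/* Fragment only; the MSR and feature names are those that appear above. */
static void guest_check_sev_sketch(bool es)
{
	uint64_t sev_status = rdmsr(MSR_AMD64_SEV);

	/* CPUID must advertise SEV and the status MSR must say it is active. */
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
	GUEST_ASSERT(sev_status & MSR_AMD64_SEV_ENABLED);

	/* For an SEV-ES guest the ES bit must be set as well. */
	if (es)
		GUEST_ASSERT(sev_status & MSR_AMD64_SEV_ES_ENABLED);
}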
|
/linux-6.12.1/tools/testing/selftests/kvm/lib/aarch64/ |
D | gic.c |
      40  GUEST_ASSERT(gic_ops);  in gic_dist_init()
      55  GUEST_ASSERT(type < GIC_TYPE_MAX);  in gic_init()
      56  GUEST_ASSERT(nr_cpus);  in gic_init()
      64  GUEST_ASSERT(gic_common_ops);  in gic_irq_enable()
      70  GUEST_ASSERT(gic_common_ops);  in gic_irq_disable()
      79  GUEST_ASSERT(gic_common_ops);  in gic_get_and_ack_irq()
      89  GUEST_ASSERT(gic_common_ops);  in gic_set_eoi()
      95  GUEST_ASSERT(gic_common_ops);  in gic_set_dir()
     101  GUEST_ASSERT(gic_common_ops);  in gic_set_eoi_split()
     107  GUEST_ASSERT(gic_common_ops);  in gic_set_priority_mask()
      [all …]
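
Almost every gic.c assertion above is the same guard: the library holds a single gic_common_ops pointer that is only set once gic_init() has run, and each public gic_* entry point asserts it is non-NULL before dispatching through it. A hedged sketch of that shape; the ops struct and field names here are illustrative, not the library's actual definitions:

#include "ucall_common.h" /* GUEST_ASSERT() */

/* Illustrative ops table; the real struct behind gic_common_ops has more hooks. */
struct gic_ops_sketch {
	void (*irq_enable)(unsigned int intid);
	void (*irq_disable)(unsigned int intid);
};

/* Filled in once by the init path, NULL until then. */
static const struct gic_ops_sketch *gic_ops_sketch;

void gic_irq_enable_sketch(unsigned int intid)
{
	/* Catch callers that forgot to run the init path first. */
	GUEST_ASSERT(gic_ops_sketch);
	gic_ops_sketch->irq_enable(intid);
}

void gic_irq_disable_sketch(unsigned int intid)
{
	GUEST_ASSERT(gic_ops_sketch);
	gic_ops_sketch->irq_disable(intid);
}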
|
D | gic_v3.c |
      48  GUEST_ASSERT(count--);  in gicv3_gicd_wait_for_rwp()
      64  GUEST_ASSERT(count--);  in gicv3_gicr_wait_for_rwp()
      89  GUEST_ASSERT(0);  in get_intid_range()
     177  GUEST_ASSERT(bits_per_field <= reg_bits);  in gicv3_access_reg()
     178  GUEST_ASSERT(!write || *val < (1U << bits_per_field));  in gicv3_access_reg()
     183  GUEST_ASSERT(reg_bits == 32);  in gicv3_access_reg()
     228  GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);  in gicv3_irq_set_config()
     291  GUEST_ASSERT(count--);  in gicv3_enable_redist()
     302  GUEST_ASSERT(cpu < gicv3_data.nr_cpus);  in gicv3_cpu_init()
     369  GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);  in gicv3_init()
|
/linux-6.12.1/tools/testing/selftests/kvm/lib/x86_64/ |
D | memstress.c |
      38  GUEST_ASSERT(vmx->vmcs_gpa);  in memstress_l1_guest_code()
      39  GUEST_ASSERT(prepare_for_vmx_operation(vmx));  in memstress_l1_guest_code()
      40  GUEST_ASSERT(load_vmcs(vmx));  in memstress_l1_guest_code()
      41  GUEST_ASSERT(ept_1g_pages_supported());  in memstress_l1_guest_code()
      47  GUEST_ASSERT(!vmlaunch());  in memstress_l1_guest_code()
      48  GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);  in memstress_l1_guest_code()
|
/linux-6.12.1/tools/testing/selftests/kvm/ |
D | steal_time.c |
      37  GUEST_ASSERT(!(READ_ONCE(st->version) & 1));  in check_status()
      59  GUEST_ASSERT(version < READ_ONCE(st->version));  in guest_code()
     233  GUEST_ASSERT(ret.value == 0 && ret.error == 0);  in sta_set_shmem()
     238  GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));  in check_status()
     239  GUEST_ASSERT(READ_ONCE(st->flags) == 0);  in check_status()
     240  GUEST_ASSERT(READ_ONCE(st->preempted) == 0);  in check_status()
     251  GUEST_ASSERT(probe && out_val == 1);  in guest_code()
     263  GUEST_ASSERT(sequence < READ_ONCE(st->sequence));  in guest_code()
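
The steal_time.c assertions rely on the steal-time record being published seqcount-style: the host makes the version (or sequence) odd while it updates the record and even again once it is consistent, which is why check_status() asserts the low bit is clear and guest_code() asserts the counter only grows. A hedged sketch of a stable read under that convention; the struct below is a stand-in, not the UAPI kvm_steal_time definition, and READ_ONCE is approximated with a volatile cast:

#include <stdint.h>

/* Stand-in for the guest/host shared steal-time record. */
struct steal_time_sketch {
	uint32_t version;
	uint64_t steal;
};

#define READ_ONCE_SKETCH(x) (*(volatile __typeof__(x) *)&(x))

/* Read a consistent snapshot of the accumulated steal time: spin while an
 * update is in progress (odd version) and retry if the version moved
 * between the two reads. */
static uint64_t read_steal_sketch(struct steal_time_sketch *st)
{
	uint64_t steal;
	uint32_t v;

	do {
		do {
			v = READ_ONCE_SKETCH(st->version);
		} while (v & 1);
		steal = READ_ONCE_SKETCH(st->steal);
	} while (READ_ONCE_SKETCH(st->version) != v);

	return steal;
}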
|
/linux-6.12.1/tools/testing/selftests/kvm/aarch64/ |
D | psci_test.c |
     120  GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));  in guest_test_cpu_on()
     125  GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||  in guest_test_cpu_on()
     162  GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));  in guest_test_system_suspend()
     163  GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));  in guest_test_system_suspend()
|
D | vgic_irq.c |
     170  GUEST_ASSERT(gic_irq_get_active(intid));  in guest_irq_generic_handler()
     173  GUEST_ASSERT(!gic_irq_get_pending(intid));  in guest_irq_generic_handler()
     178  GUEST_ASSERT(intid < MAX_SPI);  in guest_irq_generic_handler()
     187  GUEST_ASSERT(!gic_irq_get_active(intid));  in guest_irq_generic_handler()
     188  GUEST_ASSERT(!gic_irq_get_pending(intid));  in guest_irq_generic_handler()
     208  GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
     305  GUEST_ASSERT(prio >= 0);  in guest_restore_active()
     333  GUEST_ASSERT(!gic_irq_get_active(i + first_intid));  in guest_restore_active()
     372  GUEST_ASSERT(prio >= 0);  in test_inject_preemption()
     401  GUEST_ASSERT(!gic_irq_get_active(i + first_intid));  in test_inject_preemption()
|
/linux-6.12.1/tools/testing/selftests/kvm/riscv/ |
D | sbi_pmu_test.c |
     151  GUEST_ASSERT(overflown_mask & 0x01);  in guest_irq_handler()
     169  GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);  in get_counter_index()
     170  GUEST_ASSERT(BIT(ret.value) & counter_mask_available);  in get_counter_index()
     205  GUEST_ASSERT(counter_mask_available > 0);  in update_counter_info()
     213  GUEST_ASSERT(ret.error == 0);  in read_fw_counter()
     237  GUEST_ASSERT(probe && out_val == 1);  in verify_sbi_requirement_assert()
     254  GUEST_ASSERT(ret.value == 0 && ret.error == 0);  in snapshot_set_shmem()
     422  GUEST_ASSERT(probe && out_val == 1);  in test_pmu_basic_sanity()
     443  GUEST_ASSERT(illegal_handler_invoked);  in test_pmu_basic_sanity()
|