Lines matching +full:0 +full:x8000000a in arch/x86/kvm/svm/svm.c
133 * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
135 * intercept the MSR 0x832, and do not set up direct_access_msr.
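The two comment lines above (133/135) refer to the x2APIC LVTT register: bits 18:17 select the timer mode, and a set bit 18 means TSC-deadline mode, which AMD's x2AVIC does not virtualize, so KVM keeps MSR 0x832 intercepted instead of granting direct access. A minimal sketch of the mode check; the macro values follow arch/x86/include/asm/apicdef.h, while the helper itself is hypothetical:

    /* LVTT timer-mode field: bits 18:17 of APIC LVTT (MSR 0x832). */
    #define APIC_LVT_TIMER_ONESHOT     (0 << 17)
    #define APIC_LVT_TIMER_PERIODIC    (1 << 17)
    #define APIC_LVT_TIMER_TSCDEADLINE (2 << 17) /* bit 18 set => TSC-deadline */

    static bool lvtt_is_tsc_deadline(u32 lvtt)
    {
            return (lvtt & (3u << 17)) == APIC_LVT_TIMER_TSCDEADLINE;
    }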
260 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
271 for (i = 0; i < NUM_MSR_MAPS; i++) { in svm_msrpm_offset()
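Lines 260/271 are from the MSR permission map lookup: the three msrpm_ranges cover the low MSRs, the 0xc0000000 syscall/EFER block, and the 0xc0010000 AMD block. A hedged reconstruction of how svm_msrpm_offset() turns an MSR number into a u32 offset into the bitmap (constants assumed to match upstream):

    #define MSRS_RANGE_SIZE 2048                       /* bitmap bytes per range */
    #define MSRS_IN_RANGE   (MSRS_RANGE_SIZE * 8 / 2)  /* 2 bits per MSR */
    #define NUM_MSR_MAPS    ARRAY_SIZE(msrpm_ranges)

    static u32 svm_msrpm_offset(u32 msr)
    {
            u32 offset;
            int i;

            for (i = 0; i < NUM_MSR_MAPS; i++) {
                    if (msr < msrpm_ranges[i] ||
                        msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                            continue;

                    offset  = (msr - msrpm_ranges[i]) / 4; /* 4 MSRs per byte */
                    offset += (i * MSRS_RANGE_SIZE);       /* add range base  */

                    return offset / 4;  /* byte offset -> u32 offset */
            }

            return MSR_INVALID;         /* MSR not covered by any range */
    }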
347 return 0; in svm_set_efer()
353 u32 ret = 0; in svm_get_interrupt_shadow()
364 if (mask == 0) in svm_set_interrupt_shadow()
384 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
394 return 0; in __svm_skip_emulated_instruction()
404 svm_set_interrupt_shadow(vcpu, 0); in __svm_skip_emulated_instruction()
456 return 0; in svm_update_soft_interrupt_rip()
472 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0) in svm_inject_exception()
512 * all osvw.status bits inside that length, including bit 0 (which is in svm_init_osvw()
514 * osvw_len is 0 then osvw_status[0] carries no information. We need to in svm_init_osvw()
518 if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) in svm_init_osvw()
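The osvw fragments (lines 512-518) concern AMD's OS Visible Workaround MSRs: OSVW_ID_LENGTH advertises how many status bits are defined, a status bit at or beyond that length carries no information, and on family 0x10 a zero length is treated as "assume erratum 298 (bit 0) is present". A hedged sketch of a query helper, assuming the classic rdmsrl_safe() accessor; the function itself is hypothetical, the MSR names are the real msr-index.h ones:

    /* Returns 1 if OSVW erratum 'bit' is flagged present, 0 if flagged
     * absent, and -1 if the bit is beyond the advertised length (in
     * which case the status bit carries no information). */
    static int osvw_erratum_present(unsigned int bit)
    {
            u64 len, status;

            if (rdmsrl_safe(MSR_AMD64_OSVW_ID_LENGTH, &len) ||
                rdmsrl_safe(MSR_AMD64_OSVW_STATUS, &status))
                    return -1;

            if (bit >= len)
                    return -1;
            return !!(status & BIT_ULL(bit));
    }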
562 return 0; in svm_check_processor_compat()
583 wrmsrl(MSR_VM_HSAVE_PA, 0); in kvm_cpu_svm_disable()
653 uint64_t len, status = 0; in svm_enable_virtualization_cpu()
662 osvw_status = osvw_len = 0; in svm_enable_virtualization_cpu()
670 osvw_status = osvw_len = 0; in svm_enable_virtualization_cpu()
688 return 0; in svm_enable_virtualization_cpu()
700 sd->save_area_pa = 0; in svm_cpu_uninit()
710 memset(sd, 0, sizeof(struct svm_cpu_data)); in svm_cpu_init()
721 return 0; in svm_cpu_init()
757 vmcb->control.intercepts[INTERCEPT_DR] = 0; in clr_dr_intercepts()
766 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) in direct_access_msr_slot()
819 bit_write = 2 * (msr & 0x0f) + 1; in msr_write_intercepted()
843 read = 0; in set_msr_interception_bitmap()
846 write = 0; in set_msr_interception_bitmap()
849 bit_read = 2 * (msr & 0x0f); in set_msr_interception_bitmap()
850 bit_write = 2 * (msr & 0x0f) + 1; in set_msr_interception_bitmap()
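Lines 819 and 849-850 encode the MSRPM layout: each MSR owns an adjacent bit pair, the even bit intercepting reads and the odd bit intercepting writes, so 16 MSRs fit per u32 chunk and the low four bits of the MSR number select the pair. A small hedged helper making that explicit (the function name is made up):

    static void msrpm_bit_pair(u32 msr, u32 *bit_read, u32 *bit_write)
    {
            /* 2 bits per MSR, 16 MSRs per u32 chunk. */
            *bit_read  = 2 * (msr & 0x0f);      /* even bit: read intercept  */
            *bit_write = 2 * (msr & 0x0f) + 1;  /* odd bit:  write intercept */
    }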
881 memset(msrpm, 0xff, PAGE_SIZE * (1 << order)); in svm_vcpu_alloc_msrpm()
890 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in svm_vcpu_init_msrpm()
907 for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) { in svm_set_x2apic_msr_interception()
911 (index > APIC_BASE_MSR + 0xff)) in svm_set_x2apic_msr_interception()
935 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in svm_msr_filter_changed()
948 for (i = 0; i < MSRPM_OFFSETS; ++i) { in add_msr_offset()
975 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); in init_msrpm_offsets()
977 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in init_msrpm_offsets()
1023 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
1024 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
1025 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
1026 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
1129 iopm_base = 0; in svm_hardware_unsetup()
1134 seg->selector = 0; in init_seg()
1137 seg->limit = 0xffff; in init_seg()
1138 seg->base = 0; in init_seg()
1143 seg->selector = 0; in init_sys_seg()
1145 seg->limit = 0xffff; in init_sys_seg()
1146 seg->base = 0; in init_sys_seg()
1218 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); in init_vmcb_after_set_cpuid()
1219 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); in init_vmcb_after_set_cpuid()
1314 save->cs.selector = 0xf000; in init_vmcb()
1315 save->cs.base = 0xffff0000; in init_vmcb()
1319 save->cs.limit = 0xffff; in init_vmcb()
1321 save->gdtr.base = 0; in init_vmcb()
1322 save->gdtr.limit = 0xffff; in init_vmcb()
1323 save->idtr.base = 0; in init_vmcb()
1324 save->idtr.limit = 0xffff; in init_vmcb()
1337 save->cr3 = 0; in init_vmcb()
1339 svm->current_vmcb->asid_generation = 0; in init_vmcb()
1340 svm->asid = 0; in init_vmcb()
1393 vcpu->arch.microcode_version = 0x01000065; in __svm_vcpu_reset()
1407 svm->spec_ctrl = 0; in svm_vcpu_reset()
1408 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1432 BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); in svm_vcpu_create()
1471 return 0; in svm_vcpu_create()
1538 if (likely(tsc_aux_uret_slot >= 0) && in svm_prepare_switch_to_guest()
1648 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as in svm_set_vintr()
1659 control->int_vector = 0x0; in svm_set_vintr()
1662 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); in svm_set_vintr()
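Lines 1648-1662 are from svm_set_vintr(): KVM requests an interrupt window by injecting a dummy virtual interrupt, vector 0 at the highest priority (0xf), so the CPU takes a #VINTR exit as soon as the guest can accept interrupts. A hedged reconstruction of the surrounding int_ctl setup:

    control->int_vector = 0x0;
    control->int_ctl &= ~V_INTR_PRIO_MASK;
    control->int_ctl |= V_IRQ_MASK |
            ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);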
1737 var->g = s->limit > 0xfffff; in svm_get_segment()
1751 var->type |= 0x2; in svm_get_segment()
1765 var->type |= 0x1; in svm_get_segment()
1775 var->db = 0; in svm_get_segment()
2016 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
2080 if (rc > 0 && error_code & PFERR_GUEST_RMP_MASK) in npf_interception()
2113 return 0; in db_interception()
2127 return 0; in bp_interception()
2137 kvm_queue_exception_e(vcpu, AC_VECTOR, 0); in ac_interception()
2156 if (value != 0xb600000000010015ULL) in is_erratum_383()
2160 for (i = 0; i < 6; ++i) in is_erratum_383()
2161 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); in is_erratum_383()
2229 return 0; in shutdown_interception()
2240 string = (io_info & SVM_IOIO_STR_MASK) != 0; in io_interception()
2241 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; in io_interception()
2249 return kvm_emulate_instruction(vcpu, 0); in io_interception()
2286 kvm_inject_gp(vcpu, 0); in vmload_vmsave_interception()
2296 svm->sysenter_eip_hi = 0; in vmload_vmsave_interception()
2297 svm->sysenter_esp_hi = 0; in vmload_vmsave_interception()
2337 if (ctxt->b != 0x1 || ctxt->opcode_len != 2) in svm_instr_opcode()
2341 case 0xd8: /* VMRUN */ in svm_instr_opcode()
2343 case 0xda: /* VMLOAD */ in svm_instr_opcode()
2345 case 0xdb: /* VMSAVE */ in svm_instr_opcode()
2370 /* Returns '1' or -errno on failure, '0' on success. */ in emulate_svm_instr()
2398 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) in gp_interception()
2522 u32 error_code = 0; in task_switch_interception()
2565 return 0; in task_switch_interception()
2606 return kvm_emulate_instruction(vcpu, 0); in invlpg_interception()
2614 return kvm_emulate_instruction(vcpu, 0); in emulate_on_interception()
2656 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2665 err = 0; in cr_interception()
2671 case 0: in cr_interception()
2694 case 0: in cr_interception()
2725 int ret = 0; in cr_trap()
2731 case 0: in cr_trap()
2759 int err = 0; in dr_interception()
2768 if (vcpu->guest_debug == 0) { in dr_interception()
2806 return 0; in cr8_write_interception()
2830 *data = 0; in svm_get_feature_msr()
2841 return 0; in svm_get_feature_msr()
2858 msr_info->data = 0; in svm_get_msr()
2859 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in svm_get_msr()
2953 if (family < 0 || model < 0) in svm_get_msr()
2956 msr_info->data = 0; in svm_get_msr()
2958 if (family == 0x15 && in svm_get_msr()
2959 (model >= 0x2 && model < 0x20)) in svm_get_msr()
2960 msr_info->data = 0x1E; in svm_get_msr()
2969 return 0; in svm_get_msr()
3008 return 0; in svm_set_vm_cr()
3014 int ret = 0; in svm_set_msr()
3020 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in svm_set_msr()
3033 * Due to a bug in QEMU 6.2.0, it tries to set in svm_set_msr()
3034 * this MSR to 0 if TSC scaling is not enabled. in svm_set_msr()
3037 if (data != 0 && data != svm->tsc_ratio_msr) in svm_set_msr()
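Lines 3033-3037 belong to the MSR_AMD64_TSC_RATIO case in svm_set_msr(): when TSC scaling is unavailable the MSR must stay at its default, but QEMU 6.2.0 writes 0 to it anyway, so both 0 and the current value are tolerated. A hedged reconstruction of the guard:

    case MSR_AMD64_TSC_RATIO:
            if (!svm->tsc_scaling_enabled) {
                    if (!msr->host_initiated)
                            return 1;
                    /* Tolerate QEMU 6.2.0 writing 0 here when TSC
                     * scaling is not enabled; reject anything else. */
                    if (data != 0 && data != svm->tsc_ratio_msr)
                            return 1;
                    break;
            }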
3135 svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3139 svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3259 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; in pause_interception()
3380 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); in dump_vmcb()
3382 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); in dump_vmcb()
3496 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code); in svm_handle_invalid_exit()
3501 vcpu->run->internal.data[0] = exit_code; in svm_handle_invalid_exit()
3503 return 0; in svm_handle_invalid_exit()
3540 *error_code = 0; in svm_get_exit_info()
3577 return 0; in svm_handle_exit()
3597 svm->current_vmcb->asid_generation = 0; in pre_svm_run()
3812 return 0; in svm_nmi_allowed()
3854 return 0; in svm_interrupt_allowed()
3871 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes in svm_enable_irq_window()
3912 * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately in svm_enable_nmi_window()
4148 control->event_inj = 0; in svm_cancel_injection()
4276 vcpu->arch.regs_dirty = 0; in svm_vcpu_run()
4291 svm->next_rip = 0; in svm_vcpu_run()
4300 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
4359 hypercall[0] = 0x0f; in svm_patch_hypercall()
4360 hypercall[1] = 0x01; in svm_patch_hypercall()
4361 hypercall[2] = 0xd9; in svm_patch_hypercall()
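Lines 4359-4361 are svm_patch_hypercall() in full: the three bytes are the instruction encoding of VMMCALL (0f 01 d9), which KVM patches over the generic hypercall so guests execute the AMD instruction rather than Intel's VMCALL. For reference, a hedged reconstruction of the whole helper:

    static void svm_patch_hypercall(struct kvm_vcpu *vcpu,
                                    unsigned char *hypercall)
    {
            /* 0f 01 d9 encodes VMMCALL. */
            hypercall[0] = 0x0f;
            hypercall[1] = 0x01;
            hypercall[2] = 0xd9;
    }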
4427 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, in svm_vcpu_after_set_cpuid()
4431 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0, in svm_vcpu_after_set_cpuid()
4550 cr0 &= 0xfUL; in svm_check_intercept()
4551 val &= 0xfUL; in svm_check_intercept()
4570 vmcb->control.exit_info_1 = 0; in svm_check_intercept()
4586 exit_info = ((info->src_val & 0xffff) << 16) | in svm_check_intercept()
4590 exit_info = (info->dst_val & 0xffff) << 16; in svm_check_intercept()
4638 vcpu->arch.mcg_cap &= 0x1ff; in svm_setup_mce()
4660 return 0; in svm_smi_allowed()
4676 return 0; in svm_enter_smm()
4703 * by 0x400 (matches the offset of 'struct vmcb_save_area' in svm_enter_smm()
4712 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); in svm_enter_smm()
4714 svm_copy_vmrun_state(map_save.hva + 0x400, in svm_enter_smm()
4718 return 0; in svm_enter_smm()
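Lines 4703-4718 are from svm_enter_smm(): the L1 host-save page is laid out like a VMCB, so the save area sits 0x400 bytes in (the size of the control area), which the BUILD_BUG_ON asserts before svm_copy_vmrun_state() writes there. A hedged reconstruction of that step:

    BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
    svm_copy_vmrun_state(map_save.hva + 0x400,
                         &svm->vmcb01.ptr->save);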
4731 return 0; in svm_leave_smm()
4735 return 0; in svm_leave_smm()
4758 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
4874 * be '0'. This happens because microcode reads CS:RIP using a _data_ in svm_check_emulate_instruction()
4875 * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode in svm_check_emulate_instruction()
4880 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the in svm_check_emulate_instruction()
4890 * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot in svm_check_emulate_instruction()
4920 kvm_inject_gp(vcpu, 0); in svm_check_emulate_instruction()
4930 * if the fault is at CPL=0, it's the lesser of all evils. Exiting to in svm_check_emulate_instruction()
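Lines 4874-4930 discuss AMD erratum 1096 in svm_check_emulate_instruction(): with DecodeAssists, microcode fetches CS:RIP via a CPL0 data load, so if the guest has SMAP enabled and the code page is user-accessible, the fetch faults and zero instruction bytes are reported. KVM can attribute an empty buffer to the erratum only when SMAP is on and either SMEP is off or the guest was at CPL 3. A hedged sketch of that predicate (helper names follow kvm/x86; the exact upstream control flow differs):

    bool smep    = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
    bool smap    = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
    bool is_user = svm_get_cpl(vcpu) == 3;

    if (smap && (!smep || is_user)) {
            /* Likely erratum 1096: the zero-length insn buffer is
             * garbage; don't emulate it. Inject #GP for a CPL3 fault;
             * a CPL0 fault leaves no good option (see line 4930). */
            kvm_inject_gp(vcpu, 0);
    }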
4988 return 0; in svm_vm_init()
5152 if (cpuid_eax(0x80000000) < 0x8000001f) in svm_adjust_mmio_mask()
5160 enc_bit = cpuid_ebx(0x8000001f) & 0x3f; in svm_adjust_mmio_mask()
5176 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; in svm_adjust_mmio_mask()
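Lines 5152-5176 are svm_adjust_mmio_mask(): the SEV encryption ("C") bit position comes from CPUID 0x8000001F EBX[5:0], and the MMIO SPTE mask is built from a reserved physical-address bit above it, but only if such a bit exists below bit 52; otherwise no mask can be used. A hedged reconstruction of the core computation:

    enc_bit  = cpuid_ebx(0x8000001f) & 0x3f;
    mask_bit = boot_cpu_data.x86_phys_bits;

    /* Don't let the mask bit collide with the C-bit. */
    if (enc_bit == mask_bit)
            mask_bit++;

    /* Bits [mask_bit, 51] are reserved only if mask_bit < 52; combined
     * with the present bit they force a #PF with PFERR.RSVD = 1. */
    mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;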
5185 kvm_caps.supported_perf_cap = 0; in svm_set_cpu_caps()
5186 kvm_caps.supported_xss = 0; in svm_set_cpu_caps()
5188 /* CPUID 0x80000001 and 0x8000000A (SVM features) */ in svm_set_cpu_caps()
5230 /* CPUID 0x80000008 */ in svm_set_cpu_caps()
5252 /* CPUID 0x8000001F (SME/SEV features) */ in svm_set_cpu_caps()
5283 memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); in svm_hardware_setup()
5312 pause_filter_count = 0; in svm_hardware_setup()
5313 pause_filter_thresh = 0; in svm_hardware_setup()
5315 pause_filter_thresh = 0; in svm_hardware_setup()
5425 return 0; in svm_hardware_setup()
5467 return 0; in svm_init()