Lines Matching +full:0 +full:x8000000a

41 int feature_bit = 0; in xstate_required_size()
46 if (xstate_bv & 0x1) { in xstate_required_size()
48 cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx); in xstate_required_size()
51 offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret; in xstate_required_size()
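The xstate_required_size() hits above walk the CPUID.0xD sub-leaves to size a guest's XSAVE area: each set bit in xstate_bv names a sub-leaf, that sub-leaf's EAX gives the component size, and ECX bit 1 flags components that are 64-byte aligned in the compacted format. A minimal userspace re-creation of the walk, assuming GCC/Clang's <cpuid.h> and an XSAVE-capable host (the in-kernel code uses cpuid_count() and ALIGN() instead):

	#include <cpuid.h>
	#include <stdint.h>

	#define ALIGN64(x) (((x) + 63u) & ~63u)	/* stand-in for ALIGN(x, 64) */

	uint32_t xstate_size(uint64_t xstate_bv, int compacted)
	{
		uint32_t ret = 512 + 64;	/* legacy XSAVE area + header */
		int feature_bit = 0;

		xstate_bv &= ~0x3ull;	/* x87/SSE live in the legacy area */
		while (xstate_bv) {
			if (xstate_bv & 0x1) {
				uint32_t eax, ebx, ecx, edx, offset;

				__get_cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
				if (compacted)	/* ECX bit 1: 64-byte aligned component */
					offset = (ecx & 0x2) ? ALIGN64(ret) : ret;
				else
					offset = ebx;	/* fixed offset, standard format */
				if (offset + eax > ret)
					ret = offset + eax;
			}
			xstate_bv >>= 1;
			feature_bit++;
		}
		return ret;
	}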
70 (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \
100 for (i = 0; i < nent; i++) { in cpuid_entry2_find()
144 best = cpuid_entry2_find(entries, nent, 0x80000008, in kvm_check_cpuid()
147 int vaddr_bits = (best->eax & 0xff00) >> 8; in kvm_check_cpuid()
149 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) in kvm_check_cpuid()
157 best = cpuid_entry2_find(entries, nent, 0xd, 0); in kvm_check_cpuid()
159 return 0; in kvm_check_cpuid()
164 return 0; in kvm_check_cpuid()
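kvm_check_cpuid() (hits above) rejects CPUID tables that advertise an impossible guest virtual-address width: CPUID.0x80000008:EAX[15:8] must be 0 (not reported), 48 (4-level paging), or 57 (5-level paging). The check in isolation, as a hypothetical helper:

	#include <stdbool.h>
	#include <stdint.h>

	bool vaddr_bits_valid(uint32_t eax_80000008)
	{
		int vaddr_bits = (eax_80000008 & 0xff00) >> 8;

		/* the only widths real x86 paging modes can produce */
		return vaddr_bits == 0 || vaddr_bits == 48 || vaddr_bits == 57;
	}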
179 for (i = 0; i < nent; i++) { in kvm_cpuid_check_equal()
189 return 0; in kvm_cpuid_check_equal()
205 signature[0] = entry->ebx; in __kvm_get_hypervisor_cpuid()
265 best = cpuid_entry2_find(entries, nent, 0xd, 0); in cpuid_get_supported_xcr0()
267 return 0; in cpuid_get_supported_xcr0()
289 best = cpuid_entry2_find(entries, nent, 7, 0); in __kvm_update_cpuid_runtime()
290 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) in __kvm_update_cpuid_runtime()
294 best = cpuid_entry2_find(entries, nent, 0xD, 0); in __kvm_update_cpuid_runtime()
298 best = cpuid_entry2_find(entries, nent, 0xD, 1); in __kvm_update_cpuid_runtime()
311 best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT); in __kvm_update_cpuid_runtime()
342 entry = kvm_find_cpuid_entry(vcpu, 0); in guest_cpuid_is_amd_or_hygon()
416 best = kvm_find_cpuid_entry(vcpu, 0x80000000); in cpuid_query_maxphyaddr()
417 if (!best || best->eax < 0x80000008) in cpuid_query_maxphyaddr()
419 best = kvm_find_cpuid_entry(vcpu, 0x80000008); in cpuid_query_maxphyaddr()
421 return best->eax & 0xff; in cpuid_query_maxphyaddr()
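cpuid_query_maxphyaddr() (hits above) reads the guest's physical-address width from CPUID.0x80000008:EAX[7:0], but only after leaf 0x80000000 confirms that leaf 0x80000008 exists; otherwise KVM falls back to 36 bits. The same probe run against the host, as a sketch assuming GCC/Clang's <cpuid.h>:

	#include <cpuid.h>

	int query_maxphyaddr(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* leaf 0x80000000 reports the highest extended leaf */
		if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
		    eax < 0x80000008)
			return 36;	/* KVM's fallback for ancient CPUs */
		__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		return eax & 0xff;
	}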
460 return 0; in kvm_set_cpuid()
485 return 0; in kvm_set_cpuid()
511 for (i = 0; i < cpuid->nent; i++) { in kvm_vcpu_ioctl_set_cpuid()
517 e2[i].index = 0; in kvm_vcpu_ioctl_set_cpuid()
518 e2[i].flags = 0; in kvm_vcpu_ioctl_set_cpuid()
519 e2[i].padding[0] = 0; in kvm_vcpu_ioctl_set_cpuid()
520 e2[i].padding[1] = 0; in kvm_vcpu_ioctl_set_cpuid()
521 e2[i].padding[2] = 0; in kvm_vcpu_ioctl_set_cpuid()
569 return 0; in kvm_vcpu_ioctl_get_cpuid2()
614 unsigned int f_gbpages = 0; in kvm_set_cpu_caps()
615 unsigned int f_lm = 0; in kvm_set_cpu_caps()
616 unsigned int f_xfd = 0; in kvm_set_cpu_caps()
618 memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); in kvm_set_cpu_caps()
631 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | in kvm_set_cpu_caps()
632 0 /* DS-CPL, VMX, SMX, EST */ | in kvm_set_cpu_caps()
633 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | in kvm_set_cpu_caps()
634 F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) | in kvm_set_cpu_caps()
635 F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) | in kvm_set_cpu_caps()
637 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | in kvm_set_cpu_caps()
646 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | in kvm_set_cpu_caps()
648 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) | in kvm_set_cpu_caps()
649 0 /* Reserved, DS, ACPI */ | F(MMX) | in kvm_set_cpu_caps()
651 0 /* HTT, TM, Reserved, PBE */ in kvm_set_cpu_caps()
657 F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) | in kvm_set_cpu_caps()
659 F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) | in kvm_set_cpu_caps()
664 F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | in kvm_set_cpu_caps()
667 F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ | in kvm_set_cpu_caps()
729 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | in kvm_set_cpu_caps()
731 F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | in kvm_set_cpu_caps()
732 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | in kvm_set_cpu_caps()
733 F(TOPOEXT) | 0 /* PERFCTR_CORE */ in kvm_set_cpu_caps()
739 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | in kvm_set_cpu_caps()
741 F(PAT) | F(PSE36) | 0 /* Reserved */ | in kvm_set_cpu_caps()
742 F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | in kvm_set_cpu_caps()
744 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) in kvm_set_cpu_caps()
788 kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0); in kvm_set_cpu_caps()
791 0 /* SME */ | 0 /* SEV */ | 0 /* VM_PAGE_FLUSH */ | 0 /* SEV_ES */ | in kvm_set_cpu_caps()
795 F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | in kvm_set_cpu_caps()
796 F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ | in kvm_set_cpu_caps()
869 memset(entry, 0, sizeof(*entry)); in do_host_cpuid()
872 switch (function & 0xC0000000) { in do_host_cpuid()
873 case 0x40000000: in do_host_cpuid()
877 case 0x80000000: in do_host_cpuid()
879 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which in do_host_cpuid()
885 WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000)); in do_host_cpuid()
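do_host_cpuid() (hits above) switches on the top two bits of the function (the 0xC0000000 mask) to special-case the hypervisor and extended ranges, and caches the host's maximum extended leaf so that leaves synthesized later (such as 0x80000021) never cause out-of-range host CPUID reads. An illustrative bucketing of that mask, not KVM's code:

	const char *cpuid_bucket(unsigned int function)
	{
		switch (function & 0xC0000000) {
		case 0x00000000: return "basic";
		case 0x40000000: return "hypervisor";	/* synthesized, never read from hardware */
		case 0x80000000: return "extended";
		case 0xC0000000: return "centaur";
		}
		return "unreachable";	/* the mask only yields the four values above */
	}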
913 entry->index = 0; in __do_cpuid_func_emulated()
914 entry->flags = 0; in __do_cpuid_func_emulated()
917 case 0: in __do_cpuid_func_emulated()
927 entry->eax = 0; in __do_cpuid_func_emulated()
936 return 0; in __do_cpuid_func_emulated()
949 entry = do_host_cpuid(array, function, 0); in __do_cpuid_func()
954 case 0: in __do_cpuid_func()
956 entry->eax = min(entry->eax, 0x24U); in __do_cpuid_func()
965 * CPUID(function=2, index=0) may return different results each in __do_cpuid_func()
967 * number of times software should do CPUID(2, 0). in __do_cpuid_func()
970 * idiotic. Intel's SDM states that EAX & 0xff "will always in __do_cpuid_func()
976 * a stateful CPUID.0x2 is encountered. in __do_cpuid_func()
978 WARN_ON_ONCE((entry->eax & 0xff) > 1); in __do_cpuid_func()
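The leaf-2 comment above concerns the ancient "stateful" CPUID.0x2 encoding: EAX[7:0] was an iteration count telling software how many times to execute CPUID(2, 0), but the SDM now pins it to 01H and tells software to ignore it, so KVM merely WARNs if it ever sees anything else. A quick host-side probe of that byte, assuming GCC/Clang's <cpuid.h>:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (__get_cpuid(2, &eax, &ebx, &ecx, &edx))
			printf("CPUID.2 iteration count: %u (SDM: always 1)\n",
			       eax & 0xff);
		return 0;
	}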
980 /* functions 4 and 0x8000001d have additional index. */ in __do_cpuid_func()
982 case 0x8000001d: in __do_cpuid_func()
987 for (i = 1; entry->eax & 0x1f; ++i) { in __do_cpuid_func()
994 entry->eax = 0x4; /* allow ARAT */ in __do_cpuid_func()
995 entry->ebx = 0; in __do_cpuid_func()
996 entry->ecx = 0; in __do_cpuid_func()
997 entry->edx = 0; in __do_cpuid_func()
1006 /* KVM only supports up to 0x7.2, capped above via min(). */ in __do_cpuid_func()
1014 entry->ebx = 0; in __do_cpuid_func()
1015 entry->ecx = 0; in __do_cpuid_func()
1023 entry->ecx = 0; in __do_cpuid_func()
1024 entry->ebx = 0; in __do_cpuid_func()
1025 entry->eax = 0; in __do_cpuid_func()
1028 case 0xa: { /* Architectural Performance Monitoring */ in __do_cpuid_func()
1033 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1046 edx.split.reserved1 = 0; in __do_cpuid_func()
1047 edx.split.reserved2 = 0; in __do_cpuid_func()
1051 entry->ecx = 0; in __do_cpuid_func()
1055 case 0x1f: in __do_cpuid_func()
1056 case 0xb: in __do_cpuid_func()
1061 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
1063 case 0xd: { in __do_cpuid_func()
1083 WARN_ON_ONCE(permitted_xss != 0); in __do_cpuid_func()
1084 entry->ebx = 0; in __do_cpuid_func()
1110 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { in __do_cpuid_func()
1117 entry->edx = 0; in __do_cpuid_func()
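Inside the 0xd case above, ECX bit 0 of each sub-leaf is what the s_state consistency check consumes: it distinguishes supervisor state components (saved via XSAVES, gated by IA32_XSS) from user components (gated by XCR0). A small probe of that bit, again assuming GCC/Clang's <cpuid.h>:

	#include <cpuid.h>

	/* nonzero if XSAVE component 'feature_bit' is supervisor state */
	int is_supervisor_xstate(int feature_bit)
	{
		unsigned int eax, ebx, ecx, edx;

		__get_cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
		return ecx & 0x1;
	}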
1121 case 0x12: in __do_cpuid_func()
1124 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1129 * Index 0: Sub-features, MISCSELECT (a.k.a extended features) in __do_cpuid_func()
1149 entry->ebx &= 0; in __do_cpuid_func()
1152 case 0x14: in __do_cpuid_func()
1154 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1164 case 0x1d: in __do_cpuid_func()
1166 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1175 case 0x1e: /* TMUL information */ in __do_cpuid_func()
1177 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1181 case 0x24: { in __do_cpuid_func()
1185 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1190 * The AVX10 version is encoded in EBX[7:0]. Note, the version in __do_cpuid_func()
1194 avx10_version = min_t(u8, entry->ebx & 0xff, 1); in __do_cpuid_func()
1198 entry->eax = 0; in __do_cpuid_func()
1199 entry->ecx = 0; in __do_cpuid_func()
1200 entry->edx = 0; in __do_cpuid_func()
1206 entry->ebx = sigptr[0]; in __do_cpuid_func()
1229 entry->ebx = 0; in __do_cpuid_func()
1230 entry->ecx = 0; in __do_cpuid_func()
1231 entry->edx = 0; in __do_cpuid_func()
1233 case 0x80000000: in __do_cpuid_func()
1234 entry->eax = min(entry->eax, 0x80000022); in __do_cpuid_func()
1240 * However, only do it if the host has CPUID leaf 0x8000001d. in __do_cpuid_func()
1242 * CPUID leaf if KVM reports that it supports 0x8000001d or in __do_cpuid_func()
1245 * 0x8000001d. Even worse, this can result in an infinite in __do_cpuid_func()
1248 if (entry->eax >= 0x8000001d && in __do_cpuid_func()
1251 entry->eax = max(entry->eax, 0x80000021); in __do_cpuid_func()
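The 0x80000000 case above caps the advertised maximum extended leaf at 0x80000022, then raises it to at least 0x80000021 when that synthesized leaf is wanted, but only if the host itself reaches 0x8000001d: per the comment, QEMU queries the host blindly for 0x8000001d once KVM advertises it, which can even loop forever. The clamp-then-raise pattern in isolation, with synthesize_21 standing in for KVM's feature checks:

	unsigned int clamp_max_ext_leaf(unsigned int host_eax, int synthesize_21)
	{
		unsigned int eax = host_eax < 0x80000022 ? host_eax : 0x80000022;

		if (eax >= 0x8000001d && synthesize_21 && eax < 0x80000021)
			eax = 0x80000021;	/* cover the synthesized leaf */
		return eax;
	}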
1253 case 0x80000001: in __do_cpuid_func()
1258 case 0x80000005: in __do_cpuid_func()
1261 case 0x80000006: in __do_cpuid_func()
1265 case 0x80000007: /* Advanced power management */ in __do_cpuid_func()
1270 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
1272 case 0x80000008: { in __do_cpuid_func()
1287 unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U); in __do_cpuid_func()
1305 g_phys_as = 0; in __do_cpuid_func()
1307 phys_as = entry->eax & 0xff; in __do_cpuid_func()
1315 entry->edx = 0; in __do_cpuid_func()
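The 0x80000008 hits above unpack and repack EAX's address-width fields: physical-address bits in [7:0], virtual-address bits in [15:8] (floored at 48), and the guest physical-address override in [23:16], per the AMD APM layout. The repacking step as a free-standing helper:

	unsigned int pack_80000008_eax(unsigned int host_eax, unsigned int g_phys_as)
	{
		unsigned int phys_as = host_eax & 0xff;		/* MAXPHYADDR */
		unsigned int virt_as = (host_eax >> 8) & 0xff;	/* linear-address bits */

		if (virt_as < 48)	/* mirrors max(..., 48U) in the hits */
			virt_as = 48;
		return phys_as | (virt_as << 8) | (g_phys_as << 16);
	}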
1319 case 0x8000000A: in __do_cpuid_func()
1321 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1327 entry->ecx = 0; /* Reserved */ in __do_cpuid_func()
1330 case 0x80000019: in __do_cpuid_func()
1331 entry->ecx = entry->edx = 0; in __do_cpuid_func()
1333 case 0x8000001a: in __do_cpuid_func()
1334 entry->eax &= GENMASK(2, 0); in __do_cpuid_func()
1335 entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1337 case 0x8000001e: in __do_cpuid_func()
1339 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
1340 entry->edx = 0; /* reserved */ in __do_cpuid_func()
1342 case 0x8000001F: in __do_cpuid_func()
1344 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1350 * Enumerate '0' for "PA bits reduction", the adjusted in __do_cpuid_func()
1351 * MAXPHYADDR is enumerated directly (see 0x80000008). in __do_cpuid_func()
1356 case 0x80000020: in __do_cpuid_func()
1357 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1359 case 0x80000021: in __do_cpuid_func()
1360 entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1364 case 0x80000022: { in __do_cpuid_func()
1367 entry->ecx = entry->edx = 0; in __do_cpuid_func()
1386 case 0xC0000000: in __do_cpuid_func()
1387 /*Just support up to 0xC0000004 now*/ in __do_cpuid_func()
1388 entry->eax = min(entry->eax, 0xC0000004); in __do_cpuid_func()
1390 case 0xC0000001: in __do_cpuid_func()
1395 case 0xC0000002: in __do_cpuid_func()
1396 case 0xC0000003: in __do_cpuid_func()
1397 case 0xC0000004: in __do_cpuid_func()
1399 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1403 r = 0; in __do_cpuid_func()
1420 #define CENTAUR_CPUID_SIGNATURE 0xC0000000
1430 return 0; in get_cpuid_func()
1463 for (i = 0; i < num_entries; i++) { in sanity_check_entries()
1467 if (pad[0] || pad[1] || pad[2]) in sanity_check_entries()
1478 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, in kvm_dev_ioctl_get_cpuid()
1482 .nent = 0, in kvm_dev_ioctl_get_cpuid()
1500 for (i = 0; i < ARRAY_SIZE(funcs); i++) { in kvm_dev_ioctl_get_cpuid()
1534 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1547 * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1548 * - Hypervisor: 0x40000000 - 0x4fffffff
1549 * - Extended: 0x80000000 - 0xbfffffff
1550 * - Centaur: 0xc0000000 - 0xcfffffff
1553 * their own independent class associated with a 0x100 byte range. E.g. if Qemu
1557 * - HyperV: 0x40000000 - 0x400000ff
1558 * - KVM: 0x40000100 - 0x400001ff
1566 basic = kvm_find_cpuid_entry(vcpu, 0); in get_out_of_range_cpuid_entry()
1574 if (function >= 0x40000000 && function <= 0x4fffffff) in get_out_of_range_cpuid_entry()
1575 class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00); in get_out_of_range_cpuid_entry()
1576 else if (function >= 0xc0000000) in get_out_of_range_cpuid_entry()
1577 class = kvm_find_cpuid_entry(vcpu, 0xc0000000); in get_out_of_range_cpuid_entry()
1579 class = kvm_find_cpuid_entry(vcpu, function & 0x80000000); in get_out_of_range_cpuid_entry()
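The class lookup the header comment describes reduces to masking the requested function down to the base leaf whose entry bounds its class, exactly as the kvm_find_cpuid_entry() hits above show. A hypothetical stand-alone version:

	/* map a CPUID function to the base leaf of its range class */
	unsigned int cpuid_class_base(unsigned int function)
	{
		if (function >= 0x40000000 && function <= 0x4fffffff)
			return function & 0xffffff00;	/* 0x100-sized hypervisor sub-classes */
		if (function >= 0xc0000000)
			return 0xc0000000;		/* Centaur */
		return function & 0x80000000;		/* basic (0) or extended */
	}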
1586 * max basic entry, e.g. if the max basic leaf is 0xb but there is no in get_out_of_range_cpuid_entry()
1587 * entry for CPUID.0xb.index (see below), then the output value for EDX in get_out_of_range_cpuid_entry()
1588 * needs to be pulled from CPUID.0xb.1. in get_out_of_range_cpuid_entry()
1620 if (function == 7 && index == 0) { in kvm_cpuid()
1625 } else if (function == 0x80000007) { in kvm_cpuid()
1630 *eax = *ebx = *ecx = *edx = 0; in kvm_cpuid()
1632 * When leaf 0BH or 1FH is defined, CL is pass-through in kvm_cpuid()
1638 if (function == 0xb || function == 0x1f) { in kvm_cpuid()
1641 *ecx = index & 0xff; in kvm_cpuid()
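The comment above about pulling EDX from CPUID.0xb.1, together with this fix-up in kvm_cpuid(), implements the SDM rule for topology leaves 0BH/1FH: even when the exact sub-leaf is absent, ECX[7:0] must echo the requested index and EDX still reports the x2APIC ID, which is the same in every sub-leaf. Reduced to a hypothetical helper, with edx_subleaf1 standing in for the value KVM fetches from sub-leaf 1:

	void fixup_topology_leaf(unsigned int function, unsigned int index,
				 unsigned int *ecx, unsigned int *edx,
				 unsigned int edx_subleaf1)
	{
		if (function == 0xb || function == 0x1f) {
			*ecx = index & 0xff;	/* CL passes the sub-leaf through */
			*edx = edx_subleaf1;	/* x2APIC ID, sub-leaf invariant */
		}
	}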
1656 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) in kvm_emulate_cpuid()