Lines matching references to `best` in arch/x86/kvm/cpuid.c (KVM x86 CPUID handling), grouped by function:
In kvm_check_cpuid():
  137  struct kvm_cpuid_entry2 *best;    (local)
  144  best = cpuid_entry2_find(entries, nent, 0x80000008,
  146  if (best) {
  147  int vaddr_bits = (best->eax & 0xff00) >> 8;
  157  best = cpuid_entry2_find(entries, nent, 0xd, 0);
  158  if (!best)
  161  xfeatures = best->eax | ((u64)best->edx << 32);
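The checks here reduce to two pieces of bit arithmetic on guest CPUID data: the virtual-address width is leaf 0x80000008 EAX[15:8], and the supported XSAVE feature mask is EDX:EAX of leaf 0xD, subleaf 0. Below is a minimal userspace sketch of the same arithmetic using GCC/Clang's <cpuid.h> against the host CPU; it is not KVM code, and kvm_check_cpuid() itself operates on the user-supplied guest entries rather than raw CPUID.

/*
 * Userspace sketch: read the same leaves on the host and apply the same
 * bit arithmetic as the listing above.
 */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID 0x80000008: EAX[15:8] = virtual address bits, EAX[7:0] = physical. */
	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		int vaddr_bits = (eax & 0xff00) >> 8;
		int paddr_bits = eax & 0xff;
		printf("vaddr bits: %d, paddr bits: %d\n", vaddr_bits, paddr_bits);
	}

	/* CPUID 0xD.0: EDX:EAX = supported XCR0 feature mask. */
	if (__get_cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx)) {
		uint64_t xfeatures = eax | ((uint64_t)edx << 32);
		printf("xfeatures: 0x%llx\n", (unsigned long long)xfeatures);
	}
	return 0;
}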
In kvm_update_pv_runtime():
  247  struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);    (local)
  253  if (best)
  254  vcpu->arch.pv_cpuid.features = best->eax;
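kvm_update_pv_runtime() caches EAX of the KVM paravirtual features leaf on the host side. The guest-side view of that same word can be read with raw CPUID; the sketch below assumes the default KVM CPUID base of 0x40000000 (a hypervisor may relocate the base), and is not part of the kernel code above.

/*
 * Guest-side sketch: read the PV feature word that kvm_update_pv_runtime()
 * caches on the host side.  Assumes the default KVM CPUID base.
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13];

	__cpuid(0x40000000, eax, ebx, ecx, edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	sig[12] = '\0';

	if (strcmp(sig, "KVMKVMKVM") != 0) {
		printf("not running on KVM\n");
		return 0;
	}

	/* 0x40000001 EAX holds the KVM_FEATURE_* bits the guest may use. */
	__cpuid(0x40000001, eax, ebx, ecx, edx);
	printf("KVM PV feature word: 0x%x\n", eax);
	return 0;
}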
In cpuid_get_supported_xcr0():
  263  struct kvm_cpuid_entry2 *best;    (local)
  265  best = cpuid_entry2_find(entries, nent, 0xd, 0);
  266  if (!best)
  269  return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
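The return statement is an intersection: the XCR0 mask the guest's leaf 0xD entry advertises, clamped to what KVM and the host can actually back (kvm_caps.supported_xcr0). Below is a standalone sketch of just that masking step, with made-up sample masks and a hypothetical supported_xcr0() helper; only the XFEATURE bit positions are architectural.

/* Sketch of the masking step only; masks here are sample values. */
#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_FP	(1ull << 0)
#define XFEATURE_MASK_SSE	(1ull << 1)
#define XFEATURE_MASK_YMM	(1ull << 2)

static uint64_t supported_xcr0(uint64_t guest_eax, uint64_t guest_edx,
			       uint64_t host_supported)
{
	/* EDX:EAX from CPUID 0xD.0, clamped to what the host can back. */
	return (guest_eax | (guest_edx << 32)) & host_supported;
}

int main(void)
{
	/* Guest claims FP+SSE+YMM; the host only backs FP+SSE in this example. */
	uint64_t xcr0 = supported_xcr0(XFEATURE_MASK_FP | XFEATURE_MASK_SSE |
				       XFEATURE_MASK_YMM, 0,
				       XFEATURE_MASK_FP | XFEATURE_MASK_SSE);
	printf("effective XCR0 mask: 0x%llx\n", (unsigned long long)xcr0);
	return 0;
}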
In __kvm_update_cpuid_runtime():
  275  struct kvm_cpuid_entry2 *best;    (local)
  278  best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
  279  if (best) {
  282  cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
  285  cpuid_entry_change(best, X86_FEATURE_APIC,
  289  best = cpuid_entry2_find(entries, nent, 7, 0);
  290  if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
  291  cpuid_entry_change(best, X86_FEATURE_OSPKE,
  294  best = cpuid_entry2_find(entries, nent, 0xD, 0);
  295  if (best)
  296  best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
  298  best = cpuid_entry2_find(entries, nent, 0xD, 1);
  299  if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
  300  cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
  301  best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
  305  best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
  306  if (kvm_hlt_in_guest(vcpu->kvm) && best)
  307  best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
  311  best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
  312  if (best)
  313  cpuid_entry_change(best, X86_FEATURE_MWAIT,
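The pattern throughout this function is to mirror runtime guest state (CR4.OSXSAVE, CR4.PKE, the current XCR0, HLT/MWAIT policy) back into the cached CPUID entries. Below is a sketch of that pattern for the OSXSAVE bit only, using a hypothetical cpuid_entry struct and update_osxsave() helper rather than the kernel's cpuid_entry_change() on struct kvm_cpuid_entry2; the CPUID.01H:ECX bit position is architectural.

/* Sketch of "mirror runtime state into a CPUID entry" with a toy struct. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpuid_entry {
	uint32_t function, index;
	uint32_t eax, ebx, ecx, edx;
};

/* CPUID.01H:ECX[27] = OSXSAVE. */
#define FEAT_1_ECX_OSXSAVE	(1u << 27)

/* Set or clear OSXSAVE in the guest's leaf 1 to mirror CR4.OSXSAVE. */
static void update_osxsave(struct cpuid_entry *leaf1, bool cr4_osxsave)
{
	if (cr4_osxsave)
		leaf1->ecx |= FEAT_1_ECX_OSXSAVE;
	else
		leaf1->ecx &= ~FEAT_1_ECX_OSXSAVE;
}

int main(void)
{
	struct cpuid_entry leaf1 = { .function = 1 };

	update_osxsave(&leaf1, true);
	printf("leaf 1 ECX: 0x%08x\n", leaf1.ecx);
	return 0;
}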
In kvm_vcpu_after_set_cpuid():
  353  struct kvm_cpuid_entry2 *best;    (local)
  376  best = kvm_find_cpuid_entry(vcpu, 1);
  377  if (best && apic) {
  378  if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
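The feature test on line 378 corresponds to CPUID.01H:ECX bit 24 (TSC-deadline timer), gated here on the in-kernel APIC being present. A host-side sketch of the same check with <cpuid.h>, separate from the KVM code above:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.01H:ECX[24] advertises the TSC-deadline timer mode. */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		printf("TSC-deadline timer: %s\n",
		       (ecx & (1u << 24)) ? "supported" : "not supported");
	return 0;
}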
In cpuid_query_maxphyaddr():
  414  struct kvm_cpuid_entry2 *best;    (local)
  416  best = kvm_find_cpuid_entry(vcpu, 0x80000000);
  417  if (!best || best->eax < 0x80000008)
  419  best = kvm_find_cpuid_entry(vcpu, 0x80000008);
  420  if (best)
  421  return best->eax & 0xff;
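The query is two steps: confirm via leaf 0x80000000 that the extended range reaches 0x80000008, then take MAXPHYADDR from EAX[7:0] of that leaf (cpuid_query_maxphyaddr() works against the vCPU's cached entries and falls back to a default when the leaf is missing). A host-side sketch of the same two-step read:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Max supported extended leaf is reported in 0x80000000 EAX. */
	__cpuid(0x80000000, eax, ebx, ecx, edx);
	if (eax < 0x80000008) {
		printf("leaf 0x80000008 not available\n");
		return 0;
	}

	/* 0x80000008 EAX[7:0] = physical address width (MAXPHYADDR). */
	__cpuid(0x80000008, eax, ebx, ecx, edx);
	printf("MAXPHYADDR: %u bits\n", eax & 0xff);
	return 0;
}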