/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

/* Replace the entry's register for @leaf with KVM's supported feature bits. */
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}
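
/*
 * Example usage (a sketch of how KVM's cpuid.c builds the CPUID table:
 * an entry is first populated from raw CPUID, then masked down to what
 * KVM supports; CPUID_7_0_EBX is one of the reverse-CPUID leaf indices):
 *
 *	cpuid_entry_override(entry, CPUID_7_0_EBX);
 */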

/*
 * Translate @x86_feature to its CPUID leaf/register and return a pointer
 * into the guest's CPUID entry, or NULL if the leaf doesn't exist.
 */
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}
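
/*
 * Example: emulation paths typically gate feature-dependent behavior on
 * the guest's CPUID and inject a fault if the feature isn't enumerated:
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
 *		return 1;
 */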

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

/* When CPUID faulting is enabled, CPUID executed at CPL > 0 raises #GP. */
static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}
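
/*
 * Example: vendor setup code can advertise a feature to guests only when
 * the host CPU actually has it, e.g. (illustrative feature choice):
 *
 *	kvm_cpu_cap_check_and_set(X86_FEATURE_PAUSEFILTER);
 */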

static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}
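
/*
 * Example: note this takes KVM_FEATURE_* bit numbers (from
 * uapi/asm/kvm_para.h), not X86_FEATURE_* words, e.g.
 *
 *	if (guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
 *		...
 */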

enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};
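
/*
 * A "governed" feature caches the result of "KVM supports it AND the
 * guest's CPUID enumerates it" in a per-vCPU bitmap, so that hot paths
 * can test a single bit instead of re-walking guest CPUID.
 */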

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}
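
/*
 * Example: vendor code refreshes governed features after userspace sets
 * guest CPUID, e.g. (assuming LAM is listed in governed_features.h):
 *
 *	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
 */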

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	/* LAM bits in CR3 aren't address bits; strip them before the check. */
	if (guest_can_use(vcpu, X86_FEATURE_LAM))
		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif