/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/vmx.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_VMX_H
#define SELFTEST_KVM_VMX_H

#include <asm/vmx.h>

#include <stdint.h>
#include "processor.h"
#include "apic.h"

/*
 * Definitions of Primary Processor-Based VM-Execution Controls.
 */
#define CPU_BASED_INTR_WINDOW_EXITING		0x00000004
#define CPU_BASED_USE_TSC_OFFSETTING		0x00000008
#define CPU_BASED_HLT_EXITING			0x00000080
#define CPU_BASED_INVLPG_EXITING		0x00000200
#define CPU_BASED_MWAIT_EXITING			0x00000400
#define CPU_BASED_RDPMC_EXITING			0x00000800
#define CPU_BASED_RDTSC_EXITING			0x00001000
#define CPU_BASED_CR3_LOAD_EXITING		0x00008000
#define CPU_BASED_CR3_STORE_EXITING		0x00010000
#define CPU_BASED_CR8_LOAD_EXITING		0x00080000
#define CPU_BASED_CR8_STORE_EXITING		0x00100000
#define CPU_BASED_TPR_SHADOW			0x00200000
#define CPU_BASED_NMI_WINDOW_EXITING		0x00400000
#define CPU_BASED_MOV_DR_EXITING		0x00800000
#define CPU_BASED_UNCOND_IO_EXITING		0x01000000
#define CPU_BASED_USE_IO_BITMAPS		0x02000000
#define CPU_BASED_MONITOR_TRAP			0x08000000
#define CPU_BASED_USE_MSR_BITMAPS		0x10000000
#define CPU_BASED_MONITOR_EXITING		0x20000000
#define CPU_BASED_PAUSE_EXITING			0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS	0x80000000

#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x0401e172
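
/*
 * Illustrative sketch (not part of the upstream header): opting in to a
 * primary control is typically a read-modify-write of the 32-bit control
 * field, using the vmreadz()/vmwrite() helpers defined later in this file:
 *
 *	vmwrite(CPU_BASED_VM_EXEC_CONTROL,
 *		vmreadz(CPU_BASED_VM_EXEC_CONTROL) | CPU_BASED_HLT_EXITING);
 *
 * CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR above is the default-1 baseline that
 * applies when the IA32_VMX_TRUE_PROCBASED_CTLS MSR is not available.
 */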

/*
 * Definitions of Secondary Processor-Based VM-Execution Controls.
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT		0x00000002
#define SECONDARY_EXEC_DESC			0x00000004
#define SECONDARY_EXEC_ENABLE_RDTSCP		0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE	0x00000010
#define SECONDARY_EXEC_ENABLE_VPID		0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING		0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST	0x00000080
#define SECONDARY_EXEC_APIC_REGISTER_VIRT	0x00000100
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY	0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING	0x00000400
#define SECONDARY_EXEC_RDRAND_EXITING		0x00000800
#define SECONDARY_EXEC_ENABLE_INVPCID		0x00001000
#define SECONDARY_EXEC_ENABLE_VMFUNC		0x00002000
#define SECONDARY_EXEC_SHADOW_VMCS		0x00004000
#define SECONDARY_EXEC_RDSEED_EXITING		0x00010000
#define SECONDARY_EXEC_ENABLE_PML		0x00020000
#define SECONDARY_EPT_VE			0x00040000
#define SECONDARY_ENABLE_XSAV_RESTORE		0x00100000
#define SECONDARY_EXEC_TSC_SCALING		0x02000000

#define PIN_BASED_EXT_INTR_MASK			0x00000001
#define PIN_BASED_NMI_EXITING			0x00000008
#define PIN_BASED_VIRTUAL_NMIS			0x00000020
#define PIN_BASED_VMX_PREEMPTION_TIMER		0x00000040
#define PIN_BASED_POSTED_INTR			0x00000080

#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x00000016

#define VM_EXIT_SAVE_DEBUG_CONTROLS		0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE		0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL	0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT		0x00008000
#define VM_EXIT_SAVE_IA32_PAT			0x00040000
#define VM_EXIT_LOAD_IA32_PAT			0x00080000
#define VM_EXIT_SAVE_IA32_EFER			0x00100000
#define VM_EXIT_LOAD_IA32_EFER			0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER	0x00400000

#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff

#define VM_ENTRY_LOAD_DEBUG_CONTROLS		0x00000004
#define VM_ENTRY_IA32E_MODE			0x00000200
#define VM_ENTRY_SMM				0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR		0x00000800
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL	0x00002000
#define VM_ENTRY_LOAD_IA32_PAT			0x00004000
#define VM_ENTRY_LOAD_IA32_EFER			0x00008000

#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR	0x000011ff

#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
#define VMX_MISC_SAVE_EFER_LMA			0x00000020

#define VMX_EPT_VPID_CAP_1G_PAGES		0x00020000
#define VMX_EPT_VPID_CAP_AD_BITS		0x00200000

#define EXIT_REASON_FAILED_VMENTRY	0x80000000

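/*
 * VMCS field encodings follow the layout described in the SDM: bit 0
 * selects the high half of a 64-bit field, bits 11:10 give the field type
 * (control, read-only data, guest state, host state) and bits 14:13 the
 * width.  E.g. GUEST_CR0 (0x6800) decodes as a natural-width guest-state
 * field, and each *_HIGH encoding below is simply its base field plus one.
 */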
enum vmcs_field {
	VIRTUAL_PROCESSOR_ID		= 0x00000000,
	POSTED_INTR_NV			= 0x00000002,
	GUEST_ES_SELECTOR		= 0x00000800,
	GUEST_CS_SELECTOR		= 0x00000802,
	GUEST_SS_SELECTOR		= 0x00000804,
	GUEST_DS_SELECTOR		= 0x00000806,
	GUEST_FS_SELECTOR		= 0x00000808,
	GUEST_GS_SELECTOR		= 0x0000080a,
	GUEST_LDTR_SELECTOR		= 0x0000080c,
	GUEST_TR_SELECTOR		= 0x0000080e,
	GUEST_INTR_STATUS		= 0x00000810,
	GUEST_PML_INDEX			= 0x00000812,
	HOST_ES_SELECTOR		= 0x00000c00,
	HOST_CS_SELECTOR		= 0x00000c02,
	HOST_SS_SELECTOR		= 0x00000c04,
	HOST_DS_SELECTOR		= 0x00000c06,
	HOST_FS_SELECTOR		= 0x00000c08,
	HOST_GS_SELECTOR		= 0x00000c0a,
	HOST_TR_SELECTOR		= 0x00000c0c,
	IO_BITMAP_A			= 0x00002000,
	IO_BITMAP_A_HIGH		= 0x00002001,
	IO_BITMAP_B			= 0x00002002,
	IO_BITMAP_B_HIGH		= 0x00002003,
	MSR_BITMAP			= 0x00002004,
	MSR_BITMAP_HIGH			= 0x00002005,
	VM_EXIT_MSR_STORE_ADDR		= 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH	= 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR		= 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH	= 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR		= 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH	= 0x0000200b,
	PML_ADDRESS			= 0x0000200e,
	PML_ADDRESS_HIGH		= 0x0000200f,
	TSC_OFFSET			= 0x00002010,
	TSC_OFFSET_HIGH			= 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR		= 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH	= 0x00002013,
	APIC_ACCESS_ADDR		= 0x00002014,
	APIC_ACCESS_ADDR_HIGH		= 0x00002015,
	POSTED_INTR_DESC_ADDR		= 0x00002016,
	POSTED_INTR_DESC_ADDR_HIGH	= 0x00002017,
	EPT_POINTER			= 0x0000201a,
	EPT_POINTER_HIGH		= 0x0000201b,
	EOI_EXIT_BITMAP0		= 0x0000201c,
	EOI_EXIT_BITMAP0_HIGH		= 0x0000201d,
	EOI_EXIT_BITMAP1		= 0x0000201e,
	EOI_EXIT_BITMAP1_HIGH		= 0x0000201f,
	EOI_EXIT_BITMAP2		= 0x00002020,
	EOI_EXIT_BITMAP2_HIGH		= 0x00002021,
	EOI_EXIT_BITMAP3		= 0x00002022,
	EOI_EXIT_BITMAP3_HIGH		= 0x00002023,
	VMREAD_BITMAP			= 0x00002026,
	VMREAD_BITMAP_HIGH		= 0x00002027,
	VMWRITE_BITMAP			= 0x00002028,
	VMWRITE_BITMAP_HIGH		= 0x00002029,
	XSS_EXIT_BITMAP			= 0x0000202C,
	XSS_EXIT_BITMAP_HIGH		= 0x0000202D,
	ENCLS_EXITING_BITMAP		= 0x0000202E,
	ENCLS_EXITING_BITMAP_HIGH	= 0x0000202F,
	TSC_MULTIPLIER			= 0x00002032,
	TSC_MULTIPLIER_HIGH		= 0x00002033,
	GUEST_PHYSICAL_ADDRESS		= 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH	= 0x00002401,
	VMCS_LINK_POINTER		= 0x00002800,
	VMCS_LINK_POINTER_HIGH		= 0x00002801,
	GUEST_IA32_DEBUGCTL		= 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH	= 0x00002803,
	GUEST_IA32_PAT			= 0x00002804,
	GUEST_IA32_PAT_HIGH		= 0x00002805,
	GUEST_IA32_EFER			= 0x00002806,
	GUEST_IA32_EFER_HIGH		= 0x00002807,
	GUEST_IA32_PERF_GLOBAL_CTRL	= 0x00002808,
	GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
	GUEST_PDPTR0			= 0x0000280a,
	GUEST_PDPTR0_HIGH		= 0x0000280b,
	GUEST_PDPTR1			= 0x0000280c,
	GUEST_PDPTR1_HIGH		= 0x0000280d,
	GUEST_PDPTR2			= 0x0000280e,
	GUEST_PDPTR2_HIGH		= 0x0000280f,
	GUEST_PDPTR3			= 0x00002810,
	GUEST_PDPTR3_HIGH		= 0x00002811,
	GUEST_BNDCFGS			= 0x00002812,
	GUEST_BNDCFGS_HIGH		= 0x00002813,
	HOST_IA32_PAT			= 0x00002c00,
	HOST_IA32_PAT_HIGH		= 0x00002c01,
	HOST_IA32_EFER			= 0x00002c02,
	HOST_IA32_EFER_HIGH		= 0x00002c03,
	HOST_IA32_PERF_GLOBAL_CTRL	= 0x00002c04,
	HOST_IA32_PERF_GLOBAL_CTRL_HIGH	= 0x00002c05,
	PIN_BASED_VM_EXEC_CONTROL	= 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL	= 0x00004002,
	EXCEPTION_BITMAP		= 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK	= 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH	= 0x00004008,
	CR3_TARGET_COUNT		= 0x0000400a,
	VM_EXIT_CONTROLS		= 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT		= 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT		= 0x00004010,
	VM_ENTRY_CONTROLS		= 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT		= 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD	= 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE	= 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN	= 0x0000401a,
	TPR_THRESHOLD			= 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL	= 0x0000401e,
	PLE_GAP				= 0x00004020,
	PLE_WINDOW			= 0x00004022,
	VM_INSTRUCTION_ERROR		= 0x00004400,
	VM_EXIT_REASON			= 0x00004402,
	VM_EXIT_INTR_INFO		= 0x00004404,
	VM_EXIT_INTR_ERROR_CODE		= 0x00004406,
	IDT_VECTORING_INFO_FIELD	= 0x00004408,
	IDT_VECTORING_ERROR_CODE	= 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN		= 0x0000440c,
	VMX_INSTRUCTION_INFO		= 0x0000440e,
	GUEST_ES_LIMIT			= 0x00004800,
	GUEST_CS_LIMIT			= 0x00004802,
	GUEST_SS_LIMIT			= 0x00004804,
	GUEST_DS_LIMIT			= 0x00004806,
	GUEST_FS_LIMIT			= 0x00004808,
	GUEST_GS_LIMIT			= 0x0000480a,
	GUEST_LDTR_LIMIT		= 0x0000480c,
	GUEST_TR_LIMIT			= 0x0000480e,
	GUEST_GDTR_LIMIT		= 0x00004810,
	GUEST_IDTR_LIMIT		= 0x00004812,
	GUEST_ES_AR_BYTES		= 0x00004814,
	GUEST_CS_AR_BYTES		= 0x00004816,
	GUEST_SS_AR_BYTES		= 0x00004818,
	GUEST_DS_AR_BYTES		= 0x0000481a,
	GUEST_FS_AR_BYTES		= 0x0000481c,
	GUEST_GS_AR_BYTES		= 0x0000481e,
	GUEST_LDTR_AR_BYTES		= 0x00004820,
	GUEST_TR_AR_BYTES		= 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO	= 0x00004824,
	GUEST_ACTIVITY_STATE		= 0x00004826,
	GUEST_SYSENTER_CS		= 0x0000482A,
	VMX_PREEMPTION_TIMER_VALUE	= 0x0000482E,
	HOST_IA32_SYSENTER_CS		= 0x00004c00,
	CR0_GUEST_HOST_MASK		= 0x00006000,
	CR4_GUEST_HOST_MASK		= 0x00006002,
	CR0_READ_SHADOW			= 0x00006004,
	CR4_READ_SHADOW			= 0x00006006,
	CR3_TARGET_VALUE0		= 0x00006008,
	CR3_TARGET_VALUE1		= 0x0000600a,
	CR3_TARGET_VALUE2		= 0x0000600c,
	CR3_TARGET_VALUE3		= 0x0000600e,
	EXIT_QUALIFICATION		= 0x00006400,
	GUEST_LINEAR_ADDRESS		= 0x0000640a,
	GUEST_CR0			= 0x00006800,
	GUEST_CR3			= 0x00006802,
	GUEST_CR4			= 0x00006804,
	GUEST_ES_BASE			= 0x00006806,
	GUEST_CS_BASE			= 0x00006808,
	GUEST_SS_BASE			= 0x0000680a,
	GUEST_DS_BASE			= 0x0000680c,
	GUEST_FS_BASE			= 0x0000680e,
	GUEST_GS_BASE			= 0x00006810,
	GUEST_LDTR_BASE			= 0x00006812,
	GUEST_TR_BASE			= 0x00006814,
	GUEST_GDTR_BASE			= 0x00006816,
	GUEST_IDTR_BASE			= 0x00006818,
	GUEST_DR7			= 0x0000681a,
	GUEST_RSP			= 0x0000681c,
	GUEST_RIP			= 0x0000681e,
	GUEST_RFLAGS			= 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS	= 0x00006822,
	GUEST_SYSENTER_ESP		= 0x00006824,
	GUEST_SYSENTER_EIP		= 0x00006826,
	HOST_CR0			= 0x00006c00,
	HOST_CR3			= 0x00006c02,
	HOST_CR4			= 0x00006c04,
	HOST_FS_BASE			= 0x00006c06,
	HOST_GS_BASE			= 0x00006c08,
	HOST_TR_BASE			= 0x00006c0a,
	HOST_GDTR_BASE			= 0x00006c0c,
	HOST_IDTR_BASE			= 0x00006c0e,
	HOST_IA32_SYSENTER_ESP		= 0x00006c10,
	HOST_IA32_SYSENTER_EIP		= 0x00006c12,
	HOST_RSP			= 0x00006c14,
	HOST_RIP			= 0x00006c16,
};

struct vmx_msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
} __attribute__ ((aligned(16)));

#include "evmcs.h"

static inline int vmxon(uint64_t phys)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(phys)
		: "cc", "memory");

	return ret;
}

static inline void vmxoff(void)
{
	__asm__ __volatile__("vmxoff");
}

static inline int vmclear(uint64_t vmcs_pa)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return ret;
}

static inline int vmptrld(uint64_t vmcs_pa)
{
	uint8_t ret;

	if (enable_evmcs)
		return -1;

	__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return ret;
}

static inline int vmptrst(uint64_t *value)
{
	uint64_t tmp;
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmptrst(value);

	__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
		: [value]"=m"(tmp), [ret]"=rm"(ret)
		: : "cc", "memory");

	*value = tmp;
	return ret;
}

/*
 * A wrapper around vmptrst that ignores errors and returns zero if the
 * vmptrst instruction fails.
 */
static inline uint64_t vmptrstz(void)
{
	uint64_t value = 0;
	vmptrst(&value);
	return value;
}
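
/*
 * Example (illustrative): guest code can sanity-check which VMCS is
 * current, e.g. with the selftest GUEST_ASSERT() macro:
 *
 *	GUEST_ASSERT(vmptrstz() == vmcs_gpa);
 *
 * where vmcs_gpa is a placeholder for the caller's VMCS physical address.
 */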

/*
 * No guest state (e.g. GPRs) is established by this vmlaunch.  The asm
 * below records the current stack and a resume point in HOST_RSP and
 * HOST_RIP, so this returns 0 once a VM-exit comes back to label 1, and
 * 1 if the VMLAUNCH instruction itself fails.
 */
static inline int vmlaunch(void)
{
	int ret;

	if (enable_evmcs)
		return evmcs_vmlaunch();

	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmlaunch;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}

/*
 * No guest state (e.g. GPRs) is established by this vmresume.  As with
 * vmlaunch() above, this returns 0 once a VM-exit comes back to label 1,
 * and 1 if the VMRESUME instruction itself fails.
 */
static inline int vmresume(void)
{
	int ret;

	if (enable_evmcs)
		return evmcs_vmresume();

	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmresume;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}

static inline void vmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2.  Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 */
	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}

static inline int vmread(uint64_t encoding, uint64_t *value)
{
	uint64_t tmp;
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmread(encoding, value);

	__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
		: [value]"=rm"(tmp), [ret]"=rm"(ret)
		: [encoding]"r"(encoding)
		: "cc", "memory");

	*value = tmp;
	return ret;
}

/*
 * A wrapper around vmread that ignores errors and returns zero if the
 * vmread instruction fails.
 */
static inline uint64_t vmreadz(uint64_t encoding)
{
	uint64_t value = 0;
	vmread(encoding, &value);
	return value;
}
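
/*
 * Example (illustrative): after a VM-exit, L1 can classify the exit via
 * the read-only data fields, e.g.:
 *
 *	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 *
 * EXIT_REASON_VMCALL comes from <asm/vmx.h>, included above.
 */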

static inline int vmwrite(uint64_t encoding, uint64_t value)
{
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmwrite(encoding, value);

	__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [value]"rm"(value), [encoding]"r"(encoding)
		: "cc", "memory");

	return ret;
}

static inline uint32_t vmcs_revision(void)
{
	return rdmsr(MSR_IA32_VMX_BASIC);
}
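
/*
 * Per the SDM, bits 30:0 of IA32_VMX_BASIC hold the VMCS revision
 * identifier, which software stores in the first dword of the VMXON
 * region and of each VMCS.  A sketch, using the vmx_pages layout below:
 *
 *	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
 */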

struct vmx_pages {
	void *vmxon_hva;
	uint64_t vmxon_gpa;
	void *vmxon;

	void *vmcs_hva;
	uint64_t vmcs_gpa;
	void *vmcs;

	void *msr_hva;
	uint64_t msr_gpa;
	void *msr;

	void *shadow_vmcs_hva;
	uint64_t shadow_vmcs_gpa;
	void *shadow_vmcs;

	void *vmread_hva;
	uint64_t vmread_gpa;
	void *vmread;

	void *vmwrite_hva;
	uint64_t vmwrite_gpa;
	void *vmwrite;

	void *eptp_hva;
	uint64_t eptp_gpa;
	void *eptp;

	void *apic_access_hva;
	uint64_t apic_access_gpa;
	void *apic_access;
};
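
/*
 * Each backing page above is tracked three ways: a host virtual address
 * (*_hva) for host-side setup, a guest physical address (*_gpa) for
 * programming VMCS fields and VMX instruction operands, and a guest
 * virtual pointer for direct use by guest code.
 */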

union vmx_basic {
	u64 val;
	struct {
		u32 revision;
		u32	size:13,
			reserved1:3,
			width:1,
			dual:1,
			type:4,
			insouts:1,
			ctrl:1,
			vm_entry_exception_ctrl:1,
			reserved2:7;
	};
};
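
/*
 * Example (illustrative) of decoding IA32_VMX_BASIC via the union:
 *
 *	union vmx_basic basic = { .val = rdmsr(MSR_IA32_VMX_BASIC) };
 *
 * after which basic.revision is the VMCS revision identifier and
 * basic.size the VMXON/VMCS region size in bytes.
 */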

union vmx_ctrl_msr {
	u64 val;
	struct {
		u32 set, clr;
	};
};
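
/*
 * For the VMX capability MSRs this union reflects, the low dword ("set")
 * reports the allowed-0 settings (a 1 there means the control must be 1)
 * and the high dword ("clr") the allowed-1 settings (a 0 there means the
 * control must remain 0).
 */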

struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);

bool ept_1g_pages_supported(void);

void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		   uint64_t nested_paddr, uint64_t paddr);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		 uint64_t nested_paddr, uint64_t paddr, uint64_t size);
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
			uint32_t memslot);
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
			    uint64_t addr, uint64_t size);
bool kvm_cpu_has_ept(void);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
		  uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
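
/*
 * Illustrative sketch of the typical selftest flow (names like
 * l2_guest_code and l2_stack are placeholders, not part of this header).
 * Host side:
 *
 *	vm_vaddr_t vmx_gva;
 *	struct vmx_pages *vmx = vcpu_alloc_vmx(vm, &vmx_gva);
 *
 * Guest (L1) side:
 *
 *	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 *	GUEST_ASSERT(load_vmcs(vmx));
 *	prepare_vmcs(vmx, l2_guest_code, &l2_stack[L2_STACK_SIZE]);
 *	GUEST_ASSERT(!vmlaunch());
 */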

#endif /* SELFTEST_KVM_VMX_H */