/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

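/* The physical address of vmcb01, i.e. svm->vmcb01.pa. */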
#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
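
/*
 * The *_SPEC_CTRL_BODY macros below emit the out-of-line 800:/900: slow
 * paths.  They are instantiated after the RET of each function that uses
 * RESTORE_{GUEST,HOST}_SPEC_CTRL, and jump back to the local 801:/901:
 * labels so that the common case falls straight through.
 */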
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1
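
	/*
	 * The stack now holds, from the top: @svm, the per-CPU save area PA
	 * and @spec_ctrl_intercepted, on top of the saved host registers.
	 */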

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
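	/*
	 * @spec_ctrl_intercepted is at the top of the stack when the 900: body
	 * runs, as the save area PA pushed after it has already been popped
	 * for the host vmload.
	 */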
	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

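	/*
	 * Fault fixups: 1b/3b/5b/7b are the vmload(vmcb01), vmrun,
	 * vmsave(vmcb01) and vmload(host save area) above.  A fault is
	 * tolerated (execution resumes after the faulting instruction) only
	 * if kvm_rebooting is set; otherwise it is fatal (ud2).
	 */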
10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV


#ifdef CONFIG_X86_64
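/*
 * SEV_ES_GPRS_BASE is chosen so that SEV_ES_GPRS_BASE + __VCPU_REGS_<reg> *
 * WORD_SIZE gives the offset of <reg> within the host save area (struct
 * sev_es_save_area) for each of the registers used below.
 */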
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 * @hostsa:	struct sev_es_save_area *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %r15, SEV_ES_R15 (%rdx)
	mov %r14, SEV_ES_R14 (%rdx)
	mov %r13, SEV_ES_R13 (%rdx)
	mov %r12, SEV_ES_R12 (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI (%rdx)
	mov %rsi, SEV_ES_RSI (%rdx)

	/* Clobbers RAX, RCX, RDX (@hostsa). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Enter guest mode */
	sti

1:	vmrun %rax

2:	cli

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
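	/*
	 * %sil is the low byte of RSI, i.e. @spec_ctrl_intercepted; RSI is
	 * restored from the host save area on #VMEXIT, so it is still live
	 * when the 900: body runs.
	 */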
	RESTORE_HOST_SPEC_CTRL_BODY %sil

3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */
