; $Id: HMR0A.asm 87491 2021-01-30 01:15:50Z vboxsync $
;; @file
; HM - Ring-0 VMX, SVM world-switch and helper routines.
;

;
; Copyright (C) 2006-2020 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*********************************************************************************************************************************
;*  Header Files                                                                                                                  *
;*********************************************************************************************************************************
;%define RT_ASM_WITH_SEH64      - trouble with SEH, alignment and (probably) 2nd pass optimizations.
%define RT_ASM_WITH_SEH64_ALT   ; Use asmdefs.mac hackery for manually emitting unwind info.
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/gvm.mac"
%include "iprt/x86.mac"
%include "HMInternal.mac"

%ifndef RT_ARCH_AMD64
 %error AMD64 only.
%endif


;*********************************************************************************************************************************
;*  Defined Constants And Macros                                                                                                  *
;*********************************************************************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160
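; (In the standard FXSAVE/FXRSTOR image, XMM0 starts at byte offset 160 and each
; XMM register occupies 16 bytes, which is what the 0x10-spaced accesses below rely on.)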

;; Spectre filler for 64-bit mode.
; Chosen to be an invalid address (also with 5-level paging).
%define SPECTRE_FILLER          0x02204204207fffff
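; (The constant is non-canonical with both 4- and 5-level paging - bits 63:57 are
; not a sign extension of bit 56 - so a stray dereference of a register scrubbed
; with it faults instead of speculatively touching mapped memory.)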

;;
; Determine whether we can skip restoring the GDTR, IDTR and TR across VMX non-root operation.
;
; @note This is normally done by hmR0VmxExportHostSegmentRegs and VMXRestoreHostState,
;       so much of this is untested code.
; @{
%define VMX_SKIP_GDTR
%define VMX_SKIP_TR
%define VBOX_SKIP_RESTORE_SEG
%ifdef RT_OS_DARWIN
 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
 ; risk loading a stale LDT value or something invalid.
 %define HM_64_BIT_USE_NULL_SEL
 ; Darwin (Mavericks) uses the IDTR limit to store the CPU number, so we need to always restore it.
 ; See @bugref{6875}.
 %undef VMX_SKIP_IDTR
%else
 %define VMX_SKIP_IDTR
%endif
;; @}

;; @def CALLEE_PRESERVED_REGISTER_COUNT
; Number of registers pushed by PUSH_CALLEE_PRESERVED_REGISTERS
%ifdef ASM_CALL64_GCC
 %define CALLEE_PRESERVED_REGISTER_COUNT 5
%else
 %define CALLEE_PRESERVED_REGISTER_COUNT 7
%endif
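; (The System V AMD64 convention treats rbx and r12-r15 as callee-saved; the
; Microsoft convention additionally protects rsi and rdi, hence the two extra pushes.)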

;; @def PUSH_CALLEE_PRESERVED_REGISTERS
; Macro for pushing all GPRs we must preserve for the caller.
%macro PUSH_CALLEE_PRESERVED_REGISTERS 0
        push    r15
        SEH64_PUSH_GREG r15
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r15   -cbFrame

        push    r14
        SEH64_PUSH_GREG r14
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r14   -cbFrame

        push    r13
        SEH64_PUSH_GREG r13
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r13   -cbFrame

        push    r12
        SEH64_PUSH_GREG r12
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r12   -cbFrame

        push    rbx
        SEH64_PUSH_GREG rbx
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_rbx   -cbFrame

 %ifdef ASM_CALL64_MSC
        push    rsi
        SEH64_PUSH_GREG rsi
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_rsi   -cbFrame

        push    rdi
        SEH64_PUSH_GREG rdi
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_rdi   -cbFrame
 %endif
%endmacro

;; @def POP_CALLEE_PRESERVED_REGISTERS
; Counterpart to PUSH_CALLEE_PRESERVED_REGISTERS for use in the epilogue.
%macro POP_CALLEE_PRESERVED_REGISTERS 0
 %ifdef ASM_CALL64_MSC
        pop     rdi
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_rdi

        pop     rsi
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_rsi
 %endif
        pop     rbx
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_rbx

        pop     r12
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r12

        pop     r13
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r13

        pop     r14
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r14

        pop     r15
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r15
%endmacro


;; @def PUSH_RELEVANT_SEGMENT_REGISTERS
; Macro saving all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.
; @clobbers rax, rdx, rcx
%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef VBOX_SKIP_RESTORE_SEG
  %error untested code. probably does not work any more!
  %ifndef HM_64_BIT_USE_NULL_SEL
        mov     %2, es
        push    %1
        mov     %2, ds
        push    %1
  %endif

        ; Special case for FS: Windows and Linux either don't use it or restore it
        ; when leaving kernel mode; Solaris OTOH doesn't, so we must save it.
        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        push    rdx
        push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
        push    fs
  %endif

        ; Special case for GS: OSes typically use swapgs to reset the hidden base register
        ; for GS on entry into the kernel. The same happens on exit.
        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        push    rdx
        push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
        push    gs
  %endif
 %endif ; !VBOX_SKIP_RESTORE_SEG
%endmacro ; PUSH_RELEVANT_SEGMENT_REGISTERS

;; @def POP_RELEVANT_SEGMENT_REGISTERS
; Macro restoring all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.
; @clobbers rax, rdx, rcx
%macro POP_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef VBOX_SKIP_RESTORE_SEG
  %error untested code. probably does not work any more!
        ; Note: do not step through this code with a debugger!
  %ifndef HM_64_BIT_USE_NULL_SEL
        xor     eax, eax
        mov     ds, ax
        mov     es, ax
        mov     fs, ax
        mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     gs
  %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     fs
  %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr
        ; Now it's safe to step again

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     %1
        mov     ds, %2
        pop     %1
        mov     es, %2
  %endif
 %endif ; !VBOX_SKIP_RESTORE_SEG
%endmacro ; POP_RELEVANT_SEGMENT_REGISTERS


;;
; Creates an indirect branch prediction barrier on CPUs that need and support it.
; @clobbers eax, edx, ecx
; @param 1  How to address CPUMCTX.
; @param 2  Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
%macro INDIRECT_BRANCH_PREDICTION_BARRIER_CTX 2
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
        jz      %%no_indirect_branch_barrier
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
%%no_indirect_branch_barrier:
%endmacro
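; Illustrative call site (hypothetical, just to show how the macro reads): with the
; CPUMCTX address in rdi, "INDIRECT_BRANCH_PREDICTION_BARRIER_CTX rdi, CPUMCTX_WSF_IBPB_ENTRY"
; expands to the flag test plus, when the flag is set, the MSR_IA32_PRED_CMD IBPB write.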

;;
; Creates an indirect branch prediction barrier on CPUs that need and support it.
; @clobbers eax, edx, ecx
; @param 1  How to address VMCPU.
; @param 2  Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
        test    byte [%1 + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher], %2
        jz      %%no_indirect_branch_barrier
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
%%no_indirect_branch_barrier:
%endmacro

;;
; Creates an indirect branch prediction and L1D barrier on CPUs that need and support it.
; @clobbers eax, edx, ecx
; @param 1  How to address CPUMCTX.
; @param 2  Which IBPB flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
; @param 3  Which FLUSH flag to test for (CPUMCTX_WSF_L1D_ENTRY)
; @param 4  Which MDS flag to test for (CPUMCTX_WSF_MDS_ENTRY)
%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 4
        ; Only one test+jmp on CPUs where all the mitigations are disabled.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3 | %4)
        jz      %%no_barrier_needed

        ; The eax:edx value is the same for both.
        AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx

        ; Indirect branch barrier.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
        jz      %%no_indirect_branch_barrier
        mov     ecx, MSR_IA32_PRED_CMD
        wrmsr
%%no_indirect_branch_barrier:

        ; Level 1 data cache flush.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %3
        jz      %%no_cache_flush_barrier
        mov     ecx, MSR_IA32_FLUSH_CMD
        wrmsr
        jmp     %%no_mds_buffer_flushing    ; MDS flushing is included in L1D_FLUSH
%%no_cache_flush_barrier:

        ; MDS buffer flushing.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %4
        jz      %%no_mds_buffer_flushing
        sub     xSP, xCB
        mov     [xSP], ds
        verw    [xSP]
        add     xSP, xCB
%%no_mds_buffer_flushing:

%%no_barrier_needed:
%endmacro
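; (On CPUs with the MD_CLEAR microcode update, VERW with a valid writable
; data-segment selector overwrites the affected microarchitectural buffers as a
; side effect; the DS selector is staged on the stack because VERW takes an r/m16
; operand, not an immediate. Same trick as hmR0MdsClear further down.)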


;*********************************************************************************************************************************
;*  External Symbols                                                                                                              *
;*********************************************************************************************************************************
%ifdef VBOX_WITH_KERNEL_USING_XMM
extern NAME(CPUMIsGuestFPUStateActive)
%endif


BEGINCODE


;;
; Used on platforms with poor inline assembly support to retrieve all the
; info from the CPU and put it in the @a pRestoreHost structure.
;
; @returns VBox status code
; @param   pRestoreHost   msc: rcx  gcc: rdi    Pointer to the RestoreHost struct.
; @param   fHaveFsGsBase  msc: dl   gcc: sil    Whether we can use rdfsbase or not.
;
ALIGNCODE(64)
BEGINPROC hmR0VmxExportHostSegmentRegsAsmHlp
%ifdef ASM_CALL64_MSC
 %define pRestoreHost rcx
%elifdef ASM_CALL64_GCC
 %define pRestoreHost rdi
%else
 %error Unknown calling convention.
%endif
        SEH64_END_PROLOGUE

        ; Start with the FS and GS base so we can trash DL/SIL.
%ifdef ASM_CALL64_MSC
        or      dl, dl
%else
        or      sil, sil
%endif
        jz      .use_rdmsr_for_fs_and_gs_base
        rdfsbase rax
        mov     [pRestoreHost + VMXRESTOREHOST.uHostFSBase], rax
        rdgsbase rax
        mov     [pRestoreHost + VMXRESTOREHOST.uHostGSBase], rax
.done_fs_and_gs_base:

        ; TR, GDTR and IDTR
        str     [pRestoreHost + VMXRESTOREHOST.uHostSelTR]
        sgdt    [pRestoreHost + VMXRESTOREHOST.HostGdtr]
        sidt    [pRestoreHost + VMXRESTOREHOST.HostIdtr]

        ; Segment registers.
        xor     eax, eax
        mov     eax, cs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelCS], ax

        mov     eax, ss
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelSS], ax

        mov     eax, gs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelGS], ax

        mov     eax, fs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelFS], ax

        mov     eax, es
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelES], ax

        mov     eax, ds
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelDS], ax

        ret

ALIGNCODE(16)
.use_rdmsr_for_fs_and_gs_base:
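        ; RDMSR returns the MSR value split across EDX:EAX; shift and OR the two
        ; halves back together before storing the 64-bit base address.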
        mov     r8, pRestoreHost            ; Keep the pointer in r8 on both conventions;
                                            ; the MSR index below clobbers ecx (pRestoreHost on MSC).

        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        shl     rdx, 32
        or      rdx, rax
        mov     [r8 + VMXRESTOREHOST.uHostFSBase], rdx

        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        shl     rdx, 32
        or      rdx, rax
        mov     [r8 + VMXRESTOREHOST.uHostGSBase], rdx

%ifdef ASM_CALL64_MSC
        mov     pRestoreHost, r8            ; Restore the pointer that was clobbered via ecx above.
%endif
        jmp     .done_fs_and_gs_base
%undef pRestoreHost
ENDPROC hmR0VmxExportHostSegmentRegsAsmHlp


;;
; Restores host-state fields.
;
; @returns VBox status code
; @param   f32RestoreHost  msc: ecx  gcc: edi   RestoreHost flags.
; @param   pRestoreHost    msc: rdx  gcc: rsi   Pointer to the RestoreHost struct.
;
ALIGNCODE(64)
BEGINPROC VMXRestoreHostState
%ifndef ASM_CALL64_GCC
        ; Use GCC's input registers since we'll be needing both rcx and rdx further
        ; down with the wrmsr instruction. Save RDI and RSI in R10 and R11, since
        ; the MSC calling convention requires us to preserve those two registers.
        mov     r10, rdi
        mov     r11, rsi
        mov     rdi, rcx
        mov     rsi, rdx
%endif
        SEH64_END_PROLOGUE

.restore_gdtr:
        test    edi, VMX_RESTORE_HOST_GDTR
        jz      .restore_idtr
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.restore_idtr:
        test    edi, VMX_RESTORE_HOST_IDTR
        jz      .restore_ds
        lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.restore_ds:
        test    edi, VMX_RESTORE_HOST_SEL_DS
        jz      .restore_es
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
        mov     ds, eax

.restore_es:
        test    edi, VMX_RESTORE_HOST_SEL_ES
        jz      .restore_tr
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
        mov     es, eax

.restore_tr:
        test    edi, VMX_RESTORE_HOST_SEL_TR
        jz      .restore_fs
        ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
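        ; (LTR requires an 'available' 64-bit TSS descriptor, type 9; VT-x leaves the
        ; host TSS marked busy, type 11, and reloading a busy TSS raises #GP.)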
        mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
        mov     ax, dx
        and     eax, X86_SEL_MASK_OFF_RPL                       ; mask away TI and RPL bits leaving only the descriptor offset
        test    edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_or_need_writable
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     dword [rax + 4], ~RT_BIT(9)                     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx

.restore_fs:
        ;
        ; When restoring the selector values for FS and GS, we'll temporarily trash
        ; the base address (at least the high 32 bits, but quite possibly the whole
        ; base address); the wrmsr will restore it correctly. (VT-x actually restores
        ; the base correctly when leaving guest mode, but not the selector value, so
        ; there is little problem with interrupts being enabled prior to this restore
        ; job.)
        ; We'll disable ints once for both FS and GS as that's probably faster.
        ;
        test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
        jz      .restore_success
        pushfq
        cli                                                     ; (see above)

        test    edi, VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE
        jz      .restore_fs_using_wrmsr

.restore_fs_using_wrfsbase:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrgsbase
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostFSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        wrfsbase rax

.restore_gs_using_wrgsbase:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostGSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        wrgsbase rax

.restore_flags:
        popfq

.restore_success:
        mov     eax, VINF_SUCCESS
%ifndef ASM_CALL64_GCC
        ; Restore RDI and RSI on MSC.
        mov     rdi, r10
        mov     rsi, r11
%endif
        ret

ALIGNCODE(8)
.gdt_readonly_or_need_writable:
        test    edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_need_writable
.gdt_readonly:
        mov     rcx, cr0
        mov     r9, rcx
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     rcx, ~X86_CR0_WP
        mov     cr0, rcx
        and     dword [rax + 4], ~RT_BIT(9)                     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx
        mov     cr0, r9
        jmp     .restore_fs

ALIGNCODE(8)
.gdt_readonly_need_writable:
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw
        and     dword [rax + 4], ~RT_BIT(9)                     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        lgdt    [rsi + VMXRESTOREHOST.HostGdtrRw]
        ltr     dx
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]                 ; load the original GDT
        jmp     .restore_fs

ALIGNCODE(8)
.restore_fs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrmsr
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]       ; uHostFSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h]  ; uHostFSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr

.restore_gs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]       ; uHostGSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h]  ; uHostGSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr
        jmp     .restore_flags
ENDPROC VMXRestoreHostState


;;
; Dispatches an NMI to the host.
;
ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
        ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
        SEH64_END_PROLOGUE
        int 2
        ret
ENDPROC VMXDispatchHostNmi


;;
; Common restore logic for success and error paths. We duplicate this because we
; don't want to waste writing the VINF_SUCCESS return value to the stack in the
; regular code path.
;
; @param 1  Zero if regular return, non-zero if error return. Controls label emission.
; @param 2  fLoadSaveGuestXcr0 value
; @param 3  The (CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY) + CPUMCTX_WSF_IBPB_EXIT value.
;           The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
; @param 4  The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;
; @note Important that this does not modify cbFrame or rsp.
%macro RESTORE_STATE_VMX 4
        ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
        lidt    [rsp + cbFrame + frm_saved_idtr]
 %endif
 %ifndef VMX_SKIP_GDTR
        lgdt    [rsp + cbFrame + frm_saved_gdtr]
 %endif

        ; Save the guest state and restore the non-volatile registers. We use rax=pGstCtx here.
        mov     [rsp + cbFrame + frm_guest_rax], rax
        mov     rax, [rsp + cbFrame + frm_pGstCtx]

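        ; Each GPR is saved into the guest context and immediately reloaded with the
        ; SPECTRE_FILLER value, so no guest-controlled value lingers in a register
        ; that speculative host code could use as a pointer or index.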
        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]        ; re-establish the frame pointer as early as possible.
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, [rbp + frm_guest_rax]
        mov     qword [rax + CPUMCTX.eax], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8], r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9], r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]

        mov     rdx, cr2
        mov     qword [rax + CPUMCTX.cr2], rdx
        mov     rdx, rcx

 %if %4 != 0
        ; Save the context pointer in r8 for the SSE save/restore.
        mov     r8, rax
 %endif

 %if %3 & CPUMCTX_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
  %if %1 = 0 ; Skip this in failure branch (=> guru)
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
  %endif
 %endif

 %ifndef VMX_SKIP_TR
        ; Restore TSS selector; must mark it as not busy before using ltr!
        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
  %ifndef VMX_SKIP_GDTR
        lgdt    [rbp + frm_saved_gdtr]
  %endif
        movzx   eax, word [rbp + frm_saved_tr]
        mov     ecx, eax
        and     eax, X86_SEL_MASK_OFF_RPL       ; mask away TI and RPL bits leaving only the descriptor offset
        add     rax, [rbp + frm_saved_gdtr + 2] ; eax <- GDTR.address + descriptor offset
        and     dword [rax + 4], ~RT_BIT(9)     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     cx
 %endif
        movzx   edx, word [rbp + frm_saved_ldtr]
        test    edx, edx
        jz      %%skip_ldt_write
        lldt    dx
%%skip_ldt_write:

 %if %1 != 0
.return_after_vmwrite_error:
 %endif
        ; Restore segment registers.
        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.

 %if %2 != 0
        ; Restore the host XCR0.
        xor     ecx, ecx
        mov     eax, [rbp + frm_uHostXcr0]
        mov     edx, [rbp + frm_uHostXcr0 + 4]
        xsetbv
 %endif
%endmacro ; RESTORE_STATE_VMX


;;
; hmR0VmxStartVm template
;
; @param 1  The suffix of the variation.
; @param 2  fLoadSaveGuestXcr0 value
; @param 3  The CPUMCTX_WSF_IBPB_ENTRY + CPUMCTX_WSF_IBPB_EXIT value.
; @param 4  The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;           Drivers shouldn't use AVX registers without saving+loading:
;               https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;           However the compiler docs have a different idea:
;               https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;           We'll go with the former for now.
;
%macro hmR0VmxStartVmTemplate 4

;;
; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
;
; @returns VBox status code
; @param   pVmcsInfo  msc:rcx, gcc:rdi  Pointer to the VMCS info (for cached host RIP and RSP).
; @param   pVCpu      msc:rdx, gcc:rsi  The cross context virtual CPU structure of the calling EMT.
; @param   fResume    msc:r8l, gcc:dl   Whether to use vmlaunch/vmresume.
;
ALIGNCODE(64)
BEGINPROC RT_CONCAT(hmR0VmxStartVm,%1)
 %ifdef VBOX_WITH_KERNEL_USING_XMM
  %if %4 = 0
        ;
        ; The non-saving variant will currently check the two SSE preconditions and pick
        ; the right variant to continue with. Later we can see if we can't manage to
        ; move these decisions into hmR0VmxUpdateStartVmFunction().
        ;
   %ifdef ASM_CALL64_MSC
        test    byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %else
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %endif
        jz      .save_xmm_no_need
   %ifdef ASM_CALL64_MSC
        cmp     dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %else
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %endif
        je      RT_CONCAT3(hmR0VmxStartVm,%1,_SseManual)
        jmp     RT_CONCAT3(hmR0VmxStartVm,%1,_SseXSave)
.save_xmm_no_need:
  %endif
 %endif
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
        pushf
        cli

 %define frm_fRFlags        -008h
 %define frm_pGstCtx        -010h   ; Where we stash guest CPU context for use after the vmlaunch/vmresume.
 %define frm_uHostXcr0      -020h   ; 128-bit
 %define frm_saved_gdtr     -02ah   ; 16+64: Only used when VMX_SKIP_GDTR isn't defined
 %define frm_saved_tr       -02ch   ; 16-bit: Only used when VMX_SKIP_TR isn't defined
 %define frm_MDS_seg        -030h   ; 16-bit: Temporary storage for the MDS flushing.
 %define frm_saved_idtr     -03ah   ; 16+64: Only used when VMX_SKIP_IDTR isn't defined
 %define frm_saved_ldtr     -03ch   ; 16-bit: always saved.
 %define frm_rcError        -040h   ; 32-bit: Error status code (not used in the success path)
 %define frm_guest_rax      -048h   ; Temporary storage slot for guest RAX.
 %if %4 = 0
  %assign cbFrame           048h
 %else
  %define frm_saved_xmm6    -050h
  %define frm_saved_xmm7    -060h
  %define frm_saved_xmm8    -070h
  %define frm_saved_xmm9    -080h
  %define frm_saved_xmm10   -090h
  %define frm_saved_xmm11   -0a0h
  %define frm_saved_xmm12   -0b0h
  %define frm_saved_xmm13   -0c0h
  %define frm_saved_xmm14   -0d0h
  %define frm_saved_xmm15   -0e0h
  %define frm_saved_mxcsr   -0f0h
  %assign cbFrame           0f0h
 %endif
 %assign cbBaseFrame        cbFrame
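        ; (All frm_xxx values are negative displacements from rbp, which still points
        ; at the top of the frame here; cbFrame tracks the running frame size so the
        ; PUSH/POP macros and the strict-build checks can validate it.)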
        sub     rsp, cbFrame - 8h
        SEH64_ALLOCATE_STACK cbFrame

        ; Save all general purpose host registers.
        PUSH_CALLEE_PRESERVED_REGISTERS
        ;PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax - currently broken
        SEH64_END_PROLOGUE

        ;
        ; Unify the input parameter registers: r9=pVmcsInfo, rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
        ;
 %ifdef ASM_CALL64_GCC
        mov     r9, rdi                 ; pVmcsInfo
        mov     ebx, edx                ; fResume
 %else
        mov     r9, rcx                 ; pVmcsInfo
        mov     rsi, rdx                ; pVCpu
        mov     ebx, r8d                ; fResume
 %endif
        lea     rdi, [rsi + VMCPU.cpum.GstCtx]
        mov     [rbp + frm_pGstCtx], rdi

 %ifdef VBOX_STRICT
        ;
        ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
        ;
        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
        mov     eax, VERR_VMX_STARTVM_PRECOND_0
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher]
        and     eax, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT
        cmp     eax, %3
        mov     eax, VERR_VMX_STARTVM_PRECOND_1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

  %ifdef VBOX_WITH_KERNEL_USING_XMM
        mov     eax, VERR_VMX_STARTVM_PRECOND_2
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %if %4 = 0
        jnz     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
   %else
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

        mov     eax, VERR_VMX_STARTVM_PRECOND_3
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
    %if %4 = 1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
    %elif %4 = 2
        je      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
    %else
     %error Invalid template parameter 4.
    %endif
   %endif
  %endif
 %endif ; VBOX_STRICT

 %if %4 != 0
        ; Save the non-volatile SSE host register state.
        movdqa  [rbp + frm_saved_xmm6 ], xmm6
        movdqa  [rbp + frm_saved_xmm7 ], xmm7
        movdqa  [rbp + frm_saved_xmm8 ], xmm8
        movdqa  [rbp + frm_saved_xmm9 ], xmm9
        movdqa  [rbp + frm_saved_xmm10], xmm10
        movdqa  [rbp + frm_saved_xmm11], xmm11
        movdqa  [rbp + frm_saved_xmm12], xmm12
        movdqa  [rbp + frm_saved_xmm13], xmm13
        movdqa  [rbp + frm_saved_xmm14], xmm14
        movdqa  [rbp + frm_saved_xmm15], xmm15
        stmxcsr [rbp + frm_saved_mxcsr]

        ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
        mov     rcx, [rdi + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        movdqa  xmm0, [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
        movdqa  xmm1, [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
        movdqa  xmm2, [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
        movdqa  xmm3, [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
        movdqa  xmm4, [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
        movdqa  xmm5, [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
        movdqa  xmm6, [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7, [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8, [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9, [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
        ldmxcsr [rcx + X86FXSTATE.MXCSR]
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xrstor  [rcx]
  %else
   %error invalid template parameter 4
  %endif
 %endif

 %if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
        ; Note! Trashes rax, rdx and rcx.
        xor     ecx, ecx
        xgetbv                          ; save the host one on the stack
        mov     [rbp + frm_uHostXcr0], eax
        mov     [rbp + frm_uHostXcr0 + 4], edx

        mov     eax, [rdi + CPUMCTX.aXcr]   ; load the guest one
        mov     edx, [rdi + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                ; paranoia; indicate that we must restore XCR0 (popped into ecx, thus 0)
        xsetbv
 %endif

        ; Save host LDTR.
        sldt    word [rbp + frm_saved_ldtr]

 %ifndef VMX_SKIP_TR
        ; The host TR limit is reset to 0x67; save & restore it manually.
        str     word [rbp + frm_saved_tr]
 %endif

 %ifndef VMX_SKIP_GDTR
        ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
        sgdt    [rbp + frm_saved_gdtr]
 %endif
 %ifndef VMX_SKIP_IDTR
        sidt    [rbp + frm_saved_idtr]
 %endif

        ; Load CR2 if necessary (expensive as writing CR2 is a synchronizing instruction - (bird: still expensive on 10980xe)).
        mov     rcx, qword [rdi + CPUMCTX.cr2]
        mov     rdx, cr2
        cmp     rcx, rdx
        je      .skip_cr2_write
        mov     cr2, rcx
.skip_cr2_write:

        ; Set the vmlaunch/vmresume "return" host RIP and RSP values if they've changed (unlikely).
        ; The vmwrite isn't quite for free (on a 10980xe at least), thus we check if anything changed
        ; before writing here.
        lea     rcx, [NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) wrt rip]
        cmp     rcx, [r9 + VMXVMCSINFO.uHostRip]
        jne     .write_host_rip
.wrote_host_rip:
        cmp     rsp, [r9 + VMXVMCSINFO.uHostRsp]
        jne     .write_host_rsp
.wrote_host_rsp:

        ;
        ; Fight spectre and similar. Trashes rax, rcx, and rdx.
        ;
 %if %3 & (CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY)  ; The eax:edx value is the same for the first two.
        AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
 %endif
 %if %3 & CPUMCTX_WSF_IBPB_ENTRY        ; Indirect branch barrier.
        mov     ecx, MSR_IA32_PRED_CMD
        wrmsr
 %endif
 %if %3 & CPUMCTX_WSF_L1D_ENTRY         ; Level 1 data cache flush.
        mov     ecx, MSR_IA32_FLUSH_CMD
        wrmsr
 %elif %3 & CPUMCTX_WSF_MDS_ENTRY       ; MDS flushing is included in L1D_FLUSH
        mov     word [rbp + frm_MDS_seg], ds
        verw    word [rbp + frm_MDS_seg]
 %endif

        ; Resume or start VM?
        cmp     bl, 0                   ; fResume
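        ; (The compare is done before the register loads below on purpose: plain MOVs
        ; do not modify RFLAGS, so the result survives until the 'je' after them, by
        ; which point every GPR already holds its guest value.)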

        ; Load guest general purpose registers.
        mov     rax, qword [rdi + CPUMCTX.eax]
        mov     rbx, qword [rdi + CPUMCTX.ebx]
        mov     rcx, qword [rdi + CPUMCTX.ecx]
        mov     rdx, qword [rdi + CPUMCTX.edx]
        mov     rbp, qword [rdi + CPUMCTX.ebp]
        mov     rsi, qword [rdi + CPUMCTX.esi]
        mov     r8, qword [rdi + CPUMCTX.r8]
        mov     r9, qword [rdi + CPUMCTX.r9]
        mov     r10, qword [rdi + CPUMCTX.r10]
        mov     r11, qword [rdi + CPUMCTX.r11]
        mov     r12, qword [rdi + CPUMCTX.r12]
        mov     r13, qword [rdi + CPUMCTX.r13]
        mov     r14, qword [rdi + CPUMCTX.r14]
        mov     r15, qword [rdi + CPUMCTX.r15]
        mov     rdi, qword [rdi + CPUMCTX.edi]

        je      .vmlaunch64_launch

        vmresume
        jc      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
        jmp     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))   ; here if vmresume detected a failure

.vmlaunch64_launch:
        vmlaunch
        jc      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
        jmp     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))   ; here if vmlaunch detected a failure


        ; Put these two outside the normal code path as they should rarely change.
ALIGNCODE(8)
.write_host_rip:
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRip]
 %endif
        mov     [r9 + VMXVMCSINFO.uHostRip], rcx
        mov     eax, VMX_VMCS_HOST_RIP      ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
        vmwrite rax, rcx                    ;;       the VMXVMCSINFO::pfnStartVM function changes (eventually
 %ifdef VBOX_STRICT                         ;;       take the Windows/SSE stuff into account then)...
        jna     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
 %endif
        jmp     .wrote_host_rip

ALIGNCODE(8)
.write_host_rsp:
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRsp]
 %endif
        mov     [r9 + VMXVMCSINFO.uHostRsp], rsp
        mov     eax, VMX_VMCS_HOST_RSP
        vmwrite rax, rsp
 %ifdef VBOX_STRICT
        jna     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
 %endif
        jmp     .wrote_host_rsp

ALIGNCODE(64)
GLOBALNAME RT_CONCAT(hmR0VmxStartVmHostRIP,%1)
        RESTORE_STATE_VMX 0, %2, %3, %4
        mov     eax, VINF_SUCCESS

.vmstart64_end:
 %if %4 != 0
        mov     r11d, eax               ; save the return code.

        ; Save the guest SSE state related to non-volatile and volatile SSE registers.
        mov     rcx, [r8 + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        stmxcsr [rcx + X86FXSTATE.MXCSR]
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [r8 + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xsave   [rcx]
  %else
   %error invalid template parameter 4
  %endif

        ; Restore the host non-volatile SSE register state.
        ldmxcsr [rbp + frm_saved_mxcsr]
        movdqa  xmm6, [rbp + frm_saved_xmm6 ]
        movdqa  xmm7, [rbp + frm_saved_xmm7 ]
        movdqa  xmm8, [rbp + frm_saved_xmm8 ]
        movdqa  xmm9, [rbp + frm_saved_xmm9 ]
        movdqa  xmm10, [rbp + frm_saved_xmm10]
        movdqa  xmm11, [rbp + frm_saved_xmm11]
        movdqa  xmm12, [rbp + frm_saved_xmm12]
        movdqa  xmm13, [rbp + frm_saved_xmm13]
        movdqa  xmm14, [rbp + frm_saved_xmm14]
        movdqa  xmm15, [rbp + frm_saved_xmm15]

        mov     eax, r11d
 %endif ; %4 != 0

        lea     rsp, [rbp + frm_fRFlags]
        popf
        leave
        ret

        ;
        ; Error returns.
        ;
 %ifdef VBOX_STRICT
.vmwrite_failed:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_FIELD
        jz      .return_after_vmwrite_error
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR
        jmp     .return_after_vmwrite_error
 %endif
.vmxstart64_invalid_vmcs_ptr:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
        jmp     .vmstart64_error_return
.vmxstart64_start_failed:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_UNABLE_TO_START_VM
.vmstart64_error_return:
        RESTORE_STATE_VMX 1, %2, %3, %4
        mov     eax, [rbp + frm_rcError]
        jmp     .vmstart64_end

 %ifdef VBOX_STRICT
        ; Precondition checks failed.
.precond_failure_return:
        POP_CALLEE_PRESERVED_REGISTERS
  %if cbFrame != cbBaseFrame
   %error Bad frame size value: cbFrame, expected cbBaseFrame
  %endif
        jmp     .vmstart64_end
 %endif

 %undef frm_fRFlags
 %undef frm_pGstCtx
 %undef frm_uHostXcr0
 %undef frm_saved_gdtr
 %undef frm_saved_tr
 %undef frm_MDS_seg
 %undef frm_saved_idtr
 %undef frm_saved_ldtr
 %undef frm_rcError
 %undef frm_guest_rax
 %undef cbFrame
ENDPROC RT_CONCAT(hmR0VmxStartVm,%1)

%endmacro ; hmR0VmxStartVmTemplate

%macro hmR0VmxStartVmSseTemplate 3
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0 | 0 | 0 | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0 | 0 | 0 | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | 0 | 0 | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | 0 | 0 | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0 | CPUMCTX_WSF_L1D_ENTRY | 0 | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0 | CPUMCTX_WSF_L1D_ENTRY | 0 | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | 0 | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | 0 | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0 | 0 | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0 | 0 | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | 0 | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | 0 | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0 | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0 | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | 0, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0 | 0 | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0 | 0 | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | 0 | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | 0 | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0 | CPUMCTX_WSF_L1D_ENTRY | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0 | CPUMCTX_WSF_L1D_ENTRY | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | 0 | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0 | 0 | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0 | 0 | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | 0 | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | 0 | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0 | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0 | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY | CPUMCTX_WSF_IBPB_EXIT, %1
%endmacro

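; Instantiate the worker for every combination of XCR0 handling (2) and the four
; world-switcher mitigation flags (2^4), i.e. 32 variations per SSE flavor below.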
hmR0VmxStartVmSseTemplate 0,,RT_NOTHING
%ifdef VBOX_WITH_KERNEL_USING_XMM
hmR0VmxStartVmSseTemplate 1,_SseManual,RT_NOTHING
hmR0VmxStartVmSseTemplate 2,_SseXSave,RT_NOTHING
%endif
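; (Judging by the pfnStartVM comment above, a ring-0 caller is expected to select
; one of the pre-instantiated variants once, via the VMXVMCSINFO::pfnStartVM
; pointer, so the mitigation and XCR0 decisions are not re-tested on every world switch.)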


;;
; Clears the MDS buffers using VERW.
ALIGNCODE(16)
BEGINPROC hmR0MdsClear
        SEH64_END_PROLOGUE
        sub     xSP, xCB
        mov     [xSP], ds
        verw    [xSP]
        add     xSP, xCB
        ret
ENDPROC hmR0MdsClear
1152 |
|
---|
1153 |
|
---|
1154 | ;;
|
---|
1155 | ; hmR0SvmVmRun template
|
---|
1156 | ;
|
---|
1157 | ; @param 1 The suffix of the variation.
|
---|
1158 | ; @param 2 fLoadSaveGuestXcr0 value
|
---|
1159 | ; @param 3 The CPUMCTX_WSF_IBPB_ENTRY + CPUMCTX_WSF_IBPB_EXIT value.
|
---|
1160 | ; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
|
---|
1161 | ; Drivers shouldn't use AVX registers without saving+loading:
|
---|
1162 | ; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
|
---|
1163 | ; However the compiler docs have different idea:
|
---|
1164 | ; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
|
---|
1165 | ; We'll go with the former for now.
|
---|
1166 | ;
|
---|
1167 | %macro hmR0SvmVmRunTemplate 4
|
---|
1168 |
|
---|
1169 | ;;
|
---|
1170 | ; Prepares for and executes VMRUN (32-bit and 64-bit guests).
|
---|
1171 | ;
|
---|
1172 | ; @returns VBox status code
|
---|
1173 | ; @param pVM msc:rcx,gcc:rdi The cross context VM structure (unused).
|
---|
1174 | ; @param pVCpu msc:rdx,gcc:rsi The cross context virtual CPU structure of the calling EMT.
|
---|
1175 | ; @param HCPhysVmcb msc:r8, gcc:rdx Physical address of guest VMCB.
|
---|
1176 | ;
|
---|
1177 | ALIGNCODE(64) ; This + immediate optimizations causes serious trouble for yasm and the SEH frames: prologue -28 bytes, must be <256
|
---|
1178 | ; So the SEH64_XXX stuff is currently not operational.
|
---|
1179 | BEGINPROC RT_CONCAT(hmR0SvmVmRun,%1)
|
---|
1180 | %ifdef VBOX_WITH_KERNEL_USING_XMM
|
---|
1181 | %if %4 = 0
|
---|
1182 | ;
|
---|
1183 | ; The non-saving variant will currently check the two SSE preconditions and pick
|
---|
1184 | ; the right variant to continue with. Later we can see if we can't manage to
|
---|
1185 | ; move these decisions into hmR0SvmUpdateVmRunFunction().
|
---|
1186 | ;
|
---|
1187 | %ifdef ASM_CALL64_MSC
|
---|
1188 | test byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
|
---|
1189 | %else
|
---|
1190 | test byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
|
---|
1191 | %endif
|
---|
1192 | jz .save_xmm_no_need
|
---|
1193 | %ifdef ASM_CALL64_MSC
|
---|
1194 | cmp dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
|
---|
1195 | %else
|
---|
1196 | cmp dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
|
---|
1197 | %endif
|
---|
1198 | je RT_CONCAT3(hmR0SvmVmRun,%1,_SseManual)
|
---|
1199 | jmp RT_CONCAT3(hmR0SvmVmRun,%1,_SseXSave)
|
---|
1200 | .save_xmm_no_need:
|
---|
1201 | %endif
|
---|
1202 | %endif
|
---|
1203 | push rbp
|
---|
1204 | SEH64_PUSH_xBP
|
---|
1205 | mov rbp, rsp
|
---|
1206 | SEH64_SET_FRAME_xBP 0
|
---|
1207 | pushf
|
---|
1208 | %assign cbFrame 30h
|
---|
1209 | %if %4 != 0
|
---|
1210 | %assign cbFrame cbFrame + 16 * 11 ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
|
---|
1211 | %endif
|
---|
1212 | %assign cbBaseFrame cbFrame
|
---|
1213 | sub rsp, cbFrame - 8h ; We subtract 8 bytes for the above pushf
|
---|
1214 | SEH64_ALLOCATE_STACK cbFrame ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.
|
---|
1215 |
|
---|
1216 | %define frm_fRFlags -008h
|
---|
1217 | %define frm_uHostXcr0 -018h ; 128-bit
|
---|
1218 | ;%define frm_fNoRestoreXcr0 -020h ; Non-zero if we should skip XCR0 restoring.
|
---|
1219 | %define frm_pGstCtx -028h ; Where we stash guest CPU context for use after the vmrun.
|
---|
1220 | %define frm_HCPhysVmcbHost -030h ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
|
---|
1221 | %if %4 != 0
|
---|
1222 | %define frm_saved_xmm6 -040h
|
---|
1223 | %define frm_saved_xmm7 -050h
|
---|
1224 | %define frm_saved_xmm8 -060h
|
---|
1225 | %define frm_saved_xmm9 -070h
|
---|
1226 | %define frm_saved_xmm10 -080h
|
---|
1227 | %define frm_saved_xmm11 -090h
|
---|
1228 | %define frm_saved_xmm12 -0a0h
|
---|
1229 | %define frm_saved_xmm13 -0b0h
|
---|
1230 | %define frm_saved_xmm14 -0c0h
|
---|
1231 | %define frm_saved_xmm15 -0d0h
|
---|
1232 | %define frm_saved_mxcsr -0e0h
|
---|
1233 | %endif
|
---|

        ; Manual save and restore:
        ;  - General purpose registers except RIP, RSP, RAX
        ;
        ; Trashed:
        ;  - CR2 (we don't care)
        ;  - LDTR (reset to 0)
        ;  - DRx (presumably not changed at all)
        ;  - DR7 (reset to 0x400)

        ; Save all general purpose host registers.
        PUSH_CALLEE_PRESERVED_REGISTERS
        SEH64_END_PROLOGUE
 %if cbFrame != (cbBaseFrame + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
  %error Bad cbFrame value
 %endif

        ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu.  (rdx & rcx will soon be trashed.)
 %ifdef ASM_CALL64_GCC
        mov     r8, rdx                         ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
 %else
        mov     rsi, rdx                        ; Put pVCpu in rsi like on GCC as rdx is trashed below.
        ;mov     rdi, rcx                        ; Put pVM in rdi like on GCC as rcx is trashed below.
 %endif
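        ; From this point on both calling conventions use the same register
        ; layout: rsi = pVCpu and r8 = HCPhysVmcb, which is what the code below
        ; assumes regardless of how the function was entered.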

 %ifdef VBOX_STRICT
        ;
        ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
        ;
        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
        mov     eax, VERR_SVM_VMRUN_PRECOND_0
        jne     .failure_return

        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher]
        and     eax, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT
        cmp     eax, %3
        mov     eax, VERR_SVM_VMRUN_PRECOND_1
        jne     .failure_return

  %ifdef VBOX_WITH_KERNEL_USING_XMM
        mov     eax, VERR_SVM_VMRUN_PRECOND_2
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %if %4 = 0
        jnz     .failure_return
   %else
        jz      .failure_return

        mov     eax, VERR_SVM_VMRUN_PRECOND_3
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
    %if %4 = 1
        jne     .failure_return
    %elif %4 = 2
        je      .failure_return
    %else
     %error Invalid template parameter 4.
    %endif
   %endif
  %endif
 %endif ; VBOX_STRICT

 %if %4 != 0
        ; Save the non-volatile SSE host register state.
        movdqa  [rbp + frm_saved_xmm6 ], xmm6
        movdqa  [rbp + frm_saved_xmm7 ], xmm7
        movdqa  [rbp + frm_saved_xmm8 ], xmm8
        movdqa  [rbp + frm_saved_xmm9 ], xmm9
        movdqa  [rbp + frm_saved_xmm10], xmm10
        movdqa  [rbp + frm_saved_xmm11], xmm11
        movdqa  [rbp + frm_saved_xmm12], xmm12
        movdqa  [rbp + frm_saved_xmm13], xmm13
        movdqa  [rbp + frm_saved_xmm14], xmm14
        movdqa  [rbp + frm_saved_xmm15], xmm15
        stmxcsr [rbp + frm_saved_mxcsr]

        ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
        mov     rcx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        movdqa  xmm0,  [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
        movdqa  xmm1,  [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
        movdqa  xmm2,  [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
        movdqa  xmm3,  [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
        movdqa  xmm4,  [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
        movdqa  xmm5,  [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
        movdqa  xmm6,  [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7,  [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8,  [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9,  [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
        ldmxcsr [rcx + X86FXSTATE.MXCSR]
  %elif %4 = 2 ; use xrstor
        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xrstor  [rcx]
  %else
   %error Invalid template parameter 4.
  %endif
 %endif
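        ; (xrstor takes its requested-feature bitmap in edx:eax; here the high
        ; half is zero and the low half is the guest's fXStateMask reduced to
        ; the volatile components, so only those are loaded from the XSAVE area.)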

 %if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
        xor     ecx, ecx
        xgetbv                                  ; save the host XCR0 on the stack
        mov     [rbp + frm_uHostXcr0 + 8], rdx
        mov     [rbp + frm_uHostXcr0], rax

        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr]       ; load the guest XCR0
        mov     edx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                        ; paranoia
        xsetbv
 %endif
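        ; (xgetbv/xsetbv address the extended control register selected by ecx;
        ; index 0 is XCR0 and the 64-bit value travels split across edx:eax,
        ; which is why CPUMCTX.aXcr[0] is read as two dwords here, e.g.:
        ;       xor     ecx, ecx                ; select XCR0
        ;       xgetbv                          ; edx:eax = XCR0
        ; )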

        ; Save host fs, gs, sysenter msr etc.
        mov     rax, [rsi + VMCPU.hm + HMCPU.u + HMCPUSVM.HCPhysVmcbHost]
        mov     qword [rbp + frm_HCPhysVmcbHost], rax   ; save for the vmload after vmrun
        lea     rsi, [rsi + VMCPU.cpum.GstCtx]
        mov     qword [rbp + frm_pGstCtx], rsi
        vmsave
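        ; (vmsave stores FS, GS, TR and LDTR - including their hidden parts -
        ; plus KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs into
        ; the VMCB addressed by rax, which here is the host-state VMCB.)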

 %if %3 & CPUMCTX_WSF_IBPB_ENTRY
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
 %endif
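        ; (Writing the IBPB bit of IA32_PRED_CMD erects an indirect branch
        ; prediction barrier, so branch history established in the host cannot
        ; steer speculative execution inside the guest we are about to run.)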

        ; Setup rax for VMLOAD.
        mov     rax, r8                         ; HCPhysVmcb (64-bit physical address)

        ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
        mov     rbx, qword [rsi + CPUMCTX.ebx]
        mov     rcx, qword [rsi + CPUMCTX.ecx]
        mov     rdx, qword [rsi + CPUMCTX.edx]
        mov     rdi, qword [rsi + CPUMCTX.edi]
        mov     rbp, qword [rsi + CPUMCTX.ebp]
        mov     r8,  qword [rsi + CPUMCTX.r8]
        mov     r9,  qword [rsi + CPUMCTX.r9]
        mov     r10, qword [rsi + CPUMCTX.r10]
        mov     r11, qword [rsi + CPUMCTX.r11]
        mov     r12, qword [rsi + CPUMCTX.r12]
        mov     r13, qword [rsi + CPUMCTX.r13]
        mov     r14, qword [rsi + CPUMCTX.r14]
        mov     r15, qword [rsi + CPUMCTX.r15]
        mov     rsi, qword [rsi + CPUMCTX.esi]

        ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
        clgi
        sti
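        ; (While GIF=0 both maskable interrupts and NMIs are merely held pending;
        ; vmrun sets GIF=1 on guest entry and #VMEXIT clears it again. Assuming
        ; the C code configures V_INTR_MASKING and the INTR intercept in the
        ; VMCB, the host RFLAGS.IF set by the sti above is what lets a physical
        ; interrupt arriving in guest mode force the #VMEXIT.)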

        ; Load guest FS, GS, Sysenter MSRs etc.
        vmload

        ; Run the VM.
        vmrun
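        ; (vmrun takes rax = physical address of the guest VMCB: it stashes a
        ; minimal set of host state in the area designated via MSR VM_HSAVE_PA,
        ; loads the guest state from the VMCB, and executes the guest until a
        ; #VMEXIT writes the guest state back and execution resumes here.)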

        ; Save guest fs, gs, sysenter msr etc.
        vmsave

        ; Load host fs, gs, sysenter msr etc.
        mov     rax, [rsp + cbFrame + frm_HCPhysVmcbHost]   ; load HCPhysVmcbHost (rbp is not operational yet, thus rsp)
        vmload

        ; Set the global interrupt flag again, but execute cli to make sure IF=0.
        cli
        stgi

        ; Fetch the guest-context pointer (stashed above) and save the guest GPRs (sans RSP and RAX).
        mov     rax, [rsp + cbFrame + frm_pGstCtx]          ; (rbp still not operational)

        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8],  r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9],  r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]
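        ; (Interleaved with the saving above, every volatile register is
        ; overwritten with SPECTRE_FILLER - an invalid, non-canonical address -
        ; and the callee-preserved registers are reloaded with host values, so
        ; no guest-controlled register content survives into host code where it
        ; could feed speculative-execution gadgets.)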

 %if %4 != 0
        ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
        mov     r8, rax
 %endif

 %if %3 & CPUMCTX_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
 %endif

 %if %2 != 0
        ; Restore the host xcr0.
        xor     ecx, ecx
        mov     rdx, [rbp + frm_uHostXcr0 + 8]
        mov     rax, [rbp + frm_uHostXcr0]
        xsetbv
 %endif

 %if %4 != 0
        ; Save the guest SSE state related to non-volatile and volatile SSE registers.
        mov     rcx, [r8 + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        stmxcsr [rcx + X86FXSTATE.MXCSR]
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
  %elif %4 = 2 ; use xsave
        mov     eax, [r8 + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xsave   [rcx]
  %else
   %error Invalid template parameter 4.
  %endif
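        ; (Like the xrstor on entry, xsave consults edx:eax as its feature
        ; bitmap, so only the volatile guest XSAVE components are written back
        ; here; the remaining components are presumably handled by the regular
        ; FPU state switching code elsewhere.)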

        ; Restore the host non-volatile SSE register state.
        ldmxcsr [rbp + frm_saved_mxcsr]
        movdqa  xmm6,  [rbp + frm_saved_xmm6 ]
        movdqa  xmm7,  [rbp + frm_saved_xmm7 ]
        movdqa  xmm8,  [rbp + frm_saved_xmm8 ]
        movdqa  xmm9,  [rbp + frm_saved_xmm9 ]
        movdqa  xmm10, [rbp + frm_saved_xmm10]
        movdqa  xmm11, [rbp + frm_saved_xmm11]
        movdqa  xmm12, [rbp + frm_saved_xmm12]
        movdqa  xmm13, [rbp + frm_saved_xmm13]
        movdqa  xmm14, [rbp + frm_saved_xmm14]
        movdqa  xmm15, [rbp + frm_saved_xmm15]
 %endif ; %4 != 0

        ; Epilogue (assumes we restored the callee-preserved registers above while saving the guest GPRs).
        mov     eax, VINF_SUCCESS
        add     rsp, cbFrame - 8h
        popf
        leave
        ret

 %ifdef VBOX_STRICT
        ; Precondition checks failed.
.failure_return:
        POP_CALLEE_PRESERVED_REGISTERS
  %if cbFrame != cbBaseFrame
   %error Bad frame size value: cbFrame
  %endif
        add     rsp, cbFrame - 8h
        popf
        leave
        ret
 %endif

 %undef frm_fRFlags
 %undef frm_uHostXcr0
 %undef frm_fNoRestoreXcr0
 %undef frm_pGstCtx
 %undef frm_HCPhysVmcbHost
 %undef cbFrame
ENDPROC RT_CONCAT(hmR0SvmVmRun,%1)

%endmacro ; hmR0SvmVmRunTemplate

;
; Instantiate the various hmR0SvmVmRun variations.
;
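; Template parameters, for reference (derived from the template body above):
;   %1  Function name suffix.
;   %2  fLoadSaveGuestXcr0 value: 1 = swap XCR0 around vmrun, 0 = leave it alone.
;   %3  CPUMCTX_WSF_IBPB_ENTRY / CPUMCTX_WSF_IBPB_EXIT mask selecting where IBPB barriers are erected.
;   %4  SSE handling: 0 = none, 1 = manual save/load, 2 = xsave/xrstor.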
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit,           0, 0,                                              0
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit,           1, 0,                                              0
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit,           0, CPUMCTX_WSF_IBPB_ENTRY,                         0
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit,           1, CPUMCTX_WSF_IBPB_ENTRY,                         0
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit,           0, CPUMCTX_WSF_IBPB_EXIT,                          0
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit,           1, CPUMCTX_WSF_IBPB_EXIT,                          0
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit,           0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 0
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit,           1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 0
%ifdef VBOX_WITH_KERNEL_USING_XMM
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0,                                              1
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0,                                              1
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, CPUMCTX_WSF_IBPB_ENTRY,                         1
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, CPUMCTX_WSF_IBPB_ENTRY,                         1
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, CPUMCTX_WSF_IBPB_EXIT,                          1
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, CPUMCTX_WSF_IBPB_EXIT,                          1
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 1
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 1

hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  0, 0,                                              2
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  1, 0,                                              2
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  0, CPUMCTX_WSF_IBPB_ENTRY,                         2
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  1, CPUMCTX_WSF_IBPB_ENTRY,                         2
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  0, CPUMCTX_WSF_IBPB_EXIT,                          2
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  1, CPUMCTX_WSF_IBPB_EXIT,                          2
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 2
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 2
%endif
