
source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@87751

Last change on this file since 87751 was 87741, checked in by vboxsync, 4 years ago

VMM/HMR0A.asm: Put a size on hmR0VmxStartVmHostRIP so perf can associate subsequent instructions with it. bugref:9937

; $Id: HMR0A.asm 87741 2021-02-12 16:37:50Z vboxsync $
;; @file
; HM - Ring-0 VMX, SVM world-switch and helper routines.
;

;
; Copyright (C) 2006-2020 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
;*********************************************************************************************************************************
;*  Header Files                                                                                                                 *
;*********************************************************************************************************************************
;%define RT_ASM_WITH_SEH64      - trouble with SEH, alignment and (probably) 2nd pass optimizations.
%define RT_ASM_WITH_SEH64_ALT   ; Use asmdefs.mac hackery for manually emitting unwind info.
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/gvm.mac"
%include "iprt/x86.mac"
%include "HMInternal.mac"

%ifndef RT_ARCH_AMD64
 %error AMD64 only.
%endif


;*********************************************************************************************************************************
;*  Defined Constants And Macros                                                                                                 *
;*********************************************************************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160

;; Spectre filler for 64-bit mode.
; Chosen to be an invalid address (also with 5 level paging).
%define SPECTRE_FILLER          0x02204204207fffff

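; Note on the filler value: it is non-canonical under both 4-level paging
; (bits 63:47 must all be identical) and 5-level paging (bits 63:56 must all
; be identical), so a stray or speculative dereference of a register scrubbed
; with it can never hit a mapped address.
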
;;
; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation.
;
; @note This is normally done by hmR0VmxExportHostSegmentRegs and VMXRestoreHostState,
;       so much of this is untested code.
; @{
%define VMX_SKIP_GDTR
%define VMX_SKIP_TR
%define VBOX_SKIP_RESTORE_SEG
%ifdef RT_OS_DARWIN
 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
 ; risk loading a stale LDT value or something invalid.
 %define HM_64_BIT_USE_NULL_SEL
 ; Darwin (Mavericks) uses IDTR limit to store the CPU number so we need to always restore it.
 ; See @bugref{6875}.
 %undef VMX_SKIP_IDTR
%else
 %define VMX_SKIP_IDTR
%endif
;; @}

;; @def CALLEE_PRESERVED_REGISTER_COUNT
; Number of registers pushed by PUSH_CALLEE_PRESERVED_REGISTERS
%ifdef ASM_CALL64_GCC
 %define CALLEE_PRESERVED_REGISTER_COUNT 5
%else
 %define CALLEE_PRESERVED_REGISTER_COUNT 7
%endif

;; @def PUSH_CALLEE_PRESERVED_REGISTERS
; Macro for pushing all GPRs we must preserve for the caller.
%macro PUSH_CALLEE_PRESERVED_REGISTERS 0
        push    r15
        SEH64_PUSH_GREG r15
 %assign cbFrame        cbFrame + 8
 %assign frm_saved_r15  -cbFrame

        push    r14
        SEH64_PUSH_GREG r14
 %assign cbFrame        cbFrame + 8
 %assign frm_saved_r14  -cbFrame

        push    r13
        SEH64_PUSH_GREG r13
 %assign cbFrame        cbFrame + 8
 %assign frm_saved_r13  -cbFrame

        push    r12
        SEH64_PUSH_GREG r12
 %assign cbFrame        cbFrame + 8
 %assign frm_saved_r12  -cbFrame

        push    rbx
        SEH64_PUSH_GREG rbx
 %assign cbFrame        cbFrame + 8
 %assign frm_saved_rbx  -cbFrame

 %ifdef ASM_CALL64_MSC
        push    rsi
        SEH64_PUSH_GREG rsi
  %assign cbFrame        cbFrame + 8
  %assign frm_saved_rsi  -cbFrame

        push    rdi
        SEH64_PUSH_GREG rdi
  %assign cbFrame        cbFrame + 8
  %assign frm_saved_rdi  -cbFrame
 %endif
%endmacro

;; @def POP_CALLEE_PRESERVED_REGISTERS
; Counterpart to PUSH_CALLEE_PRESERVED_REGISTERS for use in the epilogue.
%macro POP_CALLEE_PRESERVED_REGISTERS 0
 %ifdef ASM_CALL64_MSC
        pop     rdi
  %assign cbFrame        cbFrame - 8
  %undef  frm_saved_rdi

        pop     rsi
  %assign cbFrame        cbFrame - 8
  %undef  frm_saved_rsi
 %endif
        pop     rbx
 %assign cbFrame        cbFrame - 8
 %undef  frm_saved_rbx

        pop     r12
 %assign cbFrame        cbFrame - 8
 %undef  frm_saved_r12

        pop     r13
 %assign cbFrame        cbFrame - 8
 %undef  frm_saved_r13

        pop     r14
 %assign cbFrame        cbFrame - 8
 %undef  frm_saved_r14

        pop     r15
 %assign cbFrame        cbFrame - 8
 %undef  frm_saved_r15
%endmacro
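
; Note on the cbFrame bookkeeping in the two macros above: the caller must
; have a numeric cbFrame %assign'ed before expanding them.  Each push grows
; cbFrame by 8 and records its slot as a negative offset from the frame base,
; so a saved register stays reachable as e.g. [rbp + frm_saved_rbx] (or as
; [rsp + cbFrame + frm_saved_rbx] while rbp is unusable), and the pops undo
; the bookkeeping symmetrically.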


;; @def PUSH_RELEVANT_SEGMENT_REGISTERS
; Macro saving all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.
; @clobbers rax, rdx, rcx
%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef VBOX_SKIP_RESTORE_SEG
  %error untested code. probably does not work any more!
  %ifndef HM_64_BIT_USE_NULL_SEL
        mov     %2, es
        push    %1
        mov     %2, ds
        push    %1
  %endif

        ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode,
        ; Solaris OTOH doesn't and we must save it.
        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        push    rdx
        push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
        push    fs
  %endif

        ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel.
        ; The same happens on exit.
        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        push    rdx
        push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
        push    gs
  %endif
 %endif ; !VBOX_SKIP_RESTORE_SEG
%endmacro ; PUSH_RELEVANT_SEGMENT_REGISTERS

;; @def POP_RELEVANT_SEGMENT_REGISTERS
; Macro restoring all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.
; @clobbers rax, rdx, rcx
%macro POP_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef VBOX_SKIP_RESTORE_SEG
  %error untested code. probably does not work any more!
        ; Note: do not step through this code with a debugger!
  %ifndef HM_64_BIT_USE_NULL_SEL
        xor     eax, eax
        mov     ds, ax
        mov     es, ax
        mov     fs, ax
        mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     gs
  %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     fs
  %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr
        ; Now it's safe to step again

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     %1
        mov     ds, %2
        pop     %1
        mov     es, %2
  %endif
 %endif ; !VBOX_SKIP_RESTORE_SEG
%endmacro ; POP_RELEVANT_SEGMENT_REGISTERS


;*********************************************************************************************************************************
;*  External Symbols                                                                                                             *
;*********************************************************************************************************************************
%ifdef VBOX_WITH_KERNEL_USING_XMM
extern NAME(CPUMIsGuestFPUStateActive)
%endif


BEGINCODE


;;
; Used on platforms with poor inline assembly support to retrieve all the
; info from the CPU and put it in the @a pRestoreHost structure.
;
; @returns VBox status code
; @param   pRestoreHost   msc: rcx  gcc: rdi    Pointer to the RestoreHost struct.
; @param   fHaveFsGsBase  msc: dl   gcc: sil    Whether we can use rdfsbase or not.
;
ALIGNCODE(64)
BEGINPROC hmR0VmxExportHostSegmentRegsAsmHlp
%ifdef ASM_CALL64_MSC
 %define pRestoreHost rcx
%elifdef ASM_CALL64_GCC
 %define pRestoreHost rdi
%else
 %error Unknown calling convention.
%endif
        SEH64_END_PROLOGUE

        ; Start with the FS and GS base so we can trash DL/SIL.
%ifdef ASM_CALL64_MSC
        or      dl, dl
%else
        or      sil, sil
%endif
        jz      .use_rdmsr_for_fs_and_gs_base
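        ; (rdfsbase/rdgsbase are only usable when the CPU has FSGSBASE and the
        ; host OS has set CR4.FSGSBASE, which is what the caller reports via
        ; the fHaveFsGsBase parameter tested above; otherwise they raise #UD,
        ; hence the rdmsr fallback below.)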
        rdfsbase rax
        mov     [pRestoreHost + VMXRESTOREHOST.uHostFSBase], rax
        rdgsbase rax
        mov     [pRestoreHost + VMXRESTOREHOST.uHostGSBase], rax
.done_fs_and_gs_base:

        ; TR, GDTR and IDTR
        str     [pRestoreHost + VMXRESTOREHOST.uHostSelTR]
        sgdt    [pRestoreHost + VMXRESTOREHOST.HostGdtr]
        sidt    [pRestoreHost + VMXRESTOREHOST.HostIdtr]

        ; Segment registers.
        xor     eax, eax
        mov     eax, cs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelCS], ax

        mov     eax, ss
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelSS], ax

        mov     eax, gs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelGS], ax

        mov     eax, fs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelFS], ax

        mov     eax, es
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelES], ax

        mov     eax, ds
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelDS], ax

        ret
ALIGNCODE(16)
.use_rdmsr_for_fs_and_gs_base:
        mov     r8, pRestoreHost        ; Keep the pointer in r8: rdmsr needs ecx and trashes eax & edx
                                        ; (and on MSC the pointer arrives in rcx).

        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        shl     rdx, 32
        or      rdx, rax                ; rdmsr returns the value split in edx:eax; combine the 64-bit base in rdx.
        mov     [r8 + VMXRESTOREHOST.uHostFSBase], rdx

        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        shl     rdx, 32
        or      rdx, rax
        mov     [r8 + VMXRESTOREHOST.uHostGSBase], rdx

%ifdef ASM_CALL64_MSC
        mov     pRestoreHost, r8
%endif
        jmp     .done_fs_and_gs_base
%undef pRestoreHost
ENDPROC hmR0VmxExportHostSegmentRegsAsmHlp


;;
; Restores host-state fields.
;
; @returns VBox status code
; @param   f32RestoreHost  msc: ecx  gcc: edi   RestoreHost flags.
; @param   pRestoreHost    msc: rdx  gcc: rsi   Pointer to the RestoreHost struct.
;
ALIGNCODE(64)
BEGINPROC VMXRestoreHostState
%ifndef ASM_CALL64_GCC
        ; Use GCC's input registers since we'll be needing both rcx and rdx further
        ; down with the wrmsr instruction.  Use the R10 and R11 registers for saving
        ; RDI and RSI since MSC expects the latter two to be preserved.
        mov     r10, rdi
        mov     r11, rsi
        mov     rdi, rcx
        mov     rsi, rdx
%endif
        SEH64_END_PROLOGUE

.restore_gdtr:
        test    edi, VMX_RESTORE_HOST_GDTR
        jz      .restore_idtr
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.restore_idtr:
        test    edi, VMX_RESTORE_HOST_IDTR
        jz      .restore_ds
        lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.restore_ds:
        test    edi, VMX_RESTORE_HOST_SEL_DS
        jz      .restore_es
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
        mov     ds, eax

.restore_es:
        test    edi, VMX_RESTORE_HOST_SEL_ES
        jz      .restore_tr
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
        mov     es, eax

.restore_tr:
        test    edi, VMX_RESTORE_HOST_SEL_TR
        jz      .restore_fs
        ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
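        ; (ltr requires the referenced descriptor to be an available TSS and
        ; marks it busy; after a VM exit the GDT entry is still typed busy, so
        ; reloading it without clearing the busy bit - bit 41 of the descriptor,
        ; i.e. bit 9 of the dword at offset 4 - would raise #GP.)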
        mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
        mov     ax, dx
        and     eax, X86_SEL_MASK_OFF_RPL                   ; mask away TI and RPL bits leaving only the descriptor offset
        test    edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_or_need_writable
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     dword [rax + 4], ~RT_BIT(9)                 ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx

.restore_fs:
        ;
        ; When restoring the selector values for FS and GS, we'll temporarily trash
        ; the base address (at least the high 32 bits, but quite possibly the
        ; whole base address), the wrmsr will restore it correctly. (VT-x actually
        ; restores the base correctly when leaving guest mode, but not the selector
        ; value, so there is little problem with interrupts being enabled prior to
        ; this restore job.)
        ; We'll disable ints once for both FS and GS as that's probably faster.
        ;
        test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
        jz      .restore_success
        pushfq
        cli                                                 ; (see above)

        test    edi, VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE
        jz      .restore_fs_using_wrmsr

.restore_fs_using_wrfsbase:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrgsbase
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostFSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        wrfsbase rax

.restore_gs_using_wrgsbase:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostGSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        wrgsbase rax

.restore_flags:
        popfq

.restore_success:
        mov     eax, VINF_SUCCESS
%ifndef ASM_CALL64_GCC
        ; Restore RDI and RSI on MSC.
        mov     rdi, r10
        mov     rsi, r11
%endif
        ret

ALIGNCODE(8)
.gdt_readonly_or_need_writable:
        test    edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_need_writable
.gdt_readonly:
        mov     rcx, cr0
        mov     r9, rcx
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     rcx, ~X86_CR0_WP
        mov     cr0, rcx
        and     dword [rax + 4], ~RT_BIT(9)                 ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx
        mov     cr0, r9
        jmp     .restore_fs

ALIGNCODE(8)
.gdt_readonly_need_writable:
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2]    ; xAX <- descriptor offset + GDTR.pGdtRw
        and     dword [rax + 4], ~RT_BIT(9)                 ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        lgdt    [rsi + VMXRESTOREHOST.HostGdtrRw]
        ltr     dx
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]             ; load the original GDT
        jmp     .restore_fs

ALIGNCODE(8)
.restore_fs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrmsr
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]       ; uHostFSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h]  ; uHostFSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr

.restore_gs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]       ; uHostGSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h]  ; uHostGSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr
        jmp     .restore_flags
ENDPROC VMXRestoreHostState


;;
; Clears the MDS buffers using VERW.
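; (On CPUs that enumerate MD_CLEAR, verw gains the side effect of overwriting
; the microarchitectural fill, load-port and store buffers; any valid readable
; selector operand will do, so DS is spilled to the stack and used.)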
ALIGNCODE(16)
BEGINPROC hmR0MdsClear
        SEH64_END_PROLOGUE
        sub     xSP, xCB
        mov     [xSP], ds
        verw    [xSP]
        add     xSP, xCB
        ret
ENDPROC hmR0MdsClear


;;
; Dispatches an NMI to the host.
;
ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
        ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
        SEH64_END_PROLOGUE
        int     2
        ret
ENDPROC VMXDispatchHostNmi


;;
; Common restore logic for success and error paths.  We duplicate this because we
; don't want to waste writing the VINF_SUCCESS return value to the stack in the
; regular code path.
;
; @param 1 Zero if regular return, non-zero if error return.  Controls label emission.
; @param 2 fLoadSaveGuestXcr0 value
; @param 3 The (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY) + HM_WSF_IBPB_EXIT value.
;          The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;
; @note Important that this does not modify cbFrame or rsp.
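; @note The save/scrub pattern below stores each guest GPR into CPUMCTX and
;       immediately refills the register with SPECTRE_FILLER so no
;       guest-controlled value lingers in a host register where it could feed
;       a speculative-execution gadget after the world switch.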
%macro RESTORE_STATE_VMX 4
        ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
        lidt    [rsp + cbFrame + frm_saved_idtr]
 %endif
 %ifndef VMX_SKIP_GDTR
        lgdt    [rsp + cbFrame + frm_saved_gdtr]
 %endif

        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
        mov     [rsp + cbFrame + frm_guest_rax], rax
        mov     rax, [rsp + cbFrame + frm_pGstCtx]

        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]    ; re-establish the frame pointer as early as possible.
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, [rbp + frm_guest_rax]
        mov     qword [rax + CPUMCTX.eax], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8], r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9], r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]

        mov     rdx, cr2
        mov     qword [rax + CPUMCTX.cr2], rdx
        mov     rdx, rcx

 %if %4 != 0
        ; Save the context pointer in r8 for the SSE save/restore.
        mov     r8, rax
 %endif

 %if %3 & HM_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
  %if %1 = 0 ; Skip this in failure branch (=> guru)
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
  %endif
 %endif

 %ifndef VMX_SKIP_TR
        ; Restore TSS selector; must mark it as not busy before using ltr!
        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
  %ifndef VMX_SKIP_GDTR
        lgdt    [rbp + frm_saved_gdtr]
  %endif
        movzx   eax, word [rbp + frm_saved_tr]
        mov     ecx, eax
        and     eax, X86_SEL_MASK_OFF_RPL               ; mask away TI and RPL bits leaving only the descriptor offset
        add     rax, [rbp + frm_saved_gdtr + 2]         ; eax <- GDTR.address + descriptor offset
        and     dword [rax + 4], ~RT_BIT(9)             ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     cx
 %endif
        movzx   edx, word [rbp + frm_saved_ldtr]
        test    edx, edx
        jz      %%skip_ldt_write
        lldt    dx
%%skip_ldt_write:

 %if %1 != 0
.return_after_vmwrite_error:
 %endif
        ; Restore segment registers.
        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.

 %if %2 != 0
        ; Restore the host XCR0.
        xor     ecx, ecx
        mov     eax, [rbp + frm_uHostXcr0]
        mov     edx, [rbp + frm_uHostXcr0 + 4]
        xsetbv
 %endif
%endmacro ; RESTORE_STATE_VMX


;;
; hmR0VmxStartVm template
;
; @param 1 The suffix of the variation.
; @param 2 fLoadSaveGuestXcr0 value
; @param 3 The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;          Drivers shouldn't use AVX registers without saving+loading:
;              https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;          However the compiler docs have a different idea:
;              https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;          We'll go with the former for now.
;
%macro hmR0VmxStartVmTemplate 4

;;
; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
;
; @returns VBox status code
; @param   pVmcsInfo  msc:rcx, gcc:rdi  Pointer to the VMCS info (for cached host RIP and RSP).
; @param   pVCpu      msc:rdx, gcc:rsi  The cross context virtual CPU structure of the calling EMT.
; @param   fResume    msc:r8l, gcc:dl   Whether to use vmlaunch/vmresume.
;
ALIGNCODE(64)
BEGINPROC RT_CONCAT(hmR0VmxStartVm,%1)
 %ifdef VBOX_WITH_KERNEL_USING_XMM
  %if %4 = 0
        ;
        ; The non-saving variant will currently check the two SSE preconditions and pick
        ; the right variant to continue with.  Later we can see if we can't manage to
        ; move these decisions into hmR0VmxUpdateStartVmFunction().
        ;
   %ifdef ASM_CALL64_MSC
        test    byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %else
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %endif
        jz      .save_xmm_no_need
   %ifdef ASM_CALL64_MSC
        cmp     dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %else
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %endif
        je      RT_CONCAT3(hmR0VmxStartVm,%1,_SseManual)
        jmp     RT_CONCAT3(hmR0VmxStartVm,%1,_SseXSave)
.save_xmm_no_need:
  %endif
 %endif
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
        pushf
        cli

 %define frm_fRFlags         -008h
 %define frm_pGstCtx         -010h              ; Where we stash guest CPU context for use after the vmrun.
 %define frm_uHostXcr0       -020h              ; 128-bit
 %define frm_saved_gdtr      -02ah              ; 16+64:  Only used when VMX_SKIP_GDTR isn't defined
 %define frm_saved_tr        -02ch              ; 16-bit: Only used when VMX_SKIP_TR isn't defined
 %define frm_MDS_seg         -030h              ; 16-bit: Temporary storage for the MDS flushing.
 %define frm_saved_idtr      -03ah              ; 16+64:  Only used when VMX_SKIP_IDTR isn't defined
 %define frm_saved_ldtr      -03ch              ; 16-bit: always saved.
 %define frm_rcError         -040h              ; 32-bit: Error status code (not used in the success path)
 %define frm_guest_rax       -048h              ; Temporary storage slot for guest RAX.
 %if %4 = 0
  %assign cbFrame            048h
 %else
  %define frm_saved_xmm6     -050h
  %define frm_saved_xmm7     -060h
  %define frm_saved_xmm8     -070h
  %define frm_saved_xmm9     -080h
  %define frm_saved_xmm10    -090h
  %define frm_saved_xmm11    -0a0h
  %define frm_saved_xmm12    -0b0h
  %define frm_saved_xmm13    -0c0h
  %define frm_saved_xmm14    -0d0h
  %define frm_saved_xmm15    -0e0h
  %define frm_saved_mxcsr    -0f0h
  %assign cbFrame            0f0h
 %endif
 %assign cbBaseFrame         cbFrame
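        ; The frm_xxx values above are byte offsets relative to rbp, which the
        ; prologue left pointing at the frame base; e.g. the RFLAGS saved by
        ; the pushf sits at [rbp - 8] and, when SSE state is handled, the XMM
        ; save area occupies the 16-byte slots from frm_saved_xmm6 down to
        ; frm_saved_mxcsr.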
        sub     rsp, cbFrame - 8h
        SEH64_ALLOCATE_STACK cbFrame

        ; Save all general purpose host registers.
        PUSH_CALLEE_PRESERVED_REGISTERS
        ;PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax - currently broken
        SEH64_END_PROLOGUE

        ;
        ; Unify the input parameter registers: r9=pVmcsInfo, rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
        ;
 %ifdef ASM_CALL64_GCC
        mov     r9, rdi                 ; pVmcsInfo
        mov     ebx, edx                ; fResume
 %else
        mov     r9, rcx                 ; pVmcsInfo
        mov     rsi, rdx                ; pVCpu
        mov     ebx, r8d                ; fResume
 %endif
        lea     rdi, [rsi + VMCPU.cpum.GstCtx]
        mov     [rbp + frm_pGstCtx], rdi

 %ifdef VBOX_STRICT
        ;
        ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
        ;
        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
        mov     eax, VERR_VMX_STARTVM_PRECOND_0
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT
        cmp     eax, %3
        mov     eax, VERR_VMX_STARTVM_PRECOND_1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

  %ifdef VBOX_WITH_KERNEL_USING_XMM
        mov     eax, VERR_VMX_STARTVM_PRECOND_2
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %if %4 = 0
        jnz     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
   %else
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

        mov     eax, VERR_VMX_STARTVM_PRECOND_3
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
    %if %4 = 1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
    %elif %4 = 2
        je      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
    %else
     %error Invalid template parameter 4.
    %endif
   %endif
  %endif
 %endif ; VBOX_STRICT

 %if %4 != 0
        ; Save the non-volatile SSE host register state.
        movdqa  [rbp + frm_saved_xmm6 ], xmm6
        movdqa  [rbp + frm_saved_xmm7 ], xmm7
        movdqa  [rbp + frm_saved_xmm8 ], xmm8
        movdqa  [rbp + frm_saved_xmm9 ], xmm9
        movdqa  [rbp + frm_saved_xmm10], xmm10
        movdqa  [rbp + frm_saved_xmm11], xmm11
        movdqa  [rbp + frm_saved_xmm12], xmm12
        movdqa  [rbp + frm_saved_xmm13], xmm13
        movdqa  [rbp + frm_saved_xmm14], xmm14
        movdqa  [rbp + frm_saved_xmm15], xmm15
        stmxcsr [rbp + frm_saved_mxcsr]

        ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
        mov     rcx, [rdi + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        movdqa  xmm0, [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
        movdqa  xmm1, [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
        movdqa  xmm2, [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
        movdqa  xmm3, [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
        movdqa  xmm4, [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
        movdqa  xmm5, [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
        movdqa  xmm6, [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7, [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8, [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9, [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
        ldmxcsr [rcx + X86FXSTATE.MXCSR]
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xrstor  [rcx]
  %else
   %error invalid template parameter 4
  %endif
 %endif

 %if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
        ; Note! Trashes rax, rdx and rcx.
        xor     ecx, ecx
        xgetbv                          ; save the host one on the stack
        mov     [rbp + frm_uHostXcr0], eax
        mov     [rbp + frm_uHostXcr0 + 4], edx

        mov     eax, [rdi + CPUMCTX.aXcr]       ; load the guest one
        mov     edx, [rdi + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                ; paranoia; indicate that we must restore XCR0 (popped into ecx, thus 0)
        xsetbv
 %endif

        ; Save host LDTR.
        sldt    word [rbp + frm_saved_ldtr]

 %ifndef VMX_SKIP_TR
        ; The host TR limit is reset to 0x67; save & restore it manually.
        str     word [rbp + frm_saved_tr]
 %endif

 %ifndef VMX_SKIP_GDTR
        ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
        sgdt    [rbp + frm_saved_gdtr]
 %endif
 %ifndef VMX_SKIP_IDTR
        sidt    [rbp + frm_saved_idtr]
 %endif

        ; Load CR2 if necessary (expensive as writing CR2 is a synchronizing instruction - (bird: still expensive on 10980xe)).
        mov     rcx, qword [rdi + CPUMCTX.cr2]
        mov     rdx, cr2
        cmp     rcx, rdx
        je      .skip_cr2_write
        mov     cr2, rcx
.skip_cr2_write:

        ; Set the vmlaunch/vmresume "return" host RIP and RSP values if they've changed (unlikely).
        ; The vmwrite isn't quite for free (on a 10980xe at least), thus we check if anything changed
        ; before writing here.
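        ; (hmR0VmxStartVmHostRIP is the label the CPU will resume at on VM
        ; exit; its RIP-relative address and the current RSP are cached in
        ; VMXVMCSINFO so the two vmwrites to VMX_VMCS_HOST_RIP/RSP can be
        ; skipped whenever the values are unchanged.)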
        lea     rcx, [NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) wrt rip]
        cmp     rcx, [r9 + VMXVMCSINFO.uHostRip]
        jne     .write_host_rip
.wrote_host_rip:
        cmp     rsp, [r9 + VMXVMCSINFO.uHostRsp]
        jne     .write_host_rsp
.wrote_host_rsp:

        ;
        ; Fight spectre and similar.  Trashes rax, rcx, and rdx.
        ;
 %if %3 & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY)    ; The eax:edx value is the same for the first two.
        AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
 %endif
 %if %3 & HM_WSF_IBPB_ENTRY                         ; Indirect branch barrier.
        mov     ecx, MSR_IA32_PRED_CMD
        wrmsr
 %endif
 %if %3 & HM_WSF_L1D_ENTRY                          ; Level 1 data cache flush.
        mov     ecx, MSR_IA32_FLUSH_CMD
        wrmsr
 %elif %3 & HM_WSF_MDS_ENTRY                        ; MDS flushing is included in L1D_FLUSH
        mov     word [rbp + frm_MDS_seg], ds
        verw    word [rbp + frm_MDS_seg]
 %endif

        ; Resume or start VM?
        cmp     bl, 0                   ; fResume
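        ; (The je below consumes the ZF produced here; the intervening mov
        ; instructions do not modify RFLAGS, so the test is safely done before
        ; the guest values overwrite all the registers.)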

        ; Load guest general purpose registers.
        mov     rax, qword [rdi + CPUMCTX.eax]
        mov     rbx, qword [rdi + CPUMCTX.ebx]
        mov     rcx, qword [rdi + CPUMCTX.ecx]
        mov     rdx, qword [rdi + CPUMCTX.edx]
        mov     rbp, qword [rdi + CPUMCTX.ebp]
        mov     rsi, qword [rdi + CPUMCTX.esi]
        mov     r8, qword [rdi + CPUMCTX.r8]
        mov     r9, qword [rdi + CPUMCTX.r9]
        mov     r10, qword [rdi + CPUMCTX.r10]
        mov     r11, qword [rdi + CPUMCTX.r11]
        mov     r12, qword [rdi + CPUMCTX.r12]
        mov     r13, qword [rdi + CPUMCTX.r13]
        mov     r14, qword [rdi + CPUMCTX.r14]
        mov     r15, qword [rdi + CPUMCTX.r15]
        mov     rdi, qword [rdi + CPUMCTX.edi]

        je      .vmlaunch64_launch

        vmresume
        jc      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
        jmp     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))   ; here if vmresume detected a failure

.vmlaunch64_launch:
        vmlaunch
        jc      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
        jmp     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))   ; here if vmlaunch detected a failure


; Put these two outside the normal code path as they should rarely change.
ALIGNCODE(8)
.write_host_rip:
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRip]
 %endif
        mov     [r9 + VMXVMCSINFO.uHostRip], rcx
        mov     eax, VMX_VMCS_HOST_RIP      ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
        vmwrite rax, rcx                    ;;       the VMXVMCSINFO::pfnStartVM function changes (eventually
 %ifdef VBOX_STRICT                         ;;       take the Windows/SSE stuff into account then)...
        jna     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
 %endif
        jmp     .wrote_host_rip

ALIGNCODE(8)
.write_host_rsp:
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRsp]
 %endif
        mov     [r9 + VMXVMCSINFO.uHostRsp], rsp
        mov     eax, VMX_VMCS_HOST_RSP
        vmwrite rax, rsp
 %ifdef VBOX_STRICT
        jna     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
 %endif
        jmp     .wrote_host_rsp

ALIGNCODE(64)
GLOBALNAME RT_CONCAT(hmR0VmxStartVmHostRIP,%1)
        RESTORE_STATE_VMX 0, %2, %3, %4
        mov     eax, VINF_SUCCESS

.vmstart64_end:
 %if %4 != 0
        mov     r11d, eax               ; save the return code.

        ; Save the guest SSE state related to non-volatile and volatile SSE registers.
        mov     rcx, [r8 + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        stmxcsr [rcx + X86FXSTATE.MXCSR]
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [r8 + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xsave   [rcx]
  %else
   %error invalid template parameter 4
  %endif

        ; Restore the host non-volatile SSE register state.
        ldmxcsr [rbp + frm_saved_mxcsr]
        movdqa  xmm6, [rbp + frm_saved_xmm6 ]
        movdqa  xmm7, [rbp + frm_saved_xmm7 ]
        movdqa  xmm8, [rbp + frm_saved_xmm8 ]
        movdqa  xmm9, [rbp + frm_saved_xmm9 ]
        movdqa  xmm10, [rbp + frm_saved_xmm10]
        movdqa  xmm11, [rbp + frm_saved_xmm11]
        movdqa  xmm12, [rbp + frm_saved_xmm12]
        movdqa  xmm13, [rbp + frm_saved_xmm13]
        movdqa  xmm14, [rbp + frm_saved_xmm14]
        movdqa  xmm15, [rbp + frm_saved_xmm15]

        mov     eax, r11d
 %endif ; %4 != 0

        lea     rsp, [rbp + frm_fRFlags]
        popf
        leave
        ret

        ;
        ; Error returns.
        ;
 %ifdef VBOX_STRICT
.vmwrite_failed:
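        ; (vmwrite reports failure via RFLAGS: CF=1 means VMfailInvalid - no
        ; current VMCS - while ZF=1 means VMfailValid - a bad field.  The jna
        ; above catches both cases; the jz below tells them apart, the mov in
        ; between not touching the flags.)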
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_FIELD
        jz      .return_after_vmwrite_error
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR
        jmp     .return_after_vmwrite_error
 %endif
.vmxstart64_invalid_vmcs_ptr:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
        jmp     .vmstart64_error_return
.vmxstart64_start_failed:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_UNABLE_TO_START_VM
.vmstart64_error_return:
        RESTORE_STATE_VMX 1, %2, %3, %4
        mov     eax, [rbp + frm_rcError]
        jmp     .vmstart64_end

 %ifdef VBOX_STRICT
        ; Precondition checks failed.
.precond_failure_return:
        POP_CALLEE_PRESERVED_REGISTERS
  %if cbFrame != cbBaseFrame
   %error Bad frame size value: cbFrame, expected cbBaseFrame
  %endif
        jmp     .vmstart64_end
 %endif

 %undef frm_fRFlags
 %undef frm_pGstCtx
 %undef frm_uHostXcr0
 %undef frm_saved_gdtr
 %undef frm_saved_tr
 %undef frm_fNoRestoreXcr0
 %undef frm_saved_idtr
 %undef frm_saved_ldtr
 %undef frm_rcError
 %undef frm_guest_rax
 %undef cbFrame
ENDPROC RT_CONCAT(hmR0VmxStartVm,%1)
 %ifdef ASM_FORMAT_ELF
size NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) NAME(RT_CONCAT(hmR0VmxStartVm,%1) %+ _EndProc) - NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))
 %endif


%endmacro ; hmR0VmxStartVmTemplate

%macro hmR0VmxStartVmSseTemplate 3
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0                 | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0                 | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0                 | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0                 | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0                 | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0                 | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0                 | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0                 | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
%endmacro

hmR0VmxStartVmSseTemplate 0,,RT_NOTHING
%ifdef VBOX_WITH_KERNEL_USING_XMM
hmR0VmxStartVmSseTemplate 1,_SseManual,RT_NOTHING
hmR0VmxStartVmSseTemplate 2,_SseXSave,RT_NOTHING
%endif
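
; The instantiations above produce a hmR0VmxStartVm variant for every
; combination of XCR0 handling, the three entry mitigations, the exit IBPB
; flag and the SSE handling mode.  Ring-0 selects the matching entry point
; when the world-switcher configuration changes (cf. the
; hmR0VmxUpdateStartVmFunction() remark further up), keeping the per-entry
; path free of tests on these options.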


;;
; hmR0SvmVmRun template
;
; @param 1 The suffix of the variation.
; @param 2 fLoadSaveGuestXcr0 value
; @param 3 The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;          Drivers shouldn't use AVX registers without saving+loading:
;              https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;          However the compiler docs have a different idea:
;              https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;          We'll go with the former for now.
;
%macro hmR0SvmVmRunTemplate 4

;;
; Prepares for and executes VMRUN (32-bit and 64-bit guests).
;
; @returns VBox status code
; @param   pVM          msc:rcx,gcc:rdi     The cross context VM structure (unused).
; @param   pVCpu        msc:rdx,gcc:rsi     The cross context virtual CPU structure of the calling EMT.
; @param   HCPhysVmcb   msc:r8, gcc:rdx     Physical address of guest VMCB.
;
ALIGNCODE(64)   ; This + immediate optimizations causes serious trouble for yasm and the SEH frames: prologue -28 bytes, must be <256
                ; So the SEH64_XXX stuff is currently not operational.
BEGINPROC RT_CONCAT(hmR0SvmVmRun,%1)
 %ifdef VBOX_WITH_KERNEL_USING_XMM
  %if %4 = 0
        ;
        ; The non-saving variant will currently check the two SSE preconditions and pick
        ; the right variant to continue with.  Later we can see if we can't manage to
        ; move these decisions into hmR0SvmUpdateVmRunFunction().
        ;
   %ifdef ASM_CALL64_MSC
        test    byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %else
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %endif
        jz      .save_xmm_no_need
   %ifdef ASM_CALL64_MSC
        cmp     dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %else
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %endif
        je      RT_CONCAT3(hmR0SvmVmRun,%1,_SseManual)
        jmp     RT_CONCAT3(hmR0SvmVmRun,%1,_SseXSave)
.save_xmm_no_need:
  %endif
 %endif
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
        pushf
 %assign cbFrame           30h
 %if %4 != 0
  %assign cbFrame          cbFrame + 16 * 11   ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
 %endif
 %assign cbBaseFrame       cbFrame
        sub     rsp, cbFrame - 8h               ; We subtract 8 bytes for the above pushf
        SEH64_ALLOCATE_STACK cbFrame            ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.

 %define frm_fRFlags        -008h
 %define frm_uHostXcr0      -018h               ; 128-bit
 ;%define frm_fNoRestoreXcr0 -020h              ; Non-zero if we should skip XCR0 restoring.
 %define frm_pGstCtx        -028h               ; Where we stash guest CPU context for use after the vmrun.
 %define frm_HCPhysVmcbHost -030h               ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
 %if %4 != 0
  %define frm_saved_xmm6    -040h
  %define frm_saved_xmm7    -050h
  %define frm_saved_xmm8    -060h
  %define frm_saved_xmm9    -070h
  %define frm_saved_xmm10   -080h
  %define frm_saved_xmm11   -090h
  %define frm_saved_xmm12   -0a0h
  %define frm_saved_xmm13   -0b0h
  %define frm_saved_xmm14   -0c0h
  %define frm_saved_xmm15   -0d0h
  %define frm_saved_mxcsr   -0e0h
 %endif

        ; Manual save and restore:
        ;  - General purpose registers except RIP, RSP, RAX
        ;
        ; Trashed:
        ;  - CR2 (we don't care)
        ;  - LDTR (reset to 0)
        ;  - DRx (presumably not changed at all)
        ;  - DR7 (reset to 0x400)

        ; Save all general purpose host registers.
        PUSH_CALLEE_PRESERVED_REGISTERS
        SEH64_END_PROLOGUE
 %if cbFrame != (cbBaseFrame + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
  %error Bad cbFrame value
 %endif

        ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu.  (rdx & rcx will soon be trashed.)
 %ifdef ASM_CALL64_GCC
        mov     r8, rdx                 ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
 %else
        mov     rsi, rdx                ; Put pVCpu in rsi like on GCC as rdx is trashed below.
        ;mov     rdi, rcx               ; Put pVM in rdi like on GCC as rcx is trashed below.
 %endif

 %ifdef VBOX_STRICT
        ;
        ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
        ;
        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
        mov     eax, VERR_SVM_VMRUN_PRECOND_0
        jne     .failure_return

        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT
        cmp     eax, %3
        mov     eax, VERR_SVM_VMRUN_PRECOND_1
        jne     .failure_return

  %ifdef VBOX_WITH_KERNEL_USING_XMM
        mov     eax, VERR_SVM_VMRUN_PRECOND_2
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %if %4 = 0
        jnz     .failure_return
   %else
        jz      .failure_return

        mov     eax, VERR_SVM_VMRUN_PRECOND_3
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
    %if %4 = 1
        jne     .failure_return
    %elif %4 = 2
        je      .failure_return
    %else
     %error Invalid template parameter 4.
    %endif
   %endif
  %endif
 %endif ; VBOX_STRICT

 %if %4 != 0
        ; Save the non-volatile SSE host register state.
        movdqa  [rbp + frm_saved_xmm6 ], xmm6
        movdqa  [rbp + frm_saved_xmm7 ], xmm7
        movdqa  [rbp + frm_saved_xmm8 ], xmm8
        movdqa  [rbp + frm_saved_xmm9 ], xmm9
        movdqa  [rbp + frm_saved_xmm10], xmm10
        movdqa  [rbp + frm_saved_xmm11], xmm11
        movdqa  [rbp + frm_saved_xmm12], xmm12
        movdqa  [rbp + frm_saved_xmm13], xmm13
        movdqa  [rbp + frm_saved_xmm14], xmm14
        movdqa  [rbp + frm_saved_xmm15], xmm15
        stmxcsr [rbp + frm_saved_mxcsr]

        ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
        mov     rcx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        movdqa  xmm0, [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
        movdqa  xmm1, [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
        movdqa  xmm2, [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
        movdqa  xmm3, [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
        movdqa  xmm4, [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
        movdqa  xmm5, [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
        movdqa  xmm6, [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7, [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8, [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9, [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
        ldmxcsr [rcx + X86FXSTATE.MXCSR]
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xrstor  [rcx]
  %else
   %error invalid template parameter 4
  %endif
 %endif

 %if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
        xor     ecx, ecx
        xgetbv                          ; save the host XCR0 on the stack
        mov     [rbp + frm_uHostXcr0 + 8], rdx
        mov     [rbp + frm_uHostXcr0    ], rax

        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr]       ; load the guest XCR0
        mov     edx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                ; paranoia
        xsetbv
 %endif

        ; Save host fs, gs, sysenter msr etc.
        mov     rax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.svm + HMR0CPUSVM.HCPhysVmcbHost]
        mov     qword [rbp + frm_HCPhysVmcbHost], rax   ; save for the vmload after vmrun
        lea     rsi, [rsi + VMCPU.cpum.GstCtx]
        mov     qword [rbp + frm_pGstCtx], rsi
        vmsave

 %if %3 & HM_WSF_IBPB_ENTRY
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
 %endif

        ; Setup rax for VMLOAD.
        mov     rax, r8                 ; HCPhysVmcb (64 bits physical address; take low dword only)

        ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
        mov     rbx, qword [rsi + CPUMCTX.ebx]
        mov     rcx, qword [rsi + CPUMCTX.ecx]
        mov     rdx, qword [rsi + CPUMCTX.edx]
        mov     rdi, qword [rsi + CPUMCTX.edi]
        mov     rbp, qword [rsi + CPUMCTX.ebp]
        mov     r8, qword [rsi + CPUMCTX.r8]
        mov     r9, qword [rsi + CPUMCTX.r9]
        mov     r10, qword [rsi + CPUMCTX.r10]
        mov     r11, qword [rsi + CPUMCTX.r11]
        mov     r12, qword [rsi + CPUMCTX.r12]
        mov     r13, qword [rsi + CPUMCTX.r13]
        mov     r14, qword [rsi + CPUMCTX.r14]
        mov     r15, qword [rsi + CPUMCTX.r15]
        mov     rsi, qword [rsi + CPUMCTX.esi]

        ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
        clgi
        sti
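        ; (While GIF is clear the CPU holds interrupts, NMIs included, pending
        ; rather than delivering them, so the sti cannot take effect for the
        ; host here; it merely establishes the IF=1 state under which physical
        ; interrupts arriving in guest mode trigger a #VMEXIT.)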

        ; Load guest FS, GS, Sysenter MSRs etc.
        vmload

        ; Run the VM.
        vmrun

        ; Save guest fs, gs, sysenter msr etc.
        vmsave

        ; Load host fs, gs, sysenter msr etc.
        mov     rax, [rsp + cbFrame + frm_HCPhysVmcbHost]   ; load HCPhysVmcbHost (rbp is not operational yet, thus rsp)
        vmload

        ; Set the global interrupt flag again, but execute cli to make sure IF=0.
        cli
        stgi

        ; Pop pVCpu (saved above) and save the guest GPRs (sans RSP and RAX).
        mov     rax, [rsp + cbFrame + frm_pGstCtx]          ; (rbp still not operational)

        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8], r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9], r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]

 %if %4 != 0
        ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
        mov     r8, rax
 %endif

 %if %3 & HM_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
 %endif

 %if %2 != 0
        ; Restore the host xcr0.
        xor     ecx, ecx
        mov     rdx, [rbp + frm_uHostXcr0 + 8]
        mov     rax, [rbp + frm_uHostXcr0]
        xsetbv
 %endif

 %if %4 != 0
        ; Save the guest SSE state related to non-volatile and volatile SSE registers.
        mov     rcx, [r8 + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        stmxcsr [rcx + X86FXSTATE.MXCSR]
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [r8 + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xsave   [rcx]
  %else
   %error invalid template parameter 4
  %endif

        ; Restore the host non-volatile SSE register state.
        ldmxcsr [rbp + frm_saved_mxcsr]
        movdqa  xmm6, [rbp + frm_saved_xmm6 ]
        movdqa  xmm7, [rbp + frm_saved_xmm7 ]
        movdqa  xmm8, [rbp + frm_saved_xmm8 ]
        movdqa  xmm9, [rbp + frm_saved_xmm9 ]
        movdqa  xmm10, [rbp + frm_saved_xmm10]
        movdqa  xmm11, [rbp + frm_saved_xmm11]
        movdqa  xmm12, [rbp + frm_saved_xmm12]
        movdqa  xmm13, [rbp + frm_saved_xmm13]
        movdqa  xmm14, [rbp + frm_saved_xmm14]
        movdqa  xmm15, [rbp + frm_saved_xmm15]
 %endif ; %4 != 0

        ; Epilogue (assumes we restored volatile registers above when saving the guest GPRs).
        mov     eax, VINF_SUCCESS
        add     rsp, cbFrame - 8h
        popf
        leave
        ret

 %ifdef VBOX_STRICT
        ; Precondition checks failed.
.failure_return:
        POP_CALLEE_PRESERVED_REGISTERS
  %if cbFrame != cbBaseFrame
   %error Bad frame size value: cbFrame
  %endif
        add     rsp, cbFrame - 8h
        popf
        leave
        ret
 %endif

%undef frm_uHostXcr0
%undef frm_fNoRestoreXcr0
%undef frm_pVCpu
%undef frm_HCPhysVmcbHost
%undef cbFrame
ENDPROC RT_CONCAT(hmR0SvmVmRun,%1)

%endmacro ; hmR0SvmVmRunTemplate

;
; Instantiate the hmR0SvmVmRun various variations.
;
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit,           0, 0,                                    0
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit,           1, 0,                                    0
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit,           0, HM_WSF_IBPB_ENTRY,                    0
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit,           1, HM_WSF_IBPB_ENTRY,                    0
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit,           0, HM_WSF_IBPB_EXIT,                     0
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit,           1, HM_WSF_IBPB_EXIT,                     0
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit,           0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit,           1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
%ifdef VBOX_WITH_KERNEL_USING_XMM
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0,                                    1
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0,                                    1
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY,                    1
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY,                    1
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_EXIT,                     1
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_EXIT,                     1
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1

hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  0, 0,                                    2
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  1, 0,                                    2
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  0, HM_WSF_IBPB_ENTRY,                    2
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  1, HM_WSF_IBPB_ENTRY,                    2
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  0, HM_WSF_IBPB_EXIT,                     2
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  1, HM_WSF_IBPB_EXIT,                     2
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
%endif