VirtualBox — source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm @ 87559

Last change on this file since 87559 was 87522, checked in by vboxsync, 4 years ago:
VMM/HM: Moved CPUMCTX::fWorldSwitcher to HMR0PERVCPU::fWorldSwitcher. bugref:9453 bugref:9087

; $Id: HMR0A.asm 87522 2021-02-01 22:32:33Z vboxsync $
;; @file
; HM - Ring-0 VMX, SVM world-switch and helper routines.
;

;
; Copyright (C) 2006-2020 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*********************************************************************************************************************************
;*  Header Files                                                                                                                  *
;*********************************************************************************************************************************
;%define RT_ASM_WITH_SEH64      ; - trouble with SEH, alignment and (probably) 2nd pass optimizations.
%define RT_ASM_WITH_SEH64_ALT   ; Use asmdefs.mac hackery for manually emitting unwind info.
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/gvm.mac"
%include "iprt/x86.mac"
%include "HMInternal.mac"

%ifndef RT_ARCH_AMD64
 %error AMD64 only.
%endif


;*********************************************************************************************************************************
;*  Defined Constants And Macros                                                                                                  *
;*********************************************************************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160
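; (The FXSAVE/FXRSTOR image places XMM0 at byte offset 160, one 16-byte slot
; per register, so XMM0..XMM15 occupy offsets 160..415 of the 512-byte area.)
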
;; Spectre filler for 64-bit mode.
; Chosen to be an invalid address (also with 5 level paging).
%define SPECTRE_FILLER          0x02204204207fffff
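; (RESTORE_STATE_VMX and the SVM exit path below load this constant into the
; scratch GPRs right after leaving the guest, so leftover guest values cannot
; feed host-side speculative dereferences; the value stays non-canonical even
; with 57-bit linear addressing.)
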
;;
; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation.
;
; @note This is normally done by hmR0VmxExportHostSegmentRegs and VMXRestoreHostState,
;       so much of this is untested code.
; @{
%define VMX_SKIP_GDTR
%define VMX_SKIP_TR
%define VBOX_SKIP_RESTORE_SEG
%ifdef RT_OS_DARWIN
 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
 ; risk loading a stale LDT value or something invalid.
 %define HM_64_BIT_USE_NULL_SEL
 ; Darwin (Mavericks) uses IDTR limit to store the CPU number so we need to always restore it.
 ; See @bugref{6875}.
 %undef VMX_SKIP_IDTR
%else
 %define VMX_SKIP_IDTR
%endif
;; @}

;; @def CALLEE_PRESERVED_REGISTER_COUNT
; Number of registers pushed by PUSH_CALLEE_PRESERVED_REGISTERS.
%ifdef ASM_CALL64_GCC
 %define CALLEE_PRESERVED_REGISTER_COUNT 5
%else
 %define CALLEE_PRESERVED_REGISTER_COUNT 7
%endif
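; (GCC/SysV pushes 5 because rsi and rdi are volatile there; the Microsoft
; x64 convention treats rsi and rdi as callee-saved, hence 7.)
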
;; @def PUSH_CALLEE_PRESERVED_REGISTERS
; Macro for pushing all GPRs we must preserve for the caller.
%macro PUSH_CALLEE_PRESERVED_REGISTERS 0
        push    r15
        SEH64_PUSH_GREG r15
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r15   -cbFrame

        push    r14
        SEH64_PUSH_GREG r14
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r14   -cbFrame

        push    r13
        SEH64_PUSH_GREG r13
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r13   -cbFrame

        push    r12
        SEH64_PUSH_GREG r12
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_r12   -cbFrame

        push    rbx
        SEH64_PUSH_GREG rbx
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_rbx   -cbFrame

 %ifdef ASM_CALL64_MSC
        push    rsi
        SEH64_PUSH_GREG rsi
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_rsi   -cbFrame

        push    rdi
        SEH64_PUSH_GREG rdi
        %assign cbFrame         cbFrame + 8
        %assign frm_saved_rdi   -cbFrame
 %endif
%endmacro

;; @def POP_CALLEE_PRESERVED_REGISTERS
; Counterpart to PUSH_CALLEE_PRESERVED_REGISTERS for use in the epilogue.
%macro POP_CALLEE_PRESERVED_REGISTERS 0
 %ifdef ASM_CALL64_MSC
        pop     rdi
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_rdi

        pop     rsi
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_rsi
 %endif
        pop     rbx
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_rbx

        pop     r12
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r12

        pop     r13
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r13

        pop     r14
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r14

        pop     r15
        %assign cbFrame         cbFrame - 8
        %undef  frm_saved_r15
%endmacro
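
; Usage sketch (illustrative only; the real templates below set up rbp as the
; frame base and pre-assign cbFrame to the size of their local variables):
;       %assign cbFrame         0
;       PUSH_CALLEE_PRESERVED_REGISTERS         ; defines frm_saved_r15..frm_saved_rbx (+ rsi/rdi for MSC)
;       ; ... the saved values can then be addressed as e.g. [rbp + frm_saved_rbx] ...
;       POP_CALLEE_PRESERVED_REGISTERS          ; mirrors the pushes and returns cbFrame to its prior value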


;; @def PUSH_RELEVANT_SEGMENT_REGISTERS
; Macro saving all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.
; @clobbers rax, rdx, rcx
%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef VBOX_SKIP_RESTORE_SEG
  %error untested code. probably does not work any more!
  %ifndef HM_64_BIT_USE_NULL_SEL
        mov     %2, es
        push    %1
        mov     %2, ds
        push    %1
  %endif

        ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode,
        ; Solaris OTOH doesn't and we must save it.
        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        push    rdx
        push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
        push    fs
  %endif

        ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel.
        ; The same happens on exit.
        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        push    rdx
        push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
        push    gs
  %endif
 %endif ; !VBOX_SKIP_RESTORE_SEG
%endmacro ; PUSH_RELEVANT_SEGMENT_REGISTERS

;; @def POP_RELEVANT_SEGMENT_REGISTERS
; Macro restoring all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.
; @clobbers rax, rdx, rcx
%macro POP_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef VBOX_SKIP_RESTORE_SEG
  %error untested code. probably does not work any more!
        ; Note: do not step through this code with a debugger!
  %ifndef HM_64_BIT_USE_NULL_SEL
        xor     eax, eax
        mov     ds, ax
        mov     es, ax
        mov     fs, ax
        mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     gs
  %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     fs
  %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr
        ; Now it's safe to step again

  %ifndef HM_64_BIT_USE_NULL_SEL
        pop     %1
        mov     ds, %2
        pop     %1
        mov     es, %2
  %endif
 %endif ; !VBOX_SKIP_RESTORE_SEG
%endmacro ; POP_RELEVANT_SEGMENT_REGISTERS


;*********************************************************************************************************************************
;*  External Symbols                                                                                                              *
;*********************************************************************************************************************************
%ifdef VBOX_WITH_KERNEL_USING_XMM
extern NAME(CPUMIsGuestFPUStateActive)
%endif


BEGINCODE


;;
; Used on platforms with poor inline assembly support to retrieve all the
; info from the CPU and put it in the @a pRestoreHost structure.
;
; @returns VBox status code
; @param   pRestoreHost   msc: rcx  gcc: rdi    Pointer to the RestoreHost struct.
; @param   fHaveFsGsBase  msc: dl   gcc: sil    Whether we can use rdfsbase or not.
;
ALIGNCODE(64)
BEGINPROC hmR0VmxExportHostSegmentRegsAsmHlp
%ifdef ASM_CALL64_MSC
 %define pRestoreHost rcx
%elifdef ASM_CALL64_GCC
 %define pRestoreHost rdi
%else
 %error Unknown calling convention.
%endif
        SEH64_END_PROLOGUE

        ; Start with the FS and GS base so we can trash DL/SIL.
%ifdef ASM_CALL64_MSC
        or      dl, dl
%else
        or      sil, sil
%endif
        jz      .use_rdmsr_for_fs_and_gs_base
        rdfsbase rax
        mov     [pRestoreHost + VMXRESTOREHOST.uHostFSBase], rax
        rdgsbase rax
        mov     [pRestoreHost + VMXRESTOREHOST.uHostGSBase], rax
.done_fs_and_gs_base:

        ; TR, GDTR and IDTR
        str     [pRestoreHost + VMXRESTOREHOST.uHostSelTR]
        sgdt    [pRestoreHost + VMXRESTOREHOST.HostGdtr]
        sidt    [pRestoreHost + VMXRESTOREHOST.HostIdtr]

        ; Segment registers.
        xor     eax, eax
        mov     eax, cs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelCS], ax

        mov     eax, ss
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelSS], ax

        mov     eax, gs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelGS], ax

        mov     eax, fs
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelFS], ax

        mov     eax, es
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelES], ax

        mov     eax, ds
        mov     [pRestoreHost + VMXRESTOREHOST.uHostSelDS], ax

        ret

ALIGNCODE(16)
.use_rdmsr_for_fs_and_gs_base:
        mov     r8, pRestoreHost        ; keep the pointer in r8; rcx (msc) and rdx get trashed by rdmsr below

        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        shl     rdx, 32
        or      rax, rdx                ; rdmsr splits the value across edx:eax; combine into rax before storing
        mov     [r8 + VMXRESTOREHOST.uHostFSBase], rax

        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        shl     rdx, 32
        or      rax, rdx
        mov     [r8 + VMXRESTOREHOST.uHostGSBase], rax

        mov     pRestoreHost, r8        ; restore the pointer register (a no-op for gcc)
        jmp     .done_fs_and_gs_base
%undef pRestoreHost
ENDPROC hmR0VmxExportHostSegmentRegsAsmHlp
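; (rdfsbase/rdgsbase require CR4.FSGSBASE to be set on the host; that is what
; the fHaveFsGsBase argument reports, and the MSR_K8_FS_BASE/MSR_K8_GS_BASE
; rdmsr path above is the fallback when it is clear.)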


;;
; Restores host-state fields.
;
; @returns VBox status code
; @param   f32RestoreHost  msc: ecx  gcc: edi   RestoreHost flags.
; @param   pRestoreHost    msc: rdx  gcc: rsi   Pointer to the RestoreHost struct.
;
ALIGNCODE(64)
BEGINPROC VMXRestoreHostState
%ifndef ASM_CALL64_GCC
        ; Use GCC's input registers since we'll be needing both rcx and rdx further
        ; down with the wrmsr instruction.  Use the R10 and R11 registers for saving
        ; RDI and RSI since MSC preserves the two latter registers.
        mov     r10, rdi
        mov     r11, rsi
        mov     rdi, rcx
        mov     rsi, rdx
%endif
        SEH64_END_PROLOGUE

.restore_gdtr:
        test    edi, VMX_RESTORE_HOST_GDTR
        jz      .restore_idtr
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.restore_idtr:
        test    edi, VMX_RESTORE_HOST_IDTR
        jz      .restore_ds
        lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.restore_ds:
        test    edi, VMX_RESTORE_HOST_SEL_DS
        jz      .restore_es
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
        mov     ds, eax

.restore_es:
        test    edi, VMX_RESTORE_HOST_SEL_ES
        jz      .restore_tr
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
        mov     es, eax

.restore_tr:
        test    edi, VMX_RESTORE_HOST_SEL_TR
        jz      .restore_fs
        ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
        mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
        mov     ax, dx
        and     eax, X86_SEL_MASK_OFF_RPL                   ; mask away TI and RPL bits leaving only the descriptor offset
        test    edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_or_need_writable
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     dword [rax + 4], ~RT_BIT(9)                 ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx

.restore_fs:
        ;
        ; When restoring the selector values for FS and GS, we'll temporarily trash
        ; the base address (at least the high 32-bit bits, but quite possibly the
        ; whole base address), the wrmsr will restore it correctly. (VT-x actually
        ; restores the base correctly when leaving guest mode, but not the selector
        ; value, so there is little problem with interrupts being enabled prior to
        ; this restore job.)
        ; We'll disable ints once for both FS and GS as that's probably faster.
        ;
        test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
        jz      .restore_success
        pushfq
        cli                                                 ; (see above)

        test    edi, VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE
        jz      .restore_fs_using_wrmsr

.restore_fs_using_wrfsbase:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrgsbase
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostFSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        wrfsbase rax

.restore_gs_using_wrgsbase:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostGSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        wrgsbase rax

.restore_flags:
        popfq

.restore_success:
        mov     eax, VINF_SUCCESS
%ifndef ASM_CALL64_GCC
        ; Restore RDI and RSI on MSC.
        mov     rdi, r10
        mov     rsi, r11
%endif
        ret

ALIGNCODE(8)
.gdt_readonly_or_need_writable:
        test    edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_need_writable
.gdt_readonly:
        mov     rcx, cr0
        mov     r9, rcx
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     rcx, ~X86_CR0_WP
        mov     cr0, rcx
        and     dword [rax + 4], ~RT_BIT(9)                 ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx
        mov     cr0, r9
        jmp     .restore_fs

ALIGNCODE(8)
.gdt_readonly_need_writable:
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw
        and     dword [rax + 4], ~RT_BIT(9)                 ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        lgdt    [rsi + VMXRESTOREHOST.HostGdtrRw]
        ltr     dx
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]             ; load the original GDT
        jmp     .restore_fs

ALIGNCODE(8)
.restore_fs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrmsr
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]      ; uHostFSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr

.restore_gs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]      ; uHostGSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr
        jmp     .restore_flags
ENDPROC VMXRestoreHostState


;;
; Dispatches an NMI to the host.
;
ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
        ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
        SEH64_END_PROLOGUE
        int 2
        ret
ENDPROC VMXDispatchHostNmi
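; (Note: "int 2" is a software interrupt through IDT vector 2; it reaches the
; host's NMI handler but does not establish the hardware NMI-blocking window
; that real NMI delivery would.)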


;;
; Common restore logic for success and error paths.  We duplicate this because we
; don't want to waste writing the VINF_SUCCESS return value to the stack in the
; regular code path.
;
; @param 1 Zero if regular return, non-zero if error return.  Controls label emission.
; @param 2 fLoadSaveGuestXcr0 value
; @param 3 The (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY) + HM_WSF_IBPB_EXIT value.
;          The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;
; @note Important that this does not modify cbFrame or rsp.
%macro RESTORE_STATE_VMX 4
        ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
        lidt    [rsp + cbFrame + frm_saved_idtr]
 %endif
 %ifndef VMX_SKIP_GDTR
        lgdt    [rsp + cbFrame + frm_saved_gdtr]
 %endif

        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
        mov     [rsp + cbFrame + frm_guest_rax], rax
        mov     rax, [rsp + cbFrame + frm_pGstCtx]

        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]    ; re-establish the frame pointer as early as possible.
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, [rbp + frm_guest_rax]
        mov     qword [rax + CPUMCTX.eax], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8], r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9], r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]

        mov     rdx, cr2
        mov     qword [rax + CPUMCTX.cr2], rdx
        mov     rdx, rcx

 %if %4 != 0
        ; Save the context pointer in r8 for the SSE save/restore.
        mov     r8, rax
 %endif

 %if %3 & HM_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
  %if %1 = 0 ; Skip this in failure branch (=> guru)
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
  %endif
 %endif

 %ifndef VMX_SKIP_TR
        ; Restore TSS selector; must mark it as not busy before using ltr!
        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
  %ifndef VMX_SKIP_GDTR
        lgdt    [rbp + frm_saved_gdtr]
  %endif
        movzx   eax, word [rbp + frm_saved_tr]
        mov     ecx, eax
        and     eax, X86_SEL_MASK_OFF_RPL       ; mask away TI and RPL bits leaving only the descriptor offset
        add     rax, [rbp + frm_saved_gdtr + 2] ; eax <- GDTR.address + descriptor offset
        and     dword [rax + 4], ~RT_BIT(9)     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     cx
 %endif
        movzx   edx, word [rbp + frm_saved_ldtr]
        test    edx, edx
        jz      %%skip_ldt_write
        lldt    dx
%%skip_ldt_write:

 %if %1 != 0
.return_after_vmwrite_error:
 %endif
        ; Restore segment registers.
        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.

 %if %2 != 0
        ; Restore the host XCR0.
        xor     ecx, ecx
        mov     eax, [rbp + frm_uHostXcr0]
        mov     edx, [rbp + frm_uHostXcr0 + 4]
        xsetbv
 %endif
%endmacro ; RESTORE_STATE_VMX


;;
; hmR0VmxStartVm template
;
; @param 1 The suffix of the variation.
; @param 2 fLoadSaveGuestXcr0 value
; @param 3 The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;          Drivers shouldn't use AVX registers without saving+loading:
;              https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;          However the compiler docs have a different idea:
;              https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;          We'll go with the former for now.
;
%macro hmR0VmxStartVmTemplate 4

;;
; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
;
; @returns VBox status code
; @param   pVmcsInfo  msc:rcx, gcc:rdi     Pointer to the VMCS info (for cached host RIP and RSP).
; @param   pVCpu      msc:rdx, gcc:rsi     The cross context virtual CPU structure of the calling EMT.
; @param   fResume    msc:r8l, gcc:dl      Whether to use vmlaunch/vmresume.
;
ALIGNCODE(64)
BEGINPROC RT_CONCAT(hmR0VmxStartVm,%1)
 %ifdef VBOX_WITH_KERNEL_USING_XMM
  %if %4 = 0
        ;
        ; The non-saving variant will currently check the two SSE preconditions and pick
        ; the right variant to continue with.  Later we can see if we can't manage to
        ; move these decisions into hmR0VmxUpdateStartVmFunction().
        ;
   %ifdef ASM_CALL64_MSC
        test    byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %else
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %endif
        jz      .save_xmm_no_need
   %ifdef ASM_CALL64_MSC
        cmp     dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %else
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %endif
        je      RT_CONCAT3(hmR0VmxStartVm,%1,_SseManual)
        jmp     RT_CONCAT3(hmR0VmxStartVm,%1,_SseXSave)
.save_xmm_no_need:
  %endif
 %endif
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
        pushf
        cli

 %define frm_fRFlags        -008h
 %define frm_pGstCtx        -010h       ; Where we stash guest CPU context for use after the vmrun.
 %define frm_uHostXcr0      -020h       ; 128-bit
 %define frm_saved_gdtr     -02ah       ; 16+64:  Only used when VMX_SKIP_GDTR isn't defined
 %define frm_saved_tr       -02ch       ; 16-bit: Only used when VMX_SKIP_TR isn't defined
 %define frm_MDS_seg        -030h       ; 16-bit: Temporary storage for the MDS flushing.
 %define frm_saved_idtr     -03ah       ; 16+64:  Only used when VMX_SKIP_IDTR isn't defined
 %define frm_saved_ldtr     -03ch       ; 16-bit: always saved.
 %define frm_rcError        -040h       ; 32-bit: Error status code (not used in the success path)
 %define frm_guest_rax      -048h       ; Temporary storage slot for guest RAX.
 %if %4 = 0
  %assign cbFrame           048h
 %else
  %define frm_saved_xmm6    -050h
  %define frm_saved_xmm7    -060h
  %define frm_saved_xmm8    -070h
  %define frm_saved_xmm9    -080h
  %define frm_saved_xmm10   -090h
  %define frm_saved_xmm11   -0a0h
  %define frm_saved_xmm12   -0b0h
  %define frm_saved_xmm13   -0c0h
  %define frm_saved_xmm14   -0d0h
  %define frm_saved_xmm15   -0e0h
  %define frm_saved_mxcsr   -0f0h
  %assign cbFrame           0f0h
 %endif
 %assign cbBaseFrame        cbFrame
        sub     rsp, cbFrame - 8h
        SEH64_ALLOCATE_STACK cbFrame

        ; Save all general purpose host registers.
        PUSH_CALLEE_PRESERVED_REGISTERS
        ;PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax - currently broken
        SEH64_END_PROLOGUE

        ;
        ; Unify the input parameter registers: r9=pVmcsInfo, rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
        ;
 %ifdef ASM_CALL64_GCC
        mov     r9, rdi                 ; pVmcsInfo
        mov     ebx, edx                ; fResume
 %else
        mov     r9, rcx                 ; pVmcsInfo
        mov     rsi, rdx                ; pVCpu
        mov     ebx, r8d                ; fResume
 %endif
        lea     rdi, [rsi + VMCPU.cpum.GstCtx]
        mov     [rbp + frm_pGstCtx], rdi

 %ifdef VBOX_STRICT
        ;
        ; Verify template preconditions / parameters to ensure HMVMXR0.cpp didn't miss some state change.
        ;
        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
        mov     eax, VERR_VMX_STARTVM_PRECOND_0
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT
        cmp     eax, %3
        mov     eax, VERR_VMX_STARTVM_PRECOND_1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

  %ifdef VBOX_WITH_KERNEL_USING_XMM
        mov     eax, VERR_VMX_STARTVM_PRECOND_2
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %if %4 = 0
        jnz     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
   %else
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

        mov     eax, VERR_VMX_STARTVM_PRECOND_3
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
    %if %4 = 1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
    %elif %4 = 2
        je      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
    %else
     %error Invalid template parameter 4.
    %endif
   %endif
  %endif
 %endif ; VBOX_STRICT

 %if %4 != 0
        ; Save the non-volatile SSE host register state.
        movdqa  [rbp + frm_saved_xmm6 ], xmm6
        movdqa  [rbp + frm_saved_xmm7 ], xmm7
        movdqa  [rbp + frm_saved_xmm8 ], xmm8
        movdqa  [rbp + frm_saved_xmm9 ], xmm9
        movdqa  [rbp + frm_saved_xmm10], xmm10
        movdqa  [rbp + frm_saved_xmm11], xmm11
        movdqa  [rbp + frm_saved_xmm12], xmm12
        movdqa  [rbp + frm_saved_xmm13], xmm13
        movdqa  [rbp + frm_saved_xmm14], xmm14
        movdqa  [rbp + frm_saved_xmm15], xmm15
        stmxcsr [rbp + frm_saved_mxcsr]

        ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
        mov     rcx, [rdi + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        movdqa  xmm0,  [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
        movdqa  xmm1,  [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
        movdqa  xmm2,  [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
        movdqa  xmm3,  [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
        movdqa  xmm4,  [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
        movdqa  xmm5,  [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
        movdqa  xmm6,  [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7,  [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8,  [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9,  [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
        ldmxcsr [rcx + X86FXSTATE.MXCSR]
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xrstor  [rcx]
  %else
   %error invalid template parameter 4
  %endif
 %endif
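
        ; (xmm6..xmm15 are callee-saved in the Microsoft x64 ABI, which is why
        ; only those are preserved for the host above, while the guest load
        ; covers the volatile registers as well.)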

 %if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
        ; Note! Trashes rax, rdx and rcx.
        xor     ecx, ecx
        xgetbv                          ; save the host one on the stack
        mov     [rbp + frm_uHostXcr0], eax
        mov     [rbp + frm_uHostXcr0 + 4], edx

        mov     eax, [rdi + CPUMCTX.aXcr]       ; load the guest one
        mov     edx, [rdi + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                ; paranoia; indicate that we must restore XCR0 (popped into ecx, thus 0)
        xsetbv
 %endif

        ; Save host LDTR.
        sldt    word [rbp + frm_saved_ldtr]

 %ifndef VMX_SKIP_TR
        ; The host TR limit is reset to 0x67; save & restore it manually.
        str     word [rbp + frm_saved_tr]
 %endif

 %ifndef VMX_SKIP_GDTR
        ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
        sgdt    [rbp + frm_saved_gdtr]
 %endif
 %ifndef VMX_SKIP_IDTR
        sidt    [rbp + frm_saved_idtr]
 %endif

        ; Load CR2 if necessary (expensive as writing CR2 is a synchronizing instruction - (bird: still expensive on 10980xe)).
        mov     rcx, qword [rdi + CPUMCTX.cr2]
        mov     rdx, cr2
        cmp     rcx, rdx
        je      .skip_cr2_write
        mov     cr2, rcx
.skip_cr2_write:

        ; Set the vmlaunch/vmresume "return" host RIP and RSP values if they've changed (unlikely).
        ; The vmwrite isn't quite for free (on an 10980xe at least), thus we check if anything changed
        ; before writing here.
        lea     rcx, [NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) wrt rip]
        cmp     rcx, [r9 + VMXVMCSINFO.uHostRip]
        jne     .write_host_rip
.wrote_host_rip:
        cmp     rsp, [r9 + VMXVMCSINFO.uHostRsp]
        jne     .write_host_rsp
.wrote_host_rsp:

        ;
        ; Fight spectre and similar.  Trashes rax, rcx, and rdx.
        ;
 %if %3 & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY)    ; The eax:edx value is the same for the first two.
        AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
 %endif
 %if %3 & HM_WSF_IBPB_ENTRY                         ; Indirect branch barrier.
        mov     ecx, MSR_IA32_PRED_CMD
        wrmsr
 %endif
 %if %3 & HM_WSF_L1D_ENTRY                          ; Level 1 data cache flush.
        mov     ecx, MSR_IA32_FLUSH_CMD
        wrmsr
 %elif %3 & HM_WSF_MDS_ENTRY                        ; MDS flushing is included in L1D_FLUSH
        mov     word [rbp + frm_MDS_seg], ds
        verw    word [rbp + frm_MDS_seg]
 %endif

        ; Resume or start VM?
        cmp     bl, 0                   ; fResume

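        ; (The guest GPR loads below do not touch RFLAGS, so the result of the
        ; fResume compare above is still valid at the je that follows them.)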
        ; Load guest general purpose registers.
        mov     rax, qword [rdi + CPUMCTX.eax]
        mov     rbx, qword [rdi + CPUMCTX.ebx]
        mov     rcx, qword [rdi + CPUMCTX.ecx]
        mov     rdx, qword [rdi + CPUMCTX.edx]
        mov     rbp, qword [rdi + CPUMCTX.ebp]
        mov     rsi, qword [rdi + CPUMCTX.esi]
        mov     r8,  qword [rdi + CPUMCTX.r8]
        mov     r9,  qword [rdi + CPUMCTX.r9]
        mov     r10, qword [rdi + CPUMCTX.r10]
        mov     r11, qword [rdi + CPUMCTX.r11]
        mov     r12, qword [rdi + CPUMCTX.r12]
        mov     r13, qword [rdi + CPUMCTX.r13]
        mov     r14, qword [rdi + CPUMCTX.r14]
        mov     r15, qword [rdi + CPUMCTX.r15]
        mov     rdi, qword [rdi + CPUMCTX.edi]

        je      .vmlaunch64_launch

        vmresume
        jc      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
        jmp     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))   ; here if vmresume detected a failure

.vmlaunch64_launch:
        vmlaunch
        jc      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
        jz      NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
        jmp     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))   ; here if vmlaunch detected a failure


; Put these two outside the normal code path as they should rarely change.
ALIGNCODE(8)
.write_host_rip:
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRip]
 %endif
        mov     [r9 + VMXVMCSINFO.uHostRip], rcx
        mov     eax, VMX_VMCS_HOST_RIP      ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
        vmwrite rax, rcx                    ;;       the VMXVMCSINFO::pfnStartVM function changes (eventually
 %ifdef VBOX_STRICT                         ;;       take the Windows/SSE stuff into account then)...
        jna     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
 %endif
        jmp     .wrote_host_rip

ALIGNCODE(8)
.write_host_rsp:
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRsp]
 %endif
        mov     [r9 + VMXVMCSINFO.uHostRsp], rsp
        mov     eax, VMX_VMCS_HOST_RSP
        vmwrite rax, rsp
 %ifdef VBOX_STRICT
        jna     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
 %endif
        jmp     .wrote_host_rsp

ALIGNCODE(64)
GLOBALNAME RT_CONCAT(hmR0VmxStartVmHostRIP,%1)
        RESTORE_STATE_VMX 0, %2, %3, %4
        mov     eax, VINF_SUCCESS

.vmstart64_end:
 %if %4 != 0
        mov     r11d, eax               ; save the return code.

        ; Save the guest SSE state related to non-volatile and volatile SSE registers.
        mov     rcx, [r8 + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        stmxcsr [rcx + X86FXSTATE.MXCSR]
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [r8 + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xsave   [rcx]
  %else
   %error invalid template parameter 4
  %endif

        ; Restore the host non-volatile SSE register state.
        ldmxcsr [rbp + frm_saved_mxcsr]
        movdqa  xmm6,  [rbp + frm_saved_xmm6 ]
        movdqa  xmm7,  [rbp + frm_saved_xmm7 ]
        movdqa  xmm8,  [rbp + frm_saved_xmm8 ]
        movdqa  xmm9,  [rbp + frm_saved_xmm9 ]
        movdqa  xmm10, [rbp + frm_saved_xmm10]
        movdqa  xmm11, [rbp + frm_saved_xmm11]
        movdqa  xmm12, [rbp + frm_saved_xmm12]
        movdqa  xmm13, [rbp + frm_saved_xmm13]
        movdqa  xmm14, [rbp + frm_saved_xmm14]
        movdqa  xmm15, [rbp + frm_saved_xmm15]

        mov     eax, r11d
 %endif ; %4 != 0

        lea     rsp, [rbp + frm_fRFlags]
        popf
        leave
        ret

        ;
        ; Error returns.
        ;
 %ifdef VBOX_STRICT
.vmwrite_failed:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_FIELD
        jz      .return_after_vmwrite_error
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR
        jmp     .return_after_vmwrite_error
 %endif
.vmxstart64_invalid_vmcs_ptr:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
        jmp     .vmstart64_error_return
.vmxstart64_start_failed:
        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_UNABLE_TO_START_VM
.vmstart64_error_return:
        RESTORE_STATE_VMX 1, %2, %3, %4
        mov     eax, [rbp + frm_rcError]
        jmp     .vmstart64_end

 %ifdef VBOX_STRICT
        ; Precondition checks failed.
.precond_failure_return:
        POP_CALLEE_PRESERVED_REGISTERS
  %if cbFrame != cbBaseFrame
   %error Bad frame size value: cbFrame, expected cbBaseFrame
  %endif
        jmp     .vmstart64_end
 %endif

 %undef frm_fRFlags
 %undef frm_pGstCtx
 %undef frm_uHostXcr0
 %undef frm_saved_gdtr
 %undef frm_saved_tr
 %undef frm_MDS_seg
 %undef frm_saved_idtr
 %undef frm_saved_ldtr
 %undef frm_rcError
 %undef frm_guest_rax
 %undef cbFrame
ENDPROC RT_CONCAT(hmR0VmxStartVm,%1)

%endmacro ; hmR0VmxStartVmTemplate

%macro hmR0VmxStartVmSseTemplate 3
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0                 | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0                 | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0                 | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0                 | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0               , %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0                 | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0                 | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0                | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0                 | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0                 | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0                | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0                 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
%endmacro

hmR0VmxStartVmSseTemplate 0,,RT_NOTHING
%ifdef VBOX_WITH_KERNEL_USING_XMM
hmR0VmxStartVmSseTemplate 1,_SseManual,RT_NOTHING
hmR0VmxStartVmSseTemplate 2,_SseXSave,RT_NOTHING
%endif
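
; (The suffix of each instantiation encodes its template parameters; ring-0 C
; code picks the variant matching the current world-switcher flags once and
; caches it, cf. the VMXVMCSINFO::pfnStartVM note in the host-RIP vmwrite
; above.)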


;;
; Clears the MDS buffers using VERW.
ALIGNCODE(16)
BEGINPROC hmR0MdsClear
        SEH64_END_PROLOGUE
        sub     xSP, xCB
        mov     [xSP], ds
        verw    [xSP]
        add     xSP, xCB
        ret
ENDPROC hmR0MdsClear
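; (Architecturally VERW just checks a selector for writability, but on CPUs
; enumerating MD_CLEAR the microcode update overloads it to also overwrite the
; affected microarchitectural buffers; any valid selector works, here the
; current DS value parked on the stack.)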


;;
; hmR0SvmVmRun template
;
; @param 1 The suffix of the variation.
; @param 2 fLoadSaveGuestXcr0 value
; @param 3 The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;          Drivers shouldn't use AVX registers without saving+loading:
;              https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;          However the compiler docs have a different idea:
;              https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;          We'll go with the former for now.
;
%macro hmR0SvmVmRunTemplate 4

;;
; Prepares for and executes VMRUN (32-bit and 64-bit guests).
;
; @returns VBox status code
; @param   pVM           msc:rcx, gcc:rdi     The cross context VM structure (unused).
; @param   pVCpu         msc:rdx, gcc:rsi     The cross context virtual CPU structure of the calling EMT.
; @param   HCPhysVmcb    msc:r8,  gcc:rdx     Physical address of guest VMCB.
;
ALIGNCODE(64)   ; This + immediate optimizations cause serious trouble for yasm and the SEH frames: prologue -28 bytes, must be <256
                ; So the SEH64_XXX stuff is currently not operational.
BEGINPROC RT_CONCAT(hmR0SvmVmRun,%1)
 %ifdef VBOX_WITH_KERNEL_USING_XMM
  %if %4 = 0
        ;
        ; The non-saving variant will currently check the two SSE preconditions and pick
        ; the right variant to continue with.  Later we can see if we can't manage to
        ; move these decisions into hmR0SvmUpdateVmRunFunction().
        ;
   %ifdef ASM_CALL64_MSC
        test    byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %else
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %endif
        jz      .save_xmm_no_need
   %ifdef ASM_CALL64_MSC
        cmp     dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %else
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
   %endif
        je      RT_CONCAT3(hmR0SvmVmRun,%1,_SseManual)
        jmp     RT_CONCAT3(hmR0SvmVmRun,%1,_SseXSave)
.save_xmm_no_need:
  %endif
 %endif
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
        pushf
 %assign cbFrame            30h
 %if %4 != 0
  %assign cbFrame           cbFrame + 16 * 11  ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
 %endif
 %assign cbBaseFrame        cbFrame
        sub     rsp, cbFrame - 8h       ; We subtract 8 bytes less to account for the pushf above.
        SEH64_ALLOCATE_STACK cbFrame    ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.

 %define frm_fRFlags        -008h
 %define frm_uHostXcr0      -018h       ; 128-bit
 ;%define frm_fNoRestoreXcr0 -020h      ; Non-zero if we should skip XCR0 restoring.
 %define frm_pGstCtx        -028h       ; Where we stash guest CPU context for use after the vmrun.
 %define frm_HCPhysVmcbHost -030h       ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
 %if %4 != 0
  %define frm_saved_xmm6    -040h
  %define frm_saved_xmm7    -050h
  %define frm_saved_xmm8    -060h
  %define frm_saved_xmm9    -070h
  %define frm_saved_xmm10   -080h
  %define frm_saved_xmm11   -090h
  %define frm_saved_xmm12   -0a0h
  %define frm_saved_xmm13   -0b0h
  %define frm_saved_xmm14   -0c0h
  %define frm_saved_xmm15   -0d0h
  %define frm_saved_mxcsr   -0e0h
 %endif

 ; Manual save and restore:
 ;  - General purpose registers except RIP, RSP, RAX
 ;
 ; Trashed:
 ;  - CR2 (we don't care)
 ;  - LDTR (reset to 0)
 ;  - DRx (presumably not changed at all)
 ;  - DR7 (reset to 0x400)

        ; Save all general purpose host registers.
        PUSH_CALLEE_PRESERVED_REGISTERS
        SEH64_END_PROLOGUE
 %if cbFrame != (cbBaseFrame + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
  %error Bad cbFrame value
 %endif

        ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu.  (rdx & rcx will soon be trashed.)
 %ifdef ASM_CALL64_GCC
        mov     r8, rdx                 ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
 %else
        mov     rsi, rdx                ; Put pVCpu in rsi like on GCC as rdx is trashed below.
        ;mov     rdi, rcx               ; Put pVM in rdi like on GCC as rcx is trashed below.
 %endif

 %ifdef VBOX_STRICT
        ;
        ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
        ;
        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
        mov     eax, VERR_SVM_VMRUN_PRECOND_0
        jne     .failure_return

        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT
        cmp     eax, %3
        mov     eax, VERR_SVM_VMRUN_PRECOND_1
        jne     .failure_return

  %ifdef VBOX_WITH_KERNEL_USING_XMM
        mov     eax, VERR_SVM_VMRUN_PRECOND_2
        test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
   %if %4 = 0
        jnz     .failure_return
   %else
        jz      .failure_return

        mov     eax, VERR_SVM_VMRUN_PRECOND_3
        cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
    %if %4 = 1
        jne     .failure_return
    %elif %4 = 2
        je      .failure_return
    %else
     %error Invalid template parameter 4.
    %endif
   %endif
  %endif
 %endif ; VBOX_STRICT

 %if %4 != 0
        ; Save the non-volatile SSE host register state.
        movdqa  [rbp + frm_saved_xmm6 ], xmm6
        movdqa  [rbp + frm_saved_xmm7 ], xmm7
        movdqa  [rbp + frm_saved_xmm8 ], xmm8
        movdqa  [rbp + frm_saved_xmm9 ], xmm9
        movdqa  [rbp + frm_saved_xmm10], xmm10
        movdqa  [rbp + frm_saved_xmm11], xmm11
        movdqa  [rbp + frm_saved_xmm12], xmm12
        movdqa  [rbp + frm_saved_xmm13], xmm13
        movdqa  [rbp + frm_saved_xmm14], xmm14
        movdqa  [rbp + frm_saved_xmm15], xmm15
        stmxcsr [rbp + frm_saved_mxcsr]

        ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
        mov     rcx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        movdqa  xmm0,  [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
        movdqa  xmm1,  [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
        movdqa  xmm2,  [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
        movdqa  xmm3,  [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
        movdqa  xmm4,  [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
        movdqa  xmm5,  [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
        movdqa  xmm6,  [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7,  [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8,  [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9,  [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
        ldmxcsr [rcx + X86FXSTATE.MXCSR]
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xrstor  [rcx]
  %else
   %error invalid template parameter 4
  %endif
 %endif

 %if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
        xor     ecx, ecx
        xgetbv                          ; save the host XCR0 on the stack
        mov     [rbp + frm_uHostXcr0 + 8], rdx
        mov     [rbp + frm_uHostXcr0    ], rax

        mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr]       ; load the guest XCR0
        mov     edx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                ; paranoia
        xsetbv
 %endif

        ; Save host fs, gs, sysenter msr etc.
        mov     rax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.svm + HMR0CPUSVM.HCPhysVmcbHost]
        mov     qword [rbp + frm_HCPhysVmcbHost], rax               ; save for the vmload after vmrun
        lea     rsi, [rsi + VMCPU.cpum.GstCtx]
        mov     qword [rbp + frm_pGstCtx], rsi
        vmsave

 %if %3 & HM_WSF_IBPB_ENTRY
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
 %endif

        ; Setup rax for VMLOAD.
        mov     rax, r8                 ; HCPhysVmcb (64-bit physical address)

        ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
        mov     rbx, qword [rsi + CPUMCTX.ebx]
        mov     rcx, qword [rsi + CPUMCTX.ecx]
        mov     rdx, qword [rsi + CPUMCTX.edx]
        mov     rdi, qword [rsi + CPUMCTX.edi]
        mov     rbp, qword [rsi + CPUMCTX.ebp]
        mov     r8,  qword [rsi + CPUMCTX.r8]
        mov     r9,  qword [rsi + CPUMCTX.r9]
        mov     r10, qword [rsi + CPUMCTX.r10]
        mov     r11, qword [rsi + CPUMCTX.r11]
        mov     r12, qword [rsi + CPUMCTX.r12]
        mov     r13, qword [rsi + CPUMCTX.r13]
        mov     r14, qword [rsi + CPUMCTX.r14]
        mov     r15, qword [rsi + CPUMCTX.r15]
        mov     rsi, qword [rsi + CPUMCTX.esi]

        ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
        clgi
        sti

        ; Load guest FS, GS, Sysenter MSRs etc.
        vmload

        ; Run the VM.
        vmrun

        ; Save guest fs, gs, sysenter msr etc.
        vmsave

        ; Load host fs, gs, sysenter msr etc.
        mov     rax, [rsp + cbFrame + frm_HCPhysVmcbHost]   ; load HCPhysVmcbHost (rbp is not operational yet, thus rsp)
        vmload

        ; Set the global interrupt flag again, but execute cli to make sure IF=0.
        cli
        stgi

        ; Fetch the guest context pointer (stashed above) and save the guest GPRs (sans RSP and RAX).
        mov     rax, [rsp + cbFrame + frm_pGstCtx]  ; (rbp still not operational)

        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8], r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9], r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]

 %if %4 != 0
        ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
        mov     r8, rax
 %endif

 %if %3 & HM_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
 %endif

 %if %2 != 0
        ; Restore the host xcr0.
        xor     ecx, ecx
        mov     rdx, [rbp + frm_uHostXcr0 + 8]
        mov     rax, [rbp + frm_uHostXcr0]
        xsetbv
 %endif

 %if %4 != 0
        ; Save the guest SSE state related to non-volatile and volatile SSE registers.
        mov     rcx, [r8 + CPUMCTX.pXStateR0]
  %if %4 = 1 ; manual
        stmxcsr [rcx + X86FXSTATE.MXCSR]
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
        movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
  %elif %4 = 2 ; use xrstor/xsave
        mov     eax, [r8 + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
        xsave   [rcx]
  %else
   %error invalid template parameter 4
  %endif

        ; Restore the host non-volatile SSE register state.
        ldmxcsr [rbp + frm_saved_mxcsr]
        movdqa  xmm6,  [rbp + frm_saved_xmm6 ]
        movdqa  xmm7,  [rbp + frm_saved_xmm7 ]
        movdqa  xmm8,  [rbp + frm_saved_xmm8 ]
        movdqa  xmm9,  [rbp + frm_saved_xmm9 ]
        movdqa  xmm10, [rbp + frm_saved_xmm10]
        movdqa  xmm11, [rbp + frm_saved_xmm11]
        movdqa  xmm12, [rbp + frm_saved_xmm12]
        movdqa  xmm13, [rbp + frm_saved_xmm13]
        movdqa  xmm14, [rbp + frm_saved_xmm14]
        movdqa  xmm15, [rbp + frm_saved_xmm15]
 %endif ; %4 != 0

        ; Epilogue (assumes we restored volatile registers above when saving the guest GPRs).
        mov     eax, VINF_SUCCESS
        add     rsp, cbFrame - 8h
        popf
        leave
        ret

 %ifdef VBOX_STRICT
        ; Precondition checks failed.
.failure_return:
        POP_CALLEE_PRESERVED_REGISTERS
  %if cbFrame != cbBaseFrame
   %error Bad frame size value: cbFrame
  %endif
        add     rsp, cbFrame - 8h
        popf
        leave
        ret
 %endif

 %undef frm_fRFlags
 %undef frm_uHostXcr0
 %undef frm_fNoRestoreXcr0
 %undef frm_pGstCtx
 %undef frm_HCPhysVmcbHost
 %undef cbFrame
ENDPROC RT_CONCAT(hmR0SvmVmRun,%1)

%endmacro ; hmR0SvmVmRunTemplate

;
; Instantiate the hmR0SvmVmRun various variations.
;
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit,           0, 0,                                    0
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit,           1, 0,                                    0
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit,           0, HM_WSF_IBPB_ENTRY,                    0
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit,           1, HM_WSF_IBPB_ENTRY,                    0
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit,           0, HM_WSF_IBPB_EXIT,                     0
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit,           1, HM_WSF_IBPB_EXIT,                     0
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit,           0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit,           1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
%ifdef VBOX_WITH_KERNEL_USING_XMM
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0,                                    1
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0,                                    1
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY,                    1
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY,                    1
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_EXIT,                     1
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_EXIT,                     1
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1

hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  0, 0,                                    2
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  1, 0,                                    2
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  0, HM_WSF_IBPB_ENTRY,                    2
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  1, HM_WSF_IBPB_ENTRY,                    2
hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  0, HM_WSF_IBPB_EXIT,                     2
hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  1, HM_WSF_IBPB_EXIT,                     2
hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
%endif

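; (As with the VMX variants above, ring-0 C code selects the matching variant
; once, cf. the hmR0SvmUpdateVmRunFunction() note in the template, and caches
; the resulting function pointer for subsequent VMRUNs.)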