VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@ 99725

Last change on this file since 99725 was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 59.8 KB
1; $Id: HMR0A.asm 98103 2023-01-17 14:15:46Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines.
4;
5
6;
7; Copyright (C) 2006-2023 Oracle and/or its affiliates.
8;
9; This file is part of VirtualBox base platform packages, as
10; available from https://www.virtualbox.org.
11;
12; This program is free software; you can redistribute it and/or
13; modify it under the terms of the GNU General Public License
14; as published by the Free Software Foundation, in version 3 of the
15; License.
16;
17; This program is distributed in the hope that it will be useful, but
18; WITHOUT ANY WARRANTY; without even the implied warranty of
19; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20; General Public License for more details.
21;
22; You should have received a copy of the GNU General Public License
23; along with this program; if not, see <https://www.gnu.org/licenses>.
24;
25; SPDX-License-Identifier: GPL-3.0-only
26;
27
28;*********************************************************************************************************************************
29;* Header Files *
30;*********************************************************************************************************************************
31;%define RT_ASM_WITH_SEH64 - trouble with SEH, alignment and (probably) 2nd pass optimizations.
32%define RT_ASM_WITH_SEH64_ALT ; Use asmdefs.mac hackery for manually emitting unwind info.
33%include "VBox/asmdefs.mac"
34%include "VBox/err.mac"
35%include "VBox/vmm/hm_vmx.mac"
36%include "VBox/vmm/cpum.mac"
37%include "VBox/vmm/gvm.mac"
38%include "iprt/x86.mac"
39%include "HMInternal.mac"
40
41%ifndef RT_ARCH_AMD64
42 %error AMD64 only.
43%endif
44
45
46;*********************************************************************************************************************************
47;* Defined Constants And Macros *
48;*********************************************************************************************************************************
49;; The offset of the XMM registers in X86FXSTATE.
50; Use define because I'm too lazy to convert the struct.
51%define XMM_OFF_IN_X86FXSTATE 160
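; (160 = 0a0h is the architectural offset of XMM0 within the 512-byte legacy FXSAVE/FXRSTOR image
;  that X86FXSTATE mirrors; the movdqa load/save blocks below index CPUMCTX.XState with this offset,
;  so it has to stay in sync with that layout.)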
52
53;; Spectre filler for 64-bit mode.
54; Chosen to be an invalid (non-canonical) address, also with 5-level paging.
55%define SPECTRE_FILLER 0x02204204207fffff
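; The filler is loaded into the scratch GPRs right after the guest registers have been saved on
; VM-exit (see RESTORE_STATE_VMX and the SVM exit path below), so any speculative use of a stale
; guest pointer dereferences a non-canonical address instead of host data.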
56
57;;
58; Whether to skip restoring the GDTR, IDTR and TR across VMX non-root operation.
59;
60; @note This is normally done by hmR0VmxExportHostSegmentRegs and VMXRestoreHostState,
61; so much of this is untested code.
62; @{
63%define VMX_SKIP_GDTR
64%define VMX_SKIP_TR
65%define VBOX_SKIP_RESTORE_SEG
66%ifdef RT_OS_DARWIN
67 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
68 ; risk loading a stale LDT value or something invalid.
69 %define HM_64_BIT_USE_NULL_SEL
70 ; Darwin (Mavericks) uses the IDTR limit to store the CPU number, so we always need to restore it.
71 ; See @bugref{6875}.
72 %undef VMX_SKIP_IDTR
73%else
74 %define VMX_SKIP_IDTR
75%endif
76;; @}
77
78;; @def CALLEE_PRESERVED_REGISTER_COUNT
79; Number of registers pushed by PUSH_CALLEE_PRESERVED_REGISTERS
80%ifdef ASM_CALL64_GCC
81 %define CALLEE_PRESERVED_REGISTER_COUNT 5
82%else
83 %define CALLEE_PRESERVED_REGISTER_COUNT 7
84%endif
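; (The SysV AMD64 ABI used by GCC treats rbx and r12-r15 as callee-saved, hence 5 pushes; the
;  Microsoft x64 ABI additionally treats rsi and rdi as callee-saved, hence 7. This count must
;  match PUSH_CALLEE_PRESERVED_REGISTERS below.)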
85
86;; @def PUSH_CALLEE_PRESERVED_REGISTERS
87; Macro for pushing all GPRs we must preserve for the caller.
88%macro PUSH_CALLEE_PRESERVED_REGISTERS 0
89 push r15
90 SEH64_PUSH_GREG r15
91 %assign cbFrame cbFrame + 8
92 %assign frm_saved_r15 -cbFrame
93
94 push r14
95 SEH64_PUSH_GREG r14
96 %assign cbFrame cbFrame + 8
97 %assign frm_saved_r14 -cbFrame
98
99 push r13
100 SEH64_PUSH_GREG r13
101 %assign cbFrame cbFrame + 8
102 %assign frm_saved_r13 -cbFrame
103
104 push r12
105 SEH64_PUSH_GREG r12
106 %assign cbFrame cbFrame + 8
107 %assign frm_saved_r12 -cbFrame
108
109 push rbx
110 SEH64_PUSH_GREG rbx
111 %assign cbFrame cbFrame + 8
112 %assign frm_saved_rbx -cbFrame
113
114 %ifdef ASM_CALL64_MSC
115 push rsi
116 SEH64_PUSH_GREG rsi
117 %assign cbFrame cbFrame + 8
118 %assign frm_saved_rsi -cbFrame
119
120 push rdi
121 SEH64_PUSH_GREG rdi
122 %assign cbFrame cbFrame + 8
123 %assign frm_saved_rdi -cbFrame
124 %endif
125%endmacro
126
127;; @def POP_CALLEE_PRESERVED_REGISTERS
128; Counterpart to PUSH_CALLEE_PRESERVED_REGISTERS for use in the epilogue.
129%macro POP_CALLEE_PRESERVED_REGISTERS 0
130 %ifdef ASM_CALL64_MSC
131 pop rdi
132 %assign cbFrame cbFrame - 8
133 %undef frm_saved_rdi
134
135 pop rsi
136 %assign cbFrame cbFrame - 8
137 %undef frm_saved_rsi
138 %endif
139 pop rbx
140 %assign cbFrame cbFrame - 8
141 %undef frm_saved_rbx
142
143 pop r12
144 %assign cbFrame cbFrame - 8
145 %undef frm_saved_r12
146
147 pop r13
148 %assign cbFrame cbFrame - 8
149 %undef frm_saved_r13
150
151 pop r14
152 %assign cbFrame cbFrame - 8
153 %undef frm_saved_r14
154
155 pop r15
156 %assign cbFrame cbFrame - 8
157 %undef frm_saved_r15
158%endmacro
159
160
161;; @def PUSH_RELEVANT_SEGMENT_REGISTERS
162; Macro for saving all relevant segment registers on the stack.
163; @param 1 Full width register name.
164; @param 2 16-bit register name for \a 1.
165; @clobbers rax, rdx, rcx
166%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
167 %ifndef VBOX_SKIP_RESTORE_SEG
168 %error untested code. probably does not work any more!
169 %ifndef HM_64_BIT_USE_NULL_SEL
170 mov %2, es
171 push %1
172 mov %2, ds
173 push %1
174 %endif
175
176 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode,
177 ; Solaris OTOH doesn't, so we must save it.
178 mov ecx, MSR_K8_FS_BASE
179 rdmsr
180 push rdx
181 push rax
182 %ifndef HM_64_BIT_USE_NULL_SEL
183 push fs
184 %endif
185
186 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel.
187 ; The same happens on exit.
188 mov ecx, MSR_K8_GS_BASE
189 rdmsr
190 push rdx
191 push rax
192 %ifndef HM_64_BIT_USE_NULL_SEL
193 push gs
194 %endif
195 %endif ; !VBOX_SKIP_RESTORE_SEG
196%endmacro ; PUSH_RELEVANT_SEGMENT_REGISTERS
197
198;; @def POP_RELEVANT_SEGMENT_REGISTERS
199; Macro for restoring all relevant segment registers from the stack.
200; @param 1 Full width register name.
201; @param 2 16-bit register name for \a 1.
202; @clobbers rax, rdx, rcx
203%macro POP_RELEVANT_SEGMENT_REGISTERS 2
204 %ifndef VBOX_SKIP_RESTORE_SEG
205 %error untested code. probably does not work any more!
206 ; Note: do not step through this code with a debugger!
207 %ifndef HM_64_BIT_USE_NULL_SEL
208 xor eax, eax
209 mov ds, ax
210 mov es, ax
211 mov fs, ax
212 mov gs, ax
213 %endif
214
215 %ifndef HM_64_BIT_USE_NULL_SEL
216 pop gs
217 %endif
218 pop rax
219 pop rdx
220 mov ecx, MSR_K8_GS_BASE
221 wrmsr
222
223 %ifndef HM_64_BIT_USE_NULL_SEL
224 pop fs
225 %endif
226 pop rax
227 pop rdx
228 mov ecx, MSR_K8_FS_BASE
229 wrmsr
230 ; Now it's safe to step again
231
232 %ifndef HM_64_BIT_USE_NULL_SEL
233 pop %1
234 mov ds, %2
235 pop %1
236 mov es, %2
237 %endif
238 %endif ; !VBOX_SKIP_RESTORE_SEG
239%endmacro ; POP_RELEVANT_SEGMENT_REGISTERS
240
241
242;*********************************************************************************************************************************
243;* External Symbols *
244;*********************************************************************************************************************************
245%ifdef VBOX_WITH_KERNEL_USING_XMM
246extern NAME(CPUMIsGuestFPUStateActive)
247%endif
248
249
250BEGINCODE
251
252
253;;
254; Used on platforms with poor inline assembly support to retrieve all the
255; info from the CPU and put it in the @a pRestoreHost structure.
256;
257; @returns VBox status code
258; @param pRestoreHost msc: rcx gcc: rdi Pointer to the RestoreHost struct.
259; @param fHaveFsGsBase msc: dl gcc: sil Whether we can use rdfsbase or not.
260;
261ALIGNCODE(64)
262BEGINPROC hmR0VmxExportHostSegmentRegsAsmHlp
263%ifdef ASM_CALL64_MSC
264 %define pRestoreHost rcx
265%elifdef ASM_CALL64_GCC
266 %define pRestoreHost rdi
267%else
268 %error Unknown calling convention.
269%endif
270 SEH64_END_PROLOGUE
271
272 ; Start with the FS and GS base so we can trash DL/SIL.
273%ifdef ASM_CALL64_MSC
274 or dl, dl
275%else
276 or sil, sil
277%endif
278 jz .use_rdmsr_for_fs_and_gs_base
279 rdfsbase rax
280 mov [pRestoreHost + VMXRESTOREHOST.uHostFSBase], rax
281 rdgsbase rax
282 mov [pRestoreHost + VMXRESTOREHOST.uHostGSBase], rax
283.done_fs_and_gs_base:
284
285 ; TR, GDTR and IDTR
286 str [pRestoreHost + VMXRESTOREHOST.uHostSelTR]
287 sgdt [pRestoreHost + VMXRESTOREHOST.HostGdtr]
288 sidt [pRestoreHost + VMXRESTOREHOST.HostIdtr]
289
290 ; Segment registers.
291 xor eax, eax
292 mov eax, cs
293 mov [pRestoreHost + VMXRESTOREHOST.uHostSelCS], ax
294
295 mov eax, ss
296 mov [pRestoreHost + VMXRESTOREHOST.uHostSelSS], ax
297
298 mov eax, gs
299 mov [pRestoreHost + VMXRESTOREHOST.uHostSelGS], ax
300
301 mov eax, fs
302 mov [pRestoreHost + VMXRESTOREHOST.uHostSelFS], ax
303
304 mov eax, es
305 mov [pRestoreHost + VMXRESTOREHOST.uHostSelES], ax
306
307 mov eax, ds
308 mov [pRestoreHost + VMXRESTOREHOST.uHostSelDS], ax
309
310 ret
311
312ALIGNCODE(16)
313.use_rdmsr_for_fs_and_gs_base:
314%ifdef ASM_CALL64_MSC
315 mov r8, pRestoreHost
316%endif
317
318 mov ecx, MSR_K8_FS_BASE
319 rdmsr ; MSR value returned in edx:eax
320 shl rdx, 32
321 or rdx, rax ; assemble the 64-bit FS base in rdx
322 mov [r8 + VMXRESTOREHOST.uHostFSBase], rdx
323
324 mov ecx, MSR_K8_GS_BASE
325 rdmsr
326 shl rdx, 32
327 or rdx, rax
328 mov [r8 + VMXRESTOREHOST.uHostGSBase], rdx
329
330%ifdef ASM_CALL64_MSC
331 mov pRestoreHost, r8
332%endif
333 jmp .done_fs_and_gs_base
334%undef pRestoreHost
335ENDPROC hmR0VmxExportHostSegmentRegsAsmHlp
336
337
338;;
339; Restores host-state fields.
340;
341; @returns VBox status code
342; @param f32RestoreHost msc: ecx gcc: edi RestoreHost flags.
343; @param pRestoreHost msc: rdx gcc: rsi Pointer to the RestoreHost struct.
344;
345ALIGNCODE(64)
346BEGINPROC VMXRestoreHostState
347%ifndef ASM_CALL64_GCC
348 ; Use GCC's input registers since we'll be needing both rcx and rdx further
349 ; down with the wrmsr instruction. Use R10 and R11 for saving RDI and RSI,
350 ; since the MSC calling convention treats those two registers as callee-saved.
351 mov r10, rdi
352 mov r11, rsi
353 mov rdi, rcx
354 mov rsi, rdx
355%endif
356 SEH64_END_PROLOGUE
357
358.restore_gdtr:
359 test edi, VMX_RESTORE_HOST_GDTR
360 jz .restore_idtr
361 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
362
363.restore_idtr:
364 test edi, VMX_RESTORE_HOST_IDTR
365 jz .restore_ds
366 lidt [rsi + VMXRESTOREHOST.HostIdtr]
367
368.restore_ds:
369 test edi, VMX_RESTORE_HOST_SEL_DS
370 jz .restore_es
371 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
372 mov ds, eax
373
374.restore_es:
375 test edi, VMX_RESTORE_HOST_SEL_ES
376 jz .restore_tr
377 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
378 mov es, eax
379
380.restore_tr:
381 test edi, VMX_RESTORE_HOST_SEL_TR
382 jz .restore_fs
383 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
384 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
385 mov ax, dx
386 and eax, X86_SEL_MASK_OFF_RPL ; mask away TI and RPL bits leaving only the descriptor offset
387 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
388 jnz .gdt_readonly_or_need_writable
389 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
390 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
391 ltr dx
392
393.restore_fs:
394 ;
395 ; When restoring the selector values for FS and GS, we'll temporarily trash
396 ; the base address (at least the high 32 bits, but quite possibly the
397 ; whole base address), the wrmsr will restore it correctly. (VT-x actually
398 ; restores the base correctly when leaving guest mode, but not the selector
399 ; value, so there is little problem with interrupts being enabled prior to
400 ; this restore job.)
401 ; We'll disable ints once for both FS and GS as that's probably faster.
402 ;
403 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
404 jz .restore_success
405 pushfq
406 cli ; (see above)
407
408 test edi, VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE
409 jz .restore_fs_using_wrmsr
410
411.restore_fs_using_wrfsbase:
412 test edi, VMX_RESTORE_HOST_SEL_FS
413 jz .restore_gs_using_wrgsbase
414 mov rax, qword [rsi + VMXRESTOREHOST.uHostFSBase]
415 mov cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
416 mov fs, ecx
417 wrfsbase rax
418
419.restore_gs_using_wrgsbase:
420 test edi, VMX_RESTORE_HOST_SEL_GS
421 jz .restore_flags
422 mov rax, qword [rsi + VMXRESTOREHOST.uHostGSBase]
423 mov cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
424 mov gs, ecx
425 wrgsbase rax
426
427.restore_flags:
428 popfq
429
430.restore_success:
431 mov eax, VINF_SUCCESS
432%ifndef ASM_CALL64_GCC
433 ; Restore RDI and RSI on MSC.
434 mov rdi, r10
435 mov rsi, r11
436%endif
437 ret
438
439ALIGNCODE(8)
440.gdt_readonly_or_need_writable:
441 test edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
442 jnz .gdt_readonly_need_writable
443.gdt_readonly:
444 mov rcx, cr0
445 mov r9, rcx
446 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
447 and rcx, ~X86_CR0_WP
448 mov cr0, rcx
449 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
450 ltr dx
451 mov cr0, r9
452 jmp .restore_fs
453
454ALIGNCODE(8)
455.gdt_readonly_need_writable:
456 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw
457 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
458 lgdt [rsi + VMXRESTOREHOST.HostGdtrRw]
459 ltr dx
460 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; load the original GDT
461 jmp .restore_fs
462
463ALIGNCODE(8)
464.restore_fs_using_wrmsr:
465 test edi, VMX_RESTORE_HOST_SEL_FS
466 jz .restore_gs_using_wrmsr
467 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
468 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
469 mov cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
470 mov fs, ecx
471 mov ecx, MSR_K8_FS_BASE
472 wrmsr
473
474.restore_gs_using_wrmsr:
475 test edi, VMX_RESTORE_HOST_SEL_GS
476 jz .restore_flags
477 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
478 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
479 mov cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
480 mov gs, ecx
481 mov ecx, MSR_K8_GS_BASE
482 wrmsr
483 jmp .restore_flags
484ENDPROC VMXRestoreHostState
485
486
487;;
488; Clears the MDS buffers using VERW.
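; On CPUs that enumerate MD_CLEAR, VERW with a memory operand has the side effect of overwriting
; the affected microarchitectural buffers; the routine simply spills the current DS value to the
; stack so it has a memory operand to point VERW at.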
489ALIGNCODE(16)
490BEGINPROC hmR0MdsClear
491 SEH64_END_PROLOGUE
492 sub xSP, xCB
493 mov [xSP], ds
494 verw [xSP]
495 add xSP, xCB
496 ret
497ENDPROC hmR0MdsClear
498
499
500;;
501; Dispatches an NMI to the host.
502;
503ALIGNCODE(16)
504BEGINPROC VMXDispatchHostNmi
505 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
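 ; Note: a software 'int 2' vectors through the host IDT entry for NMI like a regular interrupt;
 ; unlike a hardware-asserted NMI it does not set the CPU's internal NMI-blocking state.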
506 SEH64_END_PROLOGUE
507 int 2
508 ret
509ENDPROC VMXDispatchHostNmi
510
511
512;;
513; Common restore logic for success and error paths. We duplicate this because we
514; don't want to waste cycles writing the VINF_SUCCESS return value to the stack in the
515; regular code path.
516;
517; @param 1 Zero if regular return, non-zero if error return. Controls label emission.
518; @param 2 fLoadSaveGuestXcr0 value
519; @param 3 The (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY) + HM_WSF_IBPB_EXIT value.
520; The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
521; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
522;
523; @note Important that this does not modify cbFrame or rsp.
524%macro RESTORE_STATE_VMX 4
525 ; Restore base and limit of the IDTR & GDTR.
526 %ifndef VMX_SKIP_IDTR
527 lidt [rsp + cbFrame + frm_saved_idtr]
528 %endif
529 %ifndef VMX_SKIP_GDTR
530 lgdt [rsp + cbFrame + frm_saved_gdtr]
531 %endif
532
533 ; Save the guest state and restore the non-volatile registers. We use rcx=pGstCtx (&pVCpu->cpum.GstCtx) here.
534 mov [rsp + cbFrame + frm_guest_rcx], rcx
535 mov rcx, [rsp + cbFrame + frm_pGstCtx]
536
537 mov qword [rcx + CPUMCTX.eax], rax
538 mov qword [rcx + CPUMCTX.edx], rdx
539 rdtsc
540 mov qword [rcx + CPUMCTX.ebp], rbp
541 lea rbp, [rsp + cbFrame] ; re-establish the frame pointer as early as possible.
542 shl rdx, 20h
543 or rax, rdx ; TSC value in RAX
544 mov rdx, [rbp + frm_guest_rcx]
545 mov qword [rcx + CPUMCTX.ecx], rdx
546 mov rdx, SPECTRE_FILLER ; FILLER in RDX
547 mov qword [rcx + GVMCPU.hmr0 + HMR0PERVCPU.uTscExit - VMCPU.cpum.GstCtx], rax
548 mov qword [rcx + CPUMCTX.r8], r8
549 mov r8, rdx
550 mov qword [rcx + CPUMCTX.r9], r9
551 mov r9, rdx
552 mov qword [rcx + CPUMCTX.r10], r10
553 mov r10, rdx
554 mov qword [rcx + CPUMCTX.r11], r11
555 mov r11, rdx
556 mov qword [rcx + CPUMCTX.esi], rsi
557 %ifdef ASM_CALL64_MSC
558 mov rsi, [rbp + frm_saved_rsi]
559 %else
560 mov rsi, rdx
561 %endif
562 mov qword [rcx + CPUMCTX.edi], rdi
563 %ifdef ASM_CALL64_MSC
564 mov rdi, [rbp + frm_saved_rdi]
565 %else
566 mov rdi, rdx
567 %endif
568 mov qword [rcx + CPUMCTX.ebx], rbx
569 mov rbx, [rbp + frm_saved_rbx]
570 mov qword [rcx + CPUMCTX.r12], r12
571 mov r12, [rbp + frm_saved_r12]
572 mov qword [rcx + CPUMCTX.r13], r13
573 mov r13, [rbp + frm_saved_r13]
574 mov qword [rcx + CPUMCTX.r14], r14
575 mov r14, [rbp + frm_saved_r14]
576 mov qword [rcx + CPUMCTX.r15], r15
577 mov r15, [rbp + frm_saved_r15]
578
579 mov rax, cr2
580 mov qword [rcx + CPUMCTX.cr2], rax
581 mov rax, rdx
582
583 %if %4 != 0
584 ; Save the context pointer in r8 for the SSE save/restore.
585 mov r8, rcx
586 %endif
587
588 %if %3 & HM_WSF_IBPB_EXIT
589 ; Fight spectre (trashes rax, rdx and rcx).
590 %if %1 = 0 ; Skip this in failure branch (=> guru)
591 mov ecx, MSR_IA32_PRED_CMD
592 mov eax, MSR_IA32_PRED_CMD_F_IBPB
593 xor edx, edx
594 wrmsr
595 %endif
596 %endif
597
598 %ifndef VMX_SKIP_TR
599 ; Restore TSS selector; must mark it as not busy before using ltr!
600 ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
601 %ifndef VMX_SKIP_GDTR
602 lgdt [rbp + frm_saved_gdtr]
603 %endif
604 movzx eax, word [rbp + frm_saved_tr]
605 mov ecx, eax
606 and eax, X86_SEL_MASK_OFF_RPL ; mask away TI and RPL bits leaving only the descriptor offset
607 add rax, [rbp + frm_saved_gdtr + 2] ; eax <- GDTR.address + descriptor offset
608 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
609 ltr cx
610 %endif
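 ; A VM-exit loads LDTR with a null selector, so the LDT only needs to be reloaded here when the
 ; host actually had a non-null LDTR on entry.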
611 movzx edx, word [rbp + frm_saved_ldtr]
612 test edx, edx
613 jz %%skip_ldt_write
614 lldt dx
615%%skip_ldt_write:
616
617 %if %1 != 0
618.return_after_vmwrite_error:
619 %endif
620 ; Restore segment registers.
621 ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.
622
623 %if %2 != 0
624 ; Restore the host XCR0.
625 xor ecx, ecx
626 mov eax, [rbp + frm_uHostXcr0]
627 mov edx, [rbp + frm_uHostXcr0 + 4]
628 xsetbv
629 %endif
630%endmacro ; RESTORE_STATE_VMX
631
632
633;;
634; hmR0VmxStartVm template
635;
636; @param 1 The suffix of the variation.
637; @param 2 fLoadSaveGuestXcr0 value
638; @param 3 The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
639; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
640; Drivers shouldn't use AVX registers without saving+loading:
641; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
642; However the compiler docs have different idea:
643; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
644; We'll go with the former for now.
645;
646%macro hmR0VmxStartVmTemplate 4
647
648;;
649; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode).
650;
651; @returns VBox status code
652; @param pVmcsInfo msc:rcx, gcc:rdi Pointer to the VMCS info (for cached host RIP and RSP).
653; @param pVCpu msc:rdx, gcc:rsi The cross context virtual CPU structure of the calling EMT.
654; @param fResume msc:r8l, gcc:dl Whether to use vmlaunch/vmresume.
655;
656ALIGNCODE(64)
657BEGINPROC RT_CONCAT(hmR0VmxStartVm,%1)
658 %ifdef VBOX_WITH_KERNEL_USING_XMM
659 %if %4 = 0
660 ;
661 ; The non-saving variant will currently check the two SSE preconditions and pick
662 ; the right variant to continue with. Later we may be able to move these
663 ; decisions into hmR0VmxUpdateStartVmFunction().
664 ;
665 %ifdef ASM_CALL64_MSC
666 test byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
667 %else
668 test byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
669 %endif
670 jz .save_xmm_no_need
671 %ifdef ASM_CALL64_MSC
672 cmp dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
673 %else
674 cmp dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
675 %endif
676 je RT_CONCAT3(hmR0VmxStartVm,%1,_SseManual)
677 jmp RT_CONCAT3(hmR0VmxStartVm,%1,_SseXSave)
678.save_xmm_no_need:
679 %endif
680 %endif
681 push xBP
682 SEH64_PUSH_xBP
683 mov xBP, xSP
684 SEH64_SET_FRAME_xBP 0
685 pushf
686 cli
687
688 %define frm_fRFlags -008h
689 %define frm_pGstCtx -010h ; Where we stash guest CPU context for use after the vmrun.
690 %define frm_uHostXcr0 -020h ; 128-bit
691 %define frm_saved_gdtr -02ah ; 16+64: Only used when VMX_SKIP_GDTR isn't defined
692 %define frm_saved_tr -02ch ; 16-bit: Only used when VMX_SKIP_TR isn't defined
693 %define frm_MDS_seg -030h ; 16-bit: Temporary storage for the MDS flushing.
694 %define frm_saved_idtr -03ah ; 16+64: Only used when VMX_SKIP_IDTR isn't defined
695 %define frm_saved_ldtr -03ch ; 16-bit: always saved.
696 %define frm_rcError -040h ; 32-bit: Error status code (not used in the success path)
697 %define frm_guest_rcx -048h ; Temporary storage slot for guest RCX.
698 %if %4 = 0
699 %assign cbFrame 048h
700 %else
701 %define frm_saved_xmm6 -050h
702 %define frm_saved_xmm7 -060h
703 %define frm_saved_xmm8 -070h
704 %define frm_saved_xmm9 -080h
705 %define frm_saved_xmm10 -090h
706 %define frm_saved_xmm11 -0a0h
707 %define frm_saved_xmm12 -0b0h
708 %define frm_saved_xmm13 -0c0h
709 %define frm_saved_xmm14 -0d0h
710 %define frm_saved_xmm15 -0e0h
711 %define frm_saved_mxcsr -0f0h
712 %assign cbFrame 0f0h
713 %endif
714 %assign cbBaseFrame cbFrame
715 sub rsp, cbFrame - 8h
716 SEH64_ALLOCATE_STACK cbFrame
717
718 ; Save all general purpose host registers.
719 PUSH_CALLEE_PRESERVED_REGISTERS
720 ;PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax - currently broken
721 SEH64_END_PROLOGUE
722
723 ;
724 ; Unify the input parameter registers: r9=pVmcsInfo, rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
725 ;
726 %ifdef ASM_CALL64_GCC
727 mov r9, rdi ; pVmcsInfo
728 mov ebx, edx ; fResume
729 %else
730 mov r9, rcx ; pVmcsInfo
731 mov rsi, rdx ; pVCpu
732 mov ebx, r8d ; fResume
733 %endif
734 lea rdi, [rsi + VMCPU.cpum.GstCtx]
735 mov [rbp + frm_pGstCtx], rdi
736
737 %ifdef VBOX_STRICT
738 ;
739 ; Verify template preconditions / parameters to ensure the VMX ring-0 code didn't miss some state change.
740 ;
741 cmp byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
742 mov eax, VERR_VMX_STARTVM_PRECOND_0
743 jne NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
744
745 mov eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
746 and eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT
747 cmp eax, %3
748 mov eax, VERR_VMX_STARTVM_PRECOND_1
749 jne NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
750
751 %ifdef VBOX_WITH_KERNEL_USING_XMM
752 mov eax, VERR_VMX_STARTVM_PRECOND_2
753 test byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
754 %if %4 = 0
755 jnz NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
756 %else
757 jz NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
758
759 mov eax, VERR_VMX_STARTVM_PRECOND_3
760 cmp dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
761 %if %4 = 1
762 jne NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
763 %elif %4 = 2
764 je NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
765 %else
766 %error Invalid template parameter 4.
767 %endif
768 %endif
769 %endif
770 %endif ; VBOX_STRICT
771
772 %if %4 != 0
773 ; Save the non-volatile SSE host register state.
774 movdqa [rbp + frm_saved_xmm6 ], xmm6
775 movdqa [rbp + frm_saved_xmm7 ], xmm7
776 movdqa [rbp + frm_saved_xmm8 ], xmm8
777 movdqa [rbp + frm_saved_xmm9 ], xmm9
778 movdqa [rbp + frm_saved_xmm10], xmm10
779 movdqa [rbp + frm_saved_xmm11], xmm11
780 movdqa [rbp + frm_saved_xmm12], xmm12
781 movdqa [rbp + frm_saved_xmm13], xmm13
782 movdqa [rbp + frm_saved_xmm14], xmm14
783 movdqa [rbp + frm_saved_xmm15], xmm15
784 stmxcsr [rbp + frm_saved_mxcsr]
785
786 ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
787 lea rcx, [rdi + CPUMCTX.XState]
788 %if %4 = 1 ; manual
789 movdqa xmm0, [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
790 movdqa xmm1, [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
791 movdqa xmm2, [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
792 movdqa xmm3, [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
793 movdqa xmm4, [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
794 movdqa xmm5, [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
795 movdqa xmm6, [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
796 movdqa xmm7, [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
797 movdqa xmm8, [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
798 movdqa xmm9, [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
799 movdqa xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
800 movdqa xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
801 movdqa xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
802 movdqa xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
803 movdqa xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
804 movdqa xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
805 ldmxcsr [rcx + X86FXSTATE.MXCSR]
806 %elif %4 = 2 ; use xrstor/xsave
807 mov eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
808 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
809 xor edx, edx
810 xrstor [rcx]
811 %else
812 %error invalid template parameter 4
813 %endif
814 %endif
815
816 %if %2 != 0
817 ; Save the host XCR0 and load the guest one if necessary.
818 ; Note! Trashes rax, rdx and rcx.
819 xor ecx, ecx
820 xgetbv ; save the host one on the stack
821 mov [rbp + frm_uHostXcr0], eax
822 mov [rbp + frm_uHostXcr0 + 4], edx
823
824 mov eax, [rdi + CPUMCTX.aXcr] ; load the guest one
825 mov edx, [rdi + CPUMCTX.aXcr + 4]
826 xor ecx, ecx ; paranoia; indicate that we must restore XCR0 (popped into ecx, thus 0)
827 xsetbv
828 %endif
829
830 ; Save host LDTR.
831 sldt word [rbp + frm_saved_ldtr]
832
833 %ifndef VMX_SKIP_TR
834 ; The host TR limit is reset to 0x67; save & restore it manually.
835 str word [rbp + frm_saved_tr]
836 %endif
837
838 %ifndef VMX_SKIP_GDTR
839 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
840 sgdt [rbp + frm_saved_gdtr]
841 %endif
842 %ifndef VMX_SKIP_IDTR
843 sidt [rbp + frm_saved_idtr]
844 %endif
845
846 ; Load CR2 if necessary (expensive as writing CR2 is a synchronizing instruction; bird: still expensive on the 10980xe).
847 mov rcx, qword [rdi + CPUMCTX.cr2]
848 mov rdx, cr2
849 cmp rcx, rdx
850 je .skip_cr2_write
851 mov cr2, rcx
852.skip_cr2_write:
853
854 ; Set the vmlaunch/vmresume "return" host RIP and RSP values if they've changed (unlikely).
855 ; The vmwrite isn't quite free (on a 10980xe at least), thus we check if anything changed
856 ; before writing here.
857 lea rcx, [NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) wrt rip]
858 cmp rcx, [r9 + VMXVMCSINFO.uHostRip]
859 jne .write_host_rip
860.wrote_host_rip:
861 cmp rsp, [r9 + VMXVMCSINFO.uHostRsp]
862 jne .write_host_rsp
863.wrote_host_rsp:
864
865 ;
866 ; Fight spectre and similar. Trashes rax, rcx, and rdx.
867 ;
868 %if %3 & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY) ; The eax:edx value is the same for the first two.
869 AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
870 mov eax, MSR_IA32_PRED_CMD_F_IBPB
871 xor edx, edx
872 %endif
873 %if %3 & HM_WSF_IBPB_ENTRY ; Indirect branch barrier.
874 mov ecx, MSR_IA32_PRED_CMD
875 wrmsr
876 %endif
877 %if %3 & HM_WSF_L1D_ENTRY ; Level 1 data cache flush.
878 mov ecx, MSR_IA32_FLUSH_CMD
879 wrmsr
880 %elif %3 & HM_WSF_MDS_ENTRY ; MDS flushing is included in L1D_FLUSH
881 mov word [rbp + frm_MDS_seg], ds
882 verw word [rbp + frm_MDS_seg]
883 %endif
884
885 ; Resume or start VM?
886 cmp bl, 0 ; fResume
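 ; (The flags from this compare are only consumed by the 'je .vmlaunch64_launch' below, after all
 ;  the guest GPRs - including rbx - have been loaded; mov does not modify rflags.)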
887
888 ; Load guest general purpose registers.
889 mov rax, qword [rdi + CPUMCTX.eax]
890 mov rbx, qword [rdi + CPUMCTX.ebx]
891 mov rcx, qword [rdi + CPUMCTX.ecx]
892 mov rdx, qword [rdi + CPUMCTX.edx]
893 mov rbp, qword [rdi + CPUMCTX.ebp]
894 mov rsi, qword [rdi + CPUMCTX.esi]
895 mov r8, qword [rdi + CPUMCTX.r8]
896 mov r9, qword [rdi + CPUMCTX.r9]
897 mov r10, qword [rdi + CPUMCTX.r10]
898 mov r11, qword [rdi + CPUMCTX.r11]
899 mov r12, qword [rdi + CPUMCTX.r12]
900 mov r13, qword [rdi + CPUMCTX.r13]
901 mov r14, qword [rdi + CPUMCTX.r14]
902 mov r15, qword [rdi + CPUMCTX.r15]
903 mov rdi, qword [rdi + CPUMCTX.edi]
904
905 je .vmlaunch64_launch
906
907 vmresume
908 jc NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
909 jz NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
910 jmp NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) ; here if vmresume detected a failure
911
912.vmlaunch64_launch:
913 vmlaunch
914 jc NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_invalid_vmcs_ptr)
915 jz NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmxstart64_start_failed)
916 jmp NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) ; here if vmlaunch detected a failure
917
918
919; Put these two outside the normal code path as they should rarely change.
920ALIGNCODE(8)
921.write_host_rip:
922 %ifdef VBOX_WITH_STATISTICS
923 inc qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRip]
924 %endif
925 mov [r9 + VMXVMCSINFO.uHostRip], rcx
926 mov eax, VMX_VMCS_HOST_RIP ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
927 vmwrite rax, rcx ;; the VMXVMCSINFO::pfnStartVM function changes (eventually
928 %ifdef VBOX_STRICT ;; take the Windows/SSE stuff into account then)...
929 jna NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
930 %endif
931 jmp .wrote_host_rip
932
933ALIGNCODE(8)
934.write_host_rsp:
935 %ifdef VBOX_WITH_STATISTICS
936 inc qword [rsi + VMCPU.hm + HMCPU.StatVmxWriteHostRsp]
937 %endif
938 mov [r9 + VMXVMCSINFO.uHostRsp], rsp
939 mov eax, VMX_VMCS_HOST_RSP
940 vmwrite rax, rsp
941 %ifdef VBOX_STRICT
942 jna NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).vmwrite_failed)
943 %endif
944 jmp .wrote_host_rsp
945
946ALIGNCODE(64)
947GLOBALNAME RT_CONCAT(hmR0VmxStartVmHostRIP,%1)
948 RESTORE_STATE_VMX 0, %2, %3, %4
949 mov eax, VINF_SUCCESS
950
951.vmstart64_end:
952 %if %4 != 0
953 mov r11d, eax ; save the return code.
954
955 ; Save the guest SSE state related to non-volatile and volatile SSE registers.
956 lea rcx, [r8 + CPUMCTX.XState]
957 %if %4 = 1 ; manual
958 stmxcsr [rcx + X86FXSTATE.MXCSR]
959 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
960 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
961 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
962 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
963 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
964 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
965 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
966 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
967 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
968 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
969 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
970 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
971 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
972 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
973 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
974 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
975 %elif %4 = 2 ; use xrstor/xsave
976 mov eax, [r8 + CPUMCTX.fXStateMask]
977 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
978 xor edx, edx
979 xsave [rcx]
980 %else
981 %error invalid template parameter 4
982 %endif
983
984 ; Restore the host non-volatile SSE register state.
985 ldmxcsr [rbp + frm_saved_mxcsr]
986 movdqa xmm6, [rbp + frm_saved_xmm6 ]
987 movdqa xmm7, [rbp + frm_saved_xmm7 ]
988 movdqa xmm8, [rbp + frm_saved_xmm8 ]
989 movdqa xmm9, [rbp + frm_saved_xmm9 ]
990 movdqa xmm10, [rbp + frm_saved_xmm10]
991 movdqa xmm11, [rbp + frm_saved_xmm11]
992 movdqa xmm12, [rbp + frm_saved_xmm12]
993 movdqa xmm13, [rbp + frm_saved_xmm13]
994 movdqa xmm14, [rbp + frm_saved_xmm14]
995 movdqa xmm15, [rbp + frm_saved_xmm15]
996
997 mov eax, r11d
998 %endif ; %4 != 0
999
1000 lea rsp, [rbp + frm_fRFlags]
1001 popf
1002 leave
1003 ret
1004
1005 ;
1006 ; Error returns.
1007 ;
1008 %ifdef VBOX_STRICT
1009.vmwrite_failed:
1010 mov dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_FIELD
1011 jz .return_after_vmwrite_error
1012 mov dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR
1013 jmp .return_after_vmwrite_error
1014 %endif
1015.vmxstart64_invalid_vmcs_ptr:
1016 mov dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1017 jmp .vmstart64_error_return
1018.vmxstart64_start_failed:
1019 mov dword [rsp + cbFrame + frm_rcError], VERR_VMX_UNABLE_TO_START_VM
1020.vmstart64_error_return:
1021 RESTORE_STATE_VMX 1, %2, %3, %4
1022 mov eax, [rbp + frm_rcError]
1023 jmp .vmstart64_end
1024
1025 %ifdef VBOX_STRICT
1026 ; Precondition checks failed.
1027.precond_failure_return:
1028 POP_CALLEE_PRESERVED_REGISTERS
1029 %if cbFrame != cbBaseFrame
1030 %error Bad frame size value: cbFrame, expected cbBaseFrame
1031 %endif
1032 jmp .vmstart64_end
1033 %endif
1034
1035 %undef frm_fRFlags
1036 %undef frm_pGstCtx
1037 %undef frm_uHostXcr0
1038 %undef frm_saved_gdtr
1039 %undef frm_saved_tr
1040 %undef frm_MDS_seg
1041 %undef frm_saved_idtr
1042 %undef frm_saved_ldtr
1043 %undef frm_rcError
1044 %undef frm_guest_rcx
1045 %undef cbFrame
1046ENDPROC RT_CONCAT(hmR0VmxStartVm,%1)
1047 %ifdef ASM_FORMAT_ELF
1048size NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) NAME(RT_CONCAT(hmR0VmxStartVm,%1) %+ _EndProc) - NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1))
1049 %endif
1050
1051
1052%endmacro ; hmR0VmxStartVmTemplate
1053
1054%macro hmR0VmxStartVmSseTemplate 2
1055hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0 | 0 | 0 | 0 , %1
1056hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0 | 0 | 0 | 0 , %1
1057hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0 | 0 | 0 , %1
1058hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0 | 0 | 0 , %1
1059hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, 0 | HM_WSF_L1D_ENTRY | 0 | 0 , %1
1060hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, 0 | HM_WSF_L1D_ENTRY | 0 | 0 , %1
1061hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0 | 0 , %1
1062hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0 | 0 , %1
1063hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0 | 0 | HM_WSF_MDS_ENTRY | 0 , %1
1064hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0 | 0 | HM_WSF_MDS_ENTRY | 0 , %1
1065hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0 | HM_WSF_MDS_ENTRY | 0 , %1
1066hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0 | HM_WSF_MDS_ENTRY | 0 , %1
1067hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, 0 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0 , %1
1068hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, 0 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0 , %1
1069hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0 , %1
1070hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | 0 , %1
1071hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0 | 0 | 0 | HM_WSF_IBPB_EXIT, %1
1072hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0 | 0 | 0 | HM_WSF_IBPB_EXIT, %1
1073hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0 | 0 | HM_WSF_IBPB_EXIT, %1
1074hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0 | 0 | HM_WSF_IBPB_EXIT, %1
1075hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, 0 | HM_WSF_L1D_ENTRY | 0 | HM_WSF_IBPB_EXIT, %1
1076hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, 0 | HM_WSF_L1D_ENTRY | 0 | HM_WSF_IBPB_EXIT, %1
1077hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0 | HM_WSF_IBPB_EXIT, %1
1078hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | 0 | HM_WSF_IBPB_EXIT, %1
1079hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0 | 0 | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1080hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0 | 0 | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1081hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | 0 | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1082hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | 0 | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1083hmR0VmxStartVmTemplate _SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, 0 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1084hmR0VmxStartVmTemplate _WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, 0 | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1085hmR0VmxStartVmTemplate _SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 0, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1086hmR0VmxStartVmTemplate _WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit %+ %2, 1, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT, %1
1087%endmacro
1088
1089hmR0VmxStartVmSseTemplate 0,,
1090%ifdef VBOX_WITH_KERNEL_USING_XMM
1091hmR0VmxStartVmSseTemplate 1,_SseManual
1092hmR0VmxStartVmSseTemplate 2,_SseXSave
1093%endif
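; The instantiations above expand to 32 XCR0/IBPB/L1D/MDS variations per SSE handling mode, i.e.
; 96 hmR0VmxStartVm entry points when VBOX_WITH_KERNEL_USING_XMM is defined (otherwise 32); the
; ring-0 VMX code picks the matching variation via hmR0VmxUpdateStartVmFunction() (mentioned in
; the template comment above).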
1094
1095
1096;;
1097; hmR0SvmVmRun template
1098;
1099; @param 1 The suffix of the variation.
1100; @param 2 fLoadSaveGuestXcr0 value
1101; @param 3 The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
1102; @param 4 The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
1103; Drivers shouldn't use AVX registers without saving+loading:
1104; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
1105; However the compiler docs have different idea:
1106; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
1107; We'll go with the former for now.
1108;
1109%macro hmR0SvmVmRunTemplate 4
1110
1111;;
1112; Prepares for and executes VMRUN (32-bit and 64-bit guests).
1113;
1114; @returns VBox status code
1115; @param pVM msc:rcx,gcc:rdi The cross context VM structure (unused).
1116; @param pVCpu msc:rdx,gcc:rsi The cross context virtual CPU structure of the calling EMT.
1117; @param HCPhysVmcb msc:r8, gcc:rdx Physical address of guest VMCB.
1118;
1119ALIGNCODE(64) ; This + immediate optimizations causes serious trouble for yasm and the SEH frames: prologue -28 bytes, must be <256
1120 ; So the SEH64_XXX stuff is currently not operational.
1121BEGINPROC RT_CONCAT(hmR0SvmVmRun,%1)
1122 %ifdef VBOX_WITH_KERNEL_USING_XMM
1123 %if %4 = 0
1124 ;
1125 ; The non-saving variant will currently check the two SSE preconditions and pick
1126 ; the right variant to continue with. Later we may be able to move these
1127 ; decisions into hmR0SvmUpdateVmRunFunction().
1128 ;
1129 %ifdef ASM_CALL64_MSC
1130 test byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
1131 %else
1132 test byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
1133 %endif
1134 jz .save_xmm_no_need
1135 %ifdef ASM_CALL64_MSC
1136 cmp dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
1137 %else
1138 cmp dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
1139 %endif
1140 je RT_CONCAT3(hmR0SvmVmRun,%1,_SseManual)
1141 jmp RT_CONCAT3(hmR0SvmVmRun,%1,_SseXSave)
1142.save_xmm_no_need:
1143 %endif
1144 %endif
1145 push rbp
1146 SEH64_PUSH_xBP
1147 mov rbp, rsp
1148 SEH64_SET_FRAME_xBP 0
1149 pushf
1150 %assign cbFrame 30h
1151 %if %4 != 0
1152 %assign cbFrame cbFrame + 16 * 11 ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
1153 %endif
1154 %assign cbBaseFrame cbFrame
1155 sub rsp, cbFrame - 8h ; We subtract 8 bytes for the above pushf
1156 SEH64_ALLOCATE_STACK cbFrame ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.
1157
1158 %define frm_fRFlags -008h
1159 %define frm_uHostXcr0 -018h ; 128-bit
1160 ;%define frm_fNoRestoreXcr0 -020h ; Non-zero if we should skip XCR0 restoring.
1161 %define frm_pGstCtx -028h ; Where we stash guest CPU context for use after the vmrun.
1162 %define frm_HCPhysVmcbHost -030h ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
1163 %if %4 != 0
1164 %define frm_saved_xmm6 -040h
1165 %define frm_saved_xmm7 -050h
1166 %define frm_saved_xmm8 -060h
1167 %define frm_saved_xmm9 -070h
1168 %define frm_saved_xmm10 -080h
1169 %define frm_saved_xmm11 -090h
1170 %define frm_saved_xmm12 -0a0h
1171 %define frm_saved_xmm13 -0b0h
1172 %define frm_saved_xmm14 -0c0h
1173 %define frm_saved_xmm15 -0d0h
1174 %define frm_saved_mxcsr -0e0h
1175 %endif
1176
1177 ; Manual save and restore:
1178 ; - General purpose registers except RIP, RSP, RAX
1179 ;
1180 ; Trashed:
1181 ; - CR2 (we don't care)
1182 ; - LDTR (reset to 0)
1183 ; - DRx (presumably not changed at all)
1184 ; - DR7 (reset to 0x400)
1185
1186 ; Save all general purpose host registers.
1187 PUSH_CALLEE_PRESERVED_REGISTERS
1188 SEH64_END_PROLOGUE
1189 %if cbFrame != (cbBaseFrame + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
1190 %error Bad cbFrame value
1191 %endif
1192
1193 ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu. (rdx & rcx will soon be trashed.)
1194 %ifdef ASM_CALL64_GCC
1195 mov r8, rdx ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
1196 %else
1197 mov rsi, rdx ; Put pVCpu in rsi like on GCC as rdx is trashed below.
1198 ;mov rdi, rcx ; Put pVM in rdi like on GCC as rcx is trashed below.
1199 %endif
1200
1201 %ifdef VBOX_STRICT
1202 ;
1203 ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
1204 ;
1205 cmp byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
1206 mov eax, VERR_SVM_VMRUN_PRECOND_0
1207 jne .failure_return
1208
1209 mov eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
1210 and eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT
1211 cmp eax, %3
1212 mov eax, VERR_SVM_VMRUN_PRECOND_1
1213 jne .failure_return
1214
1215 %ifdef VBOX_WITH_KERNEL_USING_XMM
1216 mov eax, VERR_SVM_VMRUN_PRECOND_2
1217 test byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
1218 %if %4 = 0
1219 jnz .failure_return
1220 %else
1221 jz .failure_return
1222
1223 mov eax, VERR_SVM_VMRUN_PRECOND_3
1224 cmp dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
1225 %if %4 = 1
1226 jne .failure_return
1227 %elif %4 = 2
1228 je .failure_return
1229 %else
1230 %error Invalid template parameter 4.
1231 %endif
1232 %endif
1233 %endif
1234 %endif ; VBOX_STRICT
1235
1236 %if %4 != 0
1237 ; Save the non-volatile SSE host register state.
1238 movdqa [rbp + frm_saved_xmm6 ], xmm6
1239 movdqa [rbp + frm_saved_xmm7 ], xmm7
1240 movdqa [rbp + frm_saved_xmm8 ], xmm8
1241 movdqa [rbp + frm_saved_xmm9 ], xmm9
1242 movdqa [rbp + frm_saved_xmm10], xmm10
1243 movdqa [rbp + frm_saved_xmm11], xmm11
1244 movdqa [rbp + frm_saved_xmm12], xmm12
1245 movdqa [rbp + frm_saved_xmm13], xmm13
1246 movdqa [rbp + frm_saved_xmm14], xmm14
1247 movdqa [rbp + frm_saved_xmm15], xmm15
1248 stmxcsr [rbp + frm_saved_mxcsr]
1249
1250 ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
1251 lea rcx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.XState]
1252 %if %4 = 1 ; manual
1253 movdqa xmm0, [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
1254 movdqa xmm1, [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
1255 movdqa xmm2, [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
1256 movdqa xmm3, [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
1257 movdqa xmm4, [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
1258 movdqa xmm5, [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
1259 movdqa xmm6, [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
1260 movdqa xmm7, [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
1261 movdqa xmm8, [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
1262 movdqa xmm9, [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
1263 movdqa xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
1264 movdqa xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
1265 movdqa xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
1266 movdqa xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
1267 movdqa xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
1268 movdqa xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
1269 ldmxcsr [rcx + X86FXSTATE.MXCSR]
1270 %elif %4 = 2 ; use xrstor/xsave
1271 mov eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
1272 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1273 xor edx, edx
1274 xrstor [rcx]
1275 %else
1276 %error invalid template parameter 4
1277 %endif
1278 %endif
1279
1280 %if %2 != 0
1281 ; Save the host XCR0 and load the guest one if necessary.
1282 xor ecx, ecx
1283 xgetbv ; save the host XCR0 on the stack
1284 mov [rbp + frm_uHostXcr0 + 8], rdx
1285 mov [rbp + frm_uHostXcr0 ], rax
1286
1287 mov eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr] ; load the guest XCR0
1288 mov edx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr + 4]
1289 xor ecx, ecx ; paranoia
1290 xsetbv
1291 %endif
1292
1293 ; Save host fs, gs, sysenter msr etc.
1294 mov rax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.svm + HMR0CPUSVM.HCPhysVmcbHost]
1295 mov qword [rbp + frm_HCPhysVmcbHost], rax ; save for the vmload after vmrun
1296 lea rsi, [rsi + VMCPU.cpum.GstCtx]
1297 mov qword [rbp + frm_pGstCtx], rsi
1298 vmsave
1299
1300 %if %3 & HM_WSF_IBPB_ENTRY
1301 ; Fight spectre (trashes rax, rdx and rcx).
1302 mov ecx, MSR_IA32_PRED_CMD
1303 mov eax, MSR_IA32_PRED_CMD_F_IBPB
1304 xor edx, edx
1305 wrmsr
1306 %endif
1307
1308 ; Setup rax for VMLOAD.
1309 mov rax, r8 ; HCPhysVmcb (64 bits physical address; take low dword only)
1310
1311 ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
1312 mov rbx, qword [rsi + CPUMCTX.ebx]
1313 mov rcx, qword [rsi + CPUMCTX.ecx]
1314 mov rdx, qword [rsi + CPUMCTX.edx]
1315 mov rdi, qword [rsi + CPUMCTX.edi]
1316 mov rbp, qword [rsi + CPUMCTX.ebp]
1317 mov r8, qword [rsi + CPUMCTX.r8]
1318 mov r9, qword [rsi + CPUMCTX.r9]
1319 mov r10, qword [rsi + CPUMCTX.r10]
1320 mov r11, qword [rsi + CPUMCTX.r11]
1321 mov r12, qword [rsi + CPUMCTX.r12]
1322 mov r13, qword [rsi + CPUMCTX.r13]
1323 mov r14, qword [rsi + CPUMCTX.r14]
1324 mov r15, qword [rsi + CPUMCTX.r15]
1325 mov rsi, qword [rsi + CPUMCTX.esi]
1326
1327 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1328 clgi
1329 sti
1330
1331 ; Load guest FS, GS, Sysenter MSRs etc.
1332 vmload
1333
1334 ; Run the VM.
1335 vmrun
1336
1337 ; Save guest fs, gs, sysenter msr etc.
1338 vmsave
1339
1340 ; Load host fs, gs, sysenter msr etc.
1341 mov rax, [rsp + cbFrame + frm_HCPhysVmcbHost] ; load HCPhysVmcbHost (rbp is not operational yet, thus rsp)
1342 vmload
1343
1344 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1345 cli
1346 stgi
1347
1348 ; Fetch pGstCtx (stashed above) and save the guest GPRs (sans RSP and RAX).
1349 mov rax, [rsp + cbFrame + frm_pGstCtx] ; (rbp still not operational)
1350
1351 mov qword [rax + CPUMCTX.edx], rdx
1352 mov qword [rax + CPUMCTX.ecx], rcx
1353 mov rcx, rax
1354 rdtsc
1355 mov qword [rcx + CPUMCTX.ebp], rbp
1356 lea rbp, [rsp + cbFrame]
1357 shl rdx, 20h
1358 or rax, rdx ; TSC value in RAX
1359 mov qword [rcx + CPUMCTX.r8], r8
1360 mov r8, SPECTRE_FILLER ; SPECTRE filler in R8
1361 mov qword [rcx + CPUMCTX.r9], r9
1362 mov r9, r8
1363 mov qword [rcx + CPUMCTX.r10], r10
1364 mov r10, r8
1365 mov qword [rcx + GVMCPU.hmr0 + HMR0PERVCPU.uTscExit - VMCPU.cpum.GstCtx], rax
1366 mov qword [rcx + CPUMCTX.r11], r11
1367 mov r11, r8
1368 mov qword [rcx + CPUMCTX.edi], rdi
1369 %ifdef ASM_CALL64_MSC
1370 mov rdi, [rbp + frm_saved_rdi]
1371 %else
1372 mov rdi, r8
1373 %endif
1374 mov qword [rcx + CPUMCTX.esi], rsi
1375 %ifdef ASM_CALL64_MSC
1376 mov rsi, [rbp + frm_saved_rsi]
1377 %else
1378 mov rsi, r8
1379 %endif
1380 mov qword [rcx + CPUMCTX.ebx], rbx
1381 mov rbx, [rbp + frm_saved_rbx]
1382 mov qword [rcx + CPUMCTX.r12], r12
1383 mov r12, [rbp + frm_saved_r12]
1384 mov qword [rcx + CPUMCTX.r13], r13
1385 mov r13, [rbp + frm_saved_r13]
1386 mov qword [rcx + CPUMCTX.r14], r14
1387 mov r14, [rbp + frm_saved_r14]
1388 mov qword [rcx + CPUMCTX.r15], r15
1389 mov r15, [rbp + frm_saved_r15]
1390
1391 %if %4 != 0
1392 ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
1393 mov r8, rcx
1394 %endif
1395
1396 %if %3 & HM_WSF_IBPB_EXIT
1397 ; Fight spectre (trashes rax, rdx and rcx).
1398 mov ecx, MSR_IA32_PRED_CMD
1399 mov eax, MSR_IA32_PRED_CMD_F_IBPB
1400 xor edx, edx
1401 wrmsr
1402 %endif
1403
1404 %if %2 != 0
1405 ; Restore the host xcr0.
1406 xor ecx, ecx
1407 mov rdx, [rbp + frm_uHostXcr0 + 8]
1408 mov rax, [rbp + frm_uHostXcr0]
1409 xsetbv
1410 %endif
1411
1412 %if %4 != 0
1413 ; Save the guest SSE state related to non-volatile and volatile SSE registers.
1414 lea rcx, [r8 + CPUMCTX.XState]
1415 %if %4 = 1 ; manual
1416 stmxcsr [rcx + X86FXSTATE.MXCSR]
1417 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1418 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1419 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1420 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1421 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1422 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1423 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1424 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1425 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1426 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1427 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1428 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1429 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1430 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1431 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1432 movdqa [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1433 %elif %4 = 2 ; use xrstor/xsave
1434 mov eax, [r8 + CPUMCTX.fXStateMask]
1435 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1436 xor edx, edx
1437 xsave [rcx]
1438 %else
1439 %error invalid template parameter 4
1440 %endif
1441
1442 ; Restore the host non-volatile SSE register state.
1443 ldmxcsr [rbp + frm_saved_mxcsr]
1444 movdqa xmm6, [rbp + frm_saved_xmm6 ]
1445 movdqa xmm7, [rbp + frm_saved_xmm7 ]
1446 movdqa xmm8, [rbp + frm_saved_xmm8 ]
1447 movdqa xmm9, [rbp + frm_saved_xmm9 ]
1448 movdqa xmm10, [rbp + frm_saved_xmm10]
1449 movdqa xmm11, [rbp + frm_saved_xmm11]
1450 movdqa xmm12, [rbp + frm_saved_xmm12]
1451 movdqa xmm13, [rbp + frm_saved_xmm13]
1452 movdqa xmm14, [rbp + frm_saved_xmm14]
1453 movdqa xmm15, [rbp + frm_saved_xmm15]
1454 %endif ; %4 != 0
1455
1456 ; Epilogue (assumes we restored volatile registers above when saving the guest GPRs).
1457 mov eax, VINF_SUCCESS
1458 add rsp, cbFrame - 8h
1459 popf
1460 leave
1461 ret
1462
1463 %ifdef VBOX_STRICT
1464 ; Precondition checks failed.
1465.failure_return:
1466 POP_CALLEE_PRESERVED_REGISTERS
1467 %if cbFrame != cbBaseFrame
1468 %error Bad frame size value: cbFrame
1469 %endif
1470 add rsp, cbFrame - 8h
1471 popf
1472 leave
1473 ret
1474 %endif
1475
1476%undef frm_uHostXcr0
1477%undef frm_fNoRestoreXcr0
1478%undef frm_pGstCtx
1479%undef frm_HCPhysVmcbHost
1480%undef cbFrame
1481ENDPROC RT_CONCAT(hmR0SvmVmRun,%1)
1482
1483%endmacro ; hmR0SvmVmRunTemplate
1484
1485;
1486; Instantiate the hmR0SvmVmRun various variations.
1487;
1488hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit, 0, 0, 0
1489hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit, 1, 0, 0
1490hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit, 0, HM_WSF_IBPB_ENTRY, 0
1491hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit, 1, HM_WSF_IBPB_ENTRY, 0
1492hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit, 0, HM_WSF_IBPB_EXIT, 0
1493hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit, 1, HM_WSF_IBPB_EXIT, 0
1494hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
1495hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
1496%ifdef VBOX_WITH_KERNEL_USING_XMM
1497hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0, 1
1498hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0, 1
1499hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY, 1
1500hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY, 1
1501hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_EXIT, 1
1502hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_EXIT, 1
1503hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
1504hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
1505
1506hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave, 0, 0, 2
1507hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave, 1, 0, 2
1508hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave, 0, HM_WSF_IBPB_ENTRY, 2
1509hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave, 1, HM_WSF_IBPB_ENTRY, 2
1510hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave, 0, HM_WSF_IBPB_EXIT, 2
1511hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave, 1, HM_WSF_IBPB_EXIT, 2
1512hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
1513hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
1514%endif
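; That is 8 base variations, plus 16 SSE-handling variations (_SseManual and _SseXSave) when
; VBOX_WITH_KERNEL_USING_XMM is defined; hmR0SvmUpdateVmRunFunction() (mentioned in the template
; comment above) is expected to pick the matching one at run time.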
1515