VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm @ 78927

Last change on this file since 78927 was 78632, checked in by vboxsync, 6 years ago

Forward ported 130474,130475,130477,130479. bugref:9453

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 65.5 KB
1; $Id: HMR0A.asm 78632 2019-05-21 13:56:11Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2019 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
30 %macro vmwrite 2,
31 int3
32 %endmacro
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
48;; The offset of the XMM registers in X86FXSTATE.
49; Use define because I'm too lazy to convert the struct.
50%define XMM_OFF_IN_X86FXSTATE 160
51
52;; Spectre filler for 32-bit mode.
53; Some user space address that points to a 4MB page boundary in the hope that it
54; will somehow make it less useful.
55%define SPECTRE_FILLER32 0x227fffff
56;; Spectre filler for 64-bit mode.
57; Chosen to be an invalid address (also with 5-level paging).
58%define SPECTRE_FILLER64 0x02204204207fffff
59;; Spectre filler for the current CPU mode.
60%ifdef RT_ARCH_AMD64
61 %define SPECTRE_FILLER SPECTRE_FILLER64
62%else
63 %define SPECTRE_FILLER SPECTRE_FILLER32
64%endif
65
66;;
67; Determine whether we can skip restoring the GDTR, IDTR and TR across VMX non-root operation.
68;
69%ifdef RT_ARCH_AMD64
70 %define VMX_SKIP_GDTR
71 %define VMX_SKIP_TR
72 %define VBOX_SKIP_RESTORE_SEG
73 %ifdef RT_OS_DARWIN
74 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
75 ; risk loading a stale LDT value or something invalid.
76 %define HM_64_BIT_USE_NULL_SEL
77 ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we always need to restore it.
78 ; See @bugref{6875}.
79 %else
80 %define VMX_SKIP_IDTR
81 %endif
82%endif
83
84;; @def MYPUSHAD
85; Macro generating an equivalent to pushad
86
87;; @def MYPOPAD
88; Macro generating an equivalent to popad
89
90;; @def MYPUSHSEGS
91; Macro saving all segment registers on the stack.
92; @param 1 full width register name
93; @param 2 16-bit register name for \a 1.
94
95;; @def MYPOPSEGS
96; Macro restoring all segment registers from the stack.
97; @param 1 full width register name
98; @param 2 16-bit register name for \a 1.
99
100%ifdef ASM_CALL64_GCC
101 %macro MYPUSHAD64 0
102 push r15
103 push r14
104 push r13
105 push r12
106 push rbx
107 %endmacro
108 %macro MYPOPAD64 0
109 pop rbx
110 pop r12
111 pop r13
112 pop r14
113 pop r15
114 %endmacro
115
116%else ; ASM_CALL64_MSC
117 %macro MYPUSHAD64 0
118 push r15
119 push r14
120 push r13
121 push r12
122 push rbx
123 push rsi
124 push rdi
125 %endmacro
126 %macro MYPOPAD64 0
127 pop rdi
128 pop rsi
129 pop rbx
130 pop r12
131 pop r13
132 pop r14
133 pop r15
134 %endmacro
135%endif
136
137%ifdef VBOX_SKIP_RESTORE_SEG
138 %macro MYPUSHSEGS64 2
139 %endmacro
140
141 %macro MYPOPSEGS64 2
142 %endmacro
143%else ; !VBOX_SKIP_RESTORE_SEG
144 ; trashes rax, rdx & rcx
145 %macro MYPUSHSEGS64 2
146 %ifndef HM_64_BIT_USE_NULL_SEL
147 mov %2, es
148 push %1
149 mov %2, ds
150 push %1
151 %endif
152
153 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it.
154 mov ecx, MSR_K8_FS_BASE
155 rdmsr
156 push rdx
157 push rax
158 %ifndef HM_64_BIT_USE_NULL_SEL
159 push fs
160 %endif
161
162 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
163 mov ecx, MSR_K8_GS_BASE
164 rdmsr
165 push rdx
166 push rax
167 %ifndef HM_64_BIT_USE_NULL_SEL
168 push gs
169 %endif
170 %endmacro
171
172 ; trashes rax, rdx & rcx
173 %macro MYPOPSEGS64 2
174 ; Note: do not step through this code with a debugger!
175 %ifndef HM_64_BIT_USE_NULL_SEL
176 xor eax, eax
177 mov ds, ax
178 mov es, ax
179 mov fs, ax
180 mov gs, ax
181 %endif
182
183 %ifndef HM_64_BIT_USE_NULL_SEL
184 pop gs
185 %endif
186 pop rax
187 pop rdx
188 mov ecx, MSR_K8_GS_BASE
189 wrmsr
190
191 %ifndef HM_64_BIT_USE_NULL_SEL
192 pop fs
193 %endif
194 pop rax
195 pop rdx
196 mov ecx, MSR_K8_FS_BASE
197 wrmsr
198 ; Now it's safe to step again
199
200 %ifndef HM_64_BIT_USE_NULL_SEL
201 pop %1
202 mov ds, %2
203 pop %1
204 mov es, %2
205 %endif
206 %endmacro
207%endif ; VBOX_SKIP_RESTORE_SEG
208
209%macro MYPUSHAD32 0
210 pushad
211%endmacro
212%macro MYPOPAD32 0
213 popad
214%endmacro
215
216%macro MYPUSHSEGS32 2
217 push ds
218 push es
219 push fs
220 push gs
221%endmacro
222%macro MYPOPSEGS32 2
223 pop gs
224 pop fs
225 pop es
226 pop ds
227%endmacro
228
229%ifdef RT_ARCH_AMD64
230 %define MYPUSHAD MYPUSHAD64
231 %define MYPOPAD MYPOPAD64
232 %define MYPUSHSEGS MYPUSHSEGS64
233 %define MYPOPSEGS MYPOPSEGS64
234%else
235 %define MYPUSHAD MYPUSHAD32
236 %define MYPOPAD MYPOPAD32
237 %define MYPUSHSEGS MYPUSHSEGS32
238 %define MYPOPSEGS MYPOPSEGS32
239%endif
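
; Usage sketch (illustrative only; mirrors what VMXR0StartVM32/64 and the
; RESTORE_STATE_VM32/64 macros below actually do): the push/pop macros are
; used in matching LIFO pairs around the world switch, and MYPUSHSEGS/MYPOPSEGS
; take a full width register plus its 16-bit alias as documented above.
;
;       MYPUSHAD                ; save host general purpose registers
;       MYPUSHSEGS xAX, ax      ; save host segment registers (and FS/GS base MSRs on amd64)
;       ; ... switch to the guest and back ...
;       MYPOPSEGS  xAX, ax      ; restore host segment registers
;       MYPOPAD                 ; restore host general purpose registers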
240
241;;
242; Creates an indirect branch prediction barrier on CPUs that need and support it.
243; @clobbers eax, edx, ecx
244; @param 1 How to address CPUMCTX.
245; @param 2 Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
246%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
247 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
248 jz %%no_indirect_branch_barrier
249 mov ecx, MSR_IA32_PRED_CMD
250 mov eax, MSR_IA32_PRED_CMD_F_IBPB
251 xor edx, edx
252 wrmsr
253%%no_indirect_branch_barrier:
254%endmacro
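
; Example invocations (these match the ones used by the world-switch code
; further down in this file):
;
;       INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY      ; pCtx in xSI, before guest entry
;       INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT    ; pCtx in ss:xDI, after guest exit
;
; Parameter 1 is any effective-address expression for the CPUMCTX, parameter 2
; the CPUMCTX_WSF_IBPB_* flag to test.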
255
256;;
257; Creates an indirect branch prediction and L1D barrier on CPUs that need and support it.
258; @clobbers eax, edx, ecx
259; @param 1 How to address CPUMCTX.
260; @param 2 Which IBPB flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
261; @param 3 Which FLUSH flag to test for (CPUMCTX_WSF_L1D_ENTRY)
262; @param 4 Which MDS flag to test for (CPUMCTX_WSF_MDS_ENTRY)
263%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 4
264 ; Only one test+jmp when the mitigations are disabled for this CPU.
265 test byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3 | %4)
266 jz %%no_barrier_needed
267
268 ; The eax:edx value is the same for both.
269 AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
270 mov eax, MSR_IA32_PRED_CMD_F_IBPB
271 xor edx, edx
272
273 ; Indirect branch barrier.
274 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
275 jz %%no_indirect_branch_barrier
276 mov ecx, MSR_IA32_PRED_CMD
277 wrmsr
278%%no_indirect_branch_barrier:
279
280 ; Level 1 data cache flush.
281 test byte [%1 + CPUMCTX.fWorldSwitcher], %3
282 jz %%no_cache_flush_barrier
283 mov ecx, MSR_IA32_FLUSH_CMD
284 wrmsr
285 jmp %%no_mds_buffer_flushing ; MDS flushing is included in L1D_FLUSH.
286%%no_cache_flush_barrier:
287
288 ; MDS buffer flushing.
289 test byte [%1 + CPUMCTX.fWorldSwitcher], %4
290 jz %%no_mds_buffer_flushing
291 sub xSP, xCB
292 mov [xSP], ds
293 verw [xSP]
294 add xSP, xCB
295%%no_mds_buffer_flushing:
296
297%%no_barrier_needed:
298%endmacro
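
; Example invocation (this is how VMXR0StartVM32/64 below use the macro just
; before VMLAUNCH/VMRESUME, with pCtx addressed via xSI and the three
; entry-time mitigation flags):
;
;       INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY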
299
300
301;*********************************************************************************************************************************
302;* External Symbols *
303;*********************************************************************************************************************************
304%ifdef VBOX_WITH_KERNEL_USING_XMM
305extern NAME(CPUMIsGuestFPUStateActive)
306%endif
307
308
309BEGINCODE
310
311
312;/**
313; * Restores host-state fields.
314; *
315; * @returns VBox status code
316; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
317; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
318; */
319ALIGNCODE(16)
320BEGINPROC VMXRestoreHostState
321%ifdef RT_ARCH_AMD64
322 %ifndef ASM_CALL64_GCC
323 ; Use GCC's input registers since we'll be needing both rcx and rdx further
324 ; down with the wrmsr instruction. Use the R10 and R11 registers for saving
325 ; RDI and RSI since MSC preserves the latter two registers.
326 mov r10, rdi
327 mov r11, rsi
328 mov rdi, rcx
329 mov rsi, rdx
330 %endif
331
332 test edi, VMX_RESTORE_HOST_GDTR
333 jz .test_idtr
334 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
335
336.test_idtr:
337 test edi, VMX_RESTORE_HOST_IDTR
338 jz .test_ds
339 lidt [rsi + VMXRESTOREHOST.HostIdtr]
340
341.test_ds:
342 test edi, VMX_RESTORE_HOST_SEL_DS
343 jz .test_es
344 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
345 mov ds, eax
346
347.test_es:
348 test edi, VMX_RESTORE_HOST_SEL_ES
349 jz .test_tr
350 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
351 mov es, eax
352
353.test_tr:
354 test edi, VMX_RESTORE_HOST_SEL_TR
355 jz .test_fs
356 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
357 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
358 mov ax, dx
359 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
360 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
361 jnz .gdt_readonly
362 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
363 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
364 ltr dx
365 jmp short .test_fs
366.gdt_readonly:
367 test edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
368 jnz .gdt_readonly_need_writable
369 mov rcx, cr0
370 mov r9, rcx
371 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
372 and rcx, ~X86_CR0_WP
373 mov cr0, rcx
374 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
375 ltr dx
376 mov cr0, r9
377 jmp short .test_fs
378.gdt_readonly_need_writable:
379 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw.
380 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
381 lgdt [rsi + VMXRESTOREHOST.HostGdtrRw]
382 ltr dx
383 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; Load the original GDT
384
385.test_fs:
386 ;
387 ; When restoring the selector values for FS and GS, we'll temporarily trash
388 ; the base address (at least the high 32 bits, but quite possibly the
389 ; whole base address); the wrmsr will restore it correctly. (VT-x actually
390 ; restores the base correctly when leaving guest mode, but not the selector
391 ; value, so there is little problem with interrupts being enabled prior to
392 ; this restore job.)
393 ; We'll disable ints once for both FS and GS as that's probably faster.
394 ;
395 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
396 jz .restore_success
397 pushfq
398 cli ; (see above)
399
400 test edi, VMX_RESTORE_HOST_SEL_FS
401 jz .test_gs
402 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
403 mov fs, eax
404 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
405 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
406 mov ecx, MSR_K8_FS_BASE
407 wrmsr
408
409.test_gs:
410 test edi, VMX_RESTORE_HOST_SEL_GS
411 jz .restore_flags
412 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
413 mov gs, eax
414 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
415 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
416 mov ecx, MSR_K8_GS_BASE
417 wrmsr
418
419.restore_flags:
420 popfq
421
422.restore_success:
423 mov eax, VINF_SUCCESS
424 %ifndef ASM_CALL64_GCC
425 ; Restore RDI and RSI on MSC.
426 mov rdi, r10
427 mov rsi, r11
428 %endif
429%else ; RT_ARCH_X86
430 mov eax, VERR_NOT_IMPLEMENTED
431%endif
432 ret
433ENDPROC VMXRestoreHostState
434
435
436;/**
437; * Dispatches an NMI to the host.
438; */
439ALIGNCODE(16)
440BEGINPROC VMXDispatchHostNmi
441 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
442 ret
443ENDPROC VMXDispatchHostNmi
444
445
446;/**
447; * Executes VMWRITE, 64-bit value.
448; *
449; * @returns VBox status code.
450; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
451; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
452; */
453ALIGNCODE(16)
454BEGINPROC VMXWriteVmcs64
455%ifdef RT_ARCH_AMD64
456 %ifdef ASM_CALL64_GCC
457 and edi, 0ffffffffh
458 xor rax, rax
459 vmwrite rdi, rsi
460 %else
461 and ecx, 0ffffffffh
462 xor rax, rax
463 vmwrite rcx, rdx
464 %endif
465%else ; RT_ARCH_X86
466 mov ecx, [esp + 4] ; idxField
467 lea edx, [esp + 8] ; &u64Data
468 vmwrite ecx, [edx] ; low dword
469 jz .done
470 jc .done
471 inc ecx
472 xor eax, eax
473 vmwrite ecx, [edx + 4] ; high dword
474.done:
475%endif ; RT_ARCH_X86
476 jnc .valid_vmcs
477 mov eax, VERR_VMX_INVALID_VMCS_PTR
478 ret
479.valid_vmcs:
480 jnz .the_end
481 mov eax, VERR_VMX_INVALID_VMCS_FIELD
482.the_end:
483 ret
484ENDPROC VMXWriteVmcs64
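
;
; The error tail above encodes the standard VMX status convention: CF=1
; (VMfailInvalid) means the current VMCS pointer is invalid, while ZF=1
; (VMfailValid) means the instruction failed with an error code in the
; VM-instruction error field. The same pattern is repeated, with different
; status codes, by the VMREAD/VMWRITE/VMXON helpers below. A hypothetical
; helper macro capturing it could look like this (sketch only, not used by
; this file; %1 = status for an invalid VMCS pointer, %2 = status for a
; failed instruction, and eax is assumed to already hold VINF_SUCCESS):
;
%macro VMX_CHECK_RESULT_SKETCH 2
    jnc     %%valid_vmcs
    mov     eax, %1                     ; CF=1: VMfailInvalid
    jmp     %%done
%%valid_vmcs:
    jnz     %%done                      ; ZF=0: success, keep VINF_SUCCESS in eax
    mov     eax, %2                     ; ZF=1: VMfailValid
%%done:
%endmacro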
485
486
487;/**
488; * Executes VMREAD, 64-bit value.
489; *
490; * @returns VBox status code.
491; * @param idxField VMCS index.
492; * @param pData Where to store VM field value.
493; */
494;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
495ALIGNCODE(16)
496BEGINPROC VMXReadVmcs64
497%ifdef RT_ARCH_AMD64
498 %ifdef ASM_CALL64_GCC
499 and edi, 0ffffffffh
500 xor rax, rax
501 vmread [rsi], rdi
502 %else
503 and ecx, 0ffffffffh
504 xor rax, rax
505 vmread [rdx], rcx
506 %endif
507%else ; RT_ARCH_X86
508 mov ecx, [esp + 4] ; idxField
509 mov edx, [esp + 8] ; pData
510 vmread [edx], ecx ; low dword
511 jz .done
512 jc .done
513 inc ecx
514 xor eax, eax
515 vmread [edx + 4], ecx ; high dword
516.done:
517%endif ; RT_ARCH_X86
518 jnc .valid_vmcs
519 mov eax, VERR_VMX_INVALID_VMCS_PTR
520 ret
521.valid_vmcs:
522 jnz .the_end
523 mov eax, VERR_VMX_INVALID_VMCS_FIELD
524.the_end:
525 ret
526ENDPROC VMXReadVmcs64
527
528
529;/**
530; * Executes VMREAD, 32-bit value.
531; *
532; * @returns VBox status code.
533; * @param idxField VMCS index.
534; * @param pu32Data Where to store VM field value.
535; */
536;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
537ALIGNCODE(16)
538BEGINPROC VMXReadVmcs32
539%ifdef RT_ARCH_AMD64
540 %ifdef ASM_CALL64_GCC
541 and edi, 0ffffffffh
542 xor rax, rax
543 vmread r10, rdi
544 mov [rsi], r10d
545 %else
546 and ecx, 0ffffffffh
547 xor rax, rax
548 vmread r10, rcx
549 mov [rdx], r10d
550 %endif
551%else ; RT_ARCH_X86
552 mov ecx, [esp + 4] ; idxField
553 mov edx, [esp + 8] ; pu32Data
554 xor eax, eax
555 vmread [edx], ecx
556%endif ; RT_ARCH_X86
557 jnc .valid_vmcs
558 mov eax, VERR_VMX_INVALID_VMCS_PTR
559 ret
560.valid_vmcs:
561 jnz .the_end
562 mov eax, VERR_VMX_INVALID_VMCS_FIELD
563.the_end:
564 ret
565ENDPROC VMXReadVmcs32
566
567
568;/**
569; * Executes VMWRITE, 32-bit value.
570; *
571; * @returns VBox status code.
572; * @param idxField VMCS index.
573; * @param u32Data VM field value to write.
574; */
575;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
576ALIGNCODE(16)
577BEGINPROC VMXWriteVmcs32
578%ifdef RT_ARCH_AMD64
579 %ifdef ASM_CALL64_GCC
580 and edi, 0ffffffffh
581 and esi, 0ffffffffh
582 xor rax, rax
583 vmwrite rdi, rsi
584 %else
585 and ecx, 0ffffffffh
586 and edx, 0ffffffffh
587 xor rax, rax
588 vmwrite rcx, rdx
589 %endif
590%else ; RT_ARCH_X86
591 mov ecx, [esp + 4] ; idxField
592 mov edx, [esp + 8] ; u32Data
593 xor eax, eax
594 vmwrite ecx, edx
595%endif ; RT_ARCH_X86
596 jnc .valid_vmcs
597 mov eax, VERR_VMX_INVALID_VMCS_PTR
598 ret
599.valid_vmcs:
600 jnz .the_end
601 mov eax, VERR_VMX_INVALID_VMCS_FIELD
602.the_end:
603 ret
604ENDPROC VMXWriteVmcs32
605
606
607;/**
608; * Executes VMXON.
609; *
610; * @returns VBox status code.
611; * @param HCPhysVMXOn Physical address of VMXON structure.
612; */
613;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
614BEGINPROC VMXEnable
615%ifdef RT_ARCH_AMD64
616 xor rax, rax
617 %ifdef ASM_CALL64_GCC
618 push rdi
619 %else
620 push rcx
621 %endif
622 vmxon [rsp]
623%else ; RT_ARCH_X86
624 xor eax, eax
625 vmxon [esp + 4]
626%endif ; RT_ARCH_X86
627 jnc .good
628 mov eax, VERR_VMX_INVALID_VMXON_PTR
629 jmp .the_end
630
631.good:
632 jnz .the_end
633 mov eax, VERR_VMX_VMXON_FAILED
634
635.the_end:
636%ifdef RT_ARCH_AMD64
637 add rsp, 8
638%endif
639 ret
640ENDPROC VMXEnable
641
642
643;/**
644; * Executes VMXOFF.
645; */
646;DECLASM(void) VMXDisable(void);
647BEGINPROC VMXDisable
648 vmxoff
649.the_end:
650 ret
651ENDPROC VMXDisable
652
653
654;/**
655; * Executes VMCLEAR.
656; *
657; * @returns VBox status code.
658; * @param HCPhysVmcs Physical address of VM control structure.
659; */
660;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
661ALIGNCODE(16)
662BEGINPROC VMXClearVmcs
663%ifdef RT_ARCH_AMD64
664 xor rax, rax
665 %ifdef ASM_CALL64_GCC
666 push rdi
667 %else
668 push rcx
669 %endif
670 vmclear [rsp]
671%else ; RT_ARCH_X86
672 xor eax, eax
673 vmclear [esp + 4]
674%endif ; RT_ARCH_X86
675 jnc .the_end
676 mov eax, VERR_VMX_INVALID_VMCS_PTR
677.the_end:
678%ifdef RT_ARCH_AMD64
679 add rsp, 8
680%endif
681 ret
682ENDPROC VMXClearVmcs
683
684
685;/**
686; * Executes VMPTRLD.
687; *
688; * @returns VBox status code.
689; * @param HCPhysVmcs Physical address of VMCS structure.
690; */
691;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
692ALIGNCODE(16)
693BEGINPROC VMXLoadVmcs
694%ifdef RT_ARCH_AMD64
695 xor rax, rax
696 %ifdef ASM_CALL64_GCC
697 push rdi
698 %else
699 push rcx
700 %endif
701 vmptrld [rsp]
702%else
703 xor eax, eax
704 vmptrld [esp + 4]
705%endif
706 jnc .the_end
707 mov eax, VERR_VMX_INVALID_VMCS_PTR
708.the_end:
709%ifdef RT_ARCH_AMD64
710 add rsp, 8
711%endif
712 ret
713ENDPROC VMXLoadVmcs
714
715
716;/**
717; * Executes VMPTRST.
718; *
719; * @returns VBox status code.
720; * @param pVMCS x86: [esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer.
721; */
722;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);
723BEGINPROC VMXGetCurrentVmcs
724%ifdef RT_OS_OS2
725 mov eax, VERR_NOT_SUPPORTED
726 ret
727%else
728 %ifdef RT_ARCH_AMD64
729 %ifdef ASM_CALL64_GCC
730 vmptrst qword [rdi]
731 %else
732 vmptrst qword [rcx]
733 %endif
734 %else
735 vmptrst qword [esp+04h]
736 %endif
737 xor eax, eax
738.the_end:
739 ret
740%endif
741ENDPROC VMXGetCurrentVmcs
742
743;/**
744; * Invalidates EPT translations using INVEPT.
745; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
746; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
747; */
748;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor);
749BEGINPROC VMXR0InvEPT
750%ifdef RT_ARCH_AMD64
751 %ifdef ASM_CALL64_GCC
752 and edi, 0ffffffffh
753 xor rax, rax
754; invept rdi, qword [rsi]
755 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
756 %else
757 and ecx, 0ffffffffh
758 xor rax, rax
759; invept rcx, qword [rdx]
760 DB 0x66, 0x0F, 0x38, 0x80, 0xA
761 %endif
762%else
763 mov ecx, [esp + 4]
764 mov edx, [esp + 8]
765 xor eax, eax
766; invept ecx, qword [edx]
767 DB 0x66, 0x0F, 0x38, 0x80, 0xA
768%endif
769 jnc .valid_vmcs
770 mov eax, VERR_VMX_INVALID_VMCS_PTR
771 ret
772.valid_vmcs:
773 jnz .the_end
774 mov eax, VERR_INVALID_PARAMETER
775.the_end:
776 ret
777ENDPROC VMXR0InvEPT
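
; Hypothetical caller sketch (illustrative only, not part of this file): the
; routine above expects pDescriptor to point at the 16-byte INVEPT descriptor,
; which per the Intel SDM holds the EPTP in the first quadword and zero in the
; second. A 64-bit MSC-convention caller could build it on the stack like so
; (register contents and the flush-type value are placeholders):
;
;       sub     rsp, 30h                        ; shadow space + 16-byte descriptor
;       mov     [rsp + 20h], rax                ; rax = EPTP for the VCPU
;       mov     qword [rsp + 20h + 8], 0        ; reserved, must be zero
;       mov     ecx, 1                          ; enmTlbFlush (e.g. single-context)
;       lea     rdx, [rsp + 20h]                ; pDescriptor
;       call    NAME(VMXR0InvEPT)
;       add     rsp, 30h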
778
779
780;/**
781; * Invalidate a page using invvpid
782; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
783; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
784; */
785;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor);
786BEGINPROC VMXR0InvVPID
787%ifdef RT_ARCH_AMD64
788 %ifdef ASM_CALL64_GCC
789 and edi, 0ffffffffh
790 xor rax, rax
791; invvpid rdi, qword [rsi]
792 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
793 %else
794 and ecx, 0ffffffffh
795 xor rax, rax
796; invvpid rcx, qword [rdx]
797 DB 0x66, 0x0F, 0x38, 0x81, 0xA
798 %endif
799%else
800 mov ecx, [esp + 4]
801 mov edx, [esp + 8]
802 xor eax, eax
803; invvpid ecx, qword [edx]
804 DB 0x66, 0x0F, 0x38, 0x81, 0xA
805%endif
806 jnc .valid_vmcs
807 mov eax, VERR_VMX_INVALID_VMCS_PTR
808 ret
809.valid_vmcs:
810 jnz .the_end
811 mov eax, VERR_INVALID_PARAMETER
812.the_end:
813 ret
814ENDPROC VMXR0InvVPID
815
816
817%if GC_ARCH_BITS == 64
818;;
819; Executes INVLPGA
820;
821; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
822; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
823;
824;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
825BEGINPROC SVMR0InvlpgA
826%ifdef RT_ARCH_AMD64
827 %ifdef ASM_CALL64_GCC
828 mov rax, rdi
829 mov rcx, rsi
830 %else
831 mov rax, rcx
832 mov rcx, rdx
833 %endif
834%else
835 mov eax, [esp + 4]
836 mov ecx, [esp + 0Ch]
837%endif
838 invlpga [xAX], ecx
839 ret
840ENDPROC SVMR0InvlpgA
841
842%else ; GC_ARCH_BITS != 64
843;;
844; Executes INVLPGA
845;
846; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
847; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
848;
849;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
850BEGINPROC SVMR0InvlpgA
851%ifdef RT_ARCH_AMD64
852 %ifdef ASM_CALL64_GCC
853 movzx rax, edi
854 mov ecx, esi
855 %else
856 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
857 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
858 ; values also set the upper 32 bits of the register to zero. Consequently
859 ; there is no need for an instruction movzlq.''
860 mov eax, ecx
861 mov ecx, edx
862 %endif
863%else
864 mov eax, [esp + 4]
865 mov ecx, [esp + 8]
866%endif
867 invlpga [xAX], ecx
868 ret
869ENDPROC SVMR0InvlpgA
870
871%endif ; GC_ARCH_BITS != 64
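
; Example invocation (illustrative only, 64-bit MSC convention as documented
; above; the source registers are placeholders):
;
;       mov     rcx, rbx                        ; rbx = pPageGC, guest-virtual page to invalidate (placeholder)
;       mov     edx, r8d                        ; r8d = uASID, the guest's tagged TLB id (placeholder)
;       call    NAME(SVMR0InvlpgA)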
872
873
874%ifdef VBOX_WITH_KERNEL_USING_XMM
875
876;;
877; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
878; loads the guest ones when necessary.
879;
880; @cproto DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pCache,
881; PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
882;
883; @returns eax
884;
885; @param fResumeVM msc:rcx
886; @param pCtx msc:rdx
887; @param pVmcsCache msc:r8
888; @param pVM msc:r9
889; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
890; @param pfnStartVM msc:[rbp+38h]
891;
892; @remarks This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
893;
894; @remarks Drivers shouldn't use AVX registers without saving+loading:
895; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
896; However the compiler docs have a different idea:
897; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
898; We'll go with the former for now.
899;
900; ASSUMING 64-bit and windows for now.
901;
902ALIGNCODE(16)
903BEGINPROC hmR0VMXStartVMWrapXMM
904 push xBP
905 mov xBP, xSP
906 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
907
908 ; spill input parameters.
909 mov [xBP + 010h], rcx ; fResumeVM
910 mov [xBP + 018h], rdx ; pCtx
911 mov [xBP + 020h], r8 ; pVmcsCache
912 mov [xBP + 028h], r9 ; pVM
913
914 ; Ask CPUM whether we've started using the FPU yet.
915 mov rcx, [xBP + 30h] ; pVCpu
916 call NAME(CPUMIsGuestFPUStateActive)
917 test al, al
918 jnz .guest_fpu_state_active
919
920 ; No need to mess with XMM registers, just call the start routine and return.
921 mov r11, [xBP + 38h] ; pfnStartVM
922 mov r10, [xBP + 30h] ; pVCpu
923 mov [xSP + 020h], r10
924 mov rcx, [xBP + 010h] ; fResumeVM
925 mov rdx, [xBP + 018h] ; pCtx
926 mov r8, [xBP + 020h] ; pVmcsCache
927 mov r9, [xBP + 028h] ; pVM
928 call r11
929
930 leave
931 ret
932
933ALIGNCODE(8)
934.guest_fpu_state_active:
935 ; Save the non-volatile host XMM registers.
936 movdqa [rsp + 040h + 000h], xmm6
937 movdqa [rsp + 040h + 010h], xmm7
938 movdqa [rsp + 040h + 020h], xmm8
939 movdqa [rsp + 040h + 030h], xmm9
940 movdqa [rsp + 040h + 040h], xmm10
941 movdqa [rsp + 040h + 050h], xmm11
942 movdqa [rsp + 040h + 060h], xmm12
943 movdqa [rsp + 040h + 070h], xmm13
944 movdqa [rsp + 040h + 080h], xmm14
945 movdqa [rsp + 040h + 090h], xmm15
946 stmxcsr [rsp + 040h + 0a0h]
947
948 mov r10, [xBP + 018h] ; pCtx
949 mov eax, [r10 + CPUMCTX.fXStateMask]
950 test eax, eax
951 jz .guest_fpu_state_manually
952
953 ;
954 ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
955 ;
956 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
957 xor edx, edx
958 mov r10, [r10 + CPUMCTX.pXStateR0]
959 xrstor [r10]
960
961 ; Make the call (same as in the other case).
962 mov r11, [xBP + 38h] ; pfnStartVM
963 mov r10, [xBP + 30h] ; pVCpu
964 mov [xSP + 020h], r10
965 mov rcx, [xBP + 010h] ; fResumeVM
966 mov rdx, [xBP + 018h] ; pCtx
967 mov r8, [xBP + 020h] ; pVmcsCache
968 mov r9, [xBP + 028h] ; pVM
969 call r11
970
971 mov r11d, eax ; save return value (xsave below uses eax)
972
973 ; Save the guest XMM registers.
974 mov r10, [xBP + 018h] ; pCtx
975 mov eax, [r10 + CPUMCTX.fXStateMask]
976 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
977 xor edx, edx
978 mov r10, [r10 + CPUMCTX.pXStateR0]
979 xsave [r10]
980
981 mov eax, r11d ; restore return value.
982
983.restore_non_volatile_host_xmm_regs:
984 ; Load the non-volatile host XMM registers.
985 movdqa xmm6, [rsp + 040h + 000h]
986 movdqa xmm7, [rsp + 040h + 010h]
987 movdqa xmm8, [rsp + 040h + 020h]
988 movdqa xmm9, [rsp + 040h + 030h]
989 movdqa xmm10, [rsp + 040h + 040h]
990 movdqa xmm11, [rsp + 040h + 050h]
991 movdqa xmm12, [rsp + 040h + 060h]
992 movdqa xmm13, [rsp + 040h + 070h]
993 movdqa xmm14, [rsp + 040h + 080h]
994 movdqa xmm15, [rsp + 040h + 090h]
995 ldmxcsr [rsp + 040h + 0a0h]
996 leave
997 ret
998
999 ;
1000 ; No XSAVE, load and save the guest XMM registers manually.
1001 ;
1002.guest_fpu_state_manually:
1003 ; Load the full guest XMM register state.
1004 mov r10, [r10 + CPUMCTX.pXStateR0]
1005 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1006 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1007 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1008 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1009 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1010 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1011 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1012 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1013 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1014 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1015 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1016 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1017 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1018 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1019 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1020 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1021 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1022
1023 ; Make the call (same as in the other case).
1024 mov r11, [xBP + 38h] ; pfnStartVM
1025 mov r10, [xBP + 30h] ; pVCpu
1026 mov [xSP + 020h], r10
1027 mov rcx, [xBP + 010h] ; fResumeVM
1028 mov rdx, [xBP + 018h] ; pCtx
1029 mov r8, [xBP + 020h] ; pVmcsCache
1030 mov r9, [xBP + 028h] ; pVM
1031 call r11
1032
1033 ; Save the guest XMM registers.
1034 mov r10, [xBP + 018h] ; pCtx
1035 mov r10, [r10 + CPUMCTX.pXStateR0]
1036 stmxcsr [r10 + X86FXSTATE.MXCSR]
1037 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1038 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1039 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1040 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1041 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1042 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1043 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1044 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1045 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1046 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1047 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1048 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1049 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1050 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1051 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1052 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1053 jmp .restore_non_volatile_host_xmm_regs
1054ENDPROC hmR0VMXStartVMWrapXMM
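
; Hypothetical Win64 caller sketch (illustrative only, not part of this file):
; the first four parameters travel in rcx/rdx/r8/r9 and the 5th and 6th
; (pVCpu, pfnStartVM) go on the stack above the 32-byte shadow area, which is
; exactly where the wrapper reads them back at [rbp + 30h] and [rbp + 38h].
; The register contents below are placeholders.
;
;       sub     rsp, 38h                        ; shadow space + two stack parameters + alignment
;       mov     rcx, rbx                        ; rbx = fResumeVM (placeholder)
;       mov     rdx, r12                        ; r12 = pCtx (placeholder)
;       mov     r8,  r13                        ; r13 = pVmcsCache (placeholder)
;       mov     r9,  r14                        ; r14 = pVM (placeholder)
;       mov     rax, r15                        ; r15 = pVCpu (placeholder)
;       mov     [rsp + 20h], rax                ; 5th parameter: pVCpu
;       lea     rax, [NAME(VMXR0StartVM64) wrt rip]
;       mov     [rsp + 28h], rax                ; 6th parameter: pfnStartVM (e.g. VMXR0StartVM64)
;       call    NAME(hmR0VMXStartVMWrapXMM)
;       add     rsp, 38h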
1055
1056;;
1057; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1058; loads the guest ones when necessary.
1059;
1060; @cproto DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
1061; PFNHMSVMVMRUN pfnVMRun);
1062;
1063; @returns eax
1064;
1065; @param HCPhysVmcbHost msc:rcx
1066; @param HCPhysVmcb msc:rdx
1067; @param pCtx msc:r8
1068; @param pVM msc:r9
1069; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
1070; @param pfnVMRun msc:[rbp+38h]
1071;
1072; @remarks This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1073;
1074; @remarks Drivers shouldn't use AVX registers without saving+loading:
1075; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
1076; However the compiler docs have a different idea:
1077; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
1078; We'll go with the former for now.
1079;
1080; ASSUMING 64-bit and windows for now.
1081ALIGNCODE(16)
1082BEGINPROC hmR0SVMRunWrapXMM
1083 push xBP
1084 mov xBP, xSP
1085 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
1086
1087 ; spill input parameters.
1088 mov [xBP + 010h], rcx ; HCPhysVmcbHost
1089 mov [xBP + 018h], rdx ; HCPhysVmcb
1090 mov [xBP + 020h], r8 ; pCtx
1091 mov [xBP + 028h], r9 ; pVM
1092
1093 ; Ask CPUM whether we've started using the FPU yet.
1094 mov rcx, [xBP + 30h] ; pVCpu
1095 call NAME(CPUMIsGuestFPUStateActive)
1096 test al, al
1097 jnz .guest_fpu_state_active
1098
1099 ; No need to mess with XMM registers, just call the start routine and return.
1100 mov r11, [xBP + 38h] ; pfnVMRun
1101 mov r10, [xBP + 30h] ; pVCpu
1102 mov [xSP + 020h], r10
1103 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1104 mov rdx, [xBP + 018h] ; HCPhysVmcb
1105 mov r8, [xBP + 020h] ; pCtx
1106 mov r9, [xBP + 028h] ; pVM
1107 call r11
1108
1109 leave
1110 ret
1111
1112ALIGNCODE(8)
1113.guest_fpu_state_active:
1114 ; Save the non-volatile host XMM registers.
1115 movdqa [rsp + 040h + 000h], xmm6
1116 movdqa [rsp + 040h + 010h], xmm7
1117 movdqa [rsp + 040h + 020h], xmm8
1118 movdqa [rsp + 040h + 030h], xmm9
1119 movdqa [rsp + 040h + 040h], xmm10
1120 movdqa [rsp + 040h + 050h], xmm11
1121 movdqa [rsp + 040h + 060h], xmm12
1122 movdqa [rsp + 040h + 070h], xmm13
1123 movdqa [rsp + 040h + 080h], xmm14
1124 movdqa [rsp + 040h + 090h], xmm15
1125 stmxcsr [rsp + 040h + 0a0h]
1126
1127 mov r10, [xBP + 020h] ; pCtx
1128 mov eax, [r10 + CPUMCTX.fXStateMask]
1129 test eax, eax
1130 jz .guest_fpu_state_manually
1131
1132 ;
1133 ; Using XSAVE.
1134 ;
1135 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1136 xor edx, edx
1137 mov r10, [r10 + CPUMCTX.pXStateR0]
1138 xrstor [r10]
1139
1140 ; Make the call (same as in the other case).
1141 mov r11, [xBP + 38h] ; pfnVMRun
1142 mov r10, [xBP + 30h] ; pVCpu
1143 mov [xSP + 020h], r10
1144 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1145 mov rdx, [xBP + 018h] ; HCPhysVmcb
1146 mov r8, [xBP + 020h] ; pCtx
1147 mov r9, [xBP + 028h] ; pVM
1148 call r11
1149
1150 mov r11d, eax ; save return value (xsave below uses eax)
1151
1152 ; Save the guest XMM registers.
1153 mov r10, [xBP + 020h] ; pCtx
1154 mov eax, [r10 + CPUMCTX.fXStateMask]
1155 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1156 xor edx, edx
1157 mov r10, [r10 + CPUMCTX.pXStateR0]
1158 xsave [r10]
1159
1160 mov eax, r11d ; restore return value.
1161
1162.restore_non_volatile_host_xmm_regs:
1163 ; Load the non-volatile host XMM registers.
1164 movdqa xmm6, [rsp + 040h + 000h]
1165 movdqa xmm7, [rsp + 040h + 010h]
1166 movdqa xmm8, [rsp + 040h + 020h]
1167 movdqa xmm9, [rsp + 040h + 030h]
1168 movdqa xmm10, [rsp + 040h + 040h]
1169 movdqa xmm11, [rsp + 040h + 050h]
1170 movdqa xmm12, [rsp + 040h + 060h]
1171 movdqa xmm13, [rsp + 040h + 070h]
1172 movdqa xmm14, [rsp + 040h + 080h]
1173 movdqa xmm15, [rsp + 040h + 090h]
1174 ldmxcsr [rsp + 040h + 0a0h]
1175 leave
1176 ret
1177
1178 ;
1179 ; No XSAVE, load and save the guest XMM registers manually.
1180 ;
1181.guest_fpu_state_manually:
1182 ; Load the full guest XMM register state.
1183 mov r10, [r10 + CPUMCTX.pXStateR0]
1184 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1185 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1186 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1187 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1188 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1189 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1190 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1191 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1192 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1193 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1194 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1195 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1196 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1197 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1198 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1199 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1200 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1201
1202 ; Make the call (same as in the other case).
1203 mov r11, [xBP + 38h] ; pfnVMRun
1204 mov r10, [xBP + 30h] ; pVCpu
1205 mov [xSP + 020h], r10
1206 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1207 mov rdx, [xBP + 018h] ; HCPhysVmcb
1208 mov r8, [xBP + 020h] ; pCtx
1209 mov r9, [xBP + 028h] ; pVM
1210 call r11
1211
1212 ; Save the guest XMM registers.
1213 mov r10, [xBP + 020h] ; pCtx
1214 mov r10, [r10 + CPUMCTX.pXStateR0]
1215 stmxcsr [r10 + X86FXSTATE.MXCSR]
1216 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1217 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1218 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1219 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1220 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1221 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1222 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1223 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1224 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1225 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1226 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1227 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1228 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1229 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1230 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1231 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1232 jmp .restore_non_volatile_host_xmm_regs
1233ENDPROC hmR0SVMRunWrapXMM
1234
1235%endif ; VBOX_WITH_KERNEL_USING_XMM
1236
1237
1238;; @def RESTORE_STATE_VM32
1239; Macro restoring essential host state and updating guest state
1240; for common host, 32-bit guest for VT-x.
1241%macro RESTORE_STATE_VM32 0
1242 ; Restore base and limit of the IDTR & GDTR.
1243 %ifndef VMX_SKIP_IDTR
1244 lidt [xSP]
1245 add xSP, xCB * 2
1246 %endif
1247 %ifndef VMX_SKIP_GDTR
1248 lgdt [xSP]
1249 add xSP, xCB * 2
1250 %endif
1251
1252 push xDI
1253 %ifndef VMX_SKIP_TR
1254 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
1255 %else
1256 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
1257 %endif
1258
1259 mov [ss:xDI + CPUMCTX.eax], eax
1260 mov xAX, SPECTRE_FILLER
1261 mov [ss:xDI + CPUMCTX.ebx], ebx
1262 mov xBX, xAX
1263 mov [ss:xDI + CPUMCTX.ecx], ecx
1264 mov xCX, xAX
1265 mov [ss:xDI + CPUMCTX.edx], edx
1266 mov xDX, xAX
1267 mov [ss:xDI + CPUMCTX.esi], esi
1268 mov xSI, xAX
1269 mov [ss:xDI + CPUMCTX.ebp], ebp
1270 mov xBP, xAX
1271 mov xAX, cr2
1272 mov [ss:xDI + CPUMCTX.cr2], xAX
1273
1274 %ifdef RT_ARCH_AMD64
1275 pop xAX ; The guest edi we pushed above.
1276 mov dword [ss:xDI + CPUMCTX.edi], eax
1277 %else
1278 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
1279 %endif
1280
1281 ; Fight spectre.
1282 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT
1283
1284 %ifndef VMX_SKIP_TR
1285 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1286 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1287 ; @todo get rid of sgdt
1288 pop xBX ; Saved TR
1289 sub xSP, xCB * 2
1290 sgdt [xSP]
1291 mov xAX, xBX
1292 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1293 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1294 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1295 ltr bx
1296 add xSP, xCB * 2
1297 %endif
1298
1299 pop xAX ; Saved LDTR
1300 %ifdef RT_ARCH_AMD64
1301 cmp eax, 0
1302 je %%skip_ldt_write32
1303 %endif
1304 lldt ax
1305
1306%%skip_ldt_write32:
1307 add xSP, xCB ; pCtx
1308
1309 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1310 pop xDX ; Saved pVmcsCache
1311
1312 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
1313 ; vmread's will fail (only eflags.cf=1 will be set), but that shouldn't cause any
1314 ; trouble, it is merely less efficient.
1315 mov ecx, [ss:xDX + VMXVMCSCACHE.Read.cValidEntries]
1316 cmp ecx, 0 ; Can't happen
1317 je %%no_cached_read32
1318 jmp %%cached_read32
1319
1320ALIGN(16)
1321%%cached_read32:
1322 dec xCX
1323 mov eax, [ss:xDX + VMXVMCSCACHE.Read.aField + xCX * 4]
1324 ; Note! This leaves the high 32 bits of the cache entry unmodified!!
1325 vmread [ss:xDX + VMXVMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1326 cmp xCX, 0
1327 jnz %%cached_read32
1328%%no_cached_read32:
1329 %endif
1330
1331 ; Restore segment registers.
1332 MYPOPSEGS xAX, ax
1333
1334 ; Restore the host XCR0 if necessary.
1335 pop xCX
1336 test ecx, ecx
1337 jnz %%xcr0_after_skip
1338 pop xAX
1339 pop xDX
1340 xsetbv ; ecx is already zero.
1341%%xcr0_after_skip:
1342
1343 ; Restore general purpose registers.
1344 MYPOPAD
1345%endmacro
1346
1347
1348;;
1349; Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
1350;
1351; @returns VBox status code
1352; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
1353; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
1354; @param pVmcsCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
1355; @param pVM x86:[ebp+14],msc:r9, gcc:rcx The cross context VM structure.
1356; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1357;
1358ALIGNCODE(16)
1359BEGINPROC VMXR0StartVM32
1360 push xBP
1361 mov xBP, xSP
1362
1363 pushf
1364 cli
1365
1366 ;
1367 ; Save all general purpose host registers.
1368 ;
1369 MYPUSHAD
1370
1371 ;
1372 ; First we have to write some final guest CPU context registers.
1373 ;
1374 mov eax, VMX_VMCS_HOST_RIP
1375%ifdef RT_ARCH_AMD64
1376 lea r10, [.vmlaunch_done wrt rip]
1377 vmwrite rax, r10
1378%else
1379 mov ecx, .vmlaunch_done
1380 vmwrite eax, ecx
1381%endif
1382 ; Note: assumes success!
1383
1384 ;
1385 ; Unify input parameter registers.
1386 ;
1387%ifdef RT_ARCH_AMD64
1388 %ifdef ASM_CALL64_GCC
1389 ; fResume already in rdi
1390 ; pCtx already in rsi
1391 mov rbx, rdx ; pVmcsCache
1392 %else
1393 mov rdi, rcx ; fResume
1394 mov rsi, rdx ; pCtx
1395 mov rbx, r8 ; pVmcsCache
1396 %endif
1397%else
1398 mov edi, [ebp + 8] ; fResume
1399 mov esi, [ebp + 12] ; pCtx
1400 mov ebx, [ebp + 16] ; pVmcsCache
1401%endif
1402
1403 ;
1404 ; Save the host XCR0 and load the guest one if necessary.
1405 ; Note! Trashes rdx and rcx.
1406 ;
1407%ifdef ASM_CALL64_MSC
1408 mov rax, [xBP + 30h] ; pVCpu
1409%elifdef ASM_CALL64_GCC
1410 mov rax, r8 ; pVCpu
1411%else
1412 mov eax, [xBP + 18h] ; pVCpu
1413%endif
1414 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1415 jz .xcr0_before_skip
1416
1417 xor ecx, ecx
1418 xgetbv ; Save the host one on the stack.
1419 push xDX
1420 push xAX
1421
1422 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1423 mov edx, [xSI + CPUMCTX.aXcr + 4]
1424 xor ecx, ecx ; paranoia
1425 xsetbv
1426
1427 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1428 jmp .xcr0_before_done
1429
1430.xcr0_before_skip:
1431 push 3fh ; indicate that we need not.
1432.xcr0_before_done:
1433
1434 ;
1435 ; Save segment registers.
1436 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1437 ;
1438 MYPUSHSEGS xAX, ax
1439
1440%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1441 mov ecx, [xBX + VMXVMCSCACHE.Write.cValidEntries]
1442 cmp ecx, 0
1443 je .no_cached_writes
1444 mov edx, ecx
1445 mov ecx, 0
1446 jmp .cached_write
1447
1448ALIGN(16)
1449.cached_write:
1450 mov eax, [xBX + VMXVMCSCACHE.Write.aField + xCX * 4]
1451 vmwrite xAX, [xBX + VMXVMCSCACHE.Write.aFieldVal + xCX * 8]
1452 inc xCX
1453 cmp xCX, xDX
1454 jl .cached_write
1455
1456 mov dword [xBX + VMXVMCSCACHE.Write.cValidEntries], 0
1457.no_cached_writes:
1458
1459 ; Save the pVmcsCache pointer.
1460 push xBX
1461%endif
1462
1463 ; Save the pCtx pointer.
1464 push xSI
1465
1466 ; Save host LDTR.
1467 xor eax, eax
1468 sldt ax
1469 push xAX
1470
1471%ifndef VMX_SKIP_TR
1472 ; The host TR limit is reset to 0x67; save & restore it manually.
1473 str eax
1474 push xAX
1475%endif
1476
1477%ifndef VMX_SKIP_GDTR
1478 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1479 sub xSP, xCB * 2
1480 sgdt [xSP]
1481%endif
1482%ifndef VMX_SKIP_IDTR
1483 sub xSP, xCB * 2
1484 sidt [xSP]
1485%endif
1486
1487 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1488 mov xBX, [xSI + CPUMCTX.cr2]
1489 mov xDX, cr2
1490 cmp xBX, xDX
1491 je .skip_cr2_write32
1492 mov cr2, xBX
1493
1494.skip_cr2_write32:
1495 mov eax, VMX_VMCS_HOST_RSP
1496 vmwrite xAX, xSP
1497 ; Note: assumes success!
1498 ; Don't mess with ESP anymore!!!
1499
1500 ; Fight spectre and similar.
1501 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
1502
1503 ; Load guest general purpose registers.
1504 mov eax, [xSI + CPUMCTX.eax]
1505 mov ebx, [xSI + CPUMCTX.ebx]
1506 mov ecx, [xSI + CPUMCTX.ecx]
1507 mov edx, [xSI + CPUMCTX.edx]
1508 mov ebp, [xSI + CPUMCTX.ebp]
1509
1510 ; Resume or start VM?
1511 cmp xDI, 0 ; fResume
1512
1513 ; Load guest edi & esi.
1514 mov edi, [xSI + CPUMCTX.edi]
1515 mov esi, [xSI + CPUMCTX.esi]
1516
1517 je .vmlaunch_launch
1518
1519 vmresume
1520 jc near .vmxstart_invalid_vmcs_ptr
1521 jz near .vmxstart_start_failed
1522 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
1523
1524.vmlaunch_launch:
1525 vmlaunch
1526 jc near .vmxstart_invalid_vmcs_ptr
1527 jz near .vmxstart_start_failed
1528 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
1529
1530ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
1531.vmlaunch_done:
1532 RESTORE_STATE_VM32
1533 mov eax, VINF_SUCCESS
1534
1535.vmstart_end:
1536 popf
1537 pop xBP
1538 ret
1539
1540.vmxstart_invalid_vmcs_ptr:
1541 RESTORE_STATE_VM32
1542 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1543 jmp .vmstart_end
1544
1545.vmxstart_start_failed:
1546 RESTORE_STATE_VM32
1547 mov eax, VERR_VMX_UNABLE_TO_START_VM
1548 jmp .vmstart_end
1549
1550ENDPROC VMXR0StartVM32
1551
1552
1553%ifdef RT_ARCH_AMD64
1554;; @def RESTORE_STATE_VM64
1555; Macro restoring essential host state and updating guest state
1556; for 64-bit host, 64-bit guest for VT-x.
1557;
1558%macro RESTORE_STATE_VM64 0
1559 ; Restore base and limit of the IDTR & GDTR
1560 %ifndef VMX_SKIP_IDTR
1561 lidt [xSP]
1562 add xSP, xCB * 2
1563 %endif
1564 %ifndef VMX_SKIP_GDTR
1565 lgdt [xSP]
1566 add xSP, xCB * 2
1567 %endif
1568
1569 push xDI
1570 %ifndef VMX_SKIP_TR
1571 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1572 %else
1573 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1574 %endif
1575
1576 mov qword [xDI + CPUMCTX.eax], rax
1577 mov rax, SPECTRE_FILLER64
1578 mov qword [xDI + CPUMCTX.ebx], rbx
1579 mov rbx, rax
1580 mov qword [xDI + CPUMCTX.ecx], rcx
1581 mov rcx, rax
1582 mov qword [xDI + CPUMCTX.edx], rdx
1583 mov rdx, rax
1584 mov qword [xDI + CPUMCTX.esi], rsi
1585 mov rsi, rax
1586 mov qword [xDI + CPUMCTX.ebp], rbp
1587 mov rbp, rax
1588 mov qword [xDI + CPUMCTX.r8], r8
1589 mov r8, rax
1590 mov qword [xDI + CPUMCTX.r9], r9
1591 mov r9, rax
1592 mov qword [xDI + CPUMCTX.r10], r10
1593 mov r10, rax
1594 mov qword [xDI + CPUMCTX.r11], r11
1595 mov r11, rax
1596 mov qword [xDI + CPUMCTX.r12], r12
1597 mov r12, rax
1598 mov qword [xDI + CPUMCTX.r13], r13
1599 mov r13, rax
1600 mov qword [xDI + CPUMCTX.r14], r14
1601 mov r14, rax
1602 mov qword [xDI + CPUMCTX.r15], r15
1603 mov r15, rax
1604 mov rax, cr2
1605 mov qword [xDI + CPUMCTX.cr2], rax
1606
1607 pop xAX ; The guest rdi we pushed above
1608 mov qword [xDI + CPUMCTX.edi], rax
1609
1610 ; Fight spectre.
1611 INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
1612
1613 %ifndef VMX_SKIP_TR
1614 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1615 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
1616 ; @todo get rid of sgdt
1617 pop xBX ; Saved TR
1618 sub xSP, xCB * 2
1619 sgdt [xSP]
1620 mov xAX, xBX
1621 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1622 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1623 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1624 ltr bx
1625 add xSP, xCB * 2
1626 %endif
1627
1628 pop xAX ; Saved LDTR
1629 cmp eax, 0
1630 je %%skip_ldt_write64
1631 lldt ax
1632
1633%%skip_ldt_write64:
1634 pop xSI ; pCtx (needed in rsi by the macros below)
1635
1636 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1637 pop xDX ; Saved pVmcsCache
1638
1639 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
1640 ; vmread's will fail (only eflags.cf=1 will be set), but that shouldn't cause any
1641 ; trouble, it is merely less efficient.
1642 mov ecx, [xDX + VMXVMCSCACHE.Read.cValidEntries]
1643 cmp ecx, 0 ; Can't happen
1644 je %%no_cached_read64
1645 jmp %%cached_read64
1646
1647ALIGN(16)
1648%%cached_read64:
1649 dec xCX
1650 mov eax, [xDX + VMXVMCSCACHE.Read.aField + xCX * 4]
1651 vmread [xDX + VMXVMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1652 cmp xCX, 0
1653 jnz %%cached_read64
1654%%no_cached_read64:
1655 %endif
1656
1657 ; Restore segment registers.
1658 MYPOPSEGS xAX, ax
1659
1660 ; Restore the host XCR0 if necessary.
1661 pop xCX
1662 test ecx, ecx
1663 jnz %%xcr0_after_skip
1664 pop xAX
1665 pop xDX
1666 xsetbv ; ecx is already zero.
1667%%xcr0_after_skip:
1668
1669 ; Restore general purpose registers.
1670 MYPOPAD
1671%endmacro
1672
1673
1674;;
1675; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
1676;
1677; @returns VBox status code
1678; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
1679; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1680; @param pVmcsCache msc:r8, gcc:rdx Pointer to the VMCS cache.
1681; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1682; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.
1683;
1684ALIGNCODE(16)
1685BEGINPROC VMXR0StartVM64
1686 push xBP
1687 mov xBP, xSP
1688
1689 pushf
1690 cli
1691
1692 ; Save all general purpose host registers.
1693 MYPUSHAD
1694
1695 ; First we have to save some final CPU context registers.
1696 lea r10, [.vmlaunch64_done wrt rip]
1697 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
1698 vmwrite rax, r10
1699 ; Note: assumes success!
1700
1701 ;
1702 ; Unify the input parameter registers.
1703 ;
1704%ifdef ASM_CALL64_GCC
1705 ; fResume already in rdi
1706 ; pCtx already in rsi
1707 mov rbx, rdx ; pVmcsCache
1708%else
1709 mov rdi, rcx ; fResume
1710 mov rsi, rdx ; pCtx
1711 mov rbx, r8 ; pVmcsCache
1712%endif
1713
1714 ;
1715 ; Save the host XCR0 and load the guest one if necessary.
1716 ; Note! Trashes rdx and rcx.
1717 ;
1718%ifdef ASM_CALL64_MSC
1719 mov rax, [xBP + 30h] ; pVCpu
1720%else
1721 mov rax, r8 ; pVCpu
1722%endif
1723 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1724 jz .xcr0_before_skip
1725
1726 xor ecx, ecx
1727 xgetbv ; Save the host one on the stack.
1728 push xDX
1729 push xAX
1730
1731 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1732 mov edx, [xSI + CPUMCTX.aXcr + 4]
1733 xor ecx, ecx ; paranoia
1734 xsetbv
1735
1736 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1737 jmp .xcr0_before_done
1738
1739.xcr0_before_skip:
1740 push 3fh ; indicate that we need not.
1741.xcr0_before_done:
1742
1743 ;
1744 ; Save segment registers.
1745 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1746 ;
1747 MYPUSHSEGS xAX, ax
1748
1749%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1750 mov ecx, [xBX + VMXVMCSCACHE.Write.cValidEntries]
1751 cmp ecx, 0
1752 je .no_cached_writes
1753 mov edx, ecx
1754 mov ecx, 0
1755 jmp .cached_write
1756
1757ALIGN(16)
1758.cached_write:
1759 mov eax, [xBX + VMXVMCSCACHE.Write.aField + xCX * 4]
1760 vmwrite xAX, [xBX + VMXVMCSCACHE.Write.aFieldVal + xCX * 8]
1761 inc xCX
1762 cmp xCX, xDX
1763 jl .cached_write
1764
1765 mov dword [xBX + VMXVMCSCACHE.Write.cValidEntries], 0
1766.no_cached_writes:
1767
1768 ; Save the pVmcsCache pointer.
1769 push xBX
1770%endif
1771
1772 ; Save the pCtx pointer.
1773 push xSI
1774
1775 ; Save host LDTR.
1776 xor eax, eax
1777 sldt ax
1778 push xAX
1779
1780%ifndef VMX_SKIP_TR
1781 ; The host TR limit is reset to 0x67; save & restore it manually.
1782 str eax
1783 push xAX
1784%endif
1785
1786%ifndef VMX_SKIP_GDTR
1787 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1788 sub xSP, xCB * 2
1789 sgdt [xSP]
1790%endif
1791%ifndef VMX_SKIP_IDTR
1792 sub xSP, xCB * 2
1793 sidt [xSP]
1794%endif
1795
1796 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1797 mov rbx, qword [xSI + CPUMCTX.cr2]
1798 mov rdx, cr2
1799 cmp rbx, rdx
1800 je .skip_cr2_write
1801 mov cr2, rbx
1802
1803.skip_cr2_write:
1804 mov eax, VMX_VMCS_HOST_RSP
1805 vmwrite xAX, xSP
1806 ; Note: assumes success!
1807 ; Don't mess with ESP anymore!!!
1808
1809 ; Fight spectre and similar.
1810 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
1811
1812 ; Load guest general purpose registers.
1813 mov rax, qword [xSI + CPUMCTX.eax]
1814 mov rbx, qword [xSI + CPUMCTX.ebx]
1815 mov rcx, qword [xSI + CPUMCTX.ecx]
1816 mov rdx, qword [xSI + CPUMCTX.edx]
1817 mov rbp, qword [xSI + CPUMCTX.ebp]
1818 mov r8, qword [xSI + CPUMCTX.r8]
1819 mov r9, qword [xSI + CPUMCTX.r9]
1820 mov r10, qword [xSI + CPUMCTX.r10]
1821 mov r11, qword [xSI + CPUMCTX.r11]
1822 mov r12, qword [xSI + CPUMCTX.r12]
1823 mov r13, qword [xSI + CPUMCTX.r13]
1824 mov r14, qword [xSI + CPUMCTX.r14]
1825 mov r15, qword [xSI + CPUMCTX.r15]
1826
1827 ; Resume or start VM?
1828 cmp xDI, 0 ; fResume
1829
1830 ; Load guest rdi & rsi.
1831 mov rdi, qword [xSI + CPUMCTX.edi]
1832 mov rsi, qword [xSI + CPUMCTX.esi]
1833
1834 je .vmlaunch64_launch
1835
1836 vmresume
1837 jc near .vmxstart64_invalid_vmcs_ptr
1838 jz near .vmxstart64_start_failed
1839 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
1840
1841.vmlaunch64_launch:
1842 vmlaunch
1843 jc near .vmxstart64_invalid_vmcs_ptr
1844 jz near .vmxstart64_start_failed
1845 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1846
1847ALIGNCODE(16)
1848.vmlaunch64_done:
1849 RESTORE_STATE_VM64
1850 mov eax, VINF_SUCCESS
1851
1852.vmstart64_end:
1853 popf
1854 pop xBP
1855 ret
1856
1857.vmxstart64_invalid_vmcs_ptr:
1858 RESTORE_STATE_VM64
1859 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1860 jmp .vmstart64_end
1861
1862.vmxstart64_start_failed:
1863 RESTORE_STATE_VM64
1864 mov eax, VERR_VMX_UNABLE_TO_START_VM
1865 jmp .vmstart64_end
1866ENDPROC VMXR0StartVM64
1867%endif ; RT_ARCH_AMD64
1868
1869
1870;;
1871; Clears the MDS buffers using VERW.
1872ALIGNCODE(16)
1873BEGINPROC hmR0MdsClear
1874 sub xSP, xCB
1875 mov [xSP], ds
1876 verw [xSP]
1877 add xSP, xCB
1878 ret
1879ENDPROC hmR0MdsClear
1880
1881
1882;;
1883; Prepares for and executes VMRUN (32-bit guests)
1884;
1885; @returns VBox status code
1886; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
1887; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
1888; @param pCtx msc:r8,gcc:rdx Pointer to the guest CPU-context.
1889; @param pVM msc:r9,gcc:rcx The cross context VM structure.
1890; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1891;
1892ALIGNCODE(16)
1893BEGINPROC SVMR0VMRun
1894%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
1895 %ifdef ASM_CALL64_GCC
1896 push r8 ; pVCpu
1897 push rcx ; pVM
1898 push rdx ; pCtx
1899 push rsi ; HCPhysVmcb
1900 push rdi ; HCPhysVmcbHost
1901 %else
1902 mov rax, [rsp + 28h]
1903 push rax ; pVCpu
1904 push r9 ; pVM
1905 push r8 ; pCtx
1906 push rdx ; HCPhysVmcb
1907 push rcx ; HCPhysVmcbHost
1908 %endif
1909 push 0
1910%endif
1911 push xBP
1912 mov xBP, xSP
1913 pushf
1914
1915 ; Save all general purpose host registers.
1916 MYPUSHAD
1917
1918 ; Load pCtx into xSI.
1919 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1920
1921 ; Save the host XCR0 and load the guest one if necessary.
1922 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
1923 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1924 jz .xcr0_before_skip
1925
1926 xor ecx, ecx
1927 xgetbv ; Save the host XCR0 on the stack
1928 push xDX
1929 push xAX
1930
1931 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1932 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
1933 mov edx, [xSI + CPUMCTX.aXcr + 4]
1934 xor ecx, ecx ; paranoia
1935 xsetbv
1936
1937 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
1938 jmp .xcr0_before_done
1939
1940.xcr0_before_skip:
1941 push 3fh ; indicate that we need not restore XCR0
1942.xcr0_before_done:
1943
1944 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1945 push xSI
1946
1947 ; Save host fs, gs, sysenter msr etc.
1948 mov xAX, [xBP + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
1949 push xAX ; save for the vmload after vmrun
1950 vmsave
1951
1952 ; Fight spectre.
1953 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
1954
1955 ; Setup xAX for VMLOAD.
1956 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; x86: take low dword only)
1957
1958 ; Load guest general purpose registers.
1959 ; eax is loaded from the VMCB by VMRUN.
1960 mov ebx, [xSI + CPUMCTX.ebx]
1961 mov ecx, [xSI + CPUMCTX.ecx]
1962 mov edx, [xSI + CPUMCTX.edx]
1963 mov edi, [xSI + CPUMCTX.edi]
1964 mov ebp, [xSI + CPUMCTX.ebp]
1965 mov esi, [xSI + CPUMCTX.esi]
1966
1967 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1968 clgi
1969 sti
1970
1971 ; Load guest fs, gs, sysenter msr etc.
1972 vmload
1973
1974 ; Run the VM.
1975 vmrun
1976
1977 ; Save guest fs, gs, sysenter msr etc.
1978 vmsave
1979
1980 ; Load host fs, gs, sysenter msr etc.
1981 pop xAX ; load HCPhysVmcbHost (pushed above)
1982 vmload
1983
1984 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1985 cli
1986 stgi
1987
1988 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1989 pop xAX
1990
1991 mov [ss:xAX + CPUMCTX.ebx], ebx
1992 mov xBX, SPECTRE_FILLER
1993 mov [ss:xAX + CPUMCTX.ecx], ecx
1994 mov xCX, xBX
1995 mov [ss:xAX + CPUMCTX.edx], edx
1996 mov xDX, xBX
1997 mov [ss:xAX + CPUMCTX.esi], esi
1998 mov xSI, xBX
1999 mov [ss:xAX + CPUMCTX.edi], edi
2000 mov xDI, xBX
2001 mov [ss:xAX + CPUMCTX.ebp], ebp
2002 mov xBP, xBX
2003
2004 ; Fight spectre. Note! Trashes xAX!
2005 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT
2006
2007 ; Restore the host xcr0 if necessary.
2008 pop xCX
2009 test ecx, ecx
2010 jnz .xcr0_after_skip
2011 pop xAX
2012 pop xDX
2013 xsetbv ; ecx is already zero
2014.xcr0_after_skip:
2015
2016 ; Restore host general purpose registers.
2017 MYPOPAD
2018
2019 mov eax, VINF_SUCCESS
2020
2021 popf
2022 pop xBP
2023%ifdef RT_ARCH_AMD64
2024 add xSP, 6*xCB
2025%endif
2026 ret
2027ENDPROC SVMR0VMRun
2028
2029
2030%ifdef RT_ARCH_AMD64
2031;;
2032; Prepares for and executes VMRUN (64-bit guests)
2033;
2034; @returns VBox status code
2035; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
2036; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
2037; @param pCtx msc:r8,gcc:rdx Pointer to the guest-CPU context.
2038; @param pVM msc:r9,gcc:rcx The cross context VM structure.
2039; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
2040;
2041ALIGNCODE(16)
2042BEGINPROC SVMR0VMRun64
2043 ; Fake a cdecl stack frame
2044 %ifdef ASM_CALL64_GCC
2045 push r8 ;pVCpu
2046 push rcx ;pVM
2047 push rdx ;pCtx
2048 push rsi ;HCPhysVmcb
2049 push rdi ;HCPhysVmcbHost
2050 %else
2051 mov rax, [rsp + 28h]
2052 push rax ; rbp + 30h pVCpu
2053 push r9 ; rbp + 28h pVM
2054 push r8 ; rbp + 20h pCtx
2055 push rdx ; rbp + 18h HCPhysVmcb
2056 push rcx ; rbp + 10h HCPhysVmcbHost
2057 %endif
2058 push 0 ; rbp + 08h "fake ret addr"
2059 push rbp ; rbp + 00h
2060 mov rbp, rsp
2061 pushf
2062
2063 ; Manual save and restore:
2064 ; - General purpose registers except RIP, RSP, RAX
2065 ;
2066 ; Trashed:
2067 ; - CR2 (we don't care)
2068 ; - LDTR (reset to 0)
2069 ; - DRx (presumably not changed at all)
2070 ; - DR7 (reset to 0x400)
2071
2072 ; Save all general purpose host registers.
2073 MYPUSHAD
2074
2075 ; Load pCtx into xSI.
2076 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
2077
2078 ; Save the host XCR0 and load the guest one if necessary.
2079 mov rax, [xBP + 30h] ; pVCpu
2080 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
2081 jz .xcr0_before_skip
2082
2083 xor ecx, ecx
2084 xgetbv ; save the host XCR0 on the stack.
2085 push xDX
2086 push xAX
2087
2088 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
2089 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
2090 mov edx, [xSI + CPUMCTX.aXcr + 4]
2091 xor ecx, ecx ; paranoia
2092 xsetbv
2093
2094 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
2095 jmp .xcr0_before_done
2096
2097.xcr0_before_skip:
2098 push 3fh ; indicate that we need not restore XCR0
2099.xcr0_before_done:
2100
2101 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
2102 push rsi
2103
2104 ; Save host fs, gs, sysenter msr etc.
2105 mov rax, [rbp + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
2106 push rax ; save for the vmload after vmrun
2107 vmsave
2108
2109 ; Fight spectre.
2110 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
2111
2112 ; Setup rax for VMLOAD.
2113 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only)
2114
2115 ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
2116 mov rbx, qword [xSI + CPUMCTX.ebx]
2117 mov rcx, qword [xSI + CPUMCTX.ecx]
2118 mov rdx, qword [xSI + CPUMCTX.edx]
2119 mov rdi, qword [xSI + CPUMCTX.edi]
2120 mov rbp, qword [xSI + CPUMCTX.ebp]
2121 mov r8, qword [xSI + CPUMCTX.r8]
2122 mov r9, qword [xSI + CPUMCTX.r9]
2123 mov r10, qword [xSI + CPUMCTX.r10]
2124 mov r11, qword [xSI + CPUMCTX.r11]
2125 mov r12, qword [xSI + CPUMCTX.r12]
2126 mov r13, qword [xSI + CPUMCTX.r13]
2127 mov r14, qword [xSI + CPUMCTX.r14]
2128 mov r15, qword [xSI + CPUMCTX.r15]
2129 mov rsi, qword [xSI + CPUMCTX.esi]
2130
2131 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
2132 clgi
2133 sti
2134
2135 ; Load guest FS, GS, Sysenter MSRs etc.
2136 vmload
2137
2138 ; Run the VM.
2139 vmrun
2140
2141 ; Save guest fs, gs, sysenter msr etc.
2142 vmsave
2143
2144 ; Load host fs, gs, sysenter msr etc.
2145 pop rax ; load HCPhysVmcbHost (pushed above)
2146 vmload
2147
2148 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
2149 cli
2150 stgi
2151
2152 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
2153 pop rax
2154
2155 mov qword [rax + CPUMCTX.ebx], rbx
2156 mov rbx, SPECTRE_FILLER64
2157 mov qword [rax + CPUMCTX.ecx], rcx
2158 mov rcx, rbx
2159 mov qword [rax + CPUMCTX.edx], rdx
2160 mov rdx, rbx
2161 mov qword [rax + CPUMCTX.esi], rsi
2162 mov rsi, rbx
2163 mov qword [rax + CPUMCTX.edi], rdi
2164 mov rdi, rbx
2165 mov qword [rax + CPUMCTX.ebp], rbp
2166 mov rbp, rbx
2167 mov qword [rax + CPUMCTX.r8], r8
2168 mov r8, rbx
2169 mov qword [rax + CPUMCTX.r9], r9
2170 mov r9, rbx
2171 mov qword [rax + CPUMCTX.r10], r10
2172 mov r10, rbx
2173 mov qword [rax + CPUMCTX.r11], r11
2174 mov r11, rbx
2175 mov qword [rax + CPUMCTX.r12], r12
2176 mov r12, rbx
2177 mov qword [rax + CPUMCTX.r13], r13
2178 mov r13, rbx
2179 mov qword [rax + CPUMCTX.r14], r14
2180 mov r14, rbx
2181 mov qword [rax + CPUMCTX.r15], r15
2182 mov r15, rbx
2183
2184 ; Fight spectre. Note! Trashes rax!
2185 INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT
2186
2187 ; Restore the host xcr0 if necessary.
2188 pop xCX
2189 test ecx, ecx
2190 jnz .xcr0_after_skip
2191 pop xAX
2192 pop xDX
2193 xsetbv ; ecx is already zero
2194.xcr0_after_skip:
2195
2196 ; Restore host general purpose registers.
2197 MYPOPAD
2198
2199 mov eax, VINF_SUCCESS
2200
2201 popf
2202 pop rbp
2203 add rsp, 6 * xCB
2204 ret
2205ENDPROC SVMR0VMRun64
2206%endif ; RT_ARCH_AMD64
2207