VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@80540

Last change on this file since 80540 was 80150, checked in by vboxsync, 5 years ago:

VMM: Kicking out 32-bit host support - VMX [drop VMCSCACHE]. bugref:9511

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision

File size: 62.9 KB
1; $Id: HMR0A.asm 80150 2019-08-06 07:44:20Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2019 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
30 %macro vmwrite 2,
31 int3
32 %endmacro
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
48;; The offset of the XMM registers in X86FXSTATE.
49; Use define because I'm too lazy to convert the struct.
50%define XMM_OFF_IN_X86FXSTATE 160
51
52;; Spectre filler for 32-bit mode.
53; Some user space address that points to a 4MB page boundary in the hope that it
54; will somehow make it less useful.
55%define SPECTRE_FILLER32 0x227fffff
56;; Spectre filler for 64-bit mode.
57; Chosen to be an invalid address (also with 5-level paging).
58%define SPECTRE_FILLER64 0x02204204207fffff
59;; Spectre filler for the current CPU mode.
60%ifdef RT_ARCH_AMD64
61 %define SPECTRE_FILLER SPECTRE_FILLER64
62%else
63 %define SPECTRE_FILLER SPECTRE_FILLER32
64%endif
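;
; Note: the filler is loaded into the scratch GPRs right after the guest registers
; are saved on VM-exit (see RESTORE_STATE_VM32/64 and the SVM run code below), so
; that speculatively executed gadgets see a harmless filler value instead of
; leftover guest register contents.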
65
66;;
67; Determine whether we should skip restoring the GDTR, IDTR and TR across VMX non-root operation.
68;
69%ifdef RT_ARCH_AMD64
70 %define VMX_SKIP_GDTR
71 %define VMX_SKIP_TR
72 %define VBOX_SKIP_RESTORE_SEG
73 %ifdef RT_OS_DARWIN
74 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
75 ; risk loading a stale LDT value or something invalid.
76 %define HM_64_BIT_USE_NULL_SEL
77 ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we always need to restore it.
78 ; See @bugref{6875}.
79 %else
80 %define VMX_SKIP_IDTR
81 %endif
82%endif
83
84;; @def MYPUSHAD
85; Macro generating an equivalent to pushad
86
87;; @def MYPOPAD
88; Macro generating an equivalent to popad
89
90;; @def MYPUSHSEGS
91; Macro saving all segment registers on the stack.
92; @param 1 full width register name
93; @param 2 16-bit register name for \a 1.
94
95;; @def MYPOPSEGS
96; Macro restoring all segment registers on the stack
97; @param 1 full width register name
98; @param 2 16-bit register name for \a 1.
99
100%ifdef ASM_CALL64_GCC
101 %macro MYPUSHAD64 0
102 push r15
103 push r14
104 push r13
105 push r12
106 push rbx
107 %endmacro
108 %macro MYPOPAD64 0
109 pop rbx
110 pop r12
111 pop r13
112 pop r14
113 pop r15
114 %endmacro
115
116%else ; ASM_CALL64_MSC
117 %macro MYPUSHAD64 0
118 push r15
119 push r14
120 push r13
121 push r12
122 push rbx
123 push rsi
124 push rdi
125 %endmacro
126 %macro MYPOPAD64 0
127 pop rdi
128 pop rsi
129 pop rbx
130 pop r12
131 pop r13
132 pop r14
133 pop r15
134 %endmacro
135%endif
136
137%ifdef VBOX_SKIP_RESTORE_SEG
138 %macro MYPUSHSEGS64 2
139 %endmacro
140
141 %macro MYPOPSEGS64 2
142 %endmacro
143%else ; !VBOX_SKIP_RESTORE_SEG
144 ; Trashes rax, rdx & rcx.
145 %macro MYPUSHSEGS64 2
146 %ifndef HM_64_BIT_USE_NULL_SEL
147 mov %2, es
148 push %1
149 mov %2, ds
150 push %1
151 %endif
152
153 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save and restore it ourselves.
154 mov ecx, MSR_K8_FS_BASE
155 rdmsr
156 push rdx
157 push rax
158 %ifndef HM_64_BIT_USE_NULL_SEL
159 push fs
160 %endif
161
162 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
163 mov ecx, MSR_K8_GS_BASE
164 rdmsr
165 push rdx
166 push rax
167 %ifndef HM_64_BIT_USE_NULL_SEL
168 push gs
169 %endif
170 %endmacro
171
172 ; Trashes rax, rdx & rcx.
173 %macro MYPOPSEGS64 2
174 ; Note: do not step through this code with a debugger!
175 %ifndef HM_64_BIT_USE_NULL_SEL
176 xor eax, eax
177 mov ds, ax
178 mov es, ax
179 mov fs, ax
180 mov gs, ax
181 %endif
182
183 %ifndef HM_64_BIT_USE_NULL_SEL
184 pop gs
185 %endif
186 pop rax
187 pop rdx
188 mov ecx, MSR_K8_GS_BASE
189 wrmsr
190
191 %ifndef HM_64_BIT_USE_NULL_SEL
192 pop fs
193 %endif
194 pop rax
195 pop rdx
196 mov ecx, MSR_K8_FS_BASE
197 wrmsr
198 ; Now it's safe to step again
199
200 %ifndef HM_64_BIT_USE_NULL_SEL
201 pop %1
202 mov ds, %2
203 pop %1
204 mov es, %2
205 %endif
206 %endmacro
207%endif ; VBOX_SKIP_RESTORE_SEG
208
209%macro MYPUSHAD32 0
210 pushad
211%endmacro
212%macro MYPOPAD32 0
213 popad
214%endmacro
215
216%macro MYPUSHSEGS32 2
217 push ds
218 push es
219 push fs
220 push gs
221%endmacro
222%macro MYPOPSEGS32 2
223 pop gs
224 pop fs
225 pop es
226 pop ds
227%endmacro
228
229%ifdef RT_ARCH_AMD64
230 %define MYPUSHAD MYPUSHAD64
231 %define MYPOPAD MYPOPAD64
232 %define MYPUSHSEGS MYPUSHSEGS64
233 %define MYPOPSEGS MYPOPSEGS64
234%else
235 %define MYPUSHAD MYPUSHAD32
236 %define MYPOPAD MYPOPAD32
237 %define MYPUSHSEGS MYPUSHSEGS32
238 %define MYPOPSEGS MYPOPSEGS32
239%endif
240
241;;
242; Creates an indirect branch prediction barrier on CPUs that need and support it.
243; @clobbers eax, edx, ecx
244; @param 1 How to address CPUMCTX.
245; @param 2 Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
246%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
247 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
248 jz %%no_indirect_branch_barrier
249 mov ecx, MSR_IA32_PRED_CMD
250 mov eax, MSR_IA32_PRED_CMD_F_IBPB
251 xor edx, edx
252 wrmsr
253%%no_indirect_branch_barrier:
254%endmacro
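;
; Note: the WRMSR of MSR_IA32_PRED_CMD_F_IBPB to MSR_IA32_PRED_CMD is what actually
; issues the barrier; the fWorldSwitcher flag test allows the mitigation to be
; enabled per VM. The macro is invoked from the world-switch code below, e.g.
; INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY.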
255
256;;
257; Creates an indirect branch prediction and L1D barrier on CPUs that need and support it.
258; @clobbers eax, edx, ecx
259; @param 1 How to address CPUMCTX.
260; @param 2 Which IBPB flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
261; @param 3 Which FLUSH flag to test for (CPUMCTX_WSF_L1D_ENTRY)
262; @param 4 Which MDS flag to test for (CPUMCTX_WSF_MDS_ENTRY)
263%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 4
264 ; Only a single test+jmp is executed when the mitigations are disabled.
265 test byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3 | %4)
266 jz %%no_barrier_needed
267
268 ; The eax:edx value is the same for both.
269 AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
270 mov eax, MSR_IA32_PRED_CMD_F_IBPB
271 xor edx, edx
272
273 ; Indirect branch barrier.
274 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
275 jz %%no_indirect_branch_barrier
276 mov ecx, MSR_IA32_PRED_CMD
277 wrmsr
278%%no_indirect_branch_barrier:
279
280 ; Level 1 data cache flush.
281 test byte [%1 + CPUMCTX.fWorldSwitcher], %3
282 jz %%no_cache_flush_barrier
283 mov ecx, MSR_IA32_FLUSH_CMD
284 wrmsr
285 jmp %%no_mds_buffer_flushing ; MDS flushing is included in L1D_FLUSH.
286%%no_cache_flush_barrier:
287
288 ; MDS buffer flushing.
289 test byte [%1 + CPUMCTX.fWorldSwitcher], %4
290 jz %%no_mds_buffer_flushing
291 sub xSP, xCB
292 mov [xSP], ds
293 verw [xSP]
294 add xSP, xCB
295%%no_mds_buffer_flushing:
296
297%%no_barrier_needed:
298%endmacro
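;
; About the VERW above: on CPUs with the MD_CLEAR microcode update, VERW with a
; readable memory selector operand also overwrites the affected internal CPU
; buffers. The DS value is stored on the stack only to give VERW a valid operand;
; the descriptor itself is irrelevant. hmR0MdsClear further down does the same.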
299
300
301;*********************************************************************************************************************************
302;* External Symbols *
303;*********************************************************************************************************************************
304%ifdef VBOX_WITH_KERNEL_USING_XMM
305extern NAME(CPUMIsGuestFPUStateActive)
306%endif
307
308
309BEGINCODE
310
311
312;/**
313; * Restores host-state fields.
314; *
315; * @returns VBox status code
316; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
317; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
318; */
319ALIGNCODE(16)
320BEGINPROC VMXRestoreHostState
321%ifdef RT_ARCH_AMD64
322 %ifndef ASM_CALL64_GCC
323 ; Use GCC's input registers since we'll be needing both rcx and rdx further
324 ; down with the wrmsr instruction. Use the R10 and R11 registers for saving
325 ; RDI and RSI since MSC preserves the latter two registers.
326 mov r10, rdi
327 mov r11, rsi
328 mov rdi, rcx
329 mov rsi, rdx
330 %endif
331
332 test edi, VMX_RESTORE_HOST_GDTR
333 jz .test_idtr
334 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
335
336.test_idtr:
337 test edi, VMX_RESTORE_HOST_IDTR
338 jz .test_ds
339 lidt [rsi + VMXRESTOREHOST.HostIdtr]
340
341.test_ds:
342 test edi, VMX_RESTORE_HOST_SEL_DS
343 jz .test_es
344 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
345 mov ds, eax
346
347.test_es:
348 test edi, VMX_RESTORE_HOST_SEL_ES
349 jz .test_tr
350 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
351 mov es, eax
352
353.test_tr:
354 test edi, VMX_RESTORE_HOST_SEL_TR
355 jz .test_fs
356 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
357 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
358 mov ax, dx
359 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
360 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
361 jnz .gdt_readonly
362 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
363 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
364 ltr dx
365 jmp short .test_fs
366.gdt_readonly:
367 test edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
368 jnz .gdt_readonly_need_writable
369 mov rcx, cr0
370 mov r9, rcx
371 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
372 and rcx, ~X86_CR0_WP
373 mov cr0, rcx
374 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
375 ltr dx
376 mov cr0, r9
377 jmp short .test_fs
378.gdt_readonly_need_writable:
379 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw.
380 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
381 lgdt [rsi + VMXRESTOREHOST.HostGdtrRw]
382 ltr dx
383 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; Load the original GDT
384
385.test_fs:
386 ;
387 ; When restoring the selector values for FS and GS, we'll temporarily trash
388 ; the base address (at least the high 32 bits, but quite possibly the
389 ; whole base address); the wrmsr will restore it correctly. (VT-x actually
390 ; restores the base correctly when leaving guest mode, but not the selector
391 ; value, so there is little problem with interrupts being enabled prior to
392 ; this restore job.)
393 ; We'll disable ints once for both FS and GS as that's probably faster.
394 ;
395 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
396 jz .restore_success
397 pushfq
398 cli ; (see above)
399
400 test edi, VMX_RESTORE_HOST_SEL_FS
401 jz .test_gs
402 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
403 mov fs, eax
404 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
405 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
406 mov ecx, MSR_K8_FS_BASE
407 wrmsr
408
409.test_gs:
410 test edi, VMX_RESTORE_HOST_SEL_GS
411 jz .restore_flags
412 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
413 mov gs, eax
414 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
415 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
416 mov ecx, MSR_K8_GS_BASE
417 wrmsr
418
419.restore_flags:
420 popfq
421
422.restore_success:
423 mov eax, VINF_SUCCESS
424 %ifndef ASM_CALL64_GCC
425 ; Restore RDI and RSI on MSC.
426 mov rdi, r10
427 mov rsi, r11
428 %endif
429%else ; RT_ARCH_X86
430 mov eax, VERR_NOT_IMPLEMENTED
431%endif
432 ret
433ENDPROC VMXRestoreHostState
434
435
436;/**
437; * Dispatches an NMI to the host.
438; */
439ALIGNCODE(16)
440BEGINPROC VMXDispatchHostNmi
441 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
442 ret
443ENDPROC VMXDispatchHostNmi
444
445
446;/**
447; * Executes VMWRITE, 64-bit value.
448; *
449; * @returns VBox status code.
450; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
451; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
452; */
453ALIGNCODE(16)
454BEGINPROC VMXWriteVmcs64
455%ifdef RT_ARCH_AMD64
456 %ifdef ASM_CALL64_GCC
457 and edi, 0ffffffffh
458 xor rax, rax
459 vmwrite rdi, rsi
460 %else
461 and ecx, 0ffffffffh
462 xor rax, rax
463 vmwrite rcx, rdx
464 %endif
465%else ; RT_ARCH_X86
466 mov ecx, [esp + 4] ; idxField
467 lea edx, [esp + 8] ; &u64Data
468 vmwrite ecx, [edx] ; low dword
469 jz .done
470 jc .done
471 inc ecx
472 xor eax, eax
473 vmwrite ecx, [edx + 4] ; high dword
474.done:
475%endif ; RT_ARCH_X86
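 ; VMX instructions report failure via RFLAGS: CF=1 means VMfailInvalid (no
 ; current VMCS), ZF=1 means VMfailValid (error code in the VM-instruction
 ; error field). Hence the jnc/jnz checks here and in the similar routines below.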
476 jnc .valid_vmcs
477 mov eax, VERR_VMX_INVALID_VMCS_PTR
478 ret
479.valid_vmcs:
480 jnz .the_end
481 mov eax, VERR_VMX_INVALID_VMCS_FIELD
482.the_end:
483 ret
484ENDPROC VMXWriteVmcs64
485
486
487;/**
488; * Executes VMREAD, 64-bit value.
489; *
490; * @returns VBox status code.
491; * @param idxField VMCS index.
492; * @param pData Where to store VM field value.
493; */
494;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
495ALIGNCODE(16)
496BEGINPROC VMXReadVmcs64
497%ifdef RT_ARCH_AMD64
498 %ifdef ASM_CALL64_GCC
499 and edi, 0ffffffffh
500 xor rax, rax
501 vmread [rsi], rdi
502 %else
503 and ecx, 0ffffffffh
504 xor rax, rax
505 vmread [rdx], rcx
506 %endif
507%else ; RT_ARCH_X86
508 mov ecx, [esp + 4] ; idxField
509 mov edx, [esp + 8] ; pData
510 vmread [edx], ecx ; low dword
511 jz .done
512 jc .done
513 inc ecx
514 xor eax, eax
515 vmread [edx + 4], ecx ; high dword
516.done:
517%endif ; RT_ARCH_X86
518 jnc .valid_vmcs
519 mov eax, VERR_VMX_INVALID_VMCS_PTR
520 ret
521.valid_vmcs:
522 jnz .the_end
523 mov eax, VERR_VMX_INVALID_VMCS_FIELD
524.the_end:
525 ret
526ENDPROC VMXReadVmcs64
527
528
529;/**
530; * Executes VMREAD, 32-bit value.
531; *
532; * @returns VBox status code.
533; * @param idxField VMCS index.
534; * @param pu32Data Where to store VM field value.
535; */
536;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
537ALIGNCODE(16)
538BEGINPROC VMXReadVmcs32
539%ifdef RT_ARCH_AMD64
540 %ifdef ASM_CALL64_GCC
541 and edi, 0ffffffffh
542 xor rax, rax
543 vmread r10, rdi
544 mov [rsi], r10d
545 %else
546 and ecx, 0ffffffffh
547 xor rax, rax
548 vmread r10, rcx
549 mov [rdx], r10d
550 %endif
551%else ; RT_ARCH_X86
552 mov ecx, [esp + 4] ; idxField
553 mov edx, [esp + 8] ; pu32Data
554 xor eax, eax
555 vmread [edx], ecx
556%endif ; RT_ARCH_X86
557 jnc .valid_vmcs
558 mov eax, VERR_VMX_INVALID_VMCS_PTR
559 ret
560.valid_vmcs:
561 jnz .the_end
562 mov eax, VERR_VMX_INVALID_VMCS_FIELD
563.the_end:
564 ret
565ENDPROC VMXReadVmcs32
566
567
568;/**
569; * Executes VMWRITE, 32-bit value.
570; *
571; * @returns VBox status code.
572; * @param idxField VMCS index.
573; * @param u32Data The 32-bit value to write.
574; */
575;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
576ALIGNCODE(16)
577BEGINPROC VMXWriteVmcs32
578%ifdef RT_ARCH_AMD64
579 %ifdef ASM_CALL64_GCC
580 and edi, 0ffffffffh
581 and esi, 0ffffffffh
582 xor rax, rax
583 vmwrite rdi, rsi
584 %else
585 and ecx, 0ffffffffh
586 and edx, 0ffffffffh
587 xor rax, rax
588 vmwrite rcx, rdx
589 %endif
590%else ; RT_ARCH_X86
591 mov ecx, [esp + 4] ; idxField
592 mov edx, [esp + 8] ; u32Data
593 xor eax, eax
594 vmwrite ecx, edx
595%endif ; RT_ARCH_X86
596 jnc .valid_vmcs
597 mov eax, VERR_VMX_INVALID_VMCS_PTR
598 ret
599.valid_vmcs:
600 jnz .the_end
601 mov eax, VERR_VMX_INVALID_VMCS_FIELD
602.the_end:
603 ret
604ENDPROC VMXWriteVmcs32
605
606
607;/**
608; * Executes VMXON.
609; *
610; * @returns VBox status code.
611; * @param HCPhysVMXOn Physical address of VMXON structure.
612; */
613;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
614BEGINPROC VMXEnable
615%ifdef RT_ARCH_AMD64
616 xor rax, rax
617 %ifdef ASM_CALL64_GCC
618 push rdi
619 %else
620 push rcx
621 %endif
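 ; VMXON takes a 64-bit physical address as a memory operand, so the argument
 ; is pushed onto the stack and referenced from there.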
622 vmxon [rsp]
623%else ; RT_ARCH_X86
624 xor eax, eax
625 vmxon [esp + 4]
626%endif ; RT_ARCH_X86
627 jnc .good
628 mov eax, VERR_VMX_INVALID_VMXON_PTR
629 jmp .the_end
630
631.good:
632 jnz .the_end
633 mov eax, VERR_VMX_VMXON_FAILED
634
635.the_end:
636%ifdef RT_ARCH_AMD64
637 add rsp, 8
638%endif
639 ret
640ENDPROC VMXEnable
641
642
643;/**
644; * Executes VMXOFF.
645; */
646;DECLASM(void) VMXDisable(void);
647BEGINPROC VMXDisable
648 vmxoff
649.the_end:
650 ret
651ENDPROC VMXDisable
652
653
654;/**
655; * Executes VMCLEAR.
656; *
657; * @returns VBox status code.
658; * @param HCPhysVmcs Physical address of VM control structure.
659; */
660;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
661ALIGNCODE(16)
662BEGINPROC VMXClearVmcs
663%ifdef RT_ARCH_AMD64
664 xor rax, rax
665 %ifdef ASM_CALL64_GCC
666 push rdi
667 %else
668 push rcx
669 %endif
670 vmclear [rsp]
671%else ; RT_ARCH_X86
672 xor eax, eax
673 vmclear [esp + 4]
674%endif ; RT_ARCH_X86
675 jnc .the_end
676 mov eax, VERR_VMX_INVALID_VMCS_PTR
677.the_end:
678%ifdef RT_ARCH_AMD64
679 add rsp, 8
680%endif
681 ret
682ENDPROC VMXClearVmcs
683
684
685;/**
686; * Executes VMPTRLD.
687; *
688; * @returns VBox status code.
689; * @param HCPhysVmcs Physical address of VMCS structure.
690; */
691;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
692ALIGNCODE(16)
693BEGINPROC VMXLoadVmcs
694%ifdef RT_ARCH_AMD64
695 xor rax, rax
696 %ifdef ASM_CALL64_GCC
697 push rdi
698 %else
699 push rcx
700 %endif
701 vmptrld [rsp]
702%else
703 xor eax, eax
704 vmptrld [esp + 4]
705%endif
706 jnc .the_end
707 mov eax, VERR_VMX_INVALID_VMCS_PTR
708.the_end:
709%ifdef RT_ARCH_AMD64
710 add rsp, 8
711%endif
712 ret
713ENDPROC VMXLoadVmcs
714
715
716;/**
717; * Executes VMPTRST.
718; *
719; * @returns VBox status code.
720; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer.
721; */
722;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);
723BEGINPROC VMXGetCurrentVmcs
724%ifdef RT_OS_OS2
725 mov eax, VERR_NOT_SUPPORTED
726 ret
727%else
728 %ifdef RT_ARCH_AMD64
729 %ifdef ASM_CALL64_GCC
730 vmptrst qword [rdi]
731 %else
732 vmptrst qword [rcx]
733 %endif
734 %else
735 vmptrst qword [esp+04h]
736 %endif
737 xor eax, eax
738.the_end:
739 ret
740%endif
741ENDPROC VMXGetCurrentVmcs
742
743;/**
744; * Invalidate a page using INVEPT.
745; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
746; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
747; */
748;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor);
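; Note: INVEPT (and INVVPID in the next routine) is emitted as raw DB bytes,
; presumably because the assembler versions targeted here lack the mnemonics.
; 66 0F 38 80 /r is INVEPT and 66 0F 38 81 /r is INVVPID; the trailing ModRM
; byte encodes the same operands as the commented-out mnemonic above each DB line.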
749BEGINPROC VMXR0InvEPT
750%ifdef RT_ARCH_AMD64
751 %ifdef ASM_CALL64_GCC
752 and edi, 0ffffffffh
753 xor rax, rax
754; invept rdi, qword [rsi]
755 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
756 %else
757 and ecx, 0ffffffffh
758 xor rax, rax
759; invept rcx, qword [rdx]
760 DB 0x66, 0x0F, 0x38, 0x80, 0xA
761 %endif
762%else
763 mov ecx, [esp + 4]
764 mov edx, [esp + 8]
765 xor eax, eax
766; invept ecx, qword [edx]
767 DB 0x66, 0x0F, 0x38, 0x80, 0xA
768%endif
769 jnc .valid_vmcs
770 mov eax, VERR_VMX_INVALID_VMCS_PTR
771 ret
772.valid_vmcs:
773 jnz .the_end
774 mov eax, VERR_INVALID_PARAMETER
775.the_end:
776 ret
777ENDPROC VMXR0InvEPT
778
779
780;/**
781; * Invalidate a page using invvpid
782; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
783; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
784; */
785;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor);
786BEGINPROC VMXR0InvVPID
787%ifdef RT_ARCH_AMD64
788 %ifdef ASM_CALL64_GCC
789 and edi, 0ffffffffh
790 xor rax, rax
791; invvpid rdi, qword [rsi]
792 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
793 %else
794 and ecx, 0ffffffffh
795 xor rax, rax
796; invvpid rcx, qword [rdx]
797 DB 0x66, 0x0F, 0x38, 0x81, 0xA
798 %endif
799%else
800 mov ecx, [esp + 4]
801 mov edx, [esp + 8]
802 xor eax, eax
803; invvpid ecx, qword [edx]
804 DB 0x66, 0x0F, 0x38, 0x81, 0xA
805%endif
806 jnc .valid_vmcs
807 mov eax, VERR_VMX_INVALID_VMCS_PTR
808 ret
809.valid_vmcs:
810 jnz .the_end
811 mov eax, VERR_INVALID_PARAMETER
812.the_end:
813 ret
814ENDPROC VMXR0InvVPID
815
816
817%if GC_ARCH_BITS == 64
818;;
819; Executes INVLPGA
820;
821; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
822; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
823;
824;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
825BEGINPROC SVMR0InvlpgA
826%ifdef RT_ARCH_AMD64
827 %ifdef ASM_CALL64_GCC
828 mov rax, rdi
829 mov rcx, rsi
830 %else
831 mov rax, rcx
832 mov rcx, rdx
833 %endif
834%else
835 mov eax, [esp + 4]
836 mov ecx, [esp + 0Ch]
837%endif
838 invlpga [xAX], ecx
839 ret
840ENDPROC SVMR0InvlpgA
841
842%else ; GC_ARCH_BITS != 64
843;;
844; Executes INVLPGA
845;
846; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
847; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
848;
849;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
850BEGINPROC SVMR0InvlpgA
851%ifdef RT_ARCH_AMD64
852 %ifdef ASM_CALL64_GCC
853 movzx rax, edi
854 mov ecx, esi
855 %else
856 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
857 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
858 ; values also set the upper 32 bits of the register to zero. Consequently
859 ; there is no need for an instruction movzlq.''
860 mov eax, ecx
861 mov ecx, edx
862 %endif
863%else
864 mov eax, [esp + 4]
865 mov ecx, [esp + 8]
866%endif
867 invlpga [xAX], ecx
868 ret
869ENDPROC SVMR0InvlpgA
870
871%endif ; GC_ARCH_BITS != 64
872
873
874%ifdef VBOX_WITH_KERNEL_USING_XMM
875
876;;
877; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
878; load the guest ones when necessary.
879;
880; @cproto DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVM pVM,
881; PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
882;
883; @returns eax
884;
885; @param fResumeVM msc:rcx
886; @param pCtx msc:rdx
887; @param pvUnused msc:r8
888; @param pVM msc:r9
889; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
890; @param pfnStartVM msc:[rbp+38h]
891;
892; @remarks This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
893;
894; @remarks Drivers shouldn't use AVX registers without saving+loading:
895; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
896; However, the compiler docs have a different idea:
897; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
898; We'll go with the former for now.
899;
900; ASSUMING 64-bit and Windows for now.
901;
902ALIGNCODE(16)
903BEGINPROC hmR0VMXStartVMWrapXMM
904 push xBP
905 mov xBP, xSP
906 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
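 ; Stack frame layout: the low 040h bytes are the Win64 spill/home area,
 ; including the 5th argument slot at [xSP + 020h] used for pVCpu below;
 ; the 0b0h bytes at [rsp + 040h] hold the non-volatile xmm6-xmm15 registers
 ; (10 x 16 bytes) plus MXCSR at [rsp + 040h + 0a0h].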
907
908 ; spill input parameters.
909 mov [xBP + 010h], rcx ; fResumeVM
910 mov [xBP + 018h], rdx ; pCtx
911 mov [xBP + 020h], r8 ; pvUnused
912 mov [xBP + 028h], r9 ; pVM
913
914 ; Ask CPUM whether we've started using the FPU yet.
915 mov rcx, [xBP + 30h] ; pVCpu
916 call NAME(CPUMIsGuestFPUStateActive)
917 test al, al
918 jnz .guest_fpu_state_active
919
920 ; No need to mess with XMM registers, just call the start routine and return.
921 mov r11, [xBP + 38h] ; pfnStartVM
922 mov r10, [xBP + 30h] ; pVCpu
923 mov [xSP + 020h], r10
924 mov rcx, [xBP + 010h] ; fResumeVM
925 mov rdx, [xBP + 018h] ; pCtx
926 mov r8, [xBP + 020h] ; pvUnused
927 mov r9, [xBP + 028h] ; pVM
928 call r11
929
930 leave
931 ret
932
933ALIGNCODE(8)
934.guest_fpu_state_active:
935 ; Save the non-volatile host XMM registers.
936 movdqa [rsp + 040h + 000h], xmm6
937 movdqa [rsp + 040h + 010h], xmm7
938 movdqa [rsp + 040h + 020h], xmm8
939 movdqa [rsp + 040h + 030h], xmm9
940 movdqa [rsp + 040h + 040h], xmm10
941 movdqa [rsp + 040h + 050h], xmm11
942 movdqa [rsp + 040h + 060h], xmm12
943 movdqa [rsp + 040h + 070h], xmm13
944 movdqa [rsp + 040h + 080h], xmm14
945 movdqa [rsp + 040h + 090h], xmm15
946 stmxcsr [rsp + 040h + 0a0h]
947
948 mov r10, [xBP + 018h] ; pCtx
949 mov eax, [r10 + CPUMCTX.fXStateMask]
950 test eax, eax
951 jz .guest_fpu_state_manually
952
953 ;
954 ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
955 ;
956 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
957 xor edx, edx
958 mov r10, [r10 + CPUMCTX.pXStateR0]
959 xrstor [r10]
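 ; (XSAVE/XRSTOR take the requested-feature bitmap in EDX:EAX, which is why
 ; eax was masked to the volatile guest components and edx zeroed above.)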
960
961 ; Make the call (same as in the other case).
962 mov r11, [xBP + 38h] ; pfnStartVM
963 mov r10, [xBP + 30h] ; pVCpu
964 mov [xSP + 020h], r10
965 mov rcx, [xBP + 010h] ; fResumeVM
966 mov rdx, [xBP + 018h] ; pCtx
967 mov r8, [xBP + 020h] ; pvUnused
968 mov r9, [xBP + 028h] ; pVM
969 call r11
970
971 mov r11d, eax ; save return value (xsave below uses eax)
972
973 ; Save the guest XMM registers.
974 mov r10, [xBP + 018h] ; pCtx
975 mov eax, [r10 + CPUMCTX.fXStateMask]
976 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
977 xor edx, edx
978 mov r10, [r10 + CPUMCTX.pXStateR0]
979 xsave [r10]
980
981 mov eax, r11d ; restore return value.
982
983.restore_non_volatile_host_xmm_regs:
984 ; Load the non-volatile host XMM registers.
985 movdqa xmm6, [rsp + 040h + 000h]
986 movdqa xmm7, [rsp + 040h + 010h]
987 movdqa xmm8, [rsp + 040h + 020h]
988 movdqa xmm9, [rsp + 040h + 030h]
989 movdqa xmm10, [rsp + 040h + 040h]
990 movdqa xmm11, [rsp + 040h + 050h]
991 movdqa xmm12, [rsp + 040h + 060h]
992 movdqa xmm13, [rsp + 040h + 070h]
993 movdqa xmm14, [rsp + 040h + 080h]
994 movdqa xmm15, [rsp + 040h + 090h]
995 ldmxcsr [rsp + 040h + 0a0h]
996 leave
997 ret
998
999 ;
1000 ; No XSAVE, load and save the guest XMM registers manually.
1001 ;
1002.guest_fpu_state_manually:
1003 ; Load the full guest XMM register state.
1004 mov r10, [r10 + CPUMCTX.pXStateR0]
1005 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1006 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1007 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1008 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1009 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1010 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1011 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1012 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1013 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1014 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1015 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1016 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1017 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1018 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1019 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1020 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1021 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1022
1023 ; Make the call (same as in the other case).
1024 mov r11, [xBP + 38h] ; pfnStartVM
1025 mov r10, [xBP + 30h] ; pVCpu
1026 mov [xSP + 020h], r10
1027 mov rcx, [xBP + 010h] ; fResumeVM
1028 mov rdx, [xBP + 018h] ; pCtx
1029 mov r8, [xBP + 020h] ; pvUnused
1030 mov r9, [xBP + 028h] ; pVM
1031 call r11
1032
1033 ; Save the guest XMM registers.
1034 mov r10, [xBP + 018h] ; pCtx
1035 mov r10, [r10 + CPUMCTX.pXStateR0]
1036 stmxcsr [r10 + X86FXSTATE.MXCSR]
1037 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1038 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1039 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1040 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1041 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1042 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1043 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1044 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1045 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1046 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1047 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1048 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1049 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1050 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1051 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1052 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1053 jmp .restore_non_volatile_host_xmm_regs
1054ENDPROC hmR0VMXStartVMWrapXMM
1055
1056;;
1057; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1058; load the guest ones when necessary.
1059;
1060; @cproto DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
1061; PFNHMSVMVMRUN pfnVMRun);
1062;
1063; @returns eax
1064;
1065; @param HCPhysVmcbHost msc:rcx
1066; @param HCPhysVmcb msc:rdx
1067; @param pCtx msc:r8
1068; @param pVM msc:r9
1069; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
1070; @param pfnVMRun msc:[rbp+38h]
1071;
1072; @remarks This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1073;
1074; @remarks Drivers shouldn't use AVX registers without saving+loading:
1075; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
1076; However, the compiler docs have a different idea:
1077; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
1078; We'll go with the former for now.
1079;
1080; ASSUMING 64-bit and Windows for now.
1081ALIGNCODE(16)
1082BEGINPROC hmR0SVMRunWrapXMM
1083 push xBP
1084 mov xBP, xSP
1085 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
1086
1087 ; spill input parameters.
1088 mov [xBP + 010h], rcx ; HCPhysVmcbHost
1089 mov [xBP + 018h], rdx ; HCPhysVmcb
1090 mov [xBP + 020h], r8 ; pCtx
1091 mov [xBP + 028h], r9 ; pVM
1092
1093 ; Ask CPUM whether we've started using the FPU yet.
1094 mov rcx, [xBP + 30h] ; pVCpu
1095 call NAME(CPUMIsGuestFPUStateActive)
1096 test al, al
1097 jnz .guest_fpu_state_active
1098
1099 ; No need to mess with XMM registers, just call the start routine and return.
1100 mov r11, [xBP + 38h] ; pfnVMRun
1101 mov r10, [xBP + 30h] ; pVCpu
1102 mov [xSP + 020h], r10
1103 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1104 mov rdx, [xBP + 018h] ; HCPhysVmcb
1105 mov r8, [xBP + 020h] ; pCtx
1106 mov r9, [xBP + 028h] ; pVM
1107 call r11
1108
1109 leave
1110 ret
1111
1112ALIGNCODE(8)
1113.guest_fpu_state_active:
1114 ; Save the non-volatile host XMM registers.
1115 movdqa [rsp + 040h + 000h], xmm6
1116 movdqa [rsp + 040h + 010h], xmm7
1117 movdqa [rsp + 040h + 020h], xmm8
1118 movdqa [rsp + 040h + 030h], xmm9
1119 movdqa [rsp + 040h + 040h], xmm10
1120 movdqa [rsp + 040h + 050h], xmm11
1121 movdqa [rsp + 040h + 060h], xmm12
1122 movdqa [rsp + 040h + 070h], xmm13
1123 movdqa [rsp + 040h + 080h], xmm14
1124 movdqa [rsp + 040h + 090h], xmm15
1125 stmxcsr [rsp + 040h + 0a0h]
1126
1127 mov r10, [xBP + 020h] ; pCtx
1128 mov eax, [r10 + CPUMCTX.fXStateMask]
1129 test eax, eax
1130 jz .guest_fpu_state_manually
1131
1132 ;
1133 ; Using XSAVE.
1134 ;
1135 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1136 xor edx, edx
1137 mov r10, [r10 + CPUMCTX.pXStateR0]
1138 xrstor [r10]
1139
1140 ; Make the call (same as in the other case).
1141 mov r11, [xBP + 38h] ; pfnVMRun
1142 mov r10, [xBP + 30h] ; pVCpu
1143 mov [xSP + 020h], r10
1144 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1145 mov rdx, [xBP + 018h] ; HCPhysVmcb
1146 mov r8, [xBP + 020h] ; pCtx
1147 mov r9, [xBP + 028h] ; pVM
1148 call r11
1149
1150 mov r11d, eax ; save return value (xsave below uses eax)
1151
1152 ; Save the guest XMM registers.
1153 mov r10, [xBP + 020h] ; pCtx
1154 mov eax, [r10 + CPUMCTX.fXStateMask]
1155 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1156 xor edx, edx
1157 mov r10, [r10 + CPUMCTX.pXStateR0]
1158 xsave [r10]
1159
1160 mov eax, r11d ; restore return value.
1161
1162.restore_non_volatile_host_xmm_regs:
1163 ; Load the non-volatile host XMM registers.
1164 movdqa xmm6, [rsp + 040h + 000h]
1165 movdqa xmm7, [rsp + 040h + 010h]
1166 movdqa xmm8, [rsp + 040h + 020h]
1167 movdqa xmm9, [rsp + 040h + 030h]
1168 movdqa xmm10, [rsp + 040h + 040h]
1169 movdqa xmm11, [rsp + 040h + 050h]
1170 movdqa xmm12, [rsp + 040h + 060h]
1171 movdqa xmm13, [rsp + 040h + 070h]
1172 movdqa xmm14, [rsp + 040h + 080h]
1173 movdqa xmm15, [rsp + 040h + 090h]
1174 ldmxcsr [rsp + 040h + 0a0h]
1175 leave
1176 ret
1177
1178 ;
1179 ; No XSAVE, load and save the guest XMM registers manually.
1180 ;
1181.guest_fpu_state_manually:
1182 ; Load the full guest XMM register state.
1183 mov r10, [r10 + CPUMCTX.pXStateR0]
1184 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1185 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1186 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1187 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1188 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1189 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1190 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1191 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1192 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1193 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1194 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1195 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1196 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1197 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1198 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1199 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1200 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1201
1202 ; Make the call (same as in the other case).
1203 mov r11, [xBP + 38h] ; pfnVMRun
1204 mov r10, [xBP + 30h] ; pVCpu
1205 mov [xSP + 020h], r10
1206 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1207 mov rdx, [xBP + 018h] ; HCPhysVmcb
1208 mov r8, [xBP + 020h] ; pCtx
1209 mov r9, [xBP + 028h] ; pVM
1210 call r11
1211
1212 ; Save the guest XMM registers.
1213 mov r10, [xBP + 020h] ; pCtx
1214 mov r10, [r10 + CPUMCTX.pXStateR0]
1215 stmxcsr [r10 + X86FXSTATE.MXCSR]
1216 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1217 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1218 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1219 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1220 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1221 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1222 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1223 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1224 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1225 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1226 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1227 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1228 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1229 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1230 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1231 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1232 jmp .restore_non_volatile_host_xmm_regs
1233ENDPROC hmR0SVMRunWrapXMM
1234
1235%endif ; VBOX_WITH_KERNEL_USING_XMM
1236
1237
1238;; @def RESTORE_STATE_VM32
1239; Macro restoring essential host state and updating guest state
1240; for common host, 32-bit guest for VT-x.
1241%macro RESTORE_STATE_VM32 0
1242 ; Restore base and limit of the IDTR & GDTR.
1243 %ifndef VMX_SKIP_IDTR
1244 lidt [xSP]
1245 add xSP, xCB * 2
1246 %endif
1247 %ifndef VMX_SKIP_GDTR
1248 lgdt [xSP]
1249 add xSP, xCB * 2
1250 %endif
1251
1252 push xDI
1253 %ifndef VMX_SKIP_TR
1254 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
1255 %else
1256 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
1257 %endif
1258
1259 mov [ss:xDI + CPUMCTX.eax], eax
1260 mov xAX, SPECTRE_FILLER
1261 mov [ss:xDI + CPUMCTX.ebx], ebx
1262 mov xBX, xAX
1263 mov [ss:xDI + CPUMCTX.ecx], ecx
1264 mov xCX, xAX
1265 mov [ss:xDI + CPUMCTX.edx], edx
1266 mov xDX, xAX
1267 mov [ss:xDI + CPUMCTX.esi], esi
1268 mov xSI, xAX
1269 mov [ss:xDI + CPUMCTX.ebp], ebp
1270 mov xBP, xAX
1271 mov xAX, cr2
1272 mov [ss:xDI + CPUMCTX.cr2], xAX
1273
1274 %ifdef RT_ARCH_AMD64
1275 pop xAX ; The guest edi we pushed above.
1276 mov dword [ss:xDI + CPUMCTX.edi], eax
1277 %else
1278 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
1279 %endif
1280
1281 ; Fight spectre.
1282 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT
1283
1284 %ifndef VMX_SKIP_TR
1285 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1286 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1287 ; @todo get rid of sgdt
1288 pop xBX ; Saved TR
1289 sub xSP, xCB * 2
1290 sgdt [xSP]
1291 mov xAX, xBX
1292 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1293 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1294 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1295 ltr bx
1296 add xSP, xCB * 2
1297 %endif
1298
1299 pop xAX ; Saved LDTR
1300 %ifdef RT_ARCH_AMD64
1301 cmp eax, 0
1302 je %%skip_ldt_write32
1303 %endif
1304 lldt ax
1305
1306%%skip_ldt_write32:
1307 add xSP, xCB ; pCtx
1308
1309 ; Restore segment registers.
1310 MYPOPSEGS xAX, ax
1311
1312 ; Restore the host XCR0 if necessary.
1313 pop xCX
1314 test ecx, ecx
1315 jnz %%xcr0_after_skip
1316 pop xAX
1317 pop xDX
1318 xsetbv ; ecx is already zero.
1319%%xcr0_after_skip:
1320
1321 ; Restore general purpose registers.
1322 MYPOPAD
1323%endmacro
1324
1325
1326;;
1327; Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
1328;
1329; @returns VBox status code
1330; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
1331; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
1332; @param pvUnused x86:[ebp+10],msc:r8, gcc:rdx Unused argument.
1333; @param pVM x86:[ebp+14],msc:r9, gcc:rcx The cross context VM structure.
1334; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1335;
1336ALIGNCODE(16)
1337BEGINPROC VMXR0StartVM32
1338 push xBP
1339 mov xBP, xSP
1340
1341 pushf
1342 cli
1343
1344 ;
1345 ; Save all general purpose host registers.
1346 ;
1347 MYPUSHAD
1348
1349 ;
1350 ; First we have to write some final guest CPU context registers.
1351 ;
1352 mov eax, VMX_VMCS_HOST_RIP
1353%ifdef RT_ARCH_AMD64
1354 lea r10, [.vmlaunch_done wrt rip]
1355 vmwrite rax, r10
1356%else
1357 mov ecx, .vmlaunch_done
1358 vmwrite eax, ecx
1359%endif
1360 ; Note: assumes success!
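 ; The VMCS host RIP field is set to the .vmlaunch_done label so that every
 ; VM-exit resumes execution there, right after the vmlaunch/vmresume below.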
1361
1362 ;
1363 ; Unify input parameter registers.
1364 ;
1365%ifdef RT_ARCH_AMD64
1366 %ifdef ASM_CALL64_GCC
1367 ; fResume already in rdi
1368 ; pCtx already in rsi
1369 mov rbx, rdx ; pvUnused
1370 %else
1371 mov rdi, rcx ; fResume
1372 mov rsi, rdx ; pCtx
1373 mov rbx, r8 ; pvUnused
1374 %endif
1375%else
1376 mov edi, [ebp + 8] ; fResume
1377 mov esi, [ebp + 12] ; pCtx
1378 mov ebx, [ebp + 16] ; pvUnused
1379%endif
1380
1381 ;
1382 ; Save the host XCR0 and load the guest one if necessary.
1383 ; Note! Trashes rdx and rcx.
1384 ;
1385%ifdef ASM_CALL64_MSC
1386 mov rax, [xBP + 30h] ; pVCpu
1387%elifdef ASM_CALL64_GCC
1388 mov rax, r8 ; pVCpu
1389%else
1390 mov eax, [xBP + 18h] ; pVCpu
1391%endif
1392 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1393 jz .xcr0_before_skip
1394
1395 xor ecx, ecx
1396 xgetbv ; Save the host one on the stack.
1397 push xDX
1398 push xAX
1399
1400 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1401 mov edx, [xSI + CPUMCTX.aXcr + 4]
1402 xor ecx, ecx ; paranoia
1403 xsetbv
1404
1405 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1406 jmp .xcr0_before_done
1407
1408.xcr0_before_skip:
1409 push 3fh ; indicate that we need not.
1410.xcr0_before_done:
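 ; The value pushed above is popped into ecx by RESTORE_STATE_VM32: zero means
 ; "the host XCR0 follows on the stack, restore it" and doubles as the XCR index
 ; for xsetbv; any non-zero value (3fh) means the restore is skipped.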
1411
1412 ;
1413 ; Save segment registers.
1414 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1415 ;
1416 MYPUSHSEGS xAX, ax
1417
1418 ; Save the pCtx pointer.
1419 push xSI
1420
1421 ; Save host LDTR.
1422 xor eax, eax
1423 sldt ax
1424 push xAX
1425
1426%ifndef VMX_SKIP_TR
1427 ; The host TR limit is reset to 0x67; save & restore it manually.
1428 str eax
1429 push xAX
1430%endif
1431
1432%ifndef VMX_SKIP_GDTR
1433 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1434 sub xSP, xCB * 2
1435 sgdt [xSP]
1436%endif
1437%ifndef VMX_SKIP_IDTR
1438 sub xSP, xCB * 2
1439 sidt [xSP]
1440%endif
1441
1442 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1443 mov xBX, [xSI + CPUMCTX.cr2]
1444 mov xDX, cr2
1445 cmp xBX, xDX
1446 je .skip_cr2_write32
1447 mov cr2, xBX
1448
1449.skip_cr2_write32:
1450 mov eax, VMX_VMCS_HOST_RSP
1451 vmwrite xAX, xSP
1452 ; Note: assumes success!
1453 ; Don't mess with ESP anymore!!!
1454
1455 ; Fight spectre and similar.
1456 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
1457
1458 ; Load guest general purpose registers.
1459 mov eax, [xSI + CPUMCTX.eax]
1460 mov ebx, [xSI + CPUMCTX.ebx]
1461 mov ecx, [xSI + CPUMCTX.ecx]
1462 mov edx, [xSI + CPUMCTX.edx]
1463 mov ebp, [xSI + CPUMCTX.ebp]
1464
1465 ; Resume or start VM?
1466 cmp xDI, 0 ; fResume
1467
1468 ; Load guest edi & esi.
1469 mov edi, [xSI + CPUMCTX.edi]
1470 mov esi, [xSI + CPUMCTX.esi]
1471
1472 je .vmlaunch_launch
1473
1474 vmresume
1475 jc near .vmxstart_invalid_vmcs_ptr
1476 jz near .vmxstart_start_failed
1477 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
1478
1479.vmlaunch_launch:
1480 vmlaunch
1481 jc near .vmxstart_invalid_vmcs_ptr
1482 jz near .vmxstart_start_failed
1483 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
1484
1485ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
1486.vmlaunch_done:
1487 RESTORE_STATE_VM32
1488 mov eax, VINF_SUCCESS
1489
1490.vmstart_end:
1491 popf
1492 pop xBP
1493 ret
1494
1495.vmxstart_invalid_vmcs_ptr:
1496 RESTORE_STATE_VM32
1497 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1498 jmp .vmstart_end
1499
1500.vmxstart_start_failed:
1501 RESTORE_STATE_VM32
1502 mov eax, VERR_VMX_UNABLE_TO_START_VM
1503 jmp .vmstart_end
1504
1505ENDPROC VMXR0StartVM32
1506
1507
1508%ifdef RT_ARCH_AMD64
1509;; @def RESTORE_STATE_VM64
1510; Macro restoring essential host state and updating guest state
1511; for 64-bit host, 64-bit guest for VT-x.
1512;
1513%macro RESTORE_STATE_VM64 0
1514 ; Restore base and limit of the IDTR & GDTR
1515 %ifndef VMX_SKIP_IDTR
1516 lidt [xSP]
1517 add xSP, xCB * 2
1518 %endif
1519 %ifndef VMX_SKIP_GDTR
1520 lgdt [xSP]
1521 add xSP, xCB * 2
1522 %endif
1523
1524 push xDI
1525 %ifndef VMX_SKIP_TR
1526 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1527 %else
1528 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1529 %endif
1530
1531 mov qword [xDI + CPUMCTX.eax], rax
1532 mov rax, SPECTRE_FILLER64
1533 mov qword [xDI + CPUMCTX.ebx], rbx
1534 mov rbx, rax
1535 mov qword [xDI + CPUMCTX.ecx], rcx
1536 mov rcx, rax
1537 mov qword [xDI + CPUMCTX.edx], rdx
1538 mov rdx, rax
1539 mov qword [xDI + CPUMCTX.esi], rsi
1540 mov rsi, rax
1541 mov qword [xDI + CPUMCTX.ebp], rbp
1542 mov rbp, rax
1543 mov qword [xDI + CPUMCTX.r8], r8
1544 mov r8, rax
1545 mov qword [xDI + CPUMCTX.r9], r9
1546 mov r9, rax
1547 mov qword [xDI + CPUMCTX.r10], r10
1548 mov r10, rax
1549 mov qword [xDI + CPUMCTX.r11], r11
1550 mov r11, rax
1551 mov qword [xDI + CPUMCTX.r12], r12
1552 mov r12, rax
1553 mov qword [xDI + CPUMCTX.r13], r13
1554 mov r13, rax
1555 mov qword [xDI + CPUMCTX.r14], r14
1556 mov r14, rax
1557 mov qword [xDI + CPUMCTX.r15], r15
1558 mov r15, rax
1559 mov rax, cr2
1560 mov qword [xDI + CPUMCTX.cr2], rax
1561
1562 pop xAX ; The guest rdi we pushed above
1563 mov qword [xDI + CPUMCTX.edi], rax
1564
1565 ; Fight spectre.
1566 INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
1567
1568 %ifndef VMX_SKIP_TR
1569 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1570 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
1571 ; @todo get rid of sgdt
1572 pop xBX ; Saved TR
1573 sub xSP, xCB * 2
1574 sgdt [xSP]
1575 mov xAX, xBX
1576 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1577 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1578 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1579 ltr bx
1580 add xSP, xCB * 2
1581 %endif
1582
1583 pop xAX ; Saved LDTR
1584 cmp eax, 0
1585 je %%skip_ldt_write64
1586 lldt ax
1587
1588%%skip_ldt_write64:
1589 pop xSI ; pCtx (needed in rsi by the macros below)
1590
1591 ; Restore segment registers.
1592 MYPOPSEGS xAX, ax
1593
1594 ; Restore the host XCR0 if necessary.
1595 pop xCX
1596 test ecx, ecx
1597 jnz %%xcr0_after_skip
1598 pop xAX
1599 pop xDX
1600 xsetbv ; ecx is already zero.
1601%%xcr0_after_skip:
1602
1603 ; Restore general purpose registers.
1604 MYPOPAD
1605%endmacro
1606
1607
1608;;
1609; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
1610;
1611; @returns VBox status code
1612; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
1613; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1614; @param pvUnused msc:r8, gcc:rdx Unused argument.
1615; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1616; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.
1617;
1618ALIGNCODE(16)
1619BEGINPROC VMXR0StartVM64
1620 push xBP
1621 mov xBP, xSP
1622
1623 pushf
1624 cli
1625
1626 ; Save all general purpose host registers.
1627 MYPUSHAD
1628
1629 ; First we have to save some final CPU context registers.
1630 lea r10, [.vmlaunch64_done wrt rip]
1631 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
1632 vmwrite rax, r10
1633 ; Note: assumes success!
1634
1635 ;
1636 ; Unify the input parameter registers.
1637 ;
1638%ifdef ASM_CALL64_GCC
1639 ; fResume already in rdi
1640 ; pCtx already in rsi
1641 mov rbx, rdx ; pvUnused
1642%else
1643 mov rdi, rcx ; fResume
1644 mov rsi, rdx ; pCtx
1645 mov rbx, r8 ; pvUnused
1646%endif
1647
1648 ;
1649 ; Save the host XCR0 and load the guest one if necessary.
1650 ; Note! Trashes rdx and rcx.
1651 ;
1652%ifdef ASM_CALL64_MSC
1653 mov rax, [xBP + 30h] ; pVCpu
1654%else
1655 mov rax, r8 ; pVCpu
1656%endif
1657 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1658 jz .xcr0_before_skip
1659
1660 xor ecx, ecx
1661 xgetbv ; Save the host one on the stack.
1662 push xDX
1663 push xAX
1664
1665 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1666 mov edx, [xSI + CPUMCTX.aXcr + 4]
1667 xor ecx, ecx ; paranoia
1668 xsetbv
1669
1670 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1671 jmp .xcr0_before_done
1672
1673.xcr0_before_skip:
1674 push 3fh ; indicate that we need not.
1675.xcr0_before_done:
1676
1677 ;
1678 ; Save segment registers.
1679 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1680 ;
1681 MYPUSHSEGS xAX, ax
1682
1683 ; Save the pCtx pointer.
1684 push xSI
1685
1686 ; Save host LDTR.
1687 xor eax, eax
1688 sldt ax
1689 push xAX
1690
1691%ifndef VMX_SKIP_TR
1692 ; The host TR limit is reset to 0x67; save & restore it manually.
1693 str eax
1694 push xAX
1695%endif
1696
1697%ifndef VMX_SKIP_GDTR
1698 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1699 sub xSP, xCB * 2
1700 sgdt [xSP]
1701%endif
1702%ifndef VMX_SKIP_IDTR
1703 sub xSP, xCB * 2
1704 sidt [xSP]
1705%endif
1706
1707 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1708 mov rbx, qword [xSI + CPUMCTX.cr2]
1709 mov rdx, cr2
1710 cmp rbx, rdx
1711 je .skip_cr2_write
1712 mov cr2, rbx
1713
1714.skip_cr2_write:
1715 mov eax, VMX_VMCS_HOST_RSP
1716 vmwrite xAX, xSP
1717 ; Note: assumes success!
1718 ; Don't mess with ESP anymore!!!
1719
1720 ; Fight spectre and similar.
1721 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
1722
1723 ; Load guest general purpose registers.
1724 mov rax, qword [xSI + CPUMCTX.eax]
1725 mov rbx, qword [xSI + CPUMCTX.ebx]
1726 mov rcx, qword [xSI + CPUMCTX.ecx]
1727 mov rdx, qword [xSI + CPUMCTX.edx]
1728 mov rbp, qword [xSI + CPUMCTX.ebp]
1729 mov r8, qword [xSI + CPUMCTX.r8]
1730 mov r9, qword [xSI + CPUMCTX.r9]
1731 mov r10, qword [xSI + CPUMCTX.r10]
1732 mov r11, qword [xSI + CPUMCTX.r11]
1733 mov r12, qword [xSI + CPUMCTX.r12]
1734 mov r13, qword [xSI + CPUMCTX.r13]
1735 mov r14, qword [xSI + CPUMCTX.r14]
1736 mov r15, qword [xSI + CPUMCTX.r15]
1737
1738 ; Resume or start VM?
1739 cmp xDI, 0 ; fResume
1740
1741 ; Load guest rdi & rsi.
1742 mov rdi, qword [xSI + CPUMCTX.edi]
1743 mov rsi, qword [xSI + CPUMCTX.esi]
1744
1745 je .vmlaunch64_launch
1746
1747 vmresume
1748 jc near .vmxstart64_invalid_vmcs_ptr
1749 jz near .vmxstart64_start_failed
1750 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
1751
1752.vmlaunch64_launch:
1753 vmlaunch
1754 jc near .vmxstart64_invalid_vmcs_ptr
1755 jz near .vmxstart64_start_failed
1756 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1757
1758ALIGNCODE(16)
1759.vmlaunch64_done:
1760 RESTORE_STATE_VM64
1761 mov eax, VINF_SUCCESS
1762
1763.vmstart64_end:
1764 popf
1765 pop xBP
1766 ret
1767
1768.vmxstart64_invalid_vmcs_ptr:
1769 RESTORE_STATE_VM64
1770 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1771 jmp .vmstart64_end
1772
1773.vmxstart64_start_failed:
1774 RESTORE_STATE_VM64
1775 mov eax, VERR_VMX_UNABLE_TO_START_VM
1776 jmp .vmstart64_end
1777ENDPROC VMXR0StartVM64
1778%endif ; RT_ARCH_AMD64
1779
1780
1781;;
1782; Clears the MDS buffers using VERW.
1783ALIGNCODE(16)
1784BEGINPROC hmR0MdsClear
1785 sub xSP, xCB
1786 mov [xSP], ds
1787 verw [xSP]
1788 add xSP, xCB
1789 ret
1790ENDPROC hmR0MdsClear
1791
1792
1793;;
1794; Prepares for and executes VMRUN (32-bit guests)
1795;
1796; @returns VBox status code
1797; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
1798; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
1799; @param pCtx msc:r8,gcc:rdx Pointer to the guest CPU-context.
1800; @param pVM msc:r9,gcc:rcx The cross context VM structure.
1801; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1802;
1803ALIGNCODE(16)
1804BEGINPROC SVMR0VMRun
1805%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
1806 %ifdef ASM_CALL64_GCC
1807 push r8 ; pVCpu
1808 push rcx ; pVM
1809 push rdx ; pCtx
1810 push rsi ; HCPhysVmcb
1811 push rdi ; HCPhysVmcbHost
1812 %else
1813 mov rax, [rsp + 28h]
1814 push rax ; pVCpu
1815 push r9 ; pVM
1816 push r8 ; pCtx
1817 push rdx ; HCPhysVmcb
1818 push rcx ; HCPhysVmcbHost
1819 %endif
1820 push 0
1821%endif
1822 push xBP
1823 mov xBP, xSP
1824 pushf
1825
1826 ; Save all general purpose host registers.
1827 MYPUSHAD
1828
1829 ; Load pCtx into xSI.
1830 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1831
1832 ; Save the host XCR0 and load the guest one if necessary.
1833 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
1834 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1835 jz .xcr0_before_skip
1836
1837 xor ecx, ecx
1838 xgetbv ; Save the host XCR0 on the stack
1839 push xDX
1840 push xAX
1841
1842 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1843 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
1844 mov edx, [xSI + CPUMCTX.aXcr + 4]
1845 xor ecx, ecx ; paranoia
1846 xsetbv
1847
1848 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
1849 jmp .xcr0_before_done
1850
1851.xcr0_before_skip:
1852 push 3fh ; indicate that we need not restore XCR0
1853.xcr0_before_done:
1854
1855 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1856 push xSI
1857
1858 ; Save host fs, gs, sysenter msr etc.
1859 mov xAX, [xBP + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
1860 push xAX ; save for the vmload after vmrun
1861 vmsave
1862
1863 ; Fight spectre.
1864 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
1865
1866 ; Setup xAX for VMLOAD.
1867 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; x86: take low dword only)
1868
1869 ; Load guest general purpose registers.
1870 ; eax is loaded from the VMCB by VMRUN.
1871 mov ebx, [xSI + CPUMCTX.ebx]
1872 mov ecx, [xSI + CPUMCTX.ecx]
1873 mov edx, [xSI + CPUMCTX.edx]
1874 mov edi, [xSI + CPUMCTX.edi]
1875 mov ebp, [xSI + CPUMCTX.ebp]
1876 mov esi, [xSI + CPUMCTX.esi]
1877
1878 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1879 clgi
1880 sti
1881
1882 ; Load guest fs, gs, sysenter msr etc.
1883 vmload
1884
1885 ; Run the VM.
1886 vmrun
1887
1888 ; Save guest fs, gs, sysenter msr etc.
1889 vmsave
1890
1891 ; Load host fs, gs, sysenter msr etc.
1892 pop xAX ; load HCPhysVmcbHost (pushed above)
1893 vmload
1894
1895 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1896 cli
1897 stgi
1898
1899 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1900 pop xAX
1901
1902 mov [ss:xAX + CPUMCTX.ebx], ebx
1903 mov xBX, SPECTRE_FILLER
1904 mov [ss:xAX + CPUMCTX.ecx], ecx
1905 mov xCX, xBX
1906 mov [ss:xAX + CPUMCTX.edx], edx
1907 mov xDX, xBX
1908 mov [ss:xAX + CPUMCTX.esi], esi
1909 mov xSI, xBX
1910 mov [ss:xAX + CPUMCTX.edi], edi
1911 mov xDI, xBX
1912 mov [ss:xAX + CPUMCTX.ebp], ebp
1913 mov xBP, xBX
1914
1915 ; Fight spectre. Note! Trashes xAX!
1916 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT
1917
1918 ; Restore the host xcr0 if necessary.
1919 pop xCX
1920 test ecx, ecx
1921 jnz .xcr0_after_skip
1922 pop xAX
1923 pop xDX
1924 xsetbv ; ecx is already zero
1925.xcr0_after_skip:
1926
1927 ; Restore host general purpose registers.
1928 MYPOPAD
1929
1930 mov eax, VINF_SUCCESS
1931
1932 popf
1933 pop xBP
1934%ifdef RT_ARCH_AMD64
1935 add xSP, 6*xCB
1936%endif
1937 ret
1938ENDPROC SVMR0VMRun
1939
1940
1941%ifdef RT_ARCH_AMD64
1942;;
1943; Prepares for and executes VMRUN (64-bit guests)
1944;
1945; @returns VBox status code
1946; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
1947; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
1948; @param pCtx msc:r8,gcc:rdx Pointer to the guest-CPU context.
1949; @param pVM msc:r9,gcc:rcx The cross context VM structure.
1950; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1951;
1952ALIGNCODE(16)
1953BEGINPROC SVMR0VMRun64
1954 ; Fake a cdecl stack frame
1955 %ifdef ASM_CALL64_GCC
1956 push r8 ;pVCpu
1957 push rcx ;pVM
1958 push rdx ;pCtx
1959 push rsi ;HCPhysVmcb
1960 push rdi ;HCPhysVmcbHost
1961 %else
1962 mov rax, [rsp + 28h]
1963 push rax ; rbp + 30h pVCpu
1964 push r9 ; rbp + 28h pVM
1965 push r8 ; rbp + 20h pCtx
1966 push rdx ; rbp + 18h HCPhysVmcb
1967 push rcx ; rbp + 10h HCPhysVmcbHost
1968 %endif
1969 push 0 ; rbp + 08h "fake ret addr"
1970 push rbp ; rbp + 00h
1971 mov rbp, rsp
1972 pushf
1973
1974 ; Manual save and restore:
1975 ; - General purpose registers except RIP, RSP, RAX
1976 ;
1977 ; Trashed:
1978 ; - CR2 (we don't care)
1979 ; - LDTR (reset to 0)
1980 ; - DRx (presumably not changed at all)
1981 ; - DR7 (reset to 0x400)
1982
1983 ; Save all general purpose host registers.
1984 MYPUSHAD
1985
1986 ; Load pCtx into xSI.
1987 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
1988
1989 ; Save the host XCR0 and load the guest one if necessary.
1990 mov rax, [xBP + 30h] ; pVCpu
1991 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1992 jz .xcr0_before_skip
1993
1994 xor ecx, ecx
1995 xgetbv ; save the host XCR0 on the stack.
1996 push xDX
1997 push xAX
1998
1999 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
2000 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
2001 mov edx, [xSI + CPUMCTX.aXcr + 4]
2002 xor ecx, ecx ; paranoia
2003 xsetbv
2004
2005 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
2006 jmp .xcr0_before_done
2007
2008.xcr0_before_skip:
2009 push 3fh ; indicate that we need not restore XCR0
2010.xcr0_before_done:
2011
2012 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
2013 push rsi
2014
2015 ; Save host fs, gs, sysenter msr etc.
2016 mov rax, [rbp + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
2017 push rax ; save for the vmload after vmrun
2018 vmsave
2019
2020 ; Fight spectre.
2021 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
2022
2023 ; Setup rax for VMLOAD.
2024 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only)
2025
2026 ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
2027 mov rbx, qword [xSI + CPUMCTX.ebx]
2028 mov rcx, qword [xSI + CPUMCTX.ecx]
2029 mov rdx, qword [xSI + CPUMCTX.edx]
2030 mov rdi, qword [xSI + CPUMCTX.edi]
2031 mov rbp, qword [xSI + CPUMCTX.ebp]
2032 mov r8, qword [xSI + CPUMCTX.r8]
2033 mov r9, qword [xSI + CPUMCTX.r9]
2034 mov r10, qword [xSI + CPUMCTX.r10]
2035 mov r11, qword [xSI + CPUMCTX.r11]
2036 mov r12, qword [xSI + CPUMCTX.r12]
2037 mov r13, qword [xSI + CPUMCTX.r13]
2038 mov r14, qword [xSI + CPUMCTX.r14]
2039 mov r15, qword [xSI + CPUMCTX.r15]
2040 mov rsi, qword [xSI + CPUMCTX.esi]
2041
2042 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
2043 clgi
2044 sti
2045
2046 ; Load guest FS, GS, Sysenter MSRs etc.
2047 vmload
2048
2049 ; Run the VM.
2050 vmrun
2051
2052 ; Save guest fs, gs, sysenter msr etc.
2053 vmsave
2054
2055 ; Load host fs, gs, sysenter msr etc.
2056 pop rax ; load HCPhysVmcbHost (pushed above)
2057 vmload
2058
2059 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
2060 cli
2061 stgi
2062
2063 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
2064 pop rax
2065
2066 mov qword [rax + CPUMCTX.ebx], rbx
2067 mov rbx, SPECTRE_FILLER64
2068 mov qword [rax + CPUMCTX.ecx], rcx
2069 mov rcx, rbx
2070 mov qword [rax + CPUMCTX.edx], rdx
2071 mov rdx, rbx
2072 mov qword [rax + CPUMCTX.esi], rsi
2073 mov rsi, rbx
2074 mov qword [rax + CPUMCTX.edi], rdi
2075 mov rdi, rbx
2076 mov qword [rax + CPUMCTX.ebp], rbp
2077 mov rbp, rbx
2078 mov qword [rax + CPUMCTX.r8], r8
2079 mov r8, rbx
2080 mov qword [rax + CPUMCTX.r9], r9
2081 mov r9, rbx
2082 mov qword [rax + CPUMCTX.r10], r10
2083 mov r10, rbx
2084 mov qword [rax + CPUMCTX.r11], r11
2085 mov r11, rbx
2086 mov qword [rax + CPUMCTX.r12], r12
2087 mov r12, rbx
2088 mov qword [rax + CPUMCTX.r13], r13
2089 mov r13, rbx
2090 mov qword [rax + CPUMCTX.r14], r14
2091 mov r14, rbx
2092 mov qword [rax + CPUMCTX.r15], r15
2093 mov r15, rbx
2094
2095 ; Fight spectre. Note! Trashes rax!
2096 INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT
2097
2098 ; Restore the host xcr0 if necessary.
2099 pop xCX
2100 test ecx, ecx
2101 jnz .xcr0_after_skip
2102 pop xAX
2103 pop xDX
2104 xsetbv ; ecx is already zero
2105.xcr0_after_skip:
2106
2107 ; Restore host general purpose registers.
2108 MYPOPAD
2109
2110 mov eax, VINF_SUCCESS
2111
2112 popf
2113 pop rbp
2114 add rsp, 6 * xCB
2115 ret
2116ENDPROC SVMR0VMRun64
2117%endif ; RT_ARCH_AMD64
2118