VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@ 78003

Last change on this file since 78003 was 77481, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Renamed VMCSCACHE to VMXVMCSBATCHCACHE to better reflect its nature and use. This is a generic cache that uses array/linear lookup for VMCS fields and is currently used solely for the 64-on-32-bit host.

1; $Id: HMR0A.asm 77481 2019-02-27 12:59:58Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2019 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
30 %macro vmwrite 2,
31 int3
32 %endmacro
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
48;; The offset of the XMM registers in X86FXSTATE.
49; Use define because I'm too lazy to convert the struct.
50%define XMM_OFF_IN_X86FXSTATE 160
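; (For reference: 160 = 0A0h is the architectural offset of the XMM register block in
;  the FXSAVE/FXRSTOR image, which X86FXSTATE follows.)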
51
52;; Spectre filler for 32-bit mode.
53; Some user-space address that points to a 4MB page boundary in the hope that it
54; will somehow make it less useful.
55%define SPECTRE_FILLER32 0x227fffff
56;; Spectre filler for 64-bit mode.
57; Chosen to be an invalid address (also with 5-level paging).
58%define SPECTRE_FILLER64 0x02204204207fffff
59;; Spectre filler for the current CPU mode.
60%ifdef RT_ARCH_AMD64
61 %define SPECTRE_FILLER SPECTRE_FILLER64
62%else
63 %define SPECTRE_FILLER SPECTRE_FILLER32
64%endif
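;
; Note: the filler value is what gets loaded into the scratch GPRs right after the guest
; registers have been saved on VM-exit (see RESTORE_STATE_VM32/VM64 and the SVMR0VMRun
; code below), so speculative execution in host context doesn't keep operating on live
; guest register values.
;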
65
66;;
67; Determine whether we can skip restoring the GDTR, IDTR and TR across VMX non-root operation.
68;
69%ifdef RT_ARCH_AMD64
70 %define VMX_SKIP_GDTR
71 %define VMX_SKIP_TR
72 %define VBOX_SKIP_RESTORE_SEG
73 %ifdef RT_OS_DARWIN
74 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
75 ; risk loading a stale LDT value or something invalid.
76 %define HM_64_BIT_USE_NULL_SEL
77 ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we always need to restore it.
78 ; See @bugref{6875}.
79 %else
80 %define VMX_SKIP_IDTR
81 %endif
82%endif
83
84;; @def MYPUSHAD
85; Macro generating an equivalent to pushad
86
87;; @def MYPOPAD
88; Macro generating an equivalent to popad
89
90;; @def MYPUSHSEGS
91; Macro saving all segment registers on the stack.
92; @param 1 full width register name
93; @param 2 16-bit register name for \a 1.
94
95;; @def MYPOPSEGS
96; Macro restoring all segment registers from the stack.
97; @param 1 full width register name
98; @param 2 16-bit register name for \a 1.
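;
; Typical usage, as in VMXR0StartVM32/VM64 below:
;   MYPUSHSEGS xAX, ax
;   ...
;   MYPOPSEGS  xAX, ax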
99
100%ifdef ASM_CALL64_GCC
101 %macro MYPUSHAD64 0
102 push r15
103 push r14
104 push r13
105 push r12
106 push rbx
107 %endmacro
108 %macro MYPOPAD64 0
109 pop rbx
110 pop r12
111 pop r13
112 pop r14
113 pop r15
114 %endmacro
115
116%else ; ASM_CALL64_MSC
117 %macro MYPUSHAD64 0
118 push r15
119 push r14
120 push r13
121 push r12
122 push rbx
123 push rsi
124 push rdi
125 %endmacro
126 %macro MYPOPAD64 0
127 pop rdi
128 pop rsi
129 pop rbx
130 pop r12
131 pop r13
132 pop r14
133 pop r15
134 %endmacro
135%endif
136
137%ifdef VBOX_SKIP_RESTORE_SEG
138 %macro MYPUSHSEGS64 2
139 %endmacro
140
141 %macro MYPOPSEGS64 2
142 %endmacro
143%else ; !VBOX_SKIP_RESTORE_SEG
144 ; trashes rax, rdx & rcx
145 %macro MYPUSHSEGS64 2
146 %ifndef HM_64_BIT_USE_NULL_SEL
147 mov %2, es
148 push %1
149 mov %2, ds
150 push %1
151 %endif
152
153 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it ourselves.
154 mov ecx, MSR_K8_FS_BASE
155 rdmsr
156 push rdx
157 push rax
158 %ifndef HM_64_BIT_USE_NULL_SEL
159 push fs
160 %endif
161
162 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
163 mov ecx, MSR_K8_GS_BASE
164 rdmsr
165 push rdx
166 push rax
167 %ifndef HM_64_BIT_USE_NULL_SEL
168 push gs
169 %endif
170 %endmacro
171
172 ; trashes rax, rdx & rcx
173 %macro MYPOPSEGS64 2
174 ; Note: do not step through this code with a debugger!
175 %ifndef HM_64_BIT_USE_NULL_SEL
176 xor eax, eax
177 mov ds, ax
178 mov es, ax
179 mov fs, ax
180 mov gs, ax
181 %endif
182
183 %ifndef HM_64_BIT_USE_NULL_SEL
184 pop gs
185 %endif
186 pop rax
187 pop rdx
188 mov ecx, MSR_K8_GS_BASE
189 wrmsr
190
191 %ifndef HM_64_BIT_USE_NULL_SEL
192 pop fs
193 %endif
194 pop rax
195 pop rdx
196 mov ecx, MSR_K8_FS_BASE
197 wrmsr
198 ; Now it's safe to step again
199
200 %ifndef HM_64_BIT_USE_NULL_SEL
201 pop %1
202 mov ds, %2
203 pop %1
204 mov es, %2
205 %endif
206 %endmacro
207%endif ; VBOX_SKIP_RESTORE_SEG
208
209%macro MYPUSHAD32 0
210 pushad
211%endmacro
212%macro MYPOPAD32 0
213 popad
214%endmacro
215
216%macro MYPUSHSEGS32 2
217 push ds
218 push es
219 push fs
220 push gs
221%endmacro
222%macro MYPOPSEGS32 2
223 pop gs
224 pop fs
225 pop es
226 pop ds
227%endmacro
228
229%ifdef RT_ARCH_AMD64
230 %define MYPUSHAD MYPUSHAD64
231 %define MYPOPAD MYPOPAD64
232 %define MYPUSHSEGS MYPUSHSEGS64
233 %define MYPOPSEGS MYPOPSEGS64
234%else
235 %define MYPUSHAD MYPUSHAD32
236 %define MYPOPAD MYPOPAD32
237 %define MYPUSHSEGS MYPUSHSEGS32
238 %define MYPOPSEGS MYPOPSEGS32
239%endif
240
241;;
242; Creates an indirect branch prediction barrier on CPUs that need and support it.
243; @clobbers eax, edx, ecx
244; @param 1 How to address CPUMCTX.
245; @param 2 Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
246%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
247 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
248 jz %%no_indirect_branch_barrier
249 mov ecx, MSR_IA32_PRED_CMD
250 mov eax, MSR_IA32_PRED_CMD_F_IBPB
251 xor edx, edx
252 wrmsr
253%%no_indirect_branch_barrier:
254%endmacro
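;
; Typical usage, as in the world-switch code below:
;   INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY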
255
256;;
257; Creates an indirect branch prediction and L1D barrier on CPUs that need and support it.
258; @clobbers eax, edx, ecx
259; @param 1 How to address CPUMCTX.
260; @param 2 Which IBPB flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
261; @param 3 Which FLUSH flag to test for (CPUMCTX_WSF_L1D_ENTRY)
262%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 3
263 ; Only one test+jmp on CPUs where these mitigations are disabled.
264 test byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3)
265 jz %%no_barrier_needed
266
267 ; The eax:edx value is the same for both.
268 AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
269 mov eax, MSR_IA32_PRED_CMD_F_IBPB
270 xor edx, edx
271
272 ; Indirect branch barrier.
273 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
274 jz %%no_indirect_branch_barrier
275 mov ecx, MSR_IA32_PRED_CMD
276 wrmsr
277%%no_indirect_branch_barrier:
278
279 ; Level 1 data cache flush.
280 test byte [%1 + CPUMCTX.fWorldSwitcher], %3
281 jz %%no_cache_flush_barrier
282 mov ecx, MSR_IA32_FLUSH_CMD
283 wrmsr
284%%no_cache_flush_barrier:
285
286%%no_barrier_needed:
287%endmacro
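;
; Typical usage, as on VM-entry below:
;   INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY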
288
289
290;*********************************************************************************************************************************
291;* External Symbols *
292;*********************************************************************************************************************************
293%ifdef VBOX_WITH_KERNEL_USING_XMM
294extern NAME(CPUMIsGuestFPUStateActive)
295%endif
296
297
298BEGINCODE
299
300
301;/**
302; * Restores host-state fields.
303; *
304; * @returns VBox status code
305; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
306; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
307; */
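; (Prototype sketch, inferred from the parameter docs above; the pointer type is an assumption:)
;DECLASM(int) VMXRestoreHostState(uint32_t f32RestoreHost, PVMXRESTOREHOST pRestoreHost);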
308ALIGNCODE(16)
309BEGINPROC VMXRestoreHostState
310%ifdef RT_ARCH_AMD64
311 %ifndef ASM_CALL64_GCC
312 ; Use GCC's input registers since we'll be needing both rcx and rdx further
313 ; down with the wrmsr instruction. Use the R10 and R11 registers for saving
314 ; RDI and RSI since MSC preserves these two registers.
315 mov r10, rdi
316 mov r11, rsi
317 mov rdi, rcx
318 mov rsi, rdx
319 %endif
320
321 test edi, VMX_RESTORE_HOST_GDTR
322 jz .test_idtr
323 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
324
325.test_idtr:
326 test edi, VMX_RESTORE_HOST_IDTR
327 jz .test_ds
328 lidt [rsi + VMXRESTOREHOST.HostIdtr]
329
330.test_ds:
331 test edi, VMX_RESTORE_HOST_SEL_DS
332 jz .test_es
333 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
334 mov ds, eax
335
336.test_es:
337 test edi, VMX_RESTORE_HOST_SEL_ES
338 jz .test_tr
339 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
340 mov es, eax
341
342.test_tr:
343 test edi, VMX_RESTORE_HOST_SEL_TR
344 jz .test_fs
345 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
346 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
347 mov ax, dx
348 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
349 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
350 jnz .gdt_readonly
351 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
352 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
353 ltr dx
354 jmp short .test_fs
355.gdt_readonly:
356 test edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
357 jnz .gdt_readonly_need_writable
358 mov rcx, cr0
359 mov r9, rcx
360 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
361 and rcx, ~X86_CR0_WP
362 mov cr0, rcx
363 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
364 ltr dx
365 mov cr0, r9
366 jmp short .test_fs
367.gdt_readonly_need_writable:
368 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw.
369 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
370 lgdt [rsi + VMXRESTOREHOST.HostGdtrRw]
371 ltr dx
372 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; Load the original GDT
373
374.test_fs:
375 ;
376 ; When restoring the selector values for FS and GS, we'll temporarily trash
377 ; the base address (at least the high 32 bits, but quite possibly the
378 ; whole base address); the wrmsr will restore it correctly. (VT-x actually
379 ; restores the base correctly when leaving guest mode, but not the selector
380 ; value, so there is little problem with interrupts being enabled prior to
381 ; this restore job.)
382 ; We'll disable ints once for both FS and GS as that's probably faster.
383 ;
384 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
385 jz .restore_success
386 pushfq
387 cli ; (see above)
388
389 test edi, VMX_RESTORE_HOST_SEL_FS
390 jz .test_gs
391 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
392 mov fs, eax
393 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
394 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
395 mov ecx, MSR_K8_FS_BASE
396 wrmsr
397
398.test_gs:
399 test edi, VMX_RESTORE_HOST_SEL_GS
400 jz .restore_flags
401 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
402 mov gs, eax
403 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
404 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
405 mov ecx, MSR_K8_GS_BASE
406 wrmsr
407
408.restore_flags:
409 popfq
410
411.restore_success:
412 mov eax, VINF_SUCCESS
413 %ifndef ASM_CALL64_GCC
414 ; Restore RDI and RSI on MSC.
415 mov rdi, r10
416 mov rsi, r11
417 %endif
418%else ; RT_ARCH_X86
419 mov eax, VERR_NOT_IMPLEMENTED
420%endif
421 ret
422ENDPROC VMXRestoreHostState
423
424
425;/**
426; * Dispatches an NMI to the host.
427; */
428ALIGNCODE(16)
429BEGINPROC VMXDispatchHostNmi
430 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
431 ret
432ENDPROC VMXDispatchHostNmi
433
434
435;/**
436; * Executes VMWRITE, 64-bit value.
437; *
438; * @returns VBox status code.
439; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
440; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
441; */
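; (Prototype sketch, by analogy with VMXReadVmcs64 and VMXWriteVmcs32 below:)
;DECLASM(int) VMXWriteVmcs64(uint32_t idxField, uint64_t u64Data);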
442ALIGNCODE(16)
443BEGINPROC VMXWriteVmcs64
444%ifdef RT_ARCH_AMD64
445 %ifdef ASM_CALL64_GCC
446 and edi, 0ffffffffh
447 xor rax, rax
448 vmwrite rdi, rsi
449 %else
450 and ecx, 0ffffffffh
451 xor rax, rax
452 vmwrite rcx, rdx
453 %endif
454%else ; RT_ARCH_X86
455 mov ecx, [esp + 4] ; idxField
456 lea edx, [esp + 8] ; &u64Data
457 vmwrite ecx, [edx] ; low dword
458 jz .done
459 jc .done
460 inc ecx
461 xor eax, eax
462 vmwrite ecx, [edx + 4] ; high dword
463.done:
464%endif ; RT_ARCH_X86
465 jnc .valid_vmcs
466 mov eax, VERR_VMX_INVALID_VMCS_PTR
467 ret
468.valid_vmcs:
469 jnz .the_end
470 mov eax, VERR_VMX_INVALID_VMCS_FIELD
471.the_end:
472 ret
473ENDPROC VMXWriteVmcs64
474
475
476;/**
477; * Executes VMREAD, 64-bit value.
478; *
479; * @returns VBox status code.
480; * @param idxField VMCS index.
481; * @param pData Where to store VM field value.
482; */
483;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
484ALIGNCODE(16)
485BEGINPROC VMXReadVmcs64
486%ifdef RT_ARCH_AMD64
487 %ifdef ASM_CALL64_GCC
488 and edi, 0ffffffffh
489 xor rax, rax
490 vmread [rsi], rdi
491 %else
492 and ecx, 0ffffffffh
493 xor rax, rax
494 vmread [rdx], rcx
495 %endif
496%else ; RT_ARCH_X86
497 mov ecx, [esp + 4] ; idxField
498 mov edx, [esp + 8] ; pData
499 vmread [edx], ecx ; low dword
500 jz .done
501 jc .done
502 inc ecx
503 xor eax, eax
504 vmread [edx + 4], ecx ; high dword
505.done:
506%endif ; RT_ARCH_X86
507 jnc .valid_vmcs
508 mov eax, VERR_VMX_INVALID_VMCS_PTR
509 ret
510.valid_vmcs:
511 jnz .the_end
512 mov eax, VERR_VMX_INVALID_VMCS_FIELD
513.the_end:
514 ret
515ENDPROC VMXReadVmcs64
516
517
518;/**
519; * Executes VMREAD, 32-bit value.
520; *
521; * @returns VBox status code.
522; * @param idxField VMCS index.
523; * @param pu32Data Where to store VM field value.
524; */
525;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
526ALIGNCODE(16)
527BEGINPROC VMXReadVmcs32
528%ifdef RT_ARCH_AMD64
529 %ifdef ASM_CALL64_GCC
530 and edi, 0ffffffffh
531 xor rax, rax
532 vmread r10, rdi
533 mov [rsi], r10d
534 %else
535 and ecx, 0ffffffffh
536 xor rax, rax
537 vmread r10, rcx
538 mov [rdx], r10d
539 %endif
540%else ; RT_ARCH_X86
541 mov ecx, [esp + 4] ; idxField
542 mov edx, [esp + 8] ; pu32Data
543 xor eax, eax
544 vmread [edx], ecx
545%endif ; RT_ARCH_X86
546 jnc .valid_vmcs
547 mov eax, VERR_VMX_INVALID_VMCS_PTR
548 ret
549.valid_vmcs:
550 jnz .the_end
551 mov eax, VERR_VMX_INVALID_VMCS_FIELD
552.the_end:
553 ret
554ENDPROC VMXReadVmcs32
555
556
557;/**
558; * Executes VMWRITE, 32-bit value.
559; *
560; * @returns VBox status code.
561; * @param idxField VMCS index.
562; * @param u32Data The 32-bit value to write to the VM field.
563; */
564;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
565ALIGNCODE(16)
566BEGINPROC VMXWriteVmcs32
567%ifdef RT_ARCH_AMD64
568 %ifdef ASM_CALL64_GCC
569 and edi, 0ffffffffh
570 and esi, 0ffffffffh
571 xor rax, rax
572 vmwrite rdi, rsi
573 %else
574 and ecx, 0ffffffffh
575 and edx, 0ffffffffh
576 xor rax, rax
577 vmwrite rcx, rdx
578 %endif
579%else ; RT_ARCH_X86
580 mov ecx, [esp + 4] ; idxField
581 mov edx, [esp + 8] ; u32Data
582 xor eax, eax
583 vmwrite ecx, edx
584%endif ; RT_ARCH_X86
585 jnc .valid_vmcs
586 mov eax, VERR_VMX_INVALID_VMCS_PTR
587 ret
588.valid_vmcs:
589 jnz .the_end
590 mov eax, VERR_VMX_INVALID_VMCS_FIELD
591.the_end:
592 ret
593ENDPROC VMXWriteVmcs32
594
595
596;/**
597; * Executes VMXON.
598; *
599; * @returns VBox status code.
600; * @param HCPhysVMXOn Physical address of VMXON structure.
601; */
602;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
603BEGINPROC VMXEnable
604%ifdef RT_ARCH_AMD64
605 xor rax, rax
606 %ifdef ASM_CALL64_GCC
607 push rdi
608 %else
609 push rcx
610 %endif
611 vmxon [rsp]
612%else ; RT_ARCH_X86
613 xor eax, eax
614 vmxon [esp + 4]
615%endif ; RT_ARCH_X86
616 jnc .good
617 mov eax, VERR_VMX_INVALID_VMXON_PTR
618 jmp .the_end
619
620.good:
621 jnz .the_end
622 mov eax, VERR_VMX_VMXON_FAILED
623
624.the_end:
625%ifdef RT_ARCH_AMD64
626 add rsp, 8
627%endif
628 ret
629ENDPROC VMXEnable
630
631
632;/**
633; * Executes VMXOFF.
634; */
635;DECLASM(void) VMXDisable(void);
636BEGINPROC VMXDisable
637 vmxoff
638.the_end:
639 ret
640ENDPROC VMXDisable
641
642
643;/**
644; * Executes VMCLEAR.
645; *
646; * @returns VBox status code.
647; * @param HCPhysVmcs Physical address of VM control structure.
648; */
649;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
650ALIGNCODE(16)
651BEGINPROC VMXClearVmcs
652%ifdef RT_ARCH_AMD64
653 xor rax, rax
654 %ifdef ASM_CALL64_GCC
655 push rdi
656 %else
657 push rcx
658 %endif
659 vmclear [rsp]
660%else ; RT_ARCH_X86
661 xor eax, eax
662 vmclear [esp + 4]
663%endif ; RT_ARCH_X86
664 jnc .the_end
665 mov eax, VERR_VMX_INVALID_VMCS_PTR
666.the_end:
667%ifdef RT_ARCH_AMD64
668 add rsp, 8
669%endif
670 ret
671ENDPROC VMXClearVmcs
672
673
674;/**
675; * Executes VMPTRLD.
676; *
677; * @returns VBox status code.
678; * @param HCPhysVmcs Physical address of VMCS structure.
679; */
680;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
681ALIGNCODE(16)
682BEGINPROC VMXActivateVmcs
683%ifdef RT_ARCH_AMD64
684 xor rax, rax
685 %ifdef ASM_CALL64_GCC
686 push rdi
687 %else
688 push rcx
689 %endif
690 vmptrld [rsp]
691%else
692 xor eax, eax
693 vmptrld [esp + 4]
694%endif
695 jnc .the_end
696 mov eax, VERR_VMX_INVALID_VMCS_PTR
697.the_end:
698%ifdef RT_ARCH_AMD64
699 add rsp, 8
700%endif
701 ret
702ENDPROC VMXActivateVmcs
703
704
705;/**
706; * Executes VMPTRST.
707; *
708; * @returns VBox status code.
709; * @param [esp + 04h] gcc:rdi msc:rcx Param 1 - Address that will receive the current VMCS pointer.
710; */
711;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
712BEGINPROC VMXGetActivatedVmcs
713%ifdef RT_OS_OS2
714 mov eax, VERR_NOT_SUPPORTED
715 ret
716%else
717 %ifdef RT_ARCH_AMD64
718 %ifdef ASM_CALL64_GCC
719 vmptrst qword [rdi]
720 %else
721 vmptrst qword [rcx]
722 %endif
723 %else
724 vmptrst qword [esp+04h]
725 %endif
726 xor eax, eax
727.the_end:
728 ret
729%endif
730ENDPROC VMXGetActivatedVmcs
731
732;/**
733; * Invalidate a page using INVEPT.
734; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
735; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
736; */
737;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor);
738BEGINPROC VMXR0InvEPT
739%ifdef RT_ARCH_AMD64
740 %ifdef ASM_CALL64_GCC
741 and edi, 0ffffffffh
742 xor rax, rax
743; invept rdi, qword [rsi]
744 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
745 %else
746 and ecx, 0ffffffffh
747 xor rax, rax
748; invept rcx, qword [rdx]
749 DB 0x66, 0x0F, 0x38, 0x80, 0xA
750 %endif
751%else
752 mov ecx, [esp + 4]
753 mov edx, [esp + 8]
754 xor eax, eax
755; invept ecx, qword [edx]
756 DB 0x66, 0x0F, 0x38, 0x80, 0xA
757%endif
758 jnc .valid_vmcs
759 mov eax, VERR_VMX_INVALID_VMCS_PTR
760 ret
761.valid_vmcs:
762 jnz .the_end
763 mov eax, VERR_INVALID_PARAMETER
764.the_end:
765 ret
766ENDPROC VMXR0InvEPT
767
768
769;/**
770; * Invalidate a page using invvpid
771; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
772; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
773; */
774;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor);
775BEGINPROC VMXR0InvVPID
776%ifdef RT_ARCH_AMD64
777 %ifdef ASM_CALL64_GCC
778 and edi, 0ffffffffh
779 xor rax, rax
780; invvpid rdi, qword [rsi]
781 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
782 %else
783 and ecx, 0ffffffffh
784 xor rax, rax
785; invvpid rcx, qword [rdx]
786 DB 0x66, 0x0F, 0x38, 0x81, 0xA
787 %endif
788%else
789 mov ecx, [esp + 4]
790 mov edx, [esp + 8]
791 xor eax, eax
792; invvpid ecx, qword [edx]
793 DB 0x66, 0x0F, 0x38, 0x81, 0xA
794%endif
795 jnc .valid_vmcs
796 mov eax, VERR_VMX_INVALID_VMCS_PTR
797 ret
798.valid_vmcs:
799 jnz .the_end
800 mov eax, VERR_INVALID_PARAMETER
801.the_end:
802 ret
803ENDPROC VMXR0InvVPID
804
805
806%if GC_ARCH_BITS == 64
807;;
808; Executes INVLPGA
809;
810; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
811; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
812;
813;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
814BEGINPROC SVMR0InvlpgA
815%ifdef RT_ARCH_AMD64
816 %ifdef ASM_CALL64_GCC
817 mov rax, rdi
818 mov rcx, rsi
819 %else
820 mov rax, rcx
821 mov rcx, rdx
822 %endif
823%else
824 mov eax, [esp + 4]
825 mov ecx, [esp + 0Ch]
826%endif
827 invlpga [xAX], ecx
828 ret
829ENDPROC SVMR0InvlpgA
830
831%else ; GC_ARCH_BITS != 64
832;;
833; Executes INVLPGA
834;
835; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
836; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
837;
838;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
839BEGINPROC SVMR0InvlpgA
840%ifdef RT_ARCH_AMD64
841 %ifdef ASM_CALL64_GCC
842 movzx rax, edi
843 mov ecx, esi
844 %else
845 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
846 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
847 ; values also set the upper 32 bits of the register to zero. Consequently
848 ; there is no need for an instruction movzlq.''
849 mov eax, ecx
850 mov ecx, edx
851 %endif
852%else
853 mov eax, [esp + 4]
854 mov ecx, [esp + 8]
855%endif
856 invlpga [xAX], ecx
857 ret
858ENDPROC SVMR0InvlpgA
859
860%endif ; GC_ARCH_BITS != 64
861
862
863%ifdef VBOX_WITH_KERNEL_USING_XMM
864
865;;
866; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
867; loads the guest ones when necessary.
868;
869; @cproto DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pCache,
870; PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
871;
872; @returns eax
873;
874; @param fResumeVM msc:rcx
875; @param pCtx msc:rdx
876; @param pVmcsCache msc:r8
877; @param pVM msc:r9
878; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
879; @param pfnStartVM msc:[rbp+38h]
880;
881; @remarks This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
882;
883; @remarks Drivers shouldn't use AVX registers without saving+loading:
884; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
885; However, the compiler docs have a different idea:
886; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
887; We'll go with the former for now.
888;
889; ASSUMING 64-bit and windows for now.
890;
891ALIGNCODE(16)
892BEGINPROC hmR0VMXStartVMWrapXMM
893 push xBP
894 mov xBP, xSP
895 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
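 ; Rough frame layout: [xSP..xSP+03Fh] is the Win64 home area plus the 5th-argument
 ; slot at [xSP + 020h] (and padding); [rsp + 040h..rsp + 0EFh] holds the ten
 ; non-volatile XMM registers and MXCSR saved below.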
896
897 ; spill input parameters.
898 mov [xBP + 010h], rcx ; fResumeVM
899 mov [xBP + 018h], rdx ; pCtx
900 mov [xBP + 020h], r8 ; pVmcsCache
901 mov [xBP + 028h], r9 ; pVM
902
903 ; Ask CPUM whether we've started using the FPU yet.
904 mov rcx, [xBP + 30h] ; pVCpu
905 call NAME(CPUMIsGuestFPUStateActive)
906 test al, al
907 jnz .guest_fpu_state_active
908
909 ; No need to mess with XMM registers; just call the start routine and return.
910 mov r11, [xBP + 38h] ; pfnStartVM
911 mov r10, [xBP + 30h] ; pVCpu
912 mov [xSP + 020h], r10
913 mov rcx, [xBP + 010h] ; fResumeVM
914 mov rdx, [xBP + 018h] ; pCtx
915 mov r8, [xBP + 020h] ; pVmcsCache
916 mov r9, [xBP + 028h] ; pVM
917 call r11
918
919 leave
920 ret
921
922ALIGNCODE(8)
923.guest_fpu_state_active:
924 ; Save the non-volatile host XMM registers.
925 movdqa [rsp + 040h + 000h], xmm6
926 movdqa [rsp + 040h + 010h], xmm7
927 movdqa [rsp + 040h + 020h], xmm8
928 movdqa [rsp + 040h + 030h], xmm9
929 movdqa [rsp + 040h + 040h], xmm10
930 movdqa [rsp + 040h + 050h], xmm11
931 movdqa [rsp + 040h + 060h], xmm12
932 movdqa [rsp + 040h + 070h], xmm13
933 movdqa [rsp + 040h + 080h], xmm14
934 movdqa [rsp + 040h + 090h], xmm15
935 stmxcsr [rsp + 040h + 0a0h]
936
937 mov r10, [xBP + 018h] ; pCtx
938 mov eax, [r10 + CPUMCTX.fXStateMask]
939 test eax, eax
940 jz .guest_fpu_state_manually
941
942 ;
943 ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
944 ;
945 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
946 xor edx, edx
947 mov r10, [r10 + CPUMCTX.pXStateR0]
948 xrstor [r10]
949
950 ; Make the call (same as in the other case ).
951 mov r11, [xBP + 38h] ; pfnStartVM
952 mov r10, [xBP + 30h] ; pVCpu
953 mov [xSP + 020h], r10
954 mov rcx, [xBP + 010h] ; fResumeVM
955 mov rdx, [xBP + 018h] ; pCtx
956 mov r8, [xBP + 020h] ; pVmcsCache
957 mov r9, [xBP + 028h] ; pVM
958 call r11
959
960 mov r11d, eax ; save return value (xsave below uses eax)
961
962 ; Save the guest XMM registers.
963 mov r10, [xBP + 018h] ; pCtx
964 mov eax, [r10 + CPUMCTX.fXStateMask]
965 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
966 xor edx, edx
967 mov r10, [r10 + CPUMCTX.pXStateR0]
968 xsave [r10]
969
970 mov eax, r11d ; restore return value.
971
972.restore_non_volatile_host_xmm_regs:
973 ; Load the non-volatile host XMM registers.
974 movdqa xmm6, [rsp + 040h + 000h]
975 movdqa xmm7, [rsp + 040h + 010h]
976 movdqa xmm8, [rsp + 040h + 020h]
977 movdqa xmm9, [rsp + 040h + 030h]
978 movdqa xmm10, [rsp + 040h + 040h]
979 movdqa xmm11, [rsp + 040h + 050h]
980 movdqa xmm12, [rsp + 040h + 060h]
981 movdqa xmm13, [rsp + 040h + 070h]
982 movdqa xmm14, [rsp + 040h + 080h]
983 movdqa xmm15, [rsp + 040h + 090h]
984 ldmxcsr [rsp + 040h + 0a0h]
985 leave
986 ret
987
988 ;
989 ; No XSAVE, load and save the guest XMM registers manually.
990 ;
991.guest_fpu_state_manually:
992 ; Load the full guest XMM register state.
993 mov r10, [r10 + CPUMCTX.pXStateR0]
994 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
995 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
996 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
997 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
998 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
999 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1000 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1001 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1002 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1003 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1004 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1005 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1006 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1007 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1008 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1009 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1010 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1011
1012 ; Make the call (same as in the other case ).
1013 mov r11, [xBP + 38h] ; pfnStartVM
1014 mov r10, [xBP + 30h] ; pVCpu
1015 mov [xSP + 020h], r10
1016 mov rcx, [xBP + 010h] ; fResumeVM
1017 mov rdx, [xBP + 018h] ; pCtx
1018 mov r8, [xBP + 020h] ; pVmcsCache
1019 mov r9, [xBP + 028h] ; pVM
1020 call r11
1021
1022 ; Save the guest XMM registers.
1023 mov r10, [xBP + 018h] ; pCtx
1024 mov r10, [r10 + CPUMCTX.pXStateR0]
1025 stmxcsr [r10 + X86FXSTATE.MXCSR]
1026 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1027 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1028 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1029 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1030 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1031 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1032 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1033 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1034 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1035 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1036 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1037 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1038 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1039 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1040 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1041 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1042 jmp .restore_non_volatile_host_xmm_regs
1043ENDPROC hmR0VMXStartVMWrapXMM
1044
1045;;
1046; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1047; loads the guest ones when necessary.
1048;
1049; @cproto DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
1050; PFNHMSVMVMRUN pfnVMRun);
1051;
1052; @returns eax
1053;
1054; @param HCPhysVmcbHost msc:rcx
1055; @param HCPhysVmcb msc:rdx
1056; @param pCtx msc:r8
1057; @param pVM msc:r9
1058; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
1059; @param pfnVMRun msc:[rbp+38h]
1060;
1061; @remarks This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1062;
1063; @remarks Drivers shouldn't use AVX registers without saving+loading:
1064; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
1065; However, the compiler docs have a different idea:
1066; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
1067; We'll go with the former for now.
1068;
1069; ASSUMING 64-bit and windows for now.
1070ALIGNCODE(16)
1071BEGINPROC hmR0SVMRunWrapXMM
1072 push xBP
1073 mov xBP, xSP
1074 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
1075
1076 ; spill input parameters.
1077 mov [xBP + 010h], rcx ; HCPhysVmcbHost
1078 mov [xBP + 018h], rdx ; HCPhysVmcb
1079 mov [xBP + 020h], r8 ; pCtx
1080 mov [xBP + 028h], r9 ; pVM
1081
1082 ; Ask CPUM whether we've started using the FPU yet.
1083 mov rcx, [xBP + 30h] ; pVCpu
1084 call NAME(CPUMIsGuestFPUStateActive)
1085 test al, al
1086 jnz .guest_fpu_state_active
1087
1088 ; No need to mess with XMM registers; just call the start routine and return.
1089 mov r11, [xBP + 38h] ; pfnVMRun
1090 mov r10, [xBP + 30h] ; pVCpu
1091 mov [xSP + 020h], r10
1092 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1093 mov rdx, [xBP + 018h] ; HCPhysVmcb
1094 mov r8, [xBP + 020h] ; pCtx
1095 mov r9, [xBP + 028h] ; pVM
1096 call r11
1097
1098 leave
1099 ret
1100
1101ALIGNCODE(8)
1102.guest_fpu_state_active:
1103 ; Save the non-volatile host XMM registers.
1104 movdqa [rsp + 040h + 000h], xmm6
1105 movdqa [rsp + 040h + 010h], xmm7
1106 movdqa [rsp + 040h + 020h], xmm8
1107 movdqa [rsp + 040h + 030h], xmm9
1108 movdqa [rsp + 040h + 040h], xmm10
1109 movdqa [rsp + 040h + 050h], xmm11
1110 movdqa [rsp + 040h + 060h], xmm12
1111 movdqa [rsp + 040h + 070h], xmm13
1112 movdqa [rsp + 040h + 080h], xmm14
1113 movdqa [rsp + 040h + 090h], xmm15
1114 stmxcsr [rsp + 040h + 0a0h]
1115
1116 mov r10, [xBP + 020h] ; pCtx
1117 mov eax, [r10 + CPUMCTX.fXStateMask]
1118 test eax, eax
1119 jz .guest_fpu_state_manually
1120
1121 ;
1122 ; Using XSAVE.
1123 ;
1124 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1125 xor edx, edx
1126 mov r10, [r10 + CPUMCTX.pXStateR0]
1127 xrstor [r10]
1128
1129 ; Make the call (same as in the other case ).
1130 mov r11, [xBP + 38h] ; pfnVMRun
1131 mov r10, [xBP + 30h] ; pVCpu
1132 mov [xSP + 020h], r10
1133 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1134 mov rdx, [xBP + 018h] ; HCPhysVmcb
1135 mov r8, [xBP + 020h] ; pCtx
1136 mov r9, [xBP + 028h] ; pVM
1137 call r11
1138
1139 mov r11d, eax ; save return value (xsave below uses eax)
1140
1141 ; Save the guest XMM registers.
1142 mov r10, [xBP + 020h] ; pCtx
1143 mov eax, [r10 + CPUMCTX.fXStateMask]
1144 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1145 xor edx, edx
1146 mov r10, [r10 + CPUMCTX.pXStateR0]
1147 xsave [r10]
1148
1149 mov eax, r11d ; restore return value.
1150
1151.restore_non_volatile_host_xmm_regs:
1152 ; Load the non-volatile host XMM registers.
1153 movdqa xmm6, [rsp + 040h + 000h]
1154 movdqa xmm7, [rsp + 040h + 010h]
1155 movdqa xmm8, [rsp + 040h + 020h]
1156 movdqa xmm9, [rsp + 040h + 030h]
1157 movdqa xmm10, [rsp + 040h + 040h]
1158 movdqa xmm11, [rsp + 040h + 050h]
1159 movdqa xmm12, [rsp + 040h + 060h]
1160 movdqa xmm13, [rsp + 040h + 070h]
1161 movdqa xmm14, [rsp + 040h + 080h]
1162 movdqa xmm15, [rsp + 040h + 090h]
1163 ldmxcsr [rsp + 040h + 0a0h]
1164 leave
1165 ret
1166
1167 ;
1168 ; No XSAVE, load and save the guest XMM registers manually.
1169 ;
1170.guest_fpu_state_manually:
1171 ; Load the full guest XMM register state.
1172 mov r10, [r10 + CPUMCTX.pXStateR0]
1173 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1174 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1175 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1176 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1177 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1178 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1179 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1180 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1181 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1182 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1183 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1184 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1185 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1186 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1187 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1188 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1189 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1190
1191 ; Make the call (same as in the other case ).
1192 mov r11, [xBP + 38h] ; pfnVMRun
1193 mov r10, [xBP + 30h] ; pVCpu
1194 mov [xSP + 020h], r10
1195 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1196 mov rdx, [xBP + 018h] ; HCPhysVmcb
1197 mov r8, [xBP + 020h] ; pCtx
1198 mov r9, [xBP + 028h] ; pVM
1199 call r11
1200
1201 ; Save the guest XMM registers.
1202 mov r10, [xBP + 020h] ; pCtx
1203 mov r10, [r10 + CPUMCTX.pXStateR0]
1204 stmxcsr [r10 + X86FXSTATE.MXCSR]
1205 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1206 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1207 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1208 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1209 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1210 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1211 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1212 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1213 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1214 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1215 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1216 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1217 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1218 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1219 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1220 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1221 jmp .restore_non_volatile_host_xmm_regs
1222ENDPROC hmR0SVMRunWrapXMM
1223
1224%endif ; VBOX_WITH_KERNEL_USING_XMM
1225
1226
1227;; @def RESTORE_STATE_VM32
1228; Macro restoring essential host state and updating guest state
1229; for common host, 32-bit guest for VT-x.
1230%macro RESTORE_STATE_VM32 0
1231 ; Restore base and limit of the IDTR & GDTR.
1232 %ifndef VMX_SKIP_IDTR
1233 lidt [xSP]
1234 add xSP, xCB * 2
1235 %endif
1236 %ifndef VMX_SKIP_GDTR
1237 lgdt [xSP]
1238 add xSP, xCB * 2
1239 %endif
1240
1241 push xDI
1242 %ifndef VMX_SKIP_TR
1243 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
1244 %else
1245 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
1246 %endif
1247
1248 mov [ss:xDI + CPUMCTX.eax], eax
1249 mov xAX, SPECTRE_FILLER
1250 mov [ss:xDI + CPUMCTX.ebx], ebx
1251 mov xBX, xAX
1252 mov [ss:xDI + CPUMCTX.ecx], ecx
1253 mov xCX, xAX
1254 mov [ss:xDI + CPUMCTX.edx], edx
1255 mov xDX, xAX
1256 mov [ss:xDI + CPUMCTX.esi], esi
1257 mov xSI, xAX
1258 mov [ss:xDI + CPUMCTX.ebp], ebp
1259 mov xBP, xAX
1260 mov xAX, cr2
1261 mov [ss:xDI + CPUMCTX.cr2], xAX
1262
1263 %ifdef RT_ARCH_AMD64
1264 pop xAX ; The guest edi we pushed above.
1265 mov dword [ss:xDI + CPUMCTX.edi], eax
1266 %else
1267 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
1268 %endif
1269
1270 ; Fight spectre.
1271 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT
1272
1273 %ifndef VMX_SKIP_TR
1274 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1275 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1276 ; @todo get rid of sgdt
1277 pop xBX ; Saved TR
1278 sub xSP, xCB * 2
1279 sgdt [xSP]
1280 mov xAX, xBX
1281 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1282 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1283 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1284 ltr bx
1285 add xSP, xCB * 2
1286 %endif
1287
1288 pop xAX ; Saved LDTR
1289 %ifdef RT_ARCH_AMD64
1290 cmp eax, 0
1291 je %%skip_ldt_write32
1292 %endif
1293 lldt ax
1294
1295%%skip_ldt_write32:
1296 add xSP, xCB ; pCtx
1297
1298 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1299 pop xDX ; Saved pVmcsCache
1300
1301 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
1302 ; vmread's will fail (only eflags.cf=1 will be set), but that shouldn't cause any
1303 ; trouble, it will only be less efficient.
1304 mov ecx, [ss:xDX + VMXVMCSBATCHCACHE.Read.cValidEntries]
1305 cmp ecx, 0 ; Can't happen
1306 je %%no_cached_read32
1307 jmp %%cached_read32
1308
1309ALIGN(16)
1310%%cached_read32:
1311 dec xCX
1312 mov eax, [ss:xDX + VMXVMCSBATCHCACHE.Read.aField + xCX * 4]
1313 ; Note! This leaves the high 32 bits of the cache entry unmodified!!
1314 vmread [ss:xDX + VMXVMCSBATCHCACHE.Read.aFieldVal + xCX * 8], xAX
1315 cmp xCX, 0
1316 jnz %%cached_read32
1317%%no_cached_read32:
1318 %endif
1319
1320 ; Restore segment registers.
1321 MYPOPSEGS xAX, ax
1322
1323 ; Restore the host XCR0 if necessary.
1324 pop xCX
1325 test ecx, ecx
1326 jnz %%xcr0_after_skip
1327 pop xAX
1328 pop xDX
1329 xsetbv ; ecx is already zero.
1330%%xcr0_after_skip:
1331
1332 ; Restore general purpose registers.
1333 MYPOPAD
1334%endmacro
1335
1336
1337;;
1338; Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
1339;
1340; @returns VBox status code
1341; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
1342; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
1343; @param pVmcsCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
1344; @param pVM x86:[ebp+14],msc:r9, gcc:rcx The cross context VM structure.
1345; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1346;
1347ALIGNCODE(16)
1348BEGINPROC VMXR0StartVM32
1349 push xBP
1350 mov xBP, xSP
1351
1352 pushf
1353 cli
1354
1355 ;
1356 ; Save all general purpose host registers.
1357 ;
1358 MYPUSHAD
1359
1360 ;
1361 ; First we have to write some final guest CPU context registers.
1362 ;
1363 mov eax, VMX_VMCS_HOST_RIP
1364%ifdef RT_ARCH_AMD64
1365 lea r10, [.vmlaunch_done wrt rip]
1366 vmwrite rax, r10
1367%else
1368 mov ecx, .vmlaunch_done
1369 vmwrite eax, ecx
1370%endif
1371 ; Note: assumes success!
1372
1373 ;
1374 ; Unify input parameter registers.
1375 ;
1376%ifdef RT_ARCH_AMD64
1377 %ifdef ASM_CALL64_GCC
1378 ; fResume already in rdi
1379 ; pCtx already in rsi
1380 mov rbx, rdx ; pVmcsCache
1381 %else
1382 mov rdi, rcx ; fResume
1383 mov rsi, rdx ; pCtx
1384 mov rbx, r8 ; pVmcsCache
1385 %endif
1386%else
1387 mov edi, [ebp + 8] ; fResume
1388 mov esi, [ebp + 12] ; pCtx
1389 mov ebx, [ebp + 16] ; pVmcsCache
1390%endif
1391
1392 ;
1393 ; Save the host XCR0 and load the guest one if necessary.
1394 ; Note! Trashes rdx and rcx.
1395 ;
1396%ifdef ASM_CALL64_MSC
1397 mov rax, [xBP + 30h] ; pVCpu
1398%elifdef ASM_CALL64_GCC
1399 mov rax, r8 ; pVCpu
1400%else
1401 mov eax, [xBP + 18h] ; pVCpu
1402%endif
1403 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1404 jz .xcr0_before_skip
1405
1406 xor ecx, ecx
1407 xgetbv ; Save the host one on the stack.
1408 push xDX
1409 push xAX
1410
1411 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1412 mov edx, [xSI + CPUMCTX.aXcr + 4]
1413 xor ecx, ecx ; paranoia
1414 xsetbv
1415
1416 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1417 jmp .xcr0_before_done
1418
1419.xcr0_before_skip:
1420 push 3fh ; indicate that we need not restore XCR0.
1421.xcr0_before_done:
1422
1423 ;
1424 ; Save segment registers.
1425 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1426 ;
1427 MYPUSHSEGS xAX, ax
1428
1429%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1430 mov ecx, [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries]
1431 cmp ecx, 0
1432 je .no_cached_writes
1433 mov edx, ecx
1434 mov ecx, 0
1435 jmp .cached_write
1436
1437ALIGN(16)
1438.cached_write:
1439 mov eax, [xBX + VMXVMCSBATCHCACHE.Write.aField + xCX * 4]
1440 vmwrite xAX, [xBX + VMXVMCSBATCHCACHE.Write.aFieldVal + xCX * 8]
1441 inc xCX
1442 cmp xCX, xDX
1443 jl .cached_write
1444
1445 mov dword [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries], 0
1446.no_cached_writes:
1447
1448 ; Save the pVmcsCache pointer.
1449 push xBX
1450%endif
1451
1452 ; Save the pCtx pointer.
1453 push xSI
1454
1455 ; Save host LDTR.
1456 xor eax, eax
1457 sldt ax
1458 push xAX
1459
1460%ifndef VMX_SKIP_TR
1461 ; The host TR limit is reset to 0x67; save & restore it manually.
1462 str eax
1463 push xAX
1464%endif
1465
1466%ifndef VMX_SKIP_GDTR
1467 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1468 sub xSP, xCB * 2
1469 sgdt [xSP]
1470%endif
1471%ifndef VMX_SKIP_IDTR
1472 sub xSP, xCB * 2
1473 sidt [xSP]
1474%endif
1475
1476 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1477 mov xBX, [xSI + CPUMCTX.cr2]
1478 mov xDX, cr2
1479 cmp xBX, xDX
1480 je .skip_cr2_write32
1481 mov cr2, xBX
1482
1483.skip_cr2_write32:
1484 mov eax, VMX_VMCS_HOST_RSP
1485 vmwrite xAX, xSP
1486 ; Note: assumes success!
1487 ; Don't mess with ESP anymore!!!
1488
1489 ; Fight spectre and similar.
1490 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY
1491
1492 ; Load guest general purpose registers.
1493 mov eax, [xSI + CPUMCTX.eax]
1494 mov ebx, [xSI + CPUMCTX.ebx]
1495 mov ecx, [xSI + CPUMCTX.ecx]
1496 mov edx, [xSI + CPUMCTX.edx]
1497 mov ebp, [xSI + CPUMCTX.ebp]
1498
1499 ; Resume or start VM?
1500 cmp xDI, 0 ; fResume
1501
1502 ; Load guest edi & esi.
1503 mov edi, [xSI + CPUMCTX.edi]
1504 mov esi, [xSI + CPUMCTX.esi]
1505
1506 je .vmlaunch_launch
1507
1508 vmresume
1509 jc near .vmxstart_invalid_vmcs_ptr
1510 jz near .vmxstart_start_failed
1511 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
1512
1513.vmlaunch_launch:
1514 vmlaunch
1515 jc near .vmxstart_invalid_vmcs_ptr
1516 jz near .vmxstart_start_failed
1517 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
1518
1519ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
1520.vmlaunch_done:
1521 RESTORE_STATE_VM32
1522 mov eax, VINF_SUCCESS
1523
1524.vmstart_end:
1525 popf
1526 pop xBP
1527 ret
1528
1529.vmxstart_invalid_vmcs_ptr:
1530 RESTORE_STATE_VM32
1531 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1532 jmp .vmstart_end
1533
1534.vmxstart_start_failed:
1535 RESTORE_STATE_VM32
1536 mov eax, VERR_VMX_UNABLE_TO_START_VM
1537 jmp .vmstart_end
1538
1539ENDPROC VMXR0StartVM32
1540
1541
1542%ifdef RT_ARCH_AMD64
1543;; @def RESTORE_STATE_VM64
1544; Macro restoring essential host state and updating guest state
1545; for 64-bit host, 64-bit guest for VT-x.
1546;
1547%macro RESTORE_STATE_VM64 0
1548 ; Restore base and limit of the IDTR & GDTR
1549 %ifndef VMX_SKIP_IDTR
1550 lidt [xSP]
1551 add xSP, xCB * 2
1552 %endif
1553 %ifndef VMX_SKIP_GDTR
1554 lgdt [xSP]
1555 add xSP, xCB * 2
1556 %endif
1557
1558 push xDI
1559 %ifndef VMX_SKIP_TR
1560 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1561 %else
1562 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1563 %endif
1564
1565 mov qword [xDI + CPUMCTX.eax], rax
1566 mov rax, SPECTRE_FILLER64
1567 mov qword [xDI + CPUMCTX.ebx], rbx
1568 mov rbx, rax
1569 mov qword [xDI + CPUMCTX.ecx], rcx
1570 mov rcx, rax
1571 mov qword [xDI + CPUMCTX.edx], rdx
1572 mov rdx, rax
1573 mov qword [xDI + CPUMCTX.esi], rsi
1574 mov rsi, rax
1575 mov qword [xDI + CPUMCTX.ebp], rbp
1576 mov rbp, rax
1577 mov qword [xDI + CPUMCTX.r8], r8
1578 mov r8, rax
1579 mov qword [xDI + CPUMCTX.r9], r9
1580 mov r9, rax
1581 mov qword [xDI + CPUMCTX.r10], r10
1582 mov r10, rax
1583 mov qword [xDI + CPUMCTX.r11], r11
1584 mov r11, rax
1585 mov qword [xDI + CPUMCTX.r12], r12
1586 mov r12, rax
1587 mov qword [xDI + CPUMCTX.r13], r13
1588 mov r13, rax
1589 mov qword [xDI + CPUMCTX.r14], r14
1590 mov r14, rax
1591 mov qword [xDI + CPUMCTX.r15], r15
1592 mov r15, rax
1593 mov rax, cr2
1594 mov qword [xDI + CPUMCTX.cr2], rax
1595
1596 pop xAX ; The guest rdi we pushed above
1597 mov qword [xDI + CPUMCTX.edi], rax
1598
1599 ; Fight spectre.
1600 INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
1601
1602 %ifndef VMX_SKIP_TR
1603 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1604 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
1605 ; @todo get rid of sgdt
1606 pop xBX ; Saved TR
1607 sub xSP, xCB * 2
1608 sgdt [xSP]
1609 mov xAX, xBX
1610 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1611 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1612 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1613 ltr bx
1614 add xSP, xCB * 2
1615 %endif
1616
1617 pop xAX ; Saved LDTR
1618 cmp eax, 0
1619 je %%skip_ldt_write64
1620 lldt ax
1621
1622%%skip_ldt_write64:
1623 pop xSI ; pCtx (needed in rsi by the macros below)
1624
1625 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1626 pop xDX ; Saved pVmcsCache
1627
1628 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
1629 ; vmread's will fail (only eflags.cf=1 will be set), but that shouldn't cause any
1630 ; trouble, it will only be less efficient.
1631 mov ecx, [xDX + VMXVMCSBATCHCACHE.Read.cValidEntries]
1632 cmp ecx, 0 ; Can't happen
1633 je %%no_cached_read64
1634 jmp %%cached_read64
1635
1636ALIGN(16)
1637%%cached_read64:
1638 dec xCX
1639 mov eax, [xDX + VMXVMCSBATCHCACHE.Read.aField + xCX * 4]
1640 vmread [xDX + VMXVMCSBATCHCACHE.Read.aFieldVal + xCX * 8], xAX
1641 cmp xCX, 0
1642 jnz %%cached_read64
1643%%no_cached_read64:
1644 %endif
1645
1646 ; Restore segment registers.
1647 MYPOPSEGS xAX, ax
1648
1649 ; Restore the host XCR0 if necessary.
1650 pop xCX
1651 test ecx, ecx
1652 jnz %%xcr0_after_skip
1653 pop xAX
1654 pop xDX
1655 xsetbv ; ecx is already zero.
1656%%xcr0_after_skip:
1657
1658 ; Restore general purpose registers.
1659 MYPOPAD
1660%endmacro
1661
1662
1663;;
1664; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
1665;
1666; @returns VBox status code
1667; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
1668; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1669; @param pVmcsCache msc:r8, gcc:rdx Pointer to the VMCS cache.
1670; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1671; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.
1672;
1673ALIGNCODE(16)
1674BEGINPROC VMXR0StartVM64
1675 push xBP
1676 mov xBP, xSP
1677
1678 pushf
1679 cli
1680
1681 ; Save all general purpose host registers.
1682 MYPUSHAD
1683
1684 ; First we have to save some final CPU context registers.
1685 lea r10, [.vmlaunch64_done wrt rip]
1686 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
1687 vmwrite rax, r10
1688 ; Note: assumes success!
1689
1690 ;
1691 ; Unify the input parameter registers.
1692 ;
1693%ifdef ASM_CALL64_GCC
1694 ; fResume already in rdi
1695 ; pCtx already in rsi
1696 mov rbx, rdx ; pVmcsCache
1697%else
1698 mov rdi, rcx ; fResume
1699 mov rsi, rdx ; pCtx
1700 mov rbx, r8 ; pVmcsCache
1701%endif
1702
1703 ;
1704 ; Save the host XCR0 and load the guest one if necessary.
1705 ; Note! Trashes rdx and rcx.
1706 ;
1707%ifdef ASM_CALL64_MSC
1708 mov rax, [xBP + 30h] ; pVCpu
1709%else
1710 mov rax, r8 ; pVCpu
1711%endif
1712 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1713 jz .xcr0_before_skip
1714
1715 xor ecx, ecx
1716 xgetbv ; Save the host one on the stack.
1717 push xDX
1718 push xAX
1719
1720 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1721 mov edx, [xSI + CPUMCTX.aXcr + 4]
1722 xor ecx, ecx ; paranoia
1723 xsetbv
1724
1725 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1726 jmp .xcr0_before_done
1727
1728.xcr0_before_skip:
1729 push 3fh ; indicate that we need not restore XCR0.
1730.xcr0_before_done:
1731
1732 ;
1733 ; Save segment registers.
1734 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1735 ;
1736 MYPUSHSEGS xAX, ax
1737
1738%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1739 mov ecx, [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries]
1740 cmp ecx, 0
1741 je .no_cached_writes
1742 mov edx, ecx
1743 mov ecx, 0
1744 jmp .cached_write
1745
1746ALIGN(16)
1747.cached_write:
1748 mov eax, [xBX + VMXVMCSBATCHCACHE.Write.aField + xCX * 4]
1749 vmwrite xAX, [xBX + VMXVMCSBATCHCACHE.Write.aFieldVal + xCX * 8]
1750 inc xCX
1751 cmp xCX, xDX
1752 jl .cached_write
1753
1754 mov dword [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries], 0
1755.no_cached_writes:
1756
1757 ; Save the pVmcsCache pointer.
1758 push xBX
1759%endif
1760
1761 ; Save the pCtx pointer.
1762 push xSI
1763
1764 ; Save host LDTR.
1765 xor eax, eax
1766 sldt ax
1767 push xAX
1768
1769%ifndef VMX_SKIP_TR
1770 ; The host TR limit is reset to 0x67; save & restore it manually.
1771 str eax
1772 push xAX
1773%endif
1774
1775%ifndef VMX_SKIP_GDTR
1776 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1777 sub xSP, xCB * 2
1778 sgdt [xSP]
1779%endif
1780%ifndef VMX_SKIP_IDTR
1781 sub xSP, xCB * 2
1782 sidt [xSP]
1783%endif
1784
1785 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1786 mov rbx, qword [xSI + CPUMCTX.cr2]
1787 mov rdx, cr2
1788 cmp rbx, rdx
1789 je .skip_cr2_write
1790 mov cr2, rbx
1791
1792.skip_cr2_write:
1793 mov eax, VMX_VMCS_HOST_RSP
1794 vmwrite xAX, xSP
1795 ; Note: assumes success!
1796 ; Don't mess with ESP anymore!!!
1797
1798 ; Fight spectre and similar.
1799 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY
1800
1801 ; Load guest general purpose registers.
1802 mov rax, qword [xSI + CPUMCTX.eax]
1803 mov rbx, qword [xSI + CPUMCTX.ebx]
1804 mov rcx, qword [xSI + CPUMCTX.ecx]
1805 mov rdx, qword [xSI + CPUMCTX.edx]
1806 mov rbp, qword [xSI + CPUMCTX.ebp]
1807 mov r8, qword [xSI + CPUMCTX.r8]
1808 mov r9, qword [xSI + CPUMCTX.r9]
1809 mov r10, qword [xSI + CPUMCTX.r10]
1810 mov r11, qword [xSI + CPUMCTX.r11]
1811 mov r12, qword [xSI + CPUMCTX.r12]
1812 mov r13, qword [xSI + CPUMCTX.r13]
1813 mov r14, qword [xSI + CPUMCTX.r14]
1814 mov r15, qword [xSI + CPUMCTX.r15]
1815
1816 ; Resume or start VM?
1817 cmp xDI, 0 ; fResume
1818
1819 ; Load guest rdi & rsi.
1820 mov rdi, qword [xSI + CPUMCTX.edi]
1821 mov rsi, qword [xSI + CPUMCTX.esi]
1822
1823 je .vmlaunch64_launch
1824
1825 vmresume
1826 jc near .vmxstart64_invalid_vmcs_ptr
1827 jz near .vmxstart64_start_failed
1828 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
1829
1830.vmlaunch64_launch:
1831 vmlaunch
1832 jc near .vmxstart64_invalid_vmcs_ptr
1833 jz near .vmxstart64_start_failed
1834 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1835
1836ALIGNCODE(16)
1837.vmlaunch64_done:
1838 RESTORE_STATE_VM64
1839 mov eax, VINF_SUCCESS
1840
1841.vmstart64_end:
1842 popf
1843 pop xBP
1844 ret
1845
1846.vmxstart64_invalid_vmcs_ptr:
1847 RESTORE_STATE_VM64
1848 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1849 jmp .vmstart64_end
1850
1851.vmxstart64_start_failed:
1852 RESTORE_STATE_VM64
1853 mov eax, VERR_VMX_UNABLE_TO_START_VM
1854 jmp .vmstart64_end
1855ENDPROC VMXR0StartVM64
1856%endif ; RT_ARCH_AMD64
1857
1858
1859;;
1860; Prepares for and executes VMRUN (32-bit guests)
1861;
1862; @returns VBox status code
1863; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
1864; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
1865; @param pCtx msc:r8,gcc:rdx Pointer to the guest CPU-context.
1866; @param pVM msc:r9,gcc:rcx The cross context VM structure.
1867; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1868;
1869ALIGNCODE(16)
1870BEGINPROC SVMR0VMRun
1871%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
1872 %ifdef ASM_CALL64_GCC
1873 push r8 ; pVCpu
1874 push rcx ; pVM
1875 push rdx ; pCtx
1876 push rsi ; HCPhysVmcb
1877 push rdi ; HCPhysVmcbHost
1878 %else
1879 mov rax, [rsp + 28h]
1880 push rax ; pVCpu
1881 push r9 ; pVM
1882 push r8 ; pCtx
1883 push rdx ; HCPhysVmcb
1884 push rcx ; HCPhysVmcbHost
1885 %endif
1886 push 0
1887%endif
1888 push xBP
1889 mov xBP, xSP
1890 pushf
1891
1892 ; Save all general purpose host registers.
1893 MYPUSHAD
1894
1895 ; Load pCtx into xSI.
1896 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1897
1898 ; Save the host XCR0 and load the guest one if necessary.
1899 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
1900 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1901 jz .xcr0_before_skip
1902
1903 xor ecx, ecx
1904 xgetbv ; Save the host XCR0 on the stack
1905 push xDX
1906 push xAX
1907
1908 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1909 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
1910 mov edx, [xSI + CPUMCTX.aXcr + 4]
1911 xor ecx, ecx ; paranoia
1912 xsetbv
1913
1914 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
1915 jmp .xcr0_before_done
1916
1917.xcr0_before_skip:
1918 push 3fh ; indicate that we need not restore XCR0
1919.xcr0_before_done:
1920
1921 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1922 push xSI
1923
1924 ; Save host fs, gs, sysenter msr etc.
1925 mov xAX, [xBP + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
1926 push xAX ; save for the vmload after vmrun
1927 vmsave
1928
1929 ; Fight spectre.
1930 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
1931
1932 ; Setup xAX for VMLOAD.
1933 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; x86: take low dword only)
1934
1935 ; Load guest general purpose registers.
1936 ; eax is loaded from the VMCB by VMRUN.
1937 mov ebx, [xSI + CPUMCTX.ebx]
1938 mov ecx, [xSI + CPUMCTX.ecx]
1939 mov edx, [xSI + CPUMCTX.edx]
1940 mov edi, [xSI + CPUMCTX.edi]
1941 mov ebp, [xSI + CPUMCTX.ebp]
1942 mov esi, [xSI + CPUMCTX.esi]
1943
1944 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1945 clgi
1946 sti
1947
1948 ; Load guest fs, gs, sysenter msr etc.
1949 vmload
1950
1951 ; Run the VM.
1952 vmrun
1953
1954 ; Save guest fs, gs, sysenter msr etc.
1955 vmsave
1956
1957 ; Load host fs, gs, sysenter msr etc.
1958 pop xAX ; load HCPhysVmcbHost (pushed above)
1959 vmload
1960
1961 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1962 cli
1963 stgi
1964
1965 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1966 pop xAX
1967
1968 mov [ss:xAX + CPUMCTX.ebx], ebx
1969 mov xBX, SPECTRE_FILLER
1970 mov [ss:xAX + CPUMCTX.ecx], ecx
1971 mov xCX, xBX
1972 mov [ss:xAX + CPUMCTX.edx], edx
1973 mov xDX, xBX
1974 mov [ss:xAX + CPUMCTX.esi], esi
1975 mov xSI, xBX
1976 mov [ss:xAX + CPUMCTX.edi], edi
1977 mov xDI, xBX
1978 mov [ss:xAX + CPUMCTX.ebp], ebp
1979 mov xBP, xBX
1980
1981 ; Fight spectre. Note! Trashes xAX!
1982 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT
1983
1984 ; Restore the host xcr0 if necessary.
1985 pop xCX
1986 test ecx, ecx
1987 jnz .xcr0_after_skip
1988 pop xAX
1989 pop xDX
1990 xsetbv ; ecx is already zero
1991.xcr0_after_skip:
1992
1993 ; Restore host general purpose registers.
1994 MYPOPAD
1995
1996 mov eax, VINF_SUCCESS
1997
1998 popf
1999 pop xBP
2000%ifdef RT_ARCH_AMD64
2001 add xSP, 6*xCB
2002%endif
2003 ret
2004ENDPROC SVMR0VMRun
2005
2006
2007%ifdef RT_ARCH_AMD64
2008;;
2009; Prepares for and executes VMRUN (64-bit guests)
2010;
2011; @returns VBox status code
2012; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
2013; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
2014; @param pCtx msc:r8,gcc:rdx Pointer to the guest-CPU context.
2015; @param pVM msc:r9,gcc:rcx The cross context VM structure.
2016; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
2017;
2018ALIGNCODE(16)
2019BEGINPROC SVMR0VMRun64
2020 ; Fake a cdecl stack frame
2021 %ifdef ASM_CALL64_GCC
2022 push r8 ;pVCpu
2023 push rcx ;pVM
2024 push rdx ;pCtx
2025 push rsi ;HCPhysVmcb
2026 push rdi ;HCPhysVmcbHost
2027 %else
2028 mov rax, [rsp + 28h]
2029 push rax ; rbp + 30h pVCpu
2030 push r9 ; rbp + 28h pVM
2031 push r8 ; rbp + 20h pCtx
2032 push rdx ; rbp + 18h HCPhysVmcb
2033 push rcx ; rbp + 10h HCPhysVmcbHost
2034 %endif
2035 push 0 ; rbp + 08h "fake ret addr"
2036 push rbp ; rbp + 00h
2037 mov rbp, rsp
2038 pushf
2039
2040 ; Manual save and restore:
2041 ; - General purpose registers except RIP, RSP, RAX
2042 ;
2043 ; Trashed:
2044 ; - CR2 (we don't care)
2045 ; - LDTR (reset to 0)
2046 ; - DRx (presumably not changed at all)
2047 ; - DR7 (reset to 0x400)
2048
2049 ; Save all general purpose host registers.
2050 MYPUSHAD
2051
2052 ; Load pCtx into xSI.
2053 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
2054
2055 ; Save the host XCR0 and load the guest one if necessary.
2056 mov rax, [xBP + 30h] ; pVCpu
2057 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
2058 jz .xcr0_before_skip
2059
2060 xor ecx, ecx
2061 xgetbv ; save the host XCR0 on the stack.
2062 push xDX
2063 push xAX
2064
2065 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
2066 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
2067 mov edx, [xSI + CPUMCTX.aXcr + 4]
2068 xor ecx, ecx ; paranoia
2069 xsetbv
2070
2071 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
2072 jmp .xcr0_before_done
2073
2074.xcr0_before_skip:
2075 push 3fh ; indicate that we need not restore XCR0
2076.xcr0_before_done:
2077
2078 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
2079 push rsi
2080
2081 ; Save host fs, gs, sysenter msr etc.
2082 mov rax, [rbp + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
2083 push rax ; save for the vmload after vmrun
2084 vmsave
2085
2086 ; Fight spectre.
2087 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
2088
2089 ; Setup rax for VMLOAD.
2090 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only)
2091
2092 ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
2093 mov rbx, qword [xSI + CPUMCTX.ebx]
2094 mov rcx, qword [xSI + CPUMCTX.ecx]
2095 mov rdx, qword [xSI + CPUMCTX.edx]
2096 mov rdi, qword [xSI + CPUMCTX.edi]
2097 mov rbp, qword [xSI + CPUMCTX.ebp]
2098 mov r8, qword [xSI + CPUMCTX.r8]
2099 mov r9, qword [xSI + CPUMCTX.r9]
2100 mov r10, qword [xSI + CPUMCTX.r10]
2101 mov r11, qword [xSI + CPUMCTX.r11]
2102 mov r12, qword [xSI + CPUMCTX.r12]
2103 mov r13, qword [xSI + CPUMCTX.r13]
2104 mov r14, qword [xSI + CPUMCTX.r14]
2105 mov r15, qword [xSI + CPUMCTX.r15]
2106 mov rsi, qword [xSI + CPUMCTX.esi]
2107
2108 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
2109 clgi
2110 sti
2111
2112 ; Load guest FS, GS, Sysenter MSRs etc.
2113 vmload
2114
2115 ; Run the VM.
2116 vmrun
2117
2118 ; Save guest fs, gs, sysenter msr etc.
2119 vmsave
2120
2121 ; Load host fs, gs, sysenter msr etc.
2122 pop rax ; load HCPhysVmcbHost (pushed above)
2123 vmload
2124
2125 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
2126 cli
2127 stgi
2128
2129 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
2130 pop rax
2131
2132 mov qword [rax + CPUMCTX.ebx], rbx
2133 mov rbx, SPECTRE_FILLER64
2134 mov qword [rax + CPUMCTX.ecx], rcx
2135 mov rcx, rbx
2136 mov qword [rax + CPUMCTX.edx], rdx
2137 mov rdx, rbx
2138 mov qword [rax + CPUMCTX.esi], rsi
2139 mov rsi, rbx
2140 mov qword [rax + CPUMCTX.edi], rdi
2141 mov rdi, rbx
2142 mov qword [rax + CPUMCTX.ebp], rbp
2143 mov rbp, rbx
2144 mov qword [rax + CPUMCTX.r8], r8
2145 mov r8, rbx
2146 mov qword [rax + CPUMCTX.r9], r9
2147 mov r9, rbx
2148 mov qword [rax + CPUMCTX.r10], r10
2149 mov r10, rbx
2150 mov qword [rax + CPUMCTX.r11], r11
2151 mov r11, rbx
2152 mov qword [rax + CPUMCTX.r12], r12
2153 mov r12, rbx
2154 mov qword [rax + CPUMCTX.r13], r13
2155 mov r13, rbx
2156 mov qword [rax + CPUMCTX.r14], r14
2157 mov r14, rbx
2158 mov qword [rax + CPUMCTX.r15], r15
2159 mov r15, rbx
2160
2161 ; Fight spectre. Note! Trashes rax!
2162 INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT
2163
2164 ; Restore the host xcr0 if necessary.
2165 pop xCX
2166 test ecx, ecx
2167 jnz .xcr0_after_skip
2168 pop xAX
2169 pop xDX
2170 xsetbv ; ecx is already zero
2171.xcr0_after_skip:
2172
2173 ; Restore host general purpose registers.
2174 MYPOPAD
2175
2176 mov eax, VINF_SUCCESS
2177
2178 popf
2179 pop rbp
2180 add rsp, 6 * xCB
2181 ret
2182ENDPROC SVMR0VMRun64
2183%endif ; RT_ARCH_AMD64
2184