VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@ 46926

Last change on this file was 46926, checked in by vboxsync, 12 years ago

VMM: Annoying assembler warning.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 44.2 KB
1; $Id: HMR0A.asm 46926 2013-07-03 11:26:20Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "iprt/x86.mac"
26%include "HMInternal.mac"
27
28%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
29 %macro vmwrite 2,
30 int3
31 %endmacro
32 %define vmlaunch int3
33 %define vmresume int3
34 %define vmsave int3
35 %define vmload int3
36 %define vmrun int3
37 %define clgi int3
38 %define stgi int3
39 %macro invlpga 2,
40 int3
41 %endmacro
42%endif
43
44;*******************************************************************************
45;* Defined Constants And Macros *
46;*******************************************************************************
47%ifdef RT_ARCH_AMD64
48 %define MAYBE_64_BIT
49%endif
50%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
51 %define MAYBE_64_BIT
52%else
53 %ifdef RT_OS_DARWIN
54 %ifdef RT_ARCH_AMD64
55 ;;
56 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
57 ; risk loading a stale LDT value or something invalid.
58 %define HM_64_BIT_USE_NULL_SEL
59 %endif
60 %endif
61%endif
62
63%ifndef VBOX_WITH_OLD_VTX_CODE
64 %ifdef RT_ARCH_AMD64
65 %define VBOX_SKIP_RESTORE_SEG
66 %endif
67%endif
68
69;; The offset of the XMM registers in X86FXSTATE.
70; Use define because I'm too lazy to convert the struct.
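; (160 is where XMM0 starts in the FXSAVE/FXRSTOR memory image that X86FXSTATE
; mirrors; XMM0..XMM15 occupy bytes 160..415 of that image.)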
71%define XMM_OFF_IN_X86FXSTATE 160
72
73;; @def MYPUSHAD
74; Macro generating an equivalent to pushad
75
76;; @def MYPOPAD
77; Macro generating an equivalent to popad
78
79;; @def MYPUSHSEGS
80; Macro saving all segment registers on the stack.
81; @param 1 full width register name
82; @param 2 16-bit register name for \a 1.
83
84;; @def MYPOPSEGS
85; Macro restoring all segment registers on the stack
86; @param 1 full width register name
87; @param 2 16-bit register name for \a 1.
88
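; A minimal usage sketch for the save/restore macros above (illustrative only;
; the real call sites are in HMR0Mixed.mac and the register pair shown here is
; an assumption):
;   MYPUSHAD                ; save the callee-saved GPRs
;   MYPUSHSEGS xAX, ax      ; save segment registers (and the FS/GS base MSRs on 64-bit)
;   ...                     ; segment/GPR-trashing world-switch code
;   MYPOPSEGS  xAX, ax      ; restore segments in reverse order
;   MYPOPAD                 ; restore the GPRs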
89%ifdef MAYBE_64_BIT
 90 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
91 %macro LOADGUESTMSR 2
92 mov rcx, %1
93 rdmsr
94 push rdx
95 push rax
96 mov edx, dword [xSI + %2 + 4]
97 mov eax, dword [xSI + %2]
98 wrmsr
99 %endmacro
100
 101 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
102 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
103 %macro LOADHOSTMSREX 2
104 mov rcx, %1
105 rdmsr
106 mov dword [xSI + %2], eax
107 mov dword [xSI + %2 + 4], edx
108 pop rax
109 pop rdx
110 wrmsr
111 %endmacro
112
113 ; Load the corresponding host MSR (trashes rdx & rcx)
114 %macro LOADHOSTMSR 1
115 mov rcx, %1
116 pop rax
117 pop rdx
118 wrmsr
119 %endmacro
120%endif
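; A usage sketch of the MSR macros above (illustrative only; the real
; invocations live in HMR0Mixed.mac, and the CPUMCTX field name is an
; assumption; the macros address the guest context through xSI):
;   LOADGUESTMSR  MSR_K8_LSTAR, CPUMCTX.msrLSTAR   ; push host value, load guest value
;   ...                                            ; guest runs here
;   LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR   ; stash guest value, pop and restore host value
; Since the host values are kept on the stack, the LOADHOSTMSR/LOADHOSTMSREX
; calls must be issued in the reverse order of the LOADGUESTMSR calls.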
121
122%ifdef ASM_CALL64_GCC
123 %macro MYPUSHAD64 0
124 push r15
125 push r14
126 push r13
127 push r12
128 push rbx
129 %endmacro
130 %macro MYPOPAD64 0
131 pop rbx
132 pop r12
133 pop r13
134 pop r14
135 pop r15
136 %endmacro
137
138%else ; ASM_CALL64_MSC
139 %macro MYPUSHAD64 0
140 push r15
141 push r14
142 push r13
143 push r12
144 push rbx
145 push rsi
146 push rdi
147 %endmacro
148 %macro MYPOPAD64 0
149 pop rdi
150 pop rsi
151 pop rbx
152 pop r12
153 pop r13
154 pop r14
155 pop r15
156 %endmacro
157%endif
158
159%ifdef VBOX_SKIP_RESTORE_SEG
160%macro MYPUSHSEGS64 2
161%endmacro
162
163%macro MYPOPSEGS64 2
164%endmacro
165%else ; !VBOX_SKIP_RESTORE_SEG
166; trashes rax, rdx & rcx
167%macro MYPUSHSEGS64 2
168 %ifndef HM_64_BIT_USE_NULL_SEL
169 mov %2, es
170 push %1
171 mov %2, ds
172 push %1
173 %endif
174
175 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't restore it, so we must save and restore it ourselves.
176 mov ecx, MSR_K8_FS_BASE
177 rdmsr
178 push rdx
179 push rax
180 %ifndef HM_64_BIT_USE_NULL_SEL
181 push fs
182 %endif
183
184 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
185 mov ecx, MSR_K8_GS_BASE
186 rdmsr
187 push rdx
188 push rax
189 %ifndef HM_64_BIT_USE_NULL_SEL
190 push gs
191 %endif
192%endmacro
193
194; trashes rax, rdx & rcx
195%macro MYPOPSEGS64 2
196 ; Note: do not step through this code with a debugger!
197 %ifndef HM_64_BIT_USE_NULL_SEL
198 xor eax, eax
199 mov ds, ax
200 mov es, ax
201 mov fs, ax
202 mov gs, ax
203 %endif
204
205 %ifndef HM_64_BIT_USE_NULL_SEL
206 pop gs
207 %endif
208 pop rax
209 pop rdx
210 mov ecx, MSR_K8_GS_BASE
211 wrmsr
212
213 %ifndef HM_64_BIT_USE_NULL_SEL
214 pop fs
215 %endif
216 pop rax
217 pop rdx
218 mov ecx, MSR_K8_FS_BASE
219 wrmsr
220 ; Now it's safe to step again
221
222 %ifndef HM_64_BIT_USE_NULL_SEL
223 pop %1
224 mov ds, %2
225 pop %1
226 mov es, %2
227 %endif
228%endmacro
229%endif ; VBOX_SKIP_RESTORE_SEG
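; For reference, MYPUSHSEGS64 leaves the following qwords on the stack (top of
; stack first): gs selector, GS base (low half), GS base (high half),
; fs selector, FS base (low half), FS base (high half), ds selector, es
; selector.  MYPOPSEGS64 unwinds them in exactly that order, which is why it
; restores GS before FS and the data selectors last.  With
; HM_64_BIT_USE_NULL_SEL only the FS and GS base halves are pushed.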
230
231%macro MYPUSHAD32 0
232 pushad
233%endmacro
234%macro MYPOPAD32 0
235 popad
236%endmacro
237
238%macro MYPUSHSEGS32 2
239 push ds
240 push es
241 push fs
242 push gs
243%endmacro
244%macro MYPOPSEGS32 2
245 pop gs
246 pop fs
247 pop es
248 pop ds
249%endmacro
250
251
252;*******************************************************************************
253;* External Symbols *
254;*******************************************************************************
255%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
256extern NAME(SUPR0AbsIs64bit)
257extern NAME(SUPR0Abs64bitKernelCS)
258extern NAME(SUPR0Abs64bitKernelSS)
259extern NAME(SUPR0Abs64bitKernelDS)
260extern NAME(SUPR0AbsKernelCS)
261%endif
262%ifdef VBOX_WITH_KERNEL_USING_XMM
263extern NAME(CPUMIsGuestFPUStateActive)
264%endif
265
266
267;*******************************************************************************
268;* Global Variables *
269;*******************************************************************************
270%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
271BEGINDATA
272;;
273; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
274; needing to clobber a register. (This trick doesn't quite work for PE btw.
275; but that's not relevant atm.)
276GLOBALNAME g_fVMXIs64bitHost
277 dd NAME(SUPR0AbsIs64bit)
278%endif
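; When the flag above is non-zero, the 32-bit code in this file switches into
; the 64-bit kernel code segment with a hand-assembled far jump:
;     db 0xea                                         ; jmp far ptr16:32
;     dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
; The first dword is the 32-bit offset; the low word of the second dword
; supplies the CS selector (SUPR0Abs64bitKernelCS is an absolute symbol), and
; its upper two bytes are never executed.  The return trip is done with
; 'jmp far [.fpret wrt rip]' through a 16:32 pointer stored next to the code.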
279
280
281BEGINCODE
282
283
284;/**
285; * Restores host-state fields.
286; *
287; * @returns VBox status code
288; * @param u32RestoreHostFlags x86: [ebp + 08h] msc: rcx gcc: rdi u32RestoreHost - RestoreHost flags.
289; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi pRestoreHost - Pointer to the RestoreHost struct.
290; */
291ALIGNCODE(16)
292BEGINPROC VMXRestoreHostState
293%ifdef RT_ARCH_AMD64
294 %ifndef ASM_CALL64_GCC
295 ; On msc R10, R11 are scratch, RDI and RSI are not. So we must save and restore them!
296 mov r10, rdi
297 mov r11, rsi
298 ; Switch to common register usage (i.e. gcc's in this function)
299 mov rdi, rcx
300 mov rsi, rdx
301 %endif
302
303 test edi, VMX_RESTORE_HOST_GDTR
304 jz near .test_idtr
305 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
306
307.test_idtr:
308 test edi, VMX_RESTORE_HOST_IDTR
309 jz near .test_ds
310 lidt [rsi + VMXRESTOREHOST.HostIdtr]
311
312.test_ds:
313 test edi, VMX_RESTORE_HOST_SEL_DS
314 jz near .test_es
315 mov ax, word [rsi + VMXRESTOREHOST.uHostSelDS]
316 mov ds, ax
317
318.test_es:
319 test edi, VMX_RESTORE_HOST_SEL_ES
320 jz near .test_ldtr
321 mov ax, word [rsi + VMXRESTOREHOST.uHostSelES]
322 mov es, ax
323
324.test_ldtr:
325 test edi, VMX_RESTORE_HOST_SEL_LDTR
326 jz near .test_tr
327 mov ax, word [rsi + VMXRESTOREHOST.uHostSelLDTR]
328 lldt ax
329
330.test_tr:
331 test edi, VMX_RESTORE_HOST_SEL_TR
332 jz near .test_fs
333 mov dx, word [rsi + VMXRESTOREHOST.uHostSelTR]
334 xor xAX, xAX
335 mov ax, dx
336 and al, ~(X86_SEL_LDT | X86_SEL_RPL) ; Mask away TI and RPL bits leaving only the descriptor offset.
337 add xAX, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
338 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS (bits 0-7=base, bit 9=busy bit).
339 ltr dx
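 ; A worked example of the sequence above: if the host TR selector is 0040h
 ; (TI=0, RPL=0), masking X86_SEL_LDT|X86_SEL_RPL leaves 0040h, so xAX ends up
 ; pointing at GDT base + 40h.  Bit 9 of the dword at offset 4 of that TSS
 ; descriptor is the type bit distinguishing a busy TSS (type 1011b) from an
 ; available one (1001b); clearing it is required because ltr raises #GP on a
 ; descriptor that is still marked busy.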
340
341.test_fs:
342 ; We're only restoring the selector. The base is valid and restored by VT-x. If we get an interrupt in between FS & GS
343 ; below, we are fine as the base is what is relevant in 64-bit mode. We need to disable interrupts only during
344 ; writing of the selector as that zaps (trashes) the upper-part of the base until we wrmsr the full 64-bit base.
345
346 test edi, VMX_RESTORE_HOST_SEL_FS
347 jz near .test_gs
348 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
349 cli ; Disable interrupts as mov fs, ax will zap the upper part of the base
350 mov fs, ax
351 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
352 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
353 mov ecx, MSR_K8_FS_BASE
354 wrmsr
355 sti ; Re-enable interrupts as fsbase is consistent now
356
357.test_gs:
358 test edi, VMX_RESTORE_HOST_SEL_GS
359 jz near .restore_success
360 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
361 cli ; Disable interrupts as mov gs, ax will zap the upper part of the base
362 mov gs, ax
363 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
364 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
365 mov ecx, MSR_K8_GS_BASE
366 wrmsr
367 sti ; Re-enable interrupts as gsbase is consistent now
368
369.restore_success:
370 mov eax, VINF_SUCCESS
371 %ifndef ASM_CALL64_GCC
372 ; Restore RDI and RSI on MSC.
373 mov rdi, r10
374 mov rsi, r11
375 %endif
376%else ; RT_ARCH_X86
377 mov eax, VERR_NOT_IMPLEMENTED
378%endif
379 ret
380ENDPROC VMXRestoreHostState
381
382
383;/**
384; * Executes VMWRITE, 64-bit value.
385; *
386; * @returns VBox status code
387; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
388; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
389; */
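; Note: VMWRITE (like the other VMX instructions in this file) reports failure
; through RFLAGS: CF=1 means VMfailInvalid (no current VMCS), ZF=1 means
; VMfailValid (an error number is available in the VM-instruction error field),
; and both clear means success.  That is what the jnc/jnz sequences below turn
; into VERR_VMX_INVALID_VMCS_PTR / VERR_VMX_INVALID_VMCS_FIELD / VINF_SUCCESS.
;
; A minimal call sketch (illustrative only; assumes the MSC x64 convention and
; that VMX_VMCS_GUEST_RIP is available from hm_vmx.mac):
;   mov     ecx, VMX_VMCS_GUEST_RIP     ; idxField
;   mov     rdx, rbx                    ; u64Data (value to write, here assumed in rbx)
;   call    NAME(VMXWriteVmcs64)
;   test    eax, eax                    ; VINF_SUCCESS is 0
;   jnz     .vmwrite_failed             ; hypothetical error label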
390ALIGNCODE(16)
391BEGINPROC VMXWriteVmcs64
392%ifdef RT_ARCH_AMD64
393 %ifdef ASM_CALL64_GCC
394 and edi, 0ffffffffh
395 xor rax, rax
396 vmwrite rdi, rsi
397 %else
398 and ecx, 0ffffffffh
399 xor rax, rax
400 vmwrite rcx, rdx
401 %endif
402%else ; RT_ARCH_X86
403 mov ecx, [esp + 4] ; idxField
404 lea edx, [esp + 8] ; &u64Data
405 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
406 cmp byte [NAME(g_fVMXIs64bitHost)], 0
407 jz .legacy_mode
408 db 0xea ; jmp far .sixtyfourbit_mode
409 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
410.legacy_mode:
411 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
412 vmwrite ecx, [edx] ; low dword
413 jz .done
414 jc .done
415 inc ecx
416 xor eax, eax
417 vmwrite ecx, [edx + 4] ; high dword
418.done:
419%endif ; RT_ARCH_X86
420 jnc .valid_vmcs
421 mov eax, VERR_VMX_INVALID_VMCS_PTR
422 ret
423.valid_vmcs:
424 jnz .the_end
425 mov eax, VERR_VMX_INVALID_VMCS_FIELD
426.the_end:
427 ret
428
429%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
430ALIGNCODE(16)
431BITS 64
432.sixtyfourbit_mode:
433 and edx, 0ffffffffh
434 and ecx, 0ffffffffh
435 xor eax, eax
436 vmwrite rcx, [rdx]
437 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
438 cmovz eax, r8d
439 mov r9d, VERR_VMX_INVALID_VMCS_PTR
440 cmovc eax, r9d
441 jmp far [.fpret wrt rip]
442.fpret: ; 16:32 Pointer to .the_end.
443 dd .the_end, NAME(SUPR0AbsKernelCS)
444BITS 32
445%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
446ENDPROC VMXWriteVmcs64
447
448
449;/**
450; * Executes VMREAD, 64-bit value
451; *
452; * @returns VBox status code
453; * @param idxField VMCS index
454; * @param pData Ptr to store VM field value
455; */
456;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
457ALIGNCODE(16)
458BEGINPROC VMXReadVmcs64
459%ifdef RT_ARCH_AMD64
460 %ifdef ASM_CALL64_GCC
461 and edi, 0ffffffffh
462 xor rax, rax
463 vmread [rsi], rdi
464 %else
465 and ecx, 0ffffffffh
466 xor rax, rax
467 vmread [rdx], rcx
468 %endif
469%else ; RT_ARCH_X86
470 mov ecx, [esp + 4] ; idxField
471 mov edx, [esp + 8] ; pData
472 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
473 cmp byte [NAME(g_fVMXIs64bitHost)], 0
474 jz .legacy_mode
475 db 0xea ; jmp far .sixtyfourbit_mode
476 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
477.legacy_mode:
478 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
479 vmread [edx], ecx ; low dword
480 jz .done
481 jc .done
482 inc ecx
483 xor eax, eax
484 vmread [edx + 4], ecx ; high dword
485.done:
486%endif ; RT_ARCH_X86
487 jnc .valid_vmcs
488 mov eax, VERR_VMX_INVALID_VMCS_PTR
489 ret
490.valid_vmcs:
491 jnz .the_end
492 mov eax, VERR_VMX_INVALID_VMCS_FIELD
493.the_end:
494 ret
495
496%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
497ALIGNCODE(16)
498BITS 64
499.sixtyfourbit_mode:
500 and edx, 0ffffffffh
501 and ecx, 0ffffffffh
502 xor eax, eax
503 vmread [rdx], rcx
504 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
505 cmovz eax, r8d
506 mov r9d, VERR_VMX_INVALID_VMCS_PTR
507 cmovc eax, r9d
508 jmp far [.fpret wrt rip]
509.fpret: ; 16:32 Pointer to .the_end.
510 dd .the_end, NAME(SUPR0AbsKernelCS)
511BITS 32
512%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
513ENDPROC VMXReadVmcs64
514
515
516;/**
517; * Executes VMREAD, 32-bit value.
518; *
519; * @returns VBox status code
520; * @param idxField VMCS index
521; * @param pu32Data Ptr to store VM field value
522; */
523;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
524ALIGNCODE(16)
525BEGINPROC VMXReadVmcs32
526%ifdef RT_ARCH_AMD64
527 %ifdef ASM_CALL64_GCC
528 and edi, 0ffffffffh
529 xor rax, rax
530 vmread r10, rdi
531 mov [rsi], r10d
532 %else
533 and ecx, 0ffffffffh
534 xor rax, rax
535 vmread r10, rcx
536 mov [rdx], r10d
537 %endif
538%else ; RT_ARCH_X86
539 mov ecx, [esp + 4] ; idxField
540 mov edx, [esp + 8] ; pu32Data
541 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
542 cmp byte [NAME(g_fVMXIs64bitHost)], 0
543 jz .legacy_mode
544 db 0xea ; jmp far .sixtyfourbit_mode
545 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
546.legacy_mode:
547 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
548 xor eax, eax
549 vmread [edx], ecx
550%endif ; RT_ARCH_X86
551 jnc .valid_vmcs
552 mov eax, VERR_VMX_INVALID_VMCS_PTR
553 ret
554.valid_vmcs:
555 jnz .the_end
556 mov eax, VERR_VMX_INVALID_VMCS_FIELD
557.the_end:
558 ret
559
560%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
561ALIGNCODE(16)
562BITS 64
563.sixtyfourbit_mode:
564 and edx, 0ffffffffh
565 and ecx, 0ffffffffh
566 xor eax, eax
567 vmread r10, rcx
568 mov [rdx], r10d
569 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
570 cmovz eax, r8d
571 mov r9d, VERR_VMX_INVALID_VMCS_PTR
572 cmovc eax, r9d
573 jmp far [.fpret wrt rip]
574.fpret: ; 16:32 Pointer to .the_end.
575 dd .the_end, NAME(SUPR0AbsKernelCS)
576BITS 32
577%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
578ENDPROC VMXReadVmcs32
579
580
581;/**
582; * Executes VMWRITE, 32-bit value.
583; *
584; * @returns VBox status code
585; * @param idxField VMCS index
586; * @param u32Data The 32-bit value to write
587; */
588;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
589ALIGNCODE(16)
590BEGINPROC VMXWriteVmcs32
591%ifdef RT_ARCH_AMD64
592 %ifdef ASM_CALL64_GCC
593 and edi, 0ffffffffh
594 and esi, 0ffffffffh
595 xor rax, rax
596 vmwrite rdi, rsi
597 %else
598 and ecx, 0ffffffffh
599 and edx, 0ffffffffh
600 xor rax, rax
601 vmwrite rcx, rdx
602 %endif
603%else ; RT_ARCH_X86
604 mov ecx, [esp + 4] ; idxField
605 mov edx, [esp + 8] ; u32Data
606 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
607 cmp byte [NAME(g_fVMXIs64bitHost)], 0
608 jz .legacy_mode
609 db 0xea ; jmp far .sixtyfourbit_mode
610 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
611.legacy_mode:
612 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
613 xor eax, eax
614 vmwrite ecx, edx
615%endif ; RT_ARCH_X86
616 jnc .valid_vmcs
617 mov eax, VERR_VMX_INVALID_VMCS_PTR
618 ret
619.valid_vmcs:
620 jnz .the_end
621 mov eax, VERR_VMX_INVALID_VMCS_FIELD
622.the_end:
623 ret
624
625%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
626ALIGNCODE(16)
627BITS 64
628.sixtyfourbit_mode:
629 and edx, 0ffffffffh
630 and ecx, 0ffffffffh
631 xor eax, eax
632 vmwrite rcx, rdx
633 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
634 cmovz eax, r8d
635 mov r9d, VERR_VMX_INVALID_VMCS_PTR
636 cmovc eax, r9d
637 jmp far [.fpret wrt rip]
638.fpret: ; 16:32 Pointer to .the_end.
639 dd .the_end, NAME(SUPR0AbsKernelCS)
640BITS 32
641%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
642ENDPROC VMXWriteVmcs32
643
644
645;/**
646; * Executes VMXON
647; *
648; * @returns VBox status code
649; * @param HCPhysVMXOn Physical address of VMXON structure
650; */
651;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
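; VMXON (like VMCLEAR and VMPTRLD below) only accepts a 64-bit memory operand,
; so in the AMD64 paths the physical address argument is pushed onto the stack
; and referenced from there; the 'add rsp, 8' at .the_end undoes that push.
; In the 32-bit path the argument is already in memory at [esp + 4].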
652BEGINPROC VMXEnable
653%ifdef RT_ARCH_AMD64
654 xor rax, rax
655 %ifdef ASM_CALL64_GCC
656 push rdi
657 %else
658 push rcx
659 %endif
660 vmxon [rsp]
661%else ; RT_ARCH_X86
662 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
663 cmp byte [NAME(g_fVMXIs64bitHost)], 0
664 jz .legacy_mode
665 db 0xea ; jmp far .sixtyfourbit_mode
666 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
667.legacy_mode:
668 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
669 xor eax, eax
670 vmxon [esp + 4]
671%endif ; RT_ARCH_X86
672 jnc .good
673 mov eax, VERR_VMX_INVALID_VMXON_PTR
674 jmp .the_end
675
676.good:
677 jnz .the_end
678 mov eax, VERR_VMX_VMXON_FAILED
679
680.the_end:
681%ifdef RT_ARCH_AMD64
682 add rsp, 8
683%endif
684 ret
685
686%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
687ALIGNCODE(16)
688BITS 64
689.sixtyfourbit_mode:
690 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
691 and edx, 0ffffffffh
692 xor eax, eax
693 vmxon [rdx]
694 mov r8d, VERR_VMX_VMXON_FAILED
695 cmovz eax, r8d
696 mov r9d, VERR_VMX_INVALID_VMXON_PTR
697 cmovc eax, r9d
698 jmp far [.fpret wrt rip]
699.fpret: ; 16:32 Pointer to .the_end.
700 dd .the_end, NAME(SUPR0AbsKernelCS)
701BITS 32
702%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
703ENDPROC VMXEnable
704
705
706;/**
707; * Executes VMXOFF
708; */
709;DECLASM(void) VMXDisable(void);
710BEGINPROC VMXDisable
711%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
712 cmp byte [NAME(g_fVMXIs64bitHost)], 0
713 jz .legacy_mode
714 db 0xea ; jmp far .sixtyfourbit_mode
715 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
716.legacy_mode:
717%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
718 vmxoff
719.the_end:
720 ret
721
722%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
723ALIGNCODE(16)
724BITS 64
725.sixtyfourbit_mode:
726 vmxoff
727 jmp far [.fpret wrt rip]
728.fpret: ; 16:32 Pointer to .the_end.
729 dd .the_end, NAME(SUPR0AbsKernelCS)
730BITS 32
731%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
732ENDPROC VMXDisable
733
734
735;/**
736; * Executes VMCLEAR
737; *
738; * @returns VBox status code
739; * @param HCPhysVmcs Physical address of VM control structure
740; */
741;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVmcs);
742ALIGNCODE(16)
743BEGINPROC VMXClearVMCS
744%ifdef RT_ARCH_AMD64
745 xor rax, rax
746 %ifdef ASM_CALL64_GCC
747 push rdi
748 %else
749 push rcx
750 %endif
751 vmclear [rsp]
752%else ; RT_ARCH_X86
753 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
754 cmp byte [NAME(g_fVMXIs64bitHost)], 0
755 jz .legacy_mode
756 db 0xea ; jmp far .sixtyfourbit_mode
757 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
758.legacy_mode:
759 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
760 xor eax, eax
761 vmclear [esp + 4]
762%endif ; RT_ARCH_X86
763 jnc .the_end
764 mov eax, VERR_VMX_INVALID_VMCS_PTR
765.the_end:
766%ifdef RT_ARCH_AMD64
767 add rsp, 8
768%endif
769 ret
770
771%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
772ALIGNCODE(16)
773BITS 64
774.sixtyfourbit_mode:
775 lea rdx, [rsp + 4] ; &HCPhysVmcs
776 and edx, 0ffffffffh
777 xor eax, eax
778 vmclear [rdx]
779 mov r9d, VERR_VMX_INVALID_VMCS_PTR
780 cmovc eax, r9d
781 jmp far [.fpret wrt rip]
782.fpret: ; 16:32 Pointer to .the_end.
783 dd .the_end, NAME(SUPR0AbsKernelCS)
784BITS 32
785%endif
786ENDPROC VMXClearVMCS
787
788
789;/**
790; * Executes VMPTRLD
791; *
792; * @returns VBox status code
793; * @param HCPhysVmcs Physical address of VMCS structure
794; */
795;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVmcs);
796ALIGNCODE(16)
797BEGINPROC VMXActivateVMCS
798%ifdef RT_ARCH_AMD64
799 xor rax, rax
800 %ifdef ASM_CALL64_GCC
801 push rdi
802 %else
803 push rcx
804 %endif
805 vmptrld [rsp]
806%else
807 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
808 cmp byte [NAME(g_fVMXIs64bitHost)], 0
809 jz .legacy_mode
810 db 0xea ; jmp far .sixtyfourbit_mode
811 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
812.legacy_mode:
813 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
814 xor eax, eax
815 vmptrld [esp + 4]
816%endif
817 jnc .the_end
818 mov eax, VERR_VMX_INVALID_VMCS_PTR
819.the_end:
820%ifdef RT_ARCH_AMD64
821 add rsp, 8
822%endif
823 ret
824
825%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
826ALIGNCODE(16)
827BITS 64
828.sixtyfourbit_mode:
829 lea rdx, [rsp + 4] ; &HCPhysVmcs
830 and edx, 0ffffffffh
831 xor eax, eax
832 vmptrld [rdx]
833 mov r9d, VERR_VMX_INVALID_VMCS_PTR
834 cmovc eax, r9d
835 jmp far [.fpret wrt rip]
836.fpret: ; 16:32 Pointer to .the_end.
837 dd .the_end, NAME(SUPR0AbsKernelCS)
838BITS 32
839%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
840ENDPROC VMXActivateVMCS
841
842
843;/**
844; * Executes VMPTRST
845; *
846; * @returns VBox status code
847; * @param pVMCS [esp + 04h] gcc:rdi msc:rcx Where to store the current VMCS pointer.
848; */
849;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
850BEGINPROC VMXGetActivateVMCS
851%ifdef RT_OS_OS2
852 mov eax, VERR_NOT_SUPPORTED
853 ret
854%else
855 %ifdef RT_ARCH_AMD64
856 %ifdef ASM_CALL64_GCC
857 vmptrst qword [rdi]
858 %else
859 vmptrst qword [rcx]
860 %endif
861 %else
862 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
863 cmp byte [NAME(g_fVMXIs64bitHost)], 0
864 jz .legacy_mode
865 db 0xea ; jmp far .sixtyfourbit_mode
866 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
867.legacy_mode:
868 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
869 vmptrst qword [esp+04h]
870 %endif
871 xor eax, eax
872.the_end:
873 ret
874
875 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
876ALIGNCODE(16)
877BITS 64
878.sixtyfourbit_mode:
879 lea rdx, [rsp + 4] ; &HCPhysVmcs
880 and edx, 0ffffffffh
881 vmptrst qword [rdx]
882 xor eax, eax
883 jmp far [.fpret wrt rip]
884.fpret: ; 16:32 Pointer to .the_end.
885 dd .the_end, NAME(SUPR0AbsKernelCS)
886BITS 32
887 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
888%endif
889ENDPROC VMXGetActivateVMCS
890
891;/**
892; * Invalidate a page using invept
893; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
894; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
895; */
896;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
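; The DB lines below hand-assemble INVEPT, presumably because the assemblers of
; the time lacked the mnemonic.  66 0F 38 80 is the INVEPT opcode and the last
; byte is the ModRM: 3Eh selects (rdi, [rsi]) and 0Ah selects (rcx, [rdx]),
; which reads as (ecx, [edx]) in 32-bit code.  VMXR0InvVPID below uses the same
; trick with opcode 66 0F 38 81 for INVVPID.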
897BEGINPROC VMXR0InvEPT
898%ifdef RT_ARCH_AMD64
899 %ifdef ASM_CALL64_GCC
900 and edi, 0ffffffffh
901 xor rax, rax
902; invept rdi, qword [rsi]
903 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
904 %else
905 and ecx, 0ffffffffh
906 xor rax, rax
907; invept rcx, qword [rdx]
908 DB 0x66, 0x0F, 0x38, 0x80, 0xA
909 %endif
910%else
911 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
912 cmp byte [NAME(g_fVMXIs64bitHost)], 0
913 jz .legacy_mode
914 db 0xea ; jmp far .sixtyfourbit_mode
915 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
916.legacy_mode:
917 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
918 mov ecx, [esp + 4]
919 mov edx, [esp + 8]
920 xor eax, eax
921; invept ecx, qword [edx]
922 DB 0x66, 0x0F, 0x38, 0x80, 0xA
923%endif
924 jnc .valid_vmcs
925 mov eax, VERR_VMX_INVALID_VMCS_PTR
926 ret
927.valid_vmcs:
928 jnz .the_end
929 mov eax, VERR_INVALID_PARAMETER
930.the_end:
931 ret
932
933%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
934ALIGNCODE(16)
935BITS 64
936.sixtyfourbit_mode:
937 and esp, 0ffffffffh
938 mov ecx, [rsp + 4] ; enmFlush
939 mov edx, [rsp + 8] ; pDescriptor
940 xor eax, eax
941; invept rcx, qword [rdx]
942 DB 0x66, 0x0F, 0x38, 0x80, 0xA
943 mov r8d, VERR_INVALID_PARAMETER
944 cmovz eax, r8d
945 mov r9d, VERR_VMX_INVALID_VMCS_PTR
946 cmovc eax, r9d
947 jmp far [.fpret wrt rip]
948.fpret: ; 16:32 Pointer to .the_end.
949 dd .the_end, NAME(SUPR0AbsKernelCS)
950BITS 32
951%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
952ENDPROC VMXR0InvEPT
953
954
955;/**
956; * Invalidate a page using invvpid
957; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
958; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
959; */
960;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
961BEGINPROC VMXR0InvVPID
962%ifdef RT_ARCH_AMD64
963 %ifdef ASM_CALL64_GCC
964 and edi, 0ffffffffh
965 xor rax, rax
966; invvpid rdi, qword [rsi]
967 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
968 %else
969 and ecx, 0ffffffffh
970 xor rax, rax
971; invvpid rcx, qword [rdx]
972 DB 0x66, 0x0F, 0x38, 0x81, 0xA
973 %endif
974%else
975 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
976 cmp byte [NAME(g_fVMXIs64bitHost)], 0
977 jz .legacy_mode
978 db 0xea ; jmp far .sixtyfourbit_mode
979 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
980.legacy_mode:
981 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
982 mov ecx, [esp + 4]
983 mov edx, [esp + 8]
984 xor eax, eax
985; invvpid ecx, qword [edx]
986 DB 0x66, 0x0F, 0x38, 0x81, 0xA
987%endif
988 jnc .valid_vmcs
989 mov eax, VERR_VMX_INVALID_VMCS_PTR
990 ret
991.valid_vmcs:
992 jnz .the_end
993 mov eax, VERR_INVALID_PARAMETER
994.the_end:
995 ret
996
997%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
998ALIGNCODE(16)
999BITS 64
1000.sixtyfourbit_mode:
1001 and esp, 0ffffffffh
1002 mov ecx, [rsp + 4] ; enmFlush
1003 mov edx, [rsp + 8] ; pDescriptor
1004 xor eax, eax
1005; invvpid rcx, qword [rdx]
1006 DB 0x66, 0x0F, 0x38, 0x81, 0xA
1007 mov r8d, VERR_INVALID_PARAMETER
1008 cmovz eax, r8d
1009 mov r9d, VERR_VMX_INVALID_VMCS_PTR
1010 cmovc eax, r9d
1011 jmp far [.fpret wrt rip]
1012.fpret: ; 16:32 Pointer to .the_end.
1013 dd .the_end, NAME(SUPR0AbsKernelCS)
1014BITS 32
1015%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1016ENDPROC VMXR0InvVPID
1017
1018
1019%if GC_ARCH_BITS == 64
1020;;
1021; Executes INVLPGA
1022;
1023; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
1024; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
1025;
1026;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1027BEGINPROC SVMR0InvlpgA
1028%ifdef RT_ARCH_AMD64
1029 %ifdef ASM_CALL64_GCC
1030 mov rax, rdi
1031 mov rcx, rsi
1032 %else
1033 mov rax, rcx
1034 mov rcx, rdx
1035 %endif
1036%else
1037 mov eax, [esp + 4]
1038 mov ecx, [esp + 0Ch]
1039%endif
1040 invlpga [xAX], ecx
1041 ret
1042ENDPROC SVMR0InvlpgA
1043
1044%else ; GC_ARCH_BITS != 64
1045;;
1046; Executes INVLPGA
1047;
1048; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
1049; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
1050;
1051;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1052BEGINPROC SVMR0InvlpgA
1053%ifdef RT_ARCH_AMD64
1054 %ifdef ASM_CALL64_GCC
1055 movzx rax, edi
1056 mov ecx, esi
1057 %else
1058 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1059 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1060 ; values also set the upper 32 bits of the register to zero. Consequently
1061 ; there is no need for an instruction movzlq.''
1062 mov eax, ecx
1063 mov ecx, edx
1064 %endif
1065%else
1066 mov eax, [esp + 4]
1067 mov ecx, [esp + 8]
1068%endif
1069 invlpga [xAX], ecx
1070 ret
1071ENDPROC SVMR0InvlpgA
1072
1073%endif ; GC_ARCH_BITS != 64
1074
1075%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1076
1077;/**
1078; * Gets 64-bit GDTR and IDTR on darwin.
1079; * @param pGdtr Where to store the 64-bit GDTR.
1080; * @param pIdtr Where to store the 64-bit IDTR.
1081; */
1082;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
1083ALIGNCODE(16)
1084BEGINPROC HMR0Get64bitGdtrAndIdtr
1085 db 0xea ; jmp far .sixtyfourbit_mode
1086 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1087.the_end:
1088 ret
1089
1090ALIGNCODE(16)
1091BITS 64
1092.sixtyfourbit_mode:
1093 and esp, 0ffffffffh
1094 mov ecx, [rsp + 4] ; pGdtr
1095 mov edx, [rsp + 8] ; pIdtr
1096 sgdt [rcx]
1097 sidt [rdx]
1098 jmp far [.fpret wrt rip]
1099.fpret: ; 16:32 Pointer to .the_end.
1100 dd .the_end, NAME(SUPR0AbsKernelCS)
1101BITS 32
1102ENDPROC HMR0Get64bitGdtrAndIdtr
1103
1104
1105;/**
1106; * Gets 64-bit CR3 on darwin.
1107; * @returns CR3
1108; */
1109;DECLASM(uint64_t) HMR0Get64bitCR3(void);
1110ALIGNCODE(16)
1111BEGINPROC HMR0Get64bitCR3
1112 db 0xea ; jmp far .sixtyfourbit_mode
1113 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1114.the_end:
1115 ret
1116
1117ALIGNCODE(16)
1118BITS 64
1119.sixtyfourbit_mode:
1120 mov rax, cr3
1121 mov rdx, rax
1122 shr rdx, 32
1123 jmp far [.fpret wrt rip]
1124.fpret: ; 16:32 Pointer to .the_end.
1125 dd .the_end, NAME(SUPR0AbsKernelCS)
1126BITS 32
1127ENDPROC HMR0Get64bitCR3
1128
1129%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1130
1131%ifdef VBOX_WITH_KERNEL_USING_XMM
1132
1133;;
1134; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1135; loads the guest ones when necessary.
1136;
1137; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
1138;
1139; @returns eax
1140;
1141; @param fResumeVM msc:rcx
1142; @param pCtx msc:rdx
1143; @param pVMCSCache msc:r8
1144; @param pVM msc:r9
1145; @param pVCpu msc:[rbp+30h]
1146; @param pfnStartVM msc:[rbp+38h]
1147;
1148; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
1149;
1150; ASSUMING 64-bit and windows for now.
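; Win64 ABI background for the code below: XMM6-XMM15 are callee-saved (hence
; only those are stashed in the local area when the guest FPU state is active),
; while XMM0-XMM5 are volatile.  The first four integer arguments arrive in
; rcx/rdx/r8/r9 with a 20h-byte shadow area reserved by the caller, so the
; fifth and sixth arguments (pVCpu, pfnStartVM) are found at [rbp + 30h] and
; [rbp + 38h], and 'mov [xSP + 020h], r10' places pVCpu in the fifth-argument
; slot for the call to pfnStartVM.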
1151ALIGNCODE(16)
1152BEGINPROC HMR0VMXStartVMWrapXMM
1153 push xBP
1154 mov xBP, xSP
1155 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1156
1157 ; spill input parameters.
1158 mov [xBP + 010h], rcx ; fResumeVM
1159 mov [xBP + 018h], rdx ; pCtx
1160 mov [xBP + 020h], r8 ; pVMCSCache
1161 mov [xBP + 028h], r9 ; pVM
1162
1163 ; Ask CPUM whether we've started using the FPU yet.
1164 mov rcx, [xBP + 30h] ; pVCpu
1165 call NAME(CPUMIsGuestFPUStateActive)
1166 test al, al
1167 jnz .guest_fpu_state_active
1168
 1169 ; No need to mess with XMM registers, just call the start routine and return.
1170 mov r11, [xBP + 38h] ; pfnStartVM
1171 mov r10, [xBP + 30h] ; pVCpu
1172 mov [xSP + 020h], r10
1173 mov rcx, [xBP + 010h] ; fResumeVM
1174 mov rdx, [xBP + 018h] ; pCtx
1175 mov r8, [xBP + 020h] ; pVMCSCache
1176 mov r9, [xBP + 028h] ; pVM
1177 call r11
1178
1179 leave
1180 ret
1181
1182ALIGNCODE(8)
1183.guest_fpu_state_active:
1184 ; Save the host XMM registers.
1185 movdqa [rsp + 040h + 000h], xmm6
1186 movdqa [rsp + 040h + 010h], xmm7
1187 movdqa [rsp + 040h + 020h], xmm8
1188 movdqa [rsp + 040h + 030h], xmm9
1189 movdqa [rsp + 040h + 040h], xmm10
1190 movdqa [rsp + 040h + 050h], xmm11
1191 movdqa [rsp + 040h + 060h], xmm12
1192 movdqa [rsp + 040h + 070h], xmm13
1193 movdqa [rsp + 040h + 080h], xmm14
1194 movdqa [rsp + 040h + 090h], xmm15
1195
1196 ; Load the full guest XMM register state.
1197 mov r10, [xBP + 018h] ; pCtx
1198 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1199 movdqa xmm0, [r10 + 000h]
1200 movdqa xmm1, [r10 + 010h]
1201 movdqa xmm2, [r10 + 020h]
1202 movdqa xmm3, [r10 + 030h]
1203 movdqa xmm4, [r10 + 040h]
1204 movdqa xmm5, [r10 + 050h]
1205 movdqa xmm6, [r10 + 060h]
1206 movdqa xmm7, [r10 + 070h]
1207 movdqa xmm8, [r10 + 080h]
1208 movdqa xmm9, [r10 + 090h]
1209 movdqa xmm10, [r10 + 0a0h]
1210 movdqa xmm11, [r10 + 0b0h]
1211 movdqa xmm12, [r10 + 0c0h]
1212 movdqa xmm13, [r10 + 0d0h]
1213 movdqa xmm14, [r10 + 0e0h]
1214 movdqa xmm15, [r10 + 0f0h]
1215
 1216 ; Make the call (same as in the other case).
1217 mov r11, [xBP + 38h] ; pfnStartVM
1218 mov r10, [xBP + 30h] ; pVCpu
1219 mov [xSP + 020h], r10
1220 mov rcx, [xBP + 010h] ; fResumeVM
1221 mov rdx, [xBP + 018h] ; pCtx
1222 mov r8, [xBP + 020h] ; pVMCSCache
1223 mov r9, [xBP + 028h] ; pVM
1224 call r11
1225
1226 ; Save the guest XMM registers.
1227 mov r10, [xBP + 018h] ; pCtx
1228 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1229 movdqa [r10 + 000h], xmm0
1230 movdqa [r10 + 010h], xmm1
1231 movdqa [r10 + 020h], xmm2
1232 movdqa [r10 + 030h], xmm3
1233 movdqa [r10 + 040h], xmm4
1234 movdqa [r10 + 050h], xmm5
1235 movdqa [r10 + 060h], xmm6
1236 movdqa [r10 + 070h], xmm7
1237 movdqa [r10 + 080h], xmm8
1238 movdqa [r10 + 090h], xmm9
1239 movdqa [r10 + 0a0h], xmm10
1240 movdqa [r10 + 0b0h], xmm11
1241 movdqa [r10 + 0c0h], xmm12
1242 movdqa [r10 + 0d0h], xmm13
1243 movdqa [r10 + 0e0h], xmm14
1244 movdqa [r10 + 0f0h], xmm15
1245
1246 ; Load the host XMM registers.
1247 movdqa xmm6, [rsp + 040h + 000h]
1248 movdqa xmm7, [rsp + 040h + 010h]
1249 movdqa xmm8, [rsp + 040h + 020h]
1250 movdqa xmm9, [rsp + 040h + 030h]
1251 movdqa xmm10, [rsp + 040h + 040h]
1252 movdqa xmm11, [rsp + 040h + 050h]
1253 movdqa xmm12, [rsp + 040h + 060h]
1254 movdqa xmm13, [rsp + 040h + 070h]
1255 movdqa xmm14, [rsp + 040h + 080h]
1256 movdqa xmm15, [rsp + 040h + 090h]
1257 leave
1258 ret
1259ENDPROC HMR0VMXStartVMWrapXMM
1260
1261;;
1262; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1263; loads the guest ones when necessary.
1264;
1265; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
1266;
1267; @returns eax
1268;
1269; @param pVMCBHostPhys msc:rcx
1270; @param pVMCBPhys msc:rdx
1271; @param pCtx msc:r8
1272; @param pVM msc:r9
1273; @param pVCpu msc:[rbp+30h]
1274; @param pfnVMRun msc:[rbp+38h]
1275;
1276; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1277;
1278; ASSUMING 64-bit and windows for now.
1279ALIGNCODE(16)
1280BEGINPROC HMR0SVMRunWrapXMM
1281 push xBP
1282 mov xBP, xSP
1283 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1284
1285 ; spill input parameters.
1286 mov [xBP + 010h], rcx ; pVMCBHostPhys
1287 mov [xBP + 018h], rdx ; pVMCBPhys
1288 mov [xBP + 020h], r8 ; pCtx
1289 mov [xBP + 028h], r9 ; pVM
1290
1291 ; Ask CPUM whether we've started using the FPU yet.
1292 mov rcx, [xBP + 30h] ; pVCpu
1293 call NAME(CPUMIsGuestFPUStateActive)
1294 test al, al
1295 jnz .guest_fpu_state_active
1296
 1297 ; No need to mess with XMM registers, just call the start routine and return.
1298 mov r11, [xBP + 38h] ; pfnVMRun
1299 mov r10, [xBP + 30h] ; pVCpu
1300 mov [xSP + 020h], r10
1301 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1302 mov rdx, [xBP + 018h] ; pVMCBPhys
1303 mov r8, [xBP + 020h] ; pCtx
1304 mov r9, [xBP + 028h] ; pVM
1305 call r11
1306
1307 leave
1308 ret
1309
1310ALIGNCODE(8)
1311.guest_fpu_state_active:
1312 ; Save the host XMM registers.
1313 movdqa [rsp + 040h + 000h], xmm6
1314 movdqa [rsp + 040h + 010h], xmm7
1315 movdqa [rsp + 040h + 020h], xmm8
1316 movdqa [rsp + 040h + 030h], xmm9
1317 movdqa [rsp + 040h + 040h], xmm10
1318 movdqa [rsp + 040h + 050h], xmm11
1319 movdqa [rsp + 040h + 060h], xmm12
1320 movdqa [rsp + 040h + 070h], xmm13
1321 movdqa [rsp + 040h + 080h], xmm14
1322 movdqa [rsp + 040h + 090h], xmm15
1323
1324 ; Load the full guest XMM register state.
1325 mov r10, [xBP + 020h] ; pCtx
1326 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1327 movdqa xmm0, [r10 + 000h]
1328 movdqa xmm1, [r10 + 010h]
1329 movdqa xmm2, [r10 + 020h]
1330 movdqa xmm3, [r10 + 030h]
1331 movdqa xmm4, [r10 + 040h]
1332 movdqa xmm5, [r10 + 050h]
1333 movdqa xmm6, [r10 + 060h]
1334 movdqa xmm7, [r10 + 070h]
1335 movdqa xmm8, [r10 + 080h]
1336 movdqa xmm9, [r10 + 090h]
1337 movdqa xmm10, [r10 + 0a0h]
1338 movdqa xmm11, [r10 + 0b0h]
1339 movdqa xmm12, [r10 + 0c0h]
1340 movdqa xmm13, [r10 + 0d0h]
1341 movdqa xmm14, [r10 + 0e0h]
1342 movdqa xmm15, [r10 + 0f0h]
1343
 1344 ; Make the call (same as in the other case).
1345 mov r11, [xBP + 38h] ; pfnVMRun
1346 mov r10, [xBP + 30h] ; pVCpu
1347 mov [xSP + 020h], r10
1348 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1349 mov rdx, [xBP + 018h] ; pVMCBPhys
1350 mov r8, [xBP + 020h] ; pCtx
1351 mov r9, [xBP + 028h] ; pVM
1352 call r11
1353
1354 ; Save the guest XMM registers.
1355 mov r10, [xBP + 020h] ; pCtx
1356 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1357 movdqa [r10 + 000h], xmm0
1358 movdqa [r10 + 010h], xmm1
1359 movdqa [r10 + 020h], xmm2
1360 movdqa [r10 + 030h], xmm3
1361 movdqa [r10 + 040h], xmm4
1362 movdqa [r10 + 050h], xmm5
1363 movdqa [r10 + 060h], xmm6
1364 movdqa [r10 + 070h], xmm7
1365 movdqa [r10 + 080h], xmm8
1366 movdqa [r10 + 090h], xmm9
1367 movdqa [r10 + 0a0h], xmm10
1368 movdqa [r10 + 0b0h], xmm11
1369 movdqa [r10 + 0c0h], xmm12
1370 movdqa [r10 + 0d0h], xmm13
1371 movdqa [r10 + 0e0h], xmm14
1372 movdqa [r10 + 0f0h], xmm15
1373
1374 ; Load the host XMM registers.
1375 movdqa xmm6, [rsp + 040h + 000h]
1376 movdqa xmm7, [rsp + 040h + 010h]
1377 movdqa xmm8, [rsp + 040h + 020h]
1378 movdqa xmm9, [rsp + 040h + 030h]
1379 movdqa xmm10, [rsp + 040h + 040h]
1380 movdqa xmm11, [rsp + 040h + 050h]
1381 movdqa xmm12, [rsp + 040h + 060h]
1382 movdqa xmm13, [rsp + 040h + 070h]
1383 movdqa xmm14, [rsp + 040h + 080h]
1384 movdqa xmm15, [rsp + 040h + 090h]
1385 leave
1386 ret
1387ENDPROC HMR0SVMRunWrapXMM
1388
1389%endif ; VBOX_WITH_KERNEL_USING_XMM
1390
1391;
1392; The default setup of the StartVM routines.
1393;
1394%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1395 %define MY_NAME(name) name %+ _32
1396%else
1397 %define MY_NAME(name) name
1398%endif
1399%ifdef RT_ARCH_AMD64
1400 %define MYPUSHAD MYPUSHAD64
1401 %define MYPOPAD MYPOPAD64
1402 %define MYPUSHSEGS MYPUSHSEGS64
1403 %define MYPOPSEGS MYPOPSEGS64
1404%else
1405 %define MYPUSHAD MYPUSHAD32
1406 %define MYPOPAD MYPOPAD32
1407 %define MYPUSHSEGS MYPUSHSEGS32
1408 %define MYPOPSEGS MYPOPSEGS32
1409%endif
1410
1411%include "HMR0Mixed.mac"
1412
1413
1414%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1415 ;
1416 ; Write the wrapper procedures.
1417 ;
1418 ; These routines are probably being too paranoid about selector
1419 ; restoring, but better safe than sorry...
1420 ;
1421
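; A note on the stack offsets used in the thunks below (taking VMXR0StartVM32
; as the example): the 32-bit prologue pushes esi, edi, fs and gs (4 x 4 bytes)
; on top of the 4-byte return address, so the first caller argument sits at
; [esp + 14h]; after the far jump and the 'sub esp, 20h' in the 64-bit part it
; is found at [rsp + 20h + 14h], the second at +18h and the third at +1Ch.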
1422; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1423ALIGNCODE(16)
1424BEGINPROC VMXR0StartVM32
1425 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1426 je near NAME(VMXR0StartVM32_32)
1427
1428 ; stack frame
1429 push esi
1430 push edi
1431 push fs
1432 push gs
1433
1434 ; jmp far .thunk64
1435 db 0xea
1436 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1437
1438ALIGNCODE(16)
1439BITS 64
1440.thunk64:
1441 sub esp, 20h
1442 mov edi, [rsp + 20h + 14h] ; fResume
1443 mov esi, [rsp + 20h + 18h] ; pCtx
1444 mov edx, [rsp + 20h + 1Ch] ; pCache
1445 call NAME(VMXR0StartVM32_64)
1446 add esp, 20h
1447 jmp far [.fpthunk32 wrt rip]
1448.fpthunk32: ; 16:32 Pointer to .thunk32.
1449 dd .thunk32, NAME(SUPR0AbsKernelCS)
1450
1451BITS 32
1452ALIGNCODE(16)
1453.thunk32:
1454 pop gs
1455 pop fs
1456 pop edi
1457 pop esi
1458 ret
1459ENDPROC VMXR0StartVM32
1460
1461
1462; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1463ALIGNCODE(16)
1464BEGINPROC VMXR0StartVM64
1465 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1466 je .not_in_long_mode
1467
1468 ; stack frame
1469 push esi
1470 push edi
1471 push fs
1472 push gs
1473
1474 ; jmp far .thunk64
1475 db 0xea
1476 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1477
1478ALIGNCODE(16)
1479BITS 64
1480.thunk64:
1481 sub esp, 20h
1482 mov edi, [rsp + 20h + 14h] ; fResume
1483 mov esi, [rsp + 20h + 18h] ; pCtx
1484 mov edx, [rsp + 20h + 1Ch] ; pCache
1485 call NAME(VMXR0StartVM64_64)
1486 add esp, 20h
1487 jmp far [.fpthunk32 wrt rip]
1488.fpthunk32: ; 16:32 Pointer to .thunk32.
1489 dd .thunk32, NAME(SUPR0AbsKernelCS)
1490
1491BITS 32
1492ALIGNCODE(16)
1493.thunk32:
1494 pop gs
1495 pop fs
1496 pop edi
1497 pop esi
1498 ret
1499
1500.not_in_long_mode:
1501 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1502 ret
1503ENDPROC VMXR0StartVM64
1504
1505;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1506ALIGNCODE(16)
1507BEGINPROC SVMR0VMRun
1508 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1509 je near NAME(SVMR0VMRun_32)
1510
1511 ; stack frame
1512 push esi
1513 push edi
1514 push fs
1515 push gs
1516
1517 ; jmp far .thunk64
1518 db 0xea
1519 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1520
1521ALIGNCODE(16)
1522BITS 64
1523.thunk64:
1524 sub esp, 20h
1525 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1526 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1527 mov edx, [rsp + 20h + 24h] ; pCtx
1528 call NAME(SVMR0VMRun_64)
1529 add esp, 20h
1530 jmp far [.fpthunk32 wrt rip]
1531.fpthunk32: ; 16:32 Pointer to .thunk32.
1532 dd .thunk32, NAME(SUPR0AbsKernelCS)
1533
1534BITS 32
1535ALIGNCODE(16)
1536.thunk32:
1537 pop gs
1538 pop fs
1539 pop edi
1540 pop esi
1541 ret
1542ENDPROC SVMR0VMRun
1543
1544
1545; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1546ALIGNCODE(16)
1547BEGINPROC SVMR0VMRun64
1548 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1549 je .not_in_long_mode
1550
1551 ; stack frame
1552 push esi
1553 push edi
1554 push fs
1555 push gs
1556
1557 ; jmp far .thunk64
1558 db 0xea
1559 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1560
1561ALIGNCODE(16)
1562BITS 64
1563.thunk64:
1564 sub esp, 20h
1565 mov rdi, [rbp + 20h + 14h] ; pVMCBHostPhys
1566 mov rsi, [rbp + 20h + 1Ch] ; pVMCBPhys
1567 mov edx, [rbp + 20h + 24h] ; pCtx
1568 call NAME(SVMR0VMRun64_64)
1569 add esp, 20h
1570 jmp far [.fpthunk32 wrt rip]
1571.fpthunk32: ; 16:32 Pointer to .thunk32.
1572 dd .thunk32, NAME(SUPR0AbsKernelCS)
1573
1574BITS 32
1575ALIGNCODE(16)
1576.thunk32:
1577 pop gs
1578 pop fs
1579 pop edi
1580 pop esi
1581 ret
1582
1583.not_in_long_mode:
1584 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1585 ret
1586ENDPROC SVMR0VMRun64
1587
1588 ;
1589 ; Do it a second time pretending we're a 64-bit host.
1590 ;
1591 ; This *HAS* to be done at the very end of the file to avoid restoring
1592 ; macros. So, add new code *BEFORE* this mess.
1593 ;
1594 BITS 64
1595 %undef RT_ARCH_X86
1596 %define RT_ARCH_AMD64
1597 %undef ASM_CALL64_MSC
1598 %define ASM_CALL64_GCC
1599 %define xCB 8
1600 %define xSP rsp
1601 %define xBP rbp
1602 %define xAX rax
1603 %define xBX rbx
1604 %define xCX rcx
1605 %define xDX rdx
1606 %define xDI rdi
1607 %define xSI rsi
1608 %define MY_NAME(name) name %+ _64
1609 %define MYPUSHAD MYPUSHAD64
1610 %define MYPOPAD MYPOPAD64
1611 %define MYPUSHSEGS MYPUSHSEGS64
1612 %define MYPOPSEGS MYPOPSEGS64
1613
1614 %include "HMR0Mixed.mac"
1615%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL