1; $Id: HWACCMR0A.asm 33540 2010-10-28 09:27:05Z vboxsync $
2;; @file
3; HWACCMR0A - R0 VMX & SVM helpers
4;
5
6;
7; Copyright (C) 2006-2007 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/hwacc_vmx.mac"
24%include "VBox/cpum.mac"
25%include "VBox/x86.mac"
26%include "../HWACCMInternal.mac"
27
28%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
29 %macro vmwrite 2,
30 int3
31 %endmacro
32 %define vmlaunch int3
33 %define vmresume int3
34 %define vmsave int3
35 %define vmload int3
36 %define vmrun int3
37 %define clgi int3
38 %define stgi int3
39 %macro invlpga 2,
40 int3
41 %endmacro
42%endif
43
44;*******************************************************************************
45;* Defined Constants And Macros *
46;*******************************************************************************
47%ifdef RT_ARCH_AMD64
48 %define MAYBE_64_BIT
49%endif
50%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
51 %define MAYBE_64_BIT
52%else
53 %ifdef RT_OS_DARWIN
54 %ifdef RT_ARCH_AMD64
55 ;;
56 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
57 ; risk loading a stale LDT value or something invalid.
58 %define HWACCM_64_BIT_USE_NULL_SEL
59 %endif
60 %endif
61%endif
62
63;; The offset of the XMM registers in X86FXSTATE.
64; Use define because I'm too lazy to convert the struct.
65%define XMM_OFF_IN_X86FXSTATE 160
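; In the FXSAVE/FXRSTOR image the XMM registers start at byte offset 160 and each
; one occupies 16 bytes, so XMMn lives at XMM_OFF_IN_X86FXSTATE + n*10h; the XMM
; wrapper routines further down rely on this layout.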
66
67
68;; This is too risky wrt. stability, performance and correctness.
69;%define VBOX_WITH_DR6_EXPERIMENT 1
70
71;; @def MYPUSHAD
72; Macro generating an equivalent to pushad
73
74;; @def MYPOPAD
75; Macro generating an equivalent to popad
76
77;; @def MYPUSHSEGS
78; Macro saving all segment registers on the stack.
79; @param 1 full width register name
80; @param 2 16-bit register name for \a 1.
81
82;; @def MYPOPSEGS
83; Macro restoring all segment registers from the stack.
84; @param 1 full width register name
85; @param 2 16-bit register name for \a 1.
86
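; Illustrative pairing (a sketch only, not assembled here): the StartVM bodies
; pulled in from HWACCMR0Mixed.mac are expected to bracket the world switch
; roughly as
;   MYPUSHAD
;   MYPUSHSEGS xAX, ax
;   ; ... switch to guest and back ...
;   MYPOPSEGS  xAX, ax
;   MYPOPAD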
87%ifdef MAYBE_64_BIT
88 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
89 %macro LOADGUESTMSR 2
90 mov rcx, %1
91 rdmsr
92 push rdx
93 push rax
94 mov edx, dword [xSI + %2 + 4]
95 mov eax, dword [xSI + %2]
96 wrmsr
97 %endmacro
98
99 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
100 ; Only really needed for the GS kernel base, as that one can be changed behind our back (swapgs)
101 %macro LOADHOSTMSREX 2
102 mov rcx, %1
103 rdmsr
104 mov dword [xSI + %2], eax
105 mov dword [xSI + %2 + 4], edx
106 pop rax
107 pop rdx
108 wrmsr
109 %endmacro
110
111 ; Restore the corresponding host MSR saved by LOADGUESTMSR (trashes rdx & rcx)
112 %macro LOADHOSTMSR 1
113 mov rcx, %1
114 pop rax
115 pop rdx
116 wrmsr
117 %endmacro
118%endif
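; Illustrative use (a sketch; the CPUMCTX field name is an assumption, not checked
; against cpum.mac): a host MSR is saved and the guest value loaded before the
; world switch, then restored afterwards, e.g.
;   LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
;   ; ... run guest ...
;   LOADHOSTMSR  MSR_K8_LSTAR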
119
120%ifdef ASM_CALL64_GCC
121 %macro MYPUSHAD64 0
122 push r15
123 push r14
124 push r13
125 push r12
126 push rbx
127 %endmacro
128 %macro MYPOPAD64 0
129 pop rbx
130 pop r12
131 pop r13
132 pop r14
133 pop r15
134 %endmacro
135
136%else ; ASM_CALL64_MSC
137 %macro MYPUSHAD64 0
138 push r15
139 push r14
140 push r13
141 push r12
142 push rbx
143 push rsi
144 push rdi
145 %endmacro
146 %macro MYPOPAD64 0
147 pop rdi
148 pop rsi
149 pop rbx
150 pop r12
151 pop r13
152 pop r14
153 pop r15
154 %endmacro
155%endif
156
157; trashes rax, rdx & rcx
158%macro MYPUSHSEGS64 2
159 %ifndef HWACCM_64_BIT_USE_NULL_SEL
160 mov %2, es
161 push %1
162 mov %2, ds
163 push %1
164 %endif
165
166 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save and restore it ourselves.
167 mov ecx, MSR_K8_FS_BASE
168 rdmsr
169 push rdx
170 push rax
171 %ifndef HWACCM_64_BIT_USE_NULL_SEL
172 push fs
173 %endif
174
175 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
176 mov ecx, MSR_K8_GS_BASE
177 rdmsr
178 push rdx
179 push rax
180 %ifndef HWACCM_64_BIT_USE_NULL_SEL
181 push gs
182 %endif
183%endmacro
184
185; trashes rax, rdx & rcx
186%macro MYPOPSEGS64 2
187 ; Note: do not step through this code with a debugger!
188 %ifndef HWACCM_64_BIT_USE_NULL_SEL
189 xor eax, eax
190 mov ds, ax
191 mov es, ax
192 mov fs, ax
193 mov gs, ax
194 %endif
195
196 %ifndef HWACCM_64_BIT_USE_NULL_SEL
197 pop gs
198 %endif
199 pop rax
200 pop rdx
201 mov ecx, MSR_K8_GS_BASE
202 wrmsr
203
204 %ifndef HWACCM_64_BIT_USE_NULL_SEL
205 pop fs
206 %endif
207 pop rax
208 pop rdx
209 mov ecx, MSR_K8_FS_BASE
210 wrmsr
211 ; Now it's safe to step again
212
213 %ifndef HWACCM_64_BIT_USE_NULL_SEL
214 pop %1
215 mov ds, %2
216 pop %1
217 mov es, %2
218 %endif
219%endmacro
220
221%macro MYPUSHAD32 0
222 pushad
223%endmacro
224%macro MYPOPAD32 0
225 popad
226%endmacro
227
228%macro MYPUSHSEGS32 2
229 push ds
230 push es
231 push fs
232 push gs
233%endmacro
234%macro MYPOPSEGS32 2
235 pop gs
236 pop fs
237 pop es
238 pop ds
239%endmacro
240
241
242;*******************************************************************************
243;* External Symbols *
244;*******************************************************************************
245%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
246extern NAME(SUPR0AbsIs64bit)
247extern NAME(SUPR0Abs64bitKernelCS)
248extern NAME(SUPR0Abs64bitKernelSS)
249extern NAME(SUPR0Abs64bitKernelDS)
250extern NAME(SUPR0AbsKernelCS)
251%endif
252%ifdef VBOX_WITH_KERNEL_USING_XMM
253extern NAME(CPUMIsGuestFPUStateActive)
254%endif
255
256
257;*******************************************************************************
258;* Global Variables *
259;*******************************************************************************
260%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
261BEGINDATA
262;;
263; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
264; needing to clobber a register. (This trick doesn't quite work for PE btw.
265; but that's not relevant atm.)
266GLOBALNAME g_fVMXIs64bitHost
267 dd NAME(SUPR0AbsIs64bit)
268%endif
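; How this flag is used by the code below: each accessor compares this byte against
; zero and, when the host kernel is running in long mode, far-jumps (db 0xea plus a
; 16:32 target) into SUPR0Abs64bitKernelCS to execute the 64-bit variant, returning
; to 32-bit code via 'jmp far [.fpret wrt rip]'.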
269
270
271BEGINCODE
272
273
274;/**
275; * Executes VMWRITE, 64-bit value.
276; *
277; * @returns VBox status code
278; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
279; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
280; */
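; Assumed C prototype, mirroring the sibling accessors below:
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data);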
281ALIGNCODE(16)
282BEGINPROC VMXWriteVMCS64
283%ifdef RT_ARCH_AMD64
284 %ifdef ASM_CALL64_GCC
285 and edi, 0ffffffffh
286 xor rax, rax
287 vmwrite rdi, rsi
288 %else
289 and ecx, 0ffffffffh
290 xor rax, rax
291 vmwrite rcx, rdx
292 %endif
293%else ; RT_ARCH_X86
294 mov ecx, [esp + 4] ; idxField
295 lea edx, [esp + 8] ; &u64Data
296 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
297 cmp byte [NAME(g_fVMXIs64bitHost)], 0
298 jz .legacy_mode
299 db 0xea ; jmp far .sixtyfourbit_mode
300 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
301.legacy_mode:
302 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
303 vmwrite ecx, [edx] ; low dword
304 jz .done
305 jc .done
306 inc ecx
307 xor eax, eax
308 vmwrite ecx, [edx + 4] ; high dword
309.done:
310%endif ; RT_ARCH_X86
311 jnc .valid_vmcs
312 mov eax, VERR_VMX_INVALID_VMCS_PTR
313 ret
314.valid_vmcs:
315 jnz .the_end
316 mov eax, VERR_VMX_INVALID_VMCS_FIELD
317.the_end:
318 ret
319
320%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
321ALIGNCODE(16)
322BITS 64
323.sixtyfourbit_mode:
324 and edx, 0ffffffffh
325 and ecx, 0ffffffffh
326 xor eax, eax
327 vmwrite rcx, [rdx]
328 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
329 cmovz eax, r8d
330 mov r9d, VERR_VMX_INVALID_VMCS_PTR
331 cmovc eax, r9d
332 jmp far [.fpret wrt rip]
333.fpret: ; 16:32 Pointer to .the_end.
334 dd .the_end, NAME(SUPR0AbsKernelCS)
335BITS 32
336%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
337ENDPROC VMXWriteVMCS64
338
339
340;/**
341; * Executes VMREAD, 64-bit value
342; *
343; * @returns VBox status code
344; * @param idxField VMCS index
345; @param pData Where to store the VM field value
346; */
347;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
348ALIGNCODE(16)
349BEGINPROC VMXReadVMCS64
350%ifdef RT_ARCH_AMD64
351 %ifdef ASM_CALL64_GCC
352 and edi, 0ffffffffh
353 xor rax, rax
354 vmread [rsi], rdi
355 %else
356 and ecx, 0ffffffffh
357 xor rax, rax
358 vmread [rdx], rcx
359 %endif
360%else ; RT_ARCH_X86
361 mov ecx, [esp + 4] ; idxField
362 mov edx, [esp + 8] ; pData
363 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
364 cmp byte [NAME(g_fVMXIs64bitHost)], 0
365 jz .legacy_mode
366 db 0xea ; jmp far .sixtyfourbit_mode
367 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
368.legacy_mode:
369 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
370 vmread [edx], ecx ; low dword
371 jz .done
372 jc .done
373 inc ecx
374 xor eax, eax
375 vmread [edx + 4], ecx ; high dword
376.done:
377%endif ; RT_ARCH_X86
378 jnc .valid_vmcs
379 mov eax, VERR_VMX_INVALID_VMCS_PTR
380 ret
381.valid_vmcs:
382 jnz .the_end
383 mov eax, VERR_VMX_INVALID_VMCS_FIELD
384.the_end:
385 ret
386
387%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
388ALIGNCODE(16)
389BITS 64
390.sixtyfourbit_mode:
391 and edx, 0ffffffffh
392 and ecx, 0ffffffffh
393 xor eax, eax
394 vmread [rdx], rcx
395 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
396 cmovz eax, r8d
397 mov r9d, VERR_VMX_INVALID_VMCS_PTR
398 cmovc eax, r9d
399 jmp far [.fpret wrt rip]
400.fpret: ; 16:32 Pointer to .the_end.
401 dd .the_end, NAME(SUPR0AbsKernelCS)
402BITS 32
403%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
404ENDPROC VMXReadVMCS64
405
406
407;/**
408; * Executes VMREAD, 32-bit value.
409; *
410; * @returns VBox status code
411; * @param idxField VMCS index
412; @param pu32Data Where to store the VM field value
413; */
414;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
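; Note: like the other accessors in this file, this returns VINF_SUCCESS (0) when the
; VMREAD succeeds; callers are expected to check for the two VERR_VMX_* status codes
; set below.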
415ALIGNCODE(16)
416BEGINPROC VMXReadVMCS32
417%ifdef RT_ARCH_AMD64
418 %ifdef ASM_CALL64_GCC
419 and edi, 0ffffffffh
420 xor rax, rax
421 vmread r10, rdi
422 mov [rsi], r10d
423 %else
424 and ecx, 0ffffffffh
425 xor rax, rax
426 vmread r10, rcx
427 mov [rdx], r10d
428 %endif
429%else ; RT_ARCH_X86
430 mov ecx, [esp + 4] ; idxField
431 mov edx, [esp + 8] ; pu32Data
432 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
433 cmp byte [NAME(g_fVMXIs64bitHost)], 0
434 jz .legacy_mode
435 db 0xea ; jmp far .sixtyfourbit_mode
436 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
437.legacy_mode:
438 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
439 xor eax, eax
440 vmread [edx], ecx
441%endif ; RT_ARCH_X86
442 jnc .valid_vmcs
443 mov eax, VERR_VMX_INVALID_VMCS_PTR
444 ret
445.valid_vmcs:
446 jnz .the_end
447 mov eax, VERR_VMX_INVALID_VMCS_FIELD
448.the_end:
449 ret
450
451%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
452ALIGNCODE(16)
453BITS 64
454.sixtyfourbit_mode:
455 and edx, 0ffffffffh
456 and ecx, 0ffffffffh
457 xor eax, eax
458 vmread r10, rcx
459 mov [rdx], r10d
460 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
461 cmovz eax, r8d
462 mov r9d, VERR_VMX_INVALID_VMCS_PTR
463 cmovc eax, r9d
464 jmp far [.fpret wrt rip]
465.fpret: ; 16:32 Pointer to .the_end.
466 dd .the_end, NAME(SUPR0AbsKernelCS)
467BITS 32
468%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
469ENDPROC VMXReadVMCS32
470
471
472;/**
473; * Executes VMWRITE, 32-bit value.
474; *
475; * @returns VBox status code
476; * @param idxField VMCS index
477; @param u32Data The 32-bit value to write
478; */
479;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
480ALIGNCODE(16)
481BEGINPROC VMXWriteVMCS32
482%ifdef RT_ARCH_AMD64
483 %ifdef ASM_CALL64_GCC
484 and edi, 0ffffffffh
485 and esi, 0ffffffffh
486 xor rax, rax
487 vmwrite rdi, rsi
488 %else
489 and ecx, 0ffffffffh
490 and edx, 0ffffffffh
491 xor rax, rax
492 vmwrite rcx, rdx
493 %endif
494%else ; RT_ARCH_X86
495 mov ecx, [esp + 4] ; idxField
496 mov edx, [esp + 8] ; u32Data
497 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
498 cmp byte [NAME(g_fVMXIs64bitHost)], 0
499 jz .legacy_mode
500 db 0xea ; jmp far .sixtyfourbit_mode
501 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
502.legacy_mode:
503 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
504 xor eax, eax
505 vmwrite ecx, edx
506%endif ; RT_ARCH_X86
507 jnc .valid_vmcs
508 mov eax, VERR_VMX_INVALID_VMCS_PTR
509 ret
510.valid_vmcs:
511 jnz .the_end
512 mov eax, VERR_VMX_INVALID_VMCS_FIELD
513.the_end:
514 ret
515
516%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
517ALIGNCODE(16)
518BITS 64
519.sixtyfourbit_mode:
520 and edx, 0ffffffffh
521 and ecx, 0ffffffffh
522 xor eax, eax
523 vmwrite rcx, rdx
524 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
525 cmovz eax, r8d
526 mov r9d, VERR_VMX_INVALID_VMCS_PTR
527 cmovc eax, r9d
528 jmp far [.fpret wrt rip]
529.fpret: ; 16:32 Pointer to .the_end.
530 dd .the_end, NAME(SUPR0AbsKernelCS)
531BITS 32
532%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
533ENDPROC VMXWriteVMCS32
534
535
536;/**
537; * Executes VMXON
538; *
539; * @returns VBox status code
540; * @param HCPhysVMXOn Physical address of VMXON structure
541; */
542;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
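; Architectural background (not VBox-specific): VMXON requires CR4.VMXE to be set and
; the VMXON region at HCPhysVMXOn to have been initialised with the VMCS revision
; identifier beforehand; the caller is assumed to have taken care of both.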
543BEGINPROC VMXEnable
544%ifdef RT_ARCH_AMD64
545 xor rax, rax
546 %ifdef ASM_CALL64_GCC
547 push rdi
548 %else
549 push rcx
550 %endif
551 vmxon [rsp]
552%else ; RT_ARCH_X86
553 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
554 cmp byte [NAME(g_fVMXIs64bitHost)], 0
555 jz .legacy_mode
556 db 0xea ; jmp far .sixtyfourbit_mode
557 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
558.legacy_mode:
559 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
560 xor eax, eax
561 vmxon [esp + 4]
562%endif ; RT_ARCH_X86
563 jnc .good
564 mov eax, VERR_VMX_INVALID_VMXON_PTR
565 jmp .the_end
566
567.good:
568 jnz .the_end
569 mov eax, VERR_VMX_GENERIC
570
571.the_end:
572%ifdef RT_ARCH_AMD64
573 add rsp, 8
574%endif
575 ret
576
577%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
578ALIGNCODE(16)
579BITS 64
580.sixtyfourbit_mode:
581 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
582 and edx, 0ffffffffh
583 xor eax, eax
584 vmxon [rdx]
585 mov r8d, VERR_INVALID_PARAMETER
586 cmovz eax, r8d
587 mov r9d, VERR_VMX_INVALID_VMCS_PTR
588 cmovc eax, r9d
589 jmp far [.fpret wrt rip]
590.fpret: ; 16:32 Pointer to .the_end.
591 dd .the_end, NAME(SUPR0AbsKernelCS)
592BITS 32
593%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
594ENDPROC VMXEnable
595
596
597;/**
598; * Executes VMXOFF
599; */
600;DECLASM(void) VMXDisable(void);
601BEGINPROC VMXDisable
602%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
603 cmp byte [NAME(g_fVMXIs64bitHost)], 0
604 jz .legacy_mode
605 db 0xea ; jmp far .sixtyfourbit_mode
606 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
607.legacy_mode:
608%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
609 vmxoff
610.the_end:
611 ret
612
613%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
614ALIGNCODE(16)
615BITS 64
616.sixtyfourbit_mode:
617 vmxoff
618 jmp far [.fpret wrt rip]
619.fpret: ; 16:32 Pointer to .the_end.
620 dd .the_end, NAME(SUPR0AbsKernelCS)
621BITS 32
622%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
623ENDPROC VMXDisable
624
625
626;/**
627; * Executes VMCLEAR
628; *
629; * @returns VBox status code
630; * @param HCPhysVMCS Physical address of VM control structure
631; */
632;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
633ALIGNCODE(16)
634BEGINPROC VMXClearVMCS
635%ifdef RT_ARCH_AMD64
636 xor rax, rax
637 %ifdef ASM_CALL64_GCC
638 push rdi
639 %else
640 push rcx
641 %endif
642 vmclear [rsp]
643%else ; RT_ARCH_X86
644 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
645 cmp byte [NAME(g_fVMXIs64bitHost)], 0
646 jz .legacy_mode
647 db 0xea ; jmp far .sixtyfourbit_mode
648 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
649.legacy_mode:
650 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
651 xor eax, eax
652 vmclear [esp + 4]
653%endif ; RT_ARCH_X86
654 jnc .the_end
655 mov eax, VERR_VMX_INVALID_VMCS_PTR
656.the_end:
657%ifdef RT_ARCH_AMD64
658 add rsp, 8
659%endif
660 ret
661
662%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
663ALIGNCODE(16)
664BITS 64
665.sixtyfourbit_mode:
666 lea rdx, [rsp + 4] ; &HCPhysVMCS
667 and edx, 0ffffffffh
668 xor eax, eax
669 vmclear [rdx]
670 mov r9d, VERR_VMX_INVALID_VMCS_PTR
671 cmovc eax, r9d
672 jmp far [.fpret wrt rip]
673.fpret: ; 16:32 Pointer to .the_end.
674 dd .the_end, NAME(SUPR0AbsKernelCS)
675BITS 32
676%endif
677ENDPROC VMXClearVMCS
678
679
680;/**
681; * Executes VMPTRLD
682; *
683; * @returns VBox status code
684; * @param HCPhysVMCS Physical address of VMCS structure
685; */
686;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
687ALIGNCODE(16)
688BEGINPROC VMXActivateVMCS
689%ifdef RT_ARCH_AMD64
690 xor rax, rax
691 %ifdef ASM_CALL64_GCC
692 push rdi
693 %else
694 push rcx
695 %endif
696 vmptrld [rsp]
697%else
698 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
699 cmp byte [NAME(g_fVMXIs64bitHost)], 0
700 jz .legacy_mode
701 db 0xea ; jmp far .sixtyfourbit_mode
702 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
703.legacy_mode:
704 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
705 xor eax, eax
706 vmptrld [esp + 4]
707%endif
708 jnc .the_end
709 mov eax, VERR_VMX_INVALID_VMCS_PTR
710.the_end:
711%ifdef RT_ARCH_AMD64
712 add rsp, 8
713%endif
714 ret
715
716%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
717ALIGNCODE(16)
718BITS 64
719.sixtyfourbit_mode:
720 lea rdx, [rsp + 4] ; &HCPhysVMCS
721 and edx, 0ffffffffh
722 xor eax, eax
723 vmptrld [rdx]
724 mov r9d, VERR_VMX_INVALID_VMCS_PTR
725 cmovc eax, r9d
726 jmp far [.fpret wrt rip]
727.fpret: ; 16:32 Pointer to .the_end.
728 dd .the_end, NAME(SUPR0AbsKernelCS)
729BITS 32
730%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
731ENDPROC VMXActivateVMCS
732
733
734;/**
735; * Executes VMPTRST
736; *
737; * @returns VBox status code
738; @param pVMCS x86: [esp + 04h] msc: rcx gcc: rdi Address that will receive the current VMCS pointer
739; */
740;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
741BEGINPROC VMXGetActivateVMCS
742%ifdef RT_OS_OS2
743 mov eax, VERR_NOT_SUPPORTED
744 ret
745%else
746 %ifdef RT_ARCH_AMD64
747 %ifdef ASM_CALL64_GCC
748 vmptrst qword [rdi]
749 %else
750 vmptrst qword [rcx]
751 %endif
752 %else
753 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
754 cmp byte [NAME(g_fVMXIs64bitHost)], 0
755 jz .legacy_mode
756 db 0xea ; jmp far .sixtyfourbit_mode
757 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
758.legacy_mode:
759 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
760 vmptrst qword [esp+04h]
761 %endif
762 xor eax, eax
763.the_end:
764 ret
765
766 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
767ALIGNCODE(16)
768BITS 64
769.sixtyfourbit_mode:
770 lea rdx, [rsp + 4] ; &HCPhysVMCS
771 and edx, 0ffffffffh
772 vmptrst qword [rdx]
773 xor eax, eax
774 jmp far [.fpret wrt rip]
775.fpret: ; 16:32 Pointer to .the_end.
776 dd .the_end, NAME(SUPR0AbsKernelCS)
777BITS 32
778 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
779%endif
780ENDPROC VMXGetActivateVMCS
781
782;/**
783; * Executes INVEPT to invalidate EPT-derived cached translations.
784; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
785; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
786; */
787;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
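; Note: invept/invvpid are emitted below as raw DB byte sequences (opcodes
; 66 0F 38 80 /r and 66 0F 38 81 /r respectively), presumably because the assemblers
; in use do not know these mnemonics yet; the intended instruction is kept as a
; comment next to each DB line.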
788BEGINPROC VMXR0InvEPT
789%ifdef RT_ARCH_AMD64
790 %ifdef ASM_CALL64_GCC
791 and edi, 0ffffffffh
792 xor rax, rax
793; invept rdi, qword [rsi]
794 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
795 %else
796 and ecx, 0ffffffffh
797 xor rax, rax
798; invept rcx, qword [rdx]
799 DB 0x66, 0x0F, 0x38, 0x80, 0xA
800 %endif
801%else
802 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
803 cmp byte [NAME(g_fVMXIs64bitHost)], 0
804 jz .legacy_mode
805 db 0xea ; jmp far .sixtyfourbit_mode
806 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
807.legacy_mode:
808 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
809 mov ecx, [esp + 4]
810 mov edx, [esp + 8]
811 xor eax, eax
812; invept ecx, qword [edx]
813 DB 0x66, 0x0F, 0x38, 0x80, 0xA
814%endif
815 jnc .valid_vmcs
816 mov eax, VERR_VMX_INVALID_VMCS_PTR
817 ret
818.valid_vmcs:
819 jnz .the_end
820 mov eax, VERR_INVALID_PARAMETER
821.the_end:
822 ret
823
824%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
825ALIGNCODE(16)
826BITS 64
827.sixtyfourbit_mode:
828 and esp, 0ffffffffh
829 mov ecx, [rsp + 4] ; enmFlush
830 mov edx, [rsp + 8] ; pDescriptor
831 xor eax, eax
832; invept rcx, qword [rdx]
833 DB 0x66, 0x0F, 0x38, 0x80, 0xA
834 mov r8d, VERR_INVALID_PARAMETER
835 cmovz eax, r8d
836 mov r9d, VERR_VMX_INVALID_VMCS_PTR
837 cmovc eax, r9d
838 jmp far [.fpret wrt rip]
839.fpret: ; 16:32 Pointer to .the_end.
840 dd .the_end, NAME(SUPR0AbsKernelCS)
841BITS 32
842%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
843ENDPROC VMXR0InvEPT
844
845
846;/**
847; * Executes INVVPID to invalidate VPID-tagged TLB entries.
848; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
849; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
850; */
851;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
852BEGINPROC VMXR0InvVPID
853%ifdef RT_ARCH_AMD64
854 %ifdef ASM_CALL64_GCC
855 and edi, 0ffffffffh
856 xor rax, rax
857 ;invvpid rdi, qword [rsi]
858 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
859 %else
860 and ecx, 0ffffffffh
861 xor rax, rax
862; invvpid rcx, qword [rdx]
863 DB 0x66, 0x0F, 0x38, 0x81, 0xA
864 %endif
865%else
866 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
867 cmp byte [NAME(g_fVMXIs64bitHost)], 0
868 jz .legacy_mode
869 db 0xea ; jmp far .sixtyfourbit_mode
870 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
871.legacy_mode:
872 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
873 mov ecx, [esp + 4]
874 mov edx, [esp + 8]
875 xor eax, eax
876; invvpid ecx, qword [edx]
877 DB 0x66, 0x0F, 0x38, 0x81, 0xA
878%endif
879 jnc .valid_vmcs
880 mov eax, VERR_VMX_INVALID_VMCS_PTR
881 ret
882.valid_vmcs:
883 jnz .the_end
884 mov eax, VERR_INVALID_PARAMETER
885.the_end:
886 ret
887
888%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
889ALIGNCODE(16)
890BITS 64
891.sixtyfourbit_mode:
892 and esp, 0ffffffffh
893 mov ecx, [rsp + 4] ; enmFlush
894 mov edx, [rsp + 8] ; pDescriptor
895 xor eax, eax
896; invvpid rcx, qword [rdx]
897 DB 0x66, 0x0F, 0x38, 0x81, 0xA
898 mov r8d, VERR_INVALID_PARAMETER
899 cmovz eax, r8d
900 mov r9d, VERR_VMX_INVALID_VMCS_PTR
901 cmovc eax, r9d
902 jmp far [.fpret wrt rip]
903.fpret: ; 16:32 Pointer to .the_end.
904 dd .the_end, NAME(SUPR0AbsKernelCS)
905BITS 32
906%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
907ENDPROC VMXR0InvVPID
908
909
910%if GC_ARCH_BITS == 64
911;;
912; Executes INVLPGA
913;
914; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
915; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
916;
917;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
918BEGINPROC SVMR0InvlpgA
919%ifdef RT_ARCH_AMD64
920 %ifdef ASM_CALL64_GCC
921 mov rax, rdi
922 mov rcx, rsi
923 %else
924 mov rax, rcx
925 mov rcx, rdx
926 %endif
927%else
928 mov eax, [esp + 4]
929 mov ecx, [esp + 0Ch]
930%endif
931 invlpga [xAX], ecx
932 ret
933ENDPROC SVMR0InvlpgA
934
935%else ; GC_ARCH_BITS != 64
936;;
937; Executes INVLPGA
938;
939; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
940; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
941;
942;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
943BEGINPROC SVMR0InvlpgA
944%ifdef RT_ARCH_AMD64
945 %ifdef ASM_CALL64_GCC
946 movzx rax, edi
947 mov ecx, esi
948 %else
949 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
950 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
951 ; values also set the upper 32 bits of the register to zero. Consequently
952 ; there is no need for an instruction movzlq.''
953 mov eax, ecx
954 mov ecx, edx
955 %endif
956%else
957 mov eax, [esp + 4]
958 mov ecx, [esp + 8]
959%endif
960 invlpga [xAX], ecx
961 ret
962ENDPROC SVMR0InvlpgA
963
964%endif ; GC_ARCH_BITS != 64
965
966%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
967
968;/**
969; * Gets 64-bit GDTR and IDTR on darwin.
970; * @param pGdtr Where to store the 64-bit GDTR.
971; * @param pIdtr Where to store the 64-bit IDTR.
972; */
973;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
974ALIGNCODE(16)
975BEGINPROC hwaccmR0Get64bitGDTRandIDTR
976 db 0xea ; jmp far .sixtyfourbit_mode
977 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
978.the_end:
979 ret
980
981ALIGNCODE(16)
982BITS 64
983.sixtyfourbit_mode:
984 and esp, 0ffffffffh
985 mov ecx, [rsp + 4] ; pGdtr
986 mov edx, [rsp + 8] ; pIdtr
987 sgdt [rcx]
988 sidt [rdx]
989 jmp far [.fpret wrt rip]
990.fpret: ; 16:32 Pointer to .the_end.
991 dd .the_end, NAME(SUPR0AbsKernelCS)
992BITS 32
993ENDPROC hwaccmR0Get64bitGDTRandIDTR
994
995
996;/**
997; * Gets 64-bit CR3 on darwin.
998; * @returns CR3
999; */
1000;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
1001ALIGNCODE(16)
1002BEGINPROC hwaccmR0Get64bitCR3
1003 db 0xea ; jmp far .sixtyfourbit_mode
1004 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1005.the_end:
1006 ret
1007
1008ALIGNCODE(16)
1009BITS 64
1010.sixtyfourbit_mode:
1011 mov rax, cr3
1012 mov rdx, rax
1013 shr rdx, 32
1014 jmp far [.fpret wrt rip]
1015.fpret: ; 16:32 Pointer to .the_end.
1016 dd .the_end, NAME(SUPR0AbsKernelCS)
1017BITS 32
1018ENDPROC hwaccmR0Get64bitCR3
1019
1020%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1021
1022%ifdef VBOX_WITH_KERNEL_USING_XMM
1023
1024;;
1025; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1026; loads the guest ones when necessary.
1027;
1028; @cproto DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
1029;
1030; @returns eax
1031;
1032; @param fResumeVM msc:rcx
1033; @param pCtx msc:rdx
1034; @param pVMCSCache msc:r8
1035; @param pVM msc:r9
1036; @param pVCpu msc:[rbp+30h]
1037; @param pfnStartVM msc:[rbp+38h]
1038;
1039; @remarks This is essentially the same code as hwaccmR0SVMRunWrapXMM, only the parameters differ a little bit.
1040;
1041; ASSUMING 64-bit and Windows for now.
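; Background: under the Windows x64 calling convention xmm6-xmm15 are nonvolatile,
; which is why only those are saved/restored as host registers here, while the full
; xmm0-xmm15 set is loaded and stored for the guest.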
1042ALIGNCODE(16)
1043BEGINPROC hwaccmR0VMXStartVMWrapXMM
1044 push xBP
1045 mov xBP, xSP
1046 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1047
1048 ; spill input parameters.
1049 mov [xBP + 010h], rcx ; fResumeVM
1050 mov [xBP + 018h], rdx ; pCtx
1051 mov [xBP + 020h], r8 ; pVMCSCache
1052 mov [xBP + 028h], r9 ; pVM
1053
1054 ; Ask CPUM whether we've started using the FPU yet.
1055 mov rcx, [xBP + 30h] ; pVCpu
1056 call NAME(CPUMIsGuestFPUStateActive)
1057 test al, al
1058 jnz .guest_fpu_state_active
1059
1060 ; No need to mess with the XMM registers; just call the start routine and return.
1061 mov r11, [xBP + 38h] ; pfnStartVM
1062 mov r10, [xBP + 30h] ; pVCpu
1063 mov [xSP + 020h], r10
1064 mov rcx, [xBP + 010h] ; fResumeVM
1065 mov rdx, [xBP + 018h] ; pCtx
1066 mov r8, [xBP + 020h] ; pVMCSCache
1067 mov r9, [xBP + 028h] ; pVM
1068 call r11
1069
1070 leave
1071 ret
1072
1073ALIGNCODE(8)
1074.guest_fpu_state_active:
1075 ; Save the host XMM registers.
1076 movdqa [rsp + 040h + 000h], xmm6
1077 movdqa [rsp + 040h + 010h], xmm7
1078 movdqa [rsp + 040h + 020h], xmm8
1079 movdqa [rsp + 040h + 030h], xmm9
1080 movdqa [rsp + 040h + 040h], xmm10
1081 movdqa [rsp + 040h + 050h], xmm11
1082 movdqa [rsp + 040h + 060h], xmm12
1083 movdqa [rsp + 040h + 070h], xmm13
1084 movdqa [rsp + 040h + 080h], xmm14
1085 movdqa [rsp + 040h + 090h], xmm15
1086
1087 ; Load the full guest XMM register state.
1088 mov r10, [xBP + 018h] ; pCtx
1089 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1090 movdqa xmm0, [r10 + 000h]
1091 movdqa xmm1, [r10 + 010h]
1092 movdqa xmm2, [r10 + 020h]
1093 movdqa xmm3, [r10 + 030h]
1094 movdqa xmm4, [r10 + 040h]
1095 movdqa xmm5, [r10 + 050h]
1096 movdqa xmm6, [r10 + 060h]
1097 movdqa xmm7, [r10 + 070h]
1098 movdqa xmm8, [r10 + 080h]
1099 movdqa xmm9, [r10 + 090h]
1100 movdqa xmm10, [r10 + 0a0h]
1101 movdqa xmm11, [r10 + 0b0h]
1102 movdqa xmm12, [r10 + 0c0h]
1103 movdqa xmm13, [r10 + 0d0h]
1104 movdqa xmm14, [r10 + 0e0h]
1105 movdqa xmm15, [r10 + 0f0h]
1106
1107 ; Make the call (same as in the other case).
1108 mov r11, [xBP + 38h] ; pfnStartVM
1109 mov r10, [xBP + 30h] ; pVCpu
1110 mov [xSP + 020h], r10
1111 mov rcx, [xBP + 010h] ; fResumeVM
1112 mov rdx, [xBP + 018h] ; pCtx
1113 mov r8, [xBP + 020h] ; pVMCSCache
1114 mov r9, [xBP + 028h] ; pVM
1115 call r11
1116
1117 ; Save the guest XMM registers.
1118 mov r10, [xBP + 018h] ; pCtx
1119 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1120 movdqa [r10 + 000h], xmm0
1121 movdqa [r10 + 010h], xmm1
1122 movdqa [r10 + 020h], xmm2
1123 movdqa [r10 + 030h], xmm3
1124 movdqa [r10 + 040h], xmm4
1125 movdqa [r10 + 050h], xmm5
1126 movdqa [r10 + 060h], xmm6
1127 movdqa [r10 + 070h], xmm7
1128 movdqa [r10 + 080h], xmm8
1129 movdqa [r10 + 090h], xmm9
1130 movdqa [r10 + 0a0h], xmm10
1131 movdqa [r10 + 0b0h], xmm11
1132 movdqa [r10 + 0c0h], xmm12
1133 movdqa [r10 + 0d0h], xmm13
1134 movdqa [r10 + 0e0h], xmm14
1135 movdqa [r10 + 0f0h], xmm15
1136
1137 ; Load the host XMM registers.
1138 movdqa xmm6, [rsp + 040h + 000h]
1139 movdqa xmm7, [rsp + 040h + 010h]
1140 movdqa xmm8, [rsp + 040h + 020h]
1141 movdqa xmm9, [rsp + 040h + 030h]
1142 movdqa xmm10, [rsp + 040h + 040h]
1143 movdqa xmm11, [rsp + 040h + 050h]
1144 movdqa xmm12, [rsp + 040h + 060h]
1145 movdqa xmm13, [rsp + 040h + 070h]
1146 movdqa xmm14, [rsp + 040h + 080h]
1147 movdqa xmm15, [rsp + 040h + 090h]
1148 leave
1149 ret
1150ENDPROC hwaccmR0VMXStartVMWrapXMM
1151
1152;;
1153; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1154; loads the guest ones when necessary.
1155;
1156; @cproto DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
1157;
1158; @returns eax
1159;
1160; @param pVMCBHostPhys msc:rcx
1161; @param pVMCBPhys msc:rdx
1162; @param pCtx msc:r8
1163; @param pVM msc:r9
1164; @param pVCpu msc:[rbp+30h]
1165; @param pfnVMRun msc:[rbp+38h]
1166;
1167; @remarks This is essentially the same code as hwaccmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1168;
1169; ASSUMING 64-bit and Windows for now.
1170ALIGNCODE(16)
1171BEGINPROC hwaccmR0SVMRunWrapXMM
1172 push xBP
1173 mov xBP, xSP
1174 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1175
1176 ; spill input parameters.
1177 mov [xBP + 010h], rcx ; pVMCBHostPhys
1178 mov [xBP + 018h], rdx ; pVMCBPhys
1179 mov [xBP + 020h], r8 ; pCtx
1180 mov [xBP + 028h], r9 ; pVM
1181
1182 ; Ask CPUM whether we've started using the FPU yet.
1183 mov rcx, [xBP + 30h] ; pVCpu
1184 call NAME(CPUMIsGuestFPUStateActive)
1185 test al, al
1186 jnz .guest_fpu_state_active
1187
1188 ; No need to mess with the XMM registers; just call the start routine and return.
1189 mov r11, [xBP + 38h] ; pfnVMRun
1190 mov r10, [xBP + 30h] ; pVCpu
1191 mov [xSP + 020h], r10
1192 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1193 mov rdx, [xBP + 018h] ; pVMCBPhys
1194 mov r8, [xBP + 020h] ; pCtx
1195 mov r9, [xBP + 028h] ; pVM
1196 call r11
1197
1198 leave
1199 ret
1200
1201ALIGNCODE(8)
1202.guest_fpu_state_active:
1203 ; Save the host XMM registers.
1204 movdqa [rsp + 040h + 000h], xmm6
1205 movdqa [rsp + 040h + 010h], xmm7
1206 movdqa [rsp + 040h + 020h], xmm8
1207 movdqa [rsp + 040h + 030h], xmm9
1208 movdqa [rsp + 040h + 040h], xmm10
1209 movdqa [rsp + 040h + 050h], xmm11
1210 movdqa [rsp + 040h + 060h], xmm12
1211 movdqa [rsp + 040h + 070h], xmm13
1212 movdqa [rsp + 040h + 080h], xmm14
1213 movdqa [rsp + 040h + 090h], xmm15
1214
1215 ; Load the full guest XMM register state.
1216 mov r10, [xBP + 020h] ; pCtx
1217 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1218 movdqa xmm0, [r10 + 000h]
1219 movdqa xmm1, [r10 + 010h]
1220 movdqa xmm2, [r10 + 020h]
1221 movdqa xmm3, [r10 + 030h]
1222 movdqa xmm4, [r10 + 040h]
1223 movdqa xmm5, [r10 + 050h]
1224 movdqa xmm6, [r10 + 060h]
1225 movdqa xmm7, [r10 + 070h]
1226 movdqa xmm8, [r10 + 080h]
1227 movdqa xmm9, [r10 + 090h]
1228 movdqa xmm10, [r10 + 0a0h]
1229 movdqa xmm11, [r10 + 0b0h]
1230 movdqa xmm12, [r10 + 0c0h]
1231 movdqa xmm13, [r10 + 0d0h]
1232 movdqa xmm14, [r10 + 0e0h]
1233 movdqa xmm15, [r10 + 0f0h]
1234
1235 ; Make the call (same as in the other case).
1236 mov r11, [xBP + 38h] ; pfnVMRun
1237 mov r10, [xBP + 30h] ; pVCpu
1238 mov [xSP + 020h], r10
1239 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1240 mov rdx, [xBP + 018h] ; pVMCBPhys
1241 mov r8, [xBP + 020h] ; pCtx
1242 mov r9, [xBP + 028h] ; pVM
1243 call r11
1244
1245 ; Save the guest XMM registers.
1246 mov r10, [xBP + 020h] ; pCtx
1247 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1248 movdqa [r10 + 000h], xmm0
1249 movdqa [r10 + 010h], xmm1
1250 movdqa [r10 + 020h], xmm2
1251 movdqa [r10 + 030h], xmm3
1252 movdqa [r10 + 040h], xmm4
1253 movdqa [r10 + 050h], xmm5
1254 movdqa [r10 + 060h], xmm6
1255 movdqa [r10 + 070h], xmm7
1256 movdqa [r10 + 080h], xmm8
1257 movdqa [r10 + 090h], xmm9
1258 movdqa [r10 + 0a0h], xmm10
1259 movdqa [r10 + 0b0h], xmm11
1260 movdqa [r10 + 0c0h], xmm12
1261 movdqa [r10 + 0d0h], xmm13
1262 movdqa [r10 + 0e0h], xmm14
1263 movdqa [r10 + 0f0h], xmm15
1264
1265 ; Load the host XMM registers.
1266 movdqa xmm6, [rsp + 040h + 000h]
1267 movdqa xmm7, [rsp + 040h + 010h]
1268 movdqa xmm8, [rsp + 040h + 020h]
1269 movdqa xmm9, [rsp + 040h + 030h]
1270 movdqa xmm10, [rsp + 040h + 040h]
1271 movdqa xmm11, [rsp + 040h + 050h]
1272 movdqa xmm12, [rsp + 040h + 060h]
1273 movdqa xmm13, [rsp + 040h + 070h]
1274 movdqa xmm14, [rsp + 040h + 080h]
1275 movdqa xmm15, [rsp + 040h + 090h]
1276 leave
1277 ret
1278ENDPROC hwaccmR0SVMRunWrapXMM
1279
1280%endif ; VBOX_WITH_KERNEL_USING_XMM
1281
1282;
1283; The default setup of the StartVM routines.
1284;
1285%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1286 %define MY_NAME(name) name %+ _32
1287%else
1288 %define MY_NAME(name) name
1289%endif
1290%ifdef RT_ARCH_AMD64
1291 %define MYPUSHAD MYPUSHAD64
1292 %define MYPOPAD MYPOPAD64
1293 %define MYPUSHSEGS MYPUSHSEGS64
1294 %define MYPOPSEGS MYPOPSEGS64
1295%else
1296 %define MYPUSHAD MYPUSHAD32
1297 %define MYPOPAD MYPOPAD32
1298 %define MYPUSHSEGS MYPUSHSEGS32
1299 %define MYPOPSEGS MYPOPSEGS32
1300%endif
1301
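; HWACCMR0Mixed.mac (included here and again at the very end of the file) is expected
; to expand the actual VMXR0StartVM* / SVMR0VMRun* bodies using the MY_NAME and
; MYPUSH*/MYPOP* definitions above; with VBOX_WITH_HYBRID_32BIT_KERNEL it is therefore
; instantiated twice, producing _32 and _64 flavours which the thunks further down
; dispatch between based on g_fVMXIs64bitHost.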
1302%include "HWACCMR0Mixed.mac"
1303
1304
1305%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1306 ;
1307 ; Write the wrapper procedures.
1308 ;
1309 ; These routines are probably being too paranoid about selector
1310 ; restoring, but better safe than sorry...
1311 ;
1312
1313; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1314ALIGNCODE(16)
1315BEGINPROC VMXR0StartVM32
1316 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1317 je near NAME(VMXR0StartVM32_32)
1318
1319 ; stack frame
1320 push esi
1321 push edi
1322 push fs
1323 push gs
1324
1325 ; jmp far .thunk64
1326 db 0xea
1327 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1328
1329ALIGNCODE(16)
1330BITS 64
1331.thunk64:
1332 sub esp, 20h
1333 mov edi, [rsp + 20h + 14h] ; fResume
1334 mov esi, [rsp + 20h + 18h] ; pCtx
1335 mov edx, [rsp + 20h + 1Ch] ; pCache
1336 call NAME(VMXR0StartVM32_64)
1337 add esp, 20h
1338 jmp far [.fpthunk32 wrt rip]
1339.fpthunk32: ; 16:32 Pointer to .thunk32.
1340 dd .thunk32, NAME(SUPR0AbsKernelCS)
1341
1342BITS 32
1343ALIGNCODE(16)
1344.thunk32:
1345 pop gs
1346 pop fs
1347 pop edi
1348 pop esi
1349 ret
1350ENDPROC VMXR0StartVM32
1351
1352
1353; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1354ALIGNCODE(16)
1355BEGINPROC VMXR0StartVM64
1356 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1357 je .not_in_long_mode
1358
1359 ; stack frame
1360 push esi
1361 push edi
1362 push fs
1363 push gs
1364
1365 ; jmp far .thunk64
1366 db 0xea
1367 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1368
1369ALIGNCODE(16)
1370BITS 64
1371.thunk64:
1372 sub esp, 20h
1373 mov edi, [rsp + 20h + 14h] ; fResume
1374 mov esi, [rsp + 20h + 18h] ; pCtx
1375 mov edx, [rsp + 20h + 1Ch] ; pCache
1376 call NAME(VMXR0StartVM64_64)
1377 add esp, 20h
1378 jmp far [.fpthunk32 wrt rip]
1379.fpthunk32: ; 16:32 Pointer to .thunk32.
1380 dd .thunk32, NAME(SUPR0AbsKernelCS)
1381
1382BITS 32
1383ALIGNCODE(16)
1384.thunk32:
1385 pop gs
1386 pop fs
1387 pop edi
1388 pop esi
1389 ret
1390
1391.not_in_long_mode:
1392 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1393 ret
1394ENDPROC VMXR0StartVM64
1395
1396;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1397ALIGNCODE(16)
1398BEGINPROC SVMR0VMRun
1399 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1400 je near NAME(SVMR0VMRun_32)
1401
1402 ; stack frame
1403 push esi
1404 push edi
1405 push fs
1406 push gs
1407
1408 ; jmp far .thunk64
1409 db 0xea
1410 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1411
1412ALIGNCODE(16)
1413BITS 64
1414.thunk64:
1415 sub esp, 20h
1416 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1417 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1418 mov edx, [rsp + 20h + 24h] ; pCtx
1419 call NAME(SVMR0VMRun_64)
1420 add esp, 20h
1421 jmp far [.fpthunk32 wrt rip]
1422.fpthunk32: ; 16:32 Pointer to .thunk32.
1423 dd .thunk32, NAME(SUPR0AbsKernelCS)
1424
1425BITS 32
1426ALIGNCODE(16)
1427.thunk32:
1428 pop gs
1429 pop fs
1430 pop edi
1431 pop esi
1432 ret
1433ENDPROC SVMR0VMRun
1434
1435
1436; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1437ALIGNCODE(16)
1438BEGINPROC SVMR0VMRun64
1439 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1440 je .not_in_long_mode
1441
1442 ; stack frame
1443 push esi
1444 push edi
1445 push fs
1446 push gs
1447
1448 ; jmp far .thunk64
1449 db 0xea
1450 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1451
1452ALIGNCODE(16)
1453BITS 64
1454.thunk64:
1455 sub esp, 20h
1456 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1457 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1458 mov edx, [rsp + 20h + 24h] ; pCtx
1459 call NAME(SVMR0VMRun64_64)
1460 add esp, 20h
1461 jmp far [.fpthunk32 wrt rip]
1462.fpthunk32: ; 16:32 Pointer to .thunk32.
1463 dd .thunk32, NAME(SUPR0AbsKernelCS)
1464
1465BITS 32
1466ALIGNCODE(16)
1467.thunk32:
1468 pop gs
1469 pop fs
1470 pop edi
1471 pop esi
1472 ret
1473
1474.not_in_long_mode:
1475 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1476 ret
1477ENDPROC SVMR0VMRun64
1478
1479 ;
1480 ; Do it a second time pretending we're a 64-bit host.
1481 ;
1482 ; This *HAS* to be done at the very end of the file to avoid having to
1483 ; restore the macros afterwards. So, add new code *BEFORE* this mess.
1484 ;
1485 BITS 64
1486 %undef RT_ARCH_X86
1487 %define RT_ARCH_AMD64
1488 %undef ASM_CALL64_MSC
1489 %define ASM_CALL64_GCC
1490 %define xS 8
1491 %define xSP rsp
1492 %define xBP rbp
1493 %define xAX rax
1494 %define xBX rbx
1495 %define xCX rcx
1496 %define xDX rdx
1497 %define xDI rdi
1498 %define xSI rsi
1499 %define MY_NAME(name) name %+ _64
1500 %define MYPUSHAD MYPUSHAD64
1501 %define MYPOPAD MYPOPAD64
1502 %define MYPUSHSEGS MYPUSHSEGS64
1503 %define MYPOPSEGS MYPOPSEGS64
1504
1505 %include "HWACCMR0Mixed.mac"
1506%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL