VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@21111

Last change on this file since 21111 was 21001, checked in by vboxsync on 2009-06-26

HWACCM: Untested AMD-V fix for the xmm register corruption on Windows/AMD64.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.8 KB
1; $Id: HWACCMR0A.asm 21001 2009-06-26 23:18:11Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30%include "../HWACCMInternal.mac"
31
32%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
33 %macro vmwrite 2,
34 int3
35 %endmacro
36 %define vmlaunch int3
37 %define vmresume int3
38 %define vmsave int3
39 %define vmload int3
40 %define vmrun int3
41 %define clgi int3
42 %define stgi int3
43 %macro invlpga 2,
44 int3
45 %endmacro
46%endif
47
48;*******************************************************************************
49;* Defined Constants And Macros *
50;*******************************************************************************
51%ifdef RT_ARCH_AMD64
52 %define MAYBE_64_BIT
53%endif
54%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
55 %define MAYBE_64_BIT
56%else
57 %ifdef RT_OS_DARWIN
58 %ifdef RT_ARCH_AMD64
59 ;;
60 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
61 ; risk loading a stale LDT value or something invalid.
62 %define HWACCM_64_BIT_USE_NULL_SEL
63 %endif
64 %endif
65%endif
66
67;; The offset of the XMM registers in X86FXSTATE.
68; Use define because I'm too lazy to convert the struct.
69%define XMM_OFF_IN_X86FXSTATE 160
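;
; Note: 160 matches the FXSAVE/FXRSTOR image that X86FXSTATE mirrors, where
; XMM0..XMM15 occupy 16-byte slots starting at byte offset 160. Illustrative
; access pattern, as used by the XMM wrapper routines further down (r10 is
; assumed to hold a pointer to the start of the X86FXSTATE area):
;   lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
;   movdqa  xmm0, [r10 + 000h]          ; XMM0
;   movdqa  xmm1, [r10 + 010h]          ; XMM1, and so on in 16-byte steps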
70
71
72;; This is too risky wrt. stability, performance and correctness.
73;%define VBOX_WITH_DR6_EXPERIMENT 1
74
75;; @def MYPUSHAD
76; Macro generating an equivalent to pushad
77
78;; @def MYPOPAD
79; Macro generating an equivalent to popad
80
81;; @def MYPUSHSEGS
82; Macro saving all segment registers on the stack.
83; @param 1 full width register name
84; @param 2 16-bit register name for \a 1.
85
86;; @def MYPOPSEGS
87; Macro restoring all segment registers from the stack.
88; @param 1 full width register name
89; @param 2 16-bit register name for \a 1.
90
91%ifdef MAYBE_64_BIT
92 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
93 %macro LOADGUESTMSR 2
94 mov rcx, %1
95 rdmsr
96 push rdx
97 push rax
98 mov edx, dword [xSI + %2 + 4]
99 mov eax, dword [xSI + %2]
100 wrmsr
101 %endmacro
102
103 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
104 ; Only really useful for the GS kernel base as that one can be changed behind our back (swapgs)
105 %macro LOADHOSTMSREX 2
106 mov rcx, %1
107 rdmsr
108 mov dword [xSI + %2], eax
109 mov dword [xSI + %2 + 4], edx
110 pop rax
111 pop rdx
112 wrmsr
113 %endmacro
114
115 ; Load the corresponding host MSR (trashes rdx & rcx)
116 %macro LOADHOSTMSR 1
117 mov rcx, %1
118 pop rax
119 pop rdx
120 wrmsr
121 %endmacro
122%endif
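;
; Usage sketch (illustrative only): these three macros are meant to be used in
; matched, stack-based (LIFO) pairs around a VM entry/exit, e.g. from
; HWACCMR0Mixed.mac. The CPUMCTX field names below are assumptions made for
; the sake of the example, not taken from this file:
;   LOADGUESTMSR  MSR_K8_LSTAR,          CPUMCTX.msrLSTAR         ; push host, load guest
;   LOADGUESTMSR  MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
;   ; ... run the guest ...
;   LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE  ; save guest, pop host
;   LOADHOSTMSR   MSR_K8_LSTAR                                    ; pop host only
; Note that the LOADHOSTMSR* invocations must come in the reverse order of the
; LOADGUESTMSR ones, since the host values travel over the stack.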
123
124%ifdef ASM_CALL64_GCC
125 %macro MYPUSHAD64 0
126 push r15
127 push r14
128 push r13
129 push r12
130 push rbx
131 %endmacro
132 %macro MYPOPAD64 0
133 pop rbx
134 pop r12
135 pop r13
136 pop r14
137 pop r15
138 %endmacro
139
140%else ; ASM_CALL64_MSC
141 %macro MYPUSHAD64 0
142 push r15
143 push r14
144 push r13
145 push r12
146 push rbx
147 push rsi
148 push rdi
149 %endmacro
150 %macro MYPOPAD64 0
151 pop rdi
152 pop rsi
153 pop rbx
154 pop r12
155 pop r13
156 pop r14
157 pop r15
158 %endmacro
159%endif
160
161; trashes rax, rdx & rcx
162%macro MYPUSHSEGS64 2
163 %ifndef HWACCM_64_BIT_USE_NULL_SEL
164 mov %2, es
165 push %1
166 mov %2, ds
167 push %1
168 %endif
169
170 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, but Solaris does neither, so we must save and restore it ourselves.
171 mov ecx, MSR_K8_FS_BASE
172 rdmsr
173 push rdx
174 push rax
175 %ifndef HWACCM_64_BIT_USE_NULL_SEL
176 push fs
177 %endif
178
179 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
180 mov ecx, MSR_K8_GS_BASE
181 rdmsr
182 push rdx
183 push rax
184 %ifndef HWACCM_64_BIT_USE_NULL_SEL
185 push gs
186 %endif
187%endmacro
188
189; trashes rax, rdx & rcx
190%macro MYPOPSEGS64 2
191 ; Note: do not step through this code with a debugger!
192 %ifndef HWACCM_64_BIT_USE_NULL_SEL
193 xor eax, eax
194 mov ds, ax
195 mov es, ax
196 mov fs, ax
197 mov gs, ax
198 %endif
199
200 %ifndef HWACCM_64_BIT_USE_NULL_SEL
201 pop gs
202 %endif
203 pop rax
204 pop rdx
205 mov ecx, MSR_K8_GS_BASE
206 wrmsr
207
208 %ifndef HWACCM_64_BIT_USE_NULL_SEL
209 pop fs
210 %endif
211 pop rax
212 pop rdx
213 mov ecx, MSR_K8_FS_BASE
214 wrmsr
215 ; Now it's safe to step again
216
217 %ifndef HWACCM_64_BIT_USE_NULL_SEL
218 pop %1
219 mov ds, %2
220 pop %1
221 mov es, %2
222 %endif
223%endmacro
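;
; Usage sketch (illustrative): MYPUSHSEGS64/MYPOPSEGS64 take a full width
; register and its 16-bit alias as scratch and must be paired LIFO-style,
; because the FS/GS base MSR values travel over the stack:
;   MYPUSHSEGS64 xAX, ax        ; saves DS/ES and FS/GS selectors (skipped in
;                               ; null-selector mode) plus the FS/GS base MSRs
;   ; ... code that may trash the segment registers ...
;   MYPOPSEGS64  xAX, ax        ; restores everything in reverse order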
224
225%macro MYPUSHAD32 0
226 pushad
227%endmacro
228%macro MYPOPAD32 0
229 popad
230%endmacro
231
232%macro MYPUSHSEGS32 2
233 push ds
234 push es
235 push fs
236 push gs
237%endmacro
238%macro MYPOPSEGS32 2
239 pop gs
240 pop fs
241 pop es
242 pop ds
243%endmacro
244
245
246;*******************************************************************************
247;* External Symbols *
248;*******************************************************************************
249%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
250extern NAME(SUPR0AbsIs64bit)
251extern NAME(SUPR0Abs64bitKernelCS)
252extern NAME(SUPR0Abs64bitKernelSS)
253extern NAME(SUPR0Abs64bitKernelDS)
254extern NAME(SUPR0AbsKernelCS)
255%endif
256%ifdef VBOX_WITH_KERNEL_USING_XMM
257extern NAME(CPUMIsGuestFPUStateActive)
258%endif
259
260
261;*******************************************************************************
262;* Global Variables *
263;*******************************************************************************
264%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
265BEGINDATA
266;;
267; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
268; needing to clobber a register. (This trick doesn't quite work for PE, by the way,
269; but that's not relevant at the moment.)
270GLOBALNAME g_fVMXIs64bitHost
271 dd NAME(SUPR0AbsIs64bit)
272%endif
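;
; The test is then a single memory compare, exactly as used by the stubs
; further down (no register has to be loaded with the value first):
;   cmp byte [NAME(g_fVMXIs64bitHost)], 0
;   jz  .legacy_mode            ; 32-bit host kernel
;   ; otherwise far-jump to the 64-bit kernel code segment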
273
274
275BEGINCODE
276
277
278;/**
279; * Executes VMWRITE, 64-bit value.
280; *
281; * @returns VBox status code
282; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
283; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
284; */
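;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data); (prototype; assumed to mirror the read helpers below)
;
; Note on the error checks used by this and the following VMCS accessors: the
; VMX instructions report failure through RFLAGS. CF set means "VMfailInvalid"
; (no/invalid current VMCS, mapped to VERR_VMX_INVALID_VMCS_PTR), ZF set means
; "VMfailValid" (bad field encoding, mapped to VERR_VMX_INVALID_VMCS_FIELD),
; and all flags clear means success, which is why eax is zeroed up front:
;   xor     eax, eax            ; VINF_SUCCESS unless a flag says otherwise
;   vmwrite rcx, rdx
;   jnc     .valid_vmcs         ; CF=1 -> invalid VMCS pointer
;   ...
;   jnz     .the_end            ; ZF=1 -> invalid/unsupported VMCS field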
285ALIGNCODE(16)
286BEGINPROC VMXWriteVMCS64
287%ifdef RT_ARCH_AMD64
288 %ifdef ASM_CALL64_GCC
289 and edi, 0ffffffffh
290 xor rax, rax
291 vmwrite rdi, rsi
292 %else
293 and ecx, 0ffffffffh
294 xor rax, rax
295 vmwrite rcx, rdx
296 %endif
297%else ; RT_ARCH_X86
298 mov ecx, [esp + 4] ; idxField
299 lea edx, [esp + 8] ; &u64Data
300 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
301 cmp byte [NAME(g_fVMXIs64bitHost)], 0
302 jz .legacy_mode
303 db 0xea ; jmp far .sixtyfourbit_mode
304 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
305.legacy_mode:
306 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
307 vmwrite ecx, [edx] ; low dword
308 jz .done
309 jc .done
310 inc ecx
311 xor eax, eax
312 vmwrite ecx, [edx + 4] ; high dword
313.done:
314%endif ; RT_ARCH_X86
315 jnc .valid_vmcs
316 mov eax, VERR_VMX_INVALID_VMCS_PTR
317 ret
318.valid_vmcs:
319 jnz .the_end
320 mov eax, VERR_VMX_INVALID_VMCS_FIELD
321.the_end:
322 ret
323
324%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
325ALIGNCODE(16)
326BITS 64
327.sixtyfourbit_mode:
328 and edx, 0ffffffffh
329 and ecx, 0ffffffffh
330 xor eax, eax
331 vmwrite rcx, [rdx]
332 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
333 cmovz eax, r8d
334 mov r9d, VERR_VMX_INVALID_VMCS_PTR
335 cmovc eax, r9d
336 jmp far [.fpret wrt rip]
337.fpret: ; 16:32 Pointer to .the_end.
338 dd .the_end, NAME(SUPR0AbsKernelCS)
339BITS 32
340%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
341ENDPROC VMXWriteVMCS64
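;
; Note on the 32-bit path above: a 64-bit wide VMCS field is written as two
; 32-bit halves, with the "high" companion field living at encoding
; idxField + 1, hence the inc ecx between the two vmwrites:
;   vmwrite ecx, [edx]          ; low dword at idxField
;   inc     ecx
;   vmwrite ecx, [edx + 4]      ; high dword at idxField + 1
; VMXReadVMCS64 below mirrors the same trick for reads.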
342
343
344;/**
345; * Executes VMREAD, 64-bit value
346; *
347; * @returns VBox status code
348; * @param idxField VMCS index
349; * @param pData Ptr to store VM field value
350; */
351;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
352ALIGNCODE(16)
353BEGINPROC VMXReadVMCS64
354%ifdef RT_ARCH_AMD64
355 %ifdef ASM_CALL64_GCC
356 and edi, 0ffffffffh
357 xor rax, rax
358 vmread [rsi], rdi
359 %else
360 and ecx, 0ffffffffh
361 xor rax, rax
362 vmread [rdx], rcx
363 %endif
364%else ; RT_ARCH_X86
365 mov ecx, [esp + 4] ; idxField
366 mov edx, [esp + 8] ; pData
367 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
368 cmp byte [NAME(g_fVMXIs64bitHost)], 0
369 jz .legacy_mode
370 db 0xea ; jmp far .sixtyfourbit_mode
371 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
372.legacy_mode:
373 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
374 vmread [edx], ecx ; low dword
375 jz .done
376 jc .done
377 inc ecx
378 xor eax, eax
379 vmread [edx + 4], ecx ; high dword
380.done:
381%endif ; RT_ARCH_X86
382 jnc .valid_vmcs
383 mov eax, VERR_VMX_INVALID_VMCS_PTR
384 ret
385.valid_vmcs:
386 jnz .the_end
387 mov eax, VERR_VMX_INVALID_VMCS_FIELD
388.the_end:
389 ret
390
391%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
392ALIGNCODE(16)
393BITS 64
394.sixtyfourbit_mode:
395 and edx, 0ffffffffh
396 and ecx, 0ffffffffh
397 xor eax, eax
398 vmread [rdx], rcx
399 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
400 cmovz eax, r8d
401 mov r9d, VERR_VMX_INVALID_VMCS_PTR
402 cmovc eax, r9d
403 jmp far [.fpret wrt rip]
404.fpret: ; 16:32 Pointer to .the_end.
405 dd .the_end, NAME(SUPR0AbsKernelCS)
406BITS 32
407%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
408ENDPROC VMXReadVMCS64
409
410
411;/**
412; * Executes VMREAD, 32-bit value.
413; *
414; * @returns VBox status code
415; * @param idxField VMCS index
416; * @param pu32Data Ptr to store VM field value
417; */
418;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
419ALIGNCODE(16)
420BEGINPROC VMXReadVMCS32
421%ifdef RT_ARCH_AMD64
422 %ifdef ASM_CALL64_GCC
423 and edi, 0ffffffffh
424 xor rax, rax
425 vmread r10, rdi
426 mov [rsi], r10d
427 %else
428 and ecx, 0ffffffffh
429 xor rax, rax
430 vmread r10, rcx
431 mov [rdx], r10d
432 %endif
433%else ; RT_ARCH_X86
434 mov ecx, [esp + 4] ; idxField
435 mov edx, [esp + 8] ; pu32Data
436 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
437 cmp byte [NAME(g_fVMXIs64bitHost)], 0
438 jz .legacy_mode
439 db 0xea ; jmp far .sixtyfourbit_mode
440 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
441.legacy_mode:
442 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
443 xor eax, eax
444 vmread [edx], ecx
445%endif ; RT_ARCH_X86
446 jnc .valid_vmcs
447 mov eax, VERR_VMX_INVALID_VMCS_PTR
448 ret
449.valid_vmcs:
450 jnz .the_end
451 mov eax, VERR_VMX_INVALID_VMCS_FIELD
452.the_end:
453 ret
454
455%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
456ALIGNCODE(16)
457BITS 64
458.sixtyfourbit_mode:
459 and edx, 0ffffffffh
460 and ecx, 0ffffffffh
461 xor eax, eax
462 vmread r10, rcx
463 mov [rdx], r10d
464 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
465 cmovz eax, r8d
466 mov r9d, VERR_VMX_INVALID_VMCS_PTR
467 cmovc eax, r9d
468 jmp far [.fpret wrt rip]
469.fpret: ; 16:32 Pointer to .the_end.
470 dd .the_end, NAME(SUPR0AbsKernelCS)
471BITS 32
472%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
473ENDPROC VMXReadVMCS32
474
475
476;/**
477; * Executes VMWRITE, 32-bit value.
478; *
479; * @returns VBox status code
480; * @param idxField VMCS index
481; * @param u32Data The 32-bit value to write
482; */
483;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
484ALIGNCODE(16)
485BEGINPROC VMXWriteVMCS32
486%ifdef RT_ARCH_AMD64
487 %ifdef ASM_CALL64_GCC
488 and edi, 0ffffffffh
489 and esi, 0ffffffffh
490 xor rax, rax
491 vmwrite rdi, rsi
492 %else
493 and ecx, 0ffffffffh
494 and edx, 0ffffffffh
495 xor rax, rax
496 vmwrite rcx, rdx
497 %endif
498%else ; RT_ARCH_X86
499 mov ecx, [esp + 4] ; idxField
500 mov edx, [esp + 8] ; u32Data
501 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
502 cmp byte [NAME(g_fVMXIs64bitHost)], 0
503 jz .legacy_mode
504 db 0xea ; jmp far .sixtyfourbit_mode
505 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
506.legacy_mode:
507 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
508 xor eax, eax
509 vmwrite ecx, edx
510%endif ; RT_ARCH_X86
511 jnc .valid_vmcs
512 mov eax, VERR_VMX_INVALID_VMCS_PTR
513 ret
514.valid_vmcs:
515 jnz .the_end
516 mov eax, VERR_VMX_INVALID_VMCS_FIELD
517.the_end:
518 ret
519
520%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
521ALIGNCODE(16)
522BITS 64
523.sixtyfourbit_mode:
524 and edx, 0ffffffffh
525 and ecx, 0ffffffffh
526 xor eax, eax
527 vmwrite rcx, rdx
528 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
529 cmovz eax, r8d
530 mov r9d, VERR_VMX_INVALID_VMCS_PTR
531 cmovc eax, r9d
532 jmp far [.fpret wrt rip]
533.fpret: ; 16:32 Pointer to .the_end.
534 dd .the_end, NAME(SUPR0AbsKernelCS)
535BITS 32
536%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
537ENDPROC VMXWriteVMCS32
538
539
540;/**
541; * Executes VMXON
542; *
543; * @returns VBox status code
544; * @param HCPhysVMXOn Physical address of VMXON structure
545; */
546;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
547BEGINPROC VMXEnable
548%ifdef RT_ARCH_AMD64
549 xor rax, rax
550 %ifdef ASM_CALL64_GCC
551 push rdi
552 %else
553 push rcx
554 %endif
555 vmxon [rsp]
556%else ; RT_ARCH_X86
557 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
558 cmp byte [NAME(g_fVMXIs64bitHost)], 0
559 jz .legacy_mode
560 db 0xea ; jmp far .sixtyfourbit_mode
561 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
562.legacy_mode:
563 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
564 xor eax, eax
565 vmxon [esp + 4]
566%endif ; RT_ARCH_X86
567 jnc .good
568 mov eax, VERR_VMX_INVALID_VMXON_PTR
569 jmp .the_end
570
571.good:
572 jnz .the_end
573 mov eax, VERR_VMX_GENERIC
574
575.the_end:
576%ifdef RT_ARCH_AMD64
577 add rsp, 8
578%endif
579 ret
580
581%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
582ALIGNCODE(16)
583BITS 64
584.sixtyfourbit_mode:
585 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
586 and edx, 0ffffffffh
587 xor eax, eax
588 vmxon [rdx]
589 mov r8d, VERR_INVALID_PARAMETER
590 cmovz eax, r8d
591 mov r9d, VERR_VMX_INVALID_VMCS_PTR
592 cmovc eax, r9d
593 jmp far [.fpret wrt rip]
594.fpret: ; 16:32 Pointer to .the_end.
595 dd .the_end, NAME(SUPR0AbsKernelCS)
596BITS 32
597%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
598ENDPROC VMXEnable
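;
; Reminder (an assumption about the callers, not something enforced here):
; VMXON only succeeds if CR4.VMXE is already set and the VMXON region has been
; initialized with the VMCS revision identifier; the ring-0 init code is
; expected to have done roughly the following before calling VMXEnable:
;   mov     eax, cr4
;   or      eax, X86_CR4_VMXE   ; CR4 bit 13, assumed to be defined in x86.mac
;   mov     cr4, eax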
599
600
601;/**
602; * Executes VMXOFF
603; */
604;DECLASM(void) VMXDisable(void);
605BEGINPROC VMXDisable
606%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
607 cmp byte [NAME(g_fVMXIs64bitHost)], 0
608 jz .legacy_mode
609 db 0xea ; jmp far .sixtyfourbit_mode
610 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
611.legacy_mode:
612%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
613 vmxoff
614.the_end:
615 ret
616
617%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
618ALIGNCODE(16)
619BITS 64
620.sixtyfourbit_mode:
621 vmxoff
622 jmp far [.fpret wrt rip]
623.fpret: ; 16:32 Pointer to .the_end.
624 dd .the_end, NAME(SUPR0AbsKernelCS)
625BITS 32
626%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
627ENDPROC VMXDisable
628
629
630;/**
631; * Executes VMCLEAR
632; *
633; * @returns VBox status code
634; * @param HCPhysVMCS Physical address of VM control structure
635; */
636;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
637ALIGNCODE(16)
638BEGINPROC VMXClearVMCS
639%ifdef RT_ARCH_AMD64
640 xor rax, rax
641 %ifdef ASM_CALL64_GCC
642 push rdi
643 %else
644 push rcx
645 %endif
646 vmclear [rsp]
647%else ; RT_ARCH_X86
648 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
649 cmp byte [NAME(g_fVMXIs64bitHost)], 0
650 jz .legacy_mode
651 db 0xea ; jmp far .sixtyfourbit_mode
652 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
653.legacy_mode:
654 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
655 xor eax, eax
656 vmclear [esp + 4]
657%endif ; RT_ARCH_X86
658 jnc .the_end
659 mov eax, VERR_VMX_INVALID_VMCS_PTR
660.the_end:
661%ifdef RT_ARCH_AMD64
662 add rsp, 8
663%endif
664 ret
665
666%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
667ALIGNCODE(16)
668BITS 64
669.sixtyfourbit_mode:
670 lea rdx, [rsp + 4] ; &HCPhysVMCS
671 and edx, 0ffffffffh
672 xor eax, eax
673 vmclear [rdx]
674 mov r9d, VERR_VMX_INVALID_VMCS_PTR
675 cmovc eax, r9d
676 jmp far [.fpret wrt rip]
677.fpret: ; 16:32 Pointer to .the_end.
678 dd .the_end, NAME(SUPR0AbsKernelCS)
679BITS 32
680%endif
681ENDPROC VMXClearVMCS
682
683
684;/**
685; * Executes VMPTRLD
686; *
687; * @returns VBox status code
688; * @param HCPhysVMCS Physical address of VMCS structure
689; */
690;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
691ALIGNCODE(16)
692BEGINPROC VMXActivateVMCS
693%ifdef RT_ARCH_AMD64
694 xor rax, rax
695 %ifdef ASM_CALL64_GCC
696 push rdi
697 %else
698 push rcx
699 %endif
700 vmptrld [rsp]
701%else
702 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
703 cmp byte [NAME(g_fVMXIs64bitHost)], 0
704 jz .legacy_mode
705 db 0xea ; jmp far .sixtyfourbit_mode
706 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
707.legacy_mode:
708 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
709 xor eax, eax
710 vmptrld [esp + 4]
711%endif
712 jnc .the_end
713 mov eax, VERR_VMX_INVALID_VMCS_PTR
714.the_end:
715%ifdef RT_ARCH_AMD64
716 add rsp, 8
717%endif
718 ret
719
720%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
721ALIGNCODE(16)
722BITS 64
723.sixtyfourbit_mode:
724 lea rdx, [rsp + 4] ; &HCPhysVMCS
725 and edx, 0ffffffffh
726 xor eax, eax
727 vmptrld [rdx]
728 mov r9d, VERR_VMX_INVALID_VMCS_PTR
729 cmovc eax, r9d
730 jmp far [.fpret wrt rip]
731.fpret: ; 16:32 Pointer to .the_end.
732 dd .the_end, NAME(SUPR0AbsKernelCS)
733BITS 32
734%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
735ENDPROC VMXActivateVMCS
736
737
738;/**
739; * Executes VMPTRST
740; *
741; * @returns VBox status code
742; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Where to store the current VMCS physical address
743; */
744;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
745BEGINPROC VMXGetActivateVMCS
746%ifdef RT_OS_OS2
747 mov eax, VERR_NOT_SUPPORTED
748 ret
749%else
750 %ifdef RT_ARCH_AMD64
751 %ifdef ASM_CALL64_GCC
752 vmptrst qword [rdi]
753 %else
754 vmptrst qword [rcx]
755 %endif
756 %else
757 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
758 cmp byte [NAME(g_fVMXIs64bitHost)], 0
759 jz .legacy_mode
760 db 0xea ; jmp far .sixtyfourbit_mode
761 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
762.legacy_mode:
763 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
764 vmptrst qword [esp+04h]
765 %endif
766 xor eax, eax
767.the_end:
768 ret
769
770 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
771ALIGNCODE(16)
772BITS 64
773.sixtyfourbit_mode:
774 lea rdx, [rsp + 4] ; &HCPhysVMCS
775 and edx, 0ffffffffh
776 vmptrst qword [rdx]
777 xor eax, eax
778 jmp far [.fpret wrt rip]
779.fpret: ; 16:32 Pointer to .the_end.
780 dd .the_end, NAME(SUPR0AbsKernelCS)
781BITS 32
782 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
783%endif
784ENDPROC VMXGetActivateVMCS
785
786;/**
787; * Invalidate EPT translations using INVEPT
788; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
789; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
790; */
791;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
792BEGINPROC VMXR0InvEPT
793%ifdef RT_ARCH_AMD64
794 %ifdef ASM_CALL64_GCC
795 and edi, 0ffffffffh
796 xor rax, rax
797; invept rdi, qword [rsi]
798 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
799 %else
800 and ecx, 0ffffffffh
801 xor rax, rax
802; invept rcx, qword [rdx]
803 DB 0x66, 0x0F, 0x38, 0x80, 0xA
804 %endif
805%else
806 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
807 cmp byte [NAME(g_fVMXIs64bitHost)], 0
808 jz .legacy_mode
809 db 0xea ; jmp far .sixtyfourbit_mode
810 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
811.legacy_mode:
812 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
813 mov eax, [esp + 4]
814 mov ecx, [esp + 8]
815; invept eax, qword [ecx]
816 DB 0x66, 0x0F, 0x38, 0x80, 0x1
817%endif
818 jnc .valid_vmcs
819 mov eax, VERR_VMX_INVALID_VMCS_PTR
820 ret
821.valid_vmcs:
822 jnz .the_end
823 mov eax, VERR_INVALID_PARAMETER
824.the_end:
825 ret
826
827%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
828ALIGNCODE(16)
829BITS 64
830.sixtyfourbit_mode:
831 and esp, 0ffffffffh
832 mov ecx, [rsp + 4] ; enmFlush
833 mov edx, [rsp + 8] ; pDescriptor
834 xor eax, eax
835; invept rcx, qword [rdx]
836 DB 0x66, 0x0F, 0x38, 0x80, 0xA
837 mov r8d, VERR_INVALID_PARAMETER
838 cmovz eax, r8d
839 mov r9d, VERR_VMX_INVALID_VMCS_PTR
840 cmovc eax, r9d
841 jmp far [.fpret wrt rip]
842.fpret: ; 16:32 Pointer to .the_end.
843 dd .the_end, NAME(SUPR0AbsKernelCS)
844BITS 32
845%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
846ENDPROC VMXR0InvEPT
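;
; Note on the DB lines above and in VMXR0InvVPID below: the bytes hand-encode
; the instructions (66 0F 38 80 /r = invept, 66 0F 38 81 /r = invvpid),
; presumably because the assemblers in use did not accept these mnemonics yet;
; the final ModRM byte selects the operands spelled out in the adjacent
; comments, e.g.:
;   DB 0x66, 0x0F, 0x38, 0x80, 0x3E     ; invept rdi, qword [rsi]
;   DB 0x66, 0x0F, 0x38, 0x80, 0x0A     ; invept rcx, qword [rdx]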
847
848
849;/**
850; * Invalidate VPID-tagged TLB entries using INVVPID
851; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
852; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
853; */
854;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
855BEGINPROC VMXR0InvVPID
856%ifdef RT_ARCH_AMD64
857 %ifdef ASM_CALL64_GCC
858 and edi, 0ffffffffh
859 xor rax, rax
860 ;invvpid rdi, qword [rsi]
861 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
862 %else
863 and ecx, 0ffffffffh
864 xor rax, rax
865; invvpid rcx, qword [rdx]
866 DB 0x66, 0x0F, 0x38, 0x81, 0xA
867 %endif
868%else
869 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
870 cmp byte [NAME(g_fVMXIs64bitHost)], 0
871 jz .legacy_mode
872 db 0xea ; jmp far .sixtyfourbit_mode
873 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
874.legacy_mode:
875 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
876 mov eax, [esp + 4]
877 mov ecx, [esp + 8]
878; invvpid eax, qword [ecx]
879 DB 0x66, 0x0F, 0x38, 0x81, 0x1
880%endif
881 jnc .valid_vmcs
882 mov eax, VERR_VMX_INVALID_VMCS_PTR
883 ret
884.valid_vmcs:
885 jnz .the_end
886 mov eax, VERR_INVALID_PARAMETER
887.the_end:
888 ret
889
890%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
891ALIGNCODE(16)
892BITS 64
893.sixtyfourbit_mode:
894 and esp, 0ffffffffh
895 mov ecx, [rsp + 4] ; enmFlush
896 mov edx, [rsp + 8] ; pDescriptor
897 xor eax, eax
898; invvpid rcx, qword [rdx]
899 DB 0x66, 0x0F, 0x38, 0x81, 0xA
900 mov r8d, VERR_INVALID_PARAMETER
901 cmovz eax, r8d
902 mov r9d, VERR_VMX_INVALID_VMCS_PTR
903 cmovc eax, r9d
904 jmp far [.fpret wrt rip]
905.fpret: ; 16:32 Pointer to .the_end.
906 dd .the_end, NAME(SUPR0AbsKernelCS)
907BITS 32
908%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
909ENDPROC VMXR0InvVPID
910
911
912%if GC_ARCH_BITS == 64
913;;
914; Executes INVLPGA
915;
916; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
917; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
918;
919;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
920BEGINPROC SVMR0InvlpgA
921%ifdef RT_ARCH_AMD64
922 %ifdef ASM_CALL64_GCC
923 mov rax, rdi
924 mov rcx, rsi
925 %else
926 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
927 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
928 ; values also set the upper 32 bits of the register to zero. Consequently
929 ; there is no need for an instruction movzlq.''
930 mov eax, ecx
931 mov rcx, rdx
932 %endif
933%else
934 mov eax, [esp + 4]
935 mov ecx, [esp + 0Ch]
936%endif
937 invlpga [xAX], ecx
938 ret
939ENDPROC SVMR0InvlpgA
940
941%else ; GC_ARCH_BITS != 64
942;;
943; Executes INVLPGA
944;
945; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
946; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
947;
948;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
949BEGINPROC SVMR0InvlpgA
950%ifdef RT_ARCH_AMD64
951 %ifdef ASM_CALL64_GCC
952 movzx rax, edi
953 mov ecx, esi
954 %else
955 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
956 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
957 ; values also set the upper 32 bits of the register to zero. Consequently
958 ; there is no need for an instruction movzlq.''
959 mov eax, ecx
960 mov ecx, edx
961 %endif
962%else
963 mov eax, [esp + 4]
964 mov ecx, [esp + 8]
965%endif
966 invlpga [xAX], ecx
967 ret
968ENDPROC SVMR0InvlpgA
969
970%endif ; GC_ARCH_BITS != 64
971
972%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
973
974;/**
975; * Gets 64-bit GDTR and IDTR on darwin.
976; * @param pGdtr Where to store the 64-bit GDTR.
977; * @param pIdtr Where to store the 64-bit IDTR.
978; */
979;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
980ALIGNCODE(16)
981BEGINPROC hwaccmR0Get64bitGDTRandIDTR
982 db 0xea ; jmp far .sixtyfourbit_mode
983 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
984.the_end:
985 ret
986
987ALIGNCODE(16)
988BITS 64
989.sixtyfourbit_mode:
990 and esp, 0ffffffffh
991 mov ecx, [rsp + 4] ; pGdtr
992 mov edx, [rsp + 8] ; pIdtr
993 sgdt [rcx]
994 sidt [rdx]
995 jmp far [.fpret wrt rip]
996.fpret: ; 16:32 Pointer to .the_end.
997 dd .the_end, NAME(SUPR0AbsKernelCS)
998BITS 32
999ENDPROC hwaccmR0Get64bitGDTRandIDTR
1000
1001
1002;/**
1003; * Gets 64-bit CR3 on darwin.
1004; * @returns CR3
1005; */
1006;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
1007ALIGNCODE(16)
1008BEGINPROC hwaccmR0Get64bitCR3
1009 db 0xea ; jmp far .sixtyfourbit_mode
1010 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1011.the_end:
1012 ret
1013
1014ALIGNCODE(16)
1015BITS 64
1016.sixtyfourbit_mode:
1017 mov rax, cr3
1018 mov rdx, rax
1019 shr rdx, 32
1020 jmp far [.fpret wrt rip]
1021.fpret: ; 16:32 Pointer to .the_end.
1022 dd .the_end, NAME(SUPR0AbsKernelCS)
1023BITS 32
1024ENDPROC hwaccmR0Get64bitCR3
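;
; Note: as a 32-bit DECLASM function returning a uint64_t, the result comes
; back in edx:eax - the 64-bit code above leaves the low half of CR3 in eax
; and shifts the high half into edx before thunking back. Illustrative caller:
;   call    NAME(hwaccmR0Get64bitCR3)   ; eax = CR3[31:0], edx = CR3[63:32]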
1025
1026%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1027
1028%ifdef VBOX_WITH_KERNEL_USING_XMM
1029
1030;;
1031; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1032; loads the guest ones when necessary.
1033;
1034; @cproto DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
1035;
1036; @returns eax
1037;
1038; @param fResumeVM msc:rcx
1039; @param pCtx msc:rdx
1040; @param pVMCSCache msc:r8
1041; @param pVM msc:r9
1042; @param pVCpu msc:[rbp+30h]
1043; @param pfnStartVM msc:[rbp+38h]
1044;
1045; @remarks This is essentially the same code as hwaccmR0SVMRunWrapXMM, only the parameters differ a little bit.
1046;
1047; ASSUMING 64-bit and Windows for now.
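;
; Frame layout used below (a sketch, assuming the Windows x64 calling
; convention): xmm0..xmm5 are volatile and need no saving, while xmm6..xmm15
; are callee-saved and therefore parked on the stack around the call. The
; 040h gap leaves room for the callee's 20h-byte register spill ("home") area
; plus the fifth-argument slot that pfnStartVM expects:
;   [xSP + 000h..01Fh]   home area for the pfnStartVM call
;   [xSP + 020h]         5th parameter (pVCpu) passed on the stack
;   [xSP + 040h..0DFh]   ten 16-byte slots holding the host xmm6..xmm15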
1048ALIGNCODE(16)
1049BEGINPROC hwaccmR0VMXStartVMWrapXMM
1050 push xBP
1051 mov xBP, xSP
1052 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1053
1054 ; spill input parameters.
1055 mov [xBP + 010h], rcx ; fResumeVM
1056 mov [xBP + 018h], rdx ; pCtx
1057 mov [xBP + 020h], r8 ; pVMCSCache
1058 mov [xBP + 028h], r9 ; pVM
1059
1060 ; Ask CPUM whether we've started using the FPU yet.
1061 mov rcx, [xBP + 30h] ; pVCpu
1062 call NAME(CPUMIsGuestFPUStateActive)
1063 test al, al
1064 jnz .guest_fpu_state_active
1065
1066 ; No need to mess with XMM registers just call the start routine and return.
1067 mov r11, [xBP + 38h] ; pfnStartVM
1068 mov r10, [xBP + 30h] ; pVCpu
1069 mov [xSP + 020h], r10
1070 mov rcx, [xBP + 010h] ; fResumeVM
1071 mov rdx, [xBP + 018h] ; pCtx
1072 mov r8, [xBP + 020h] ; pVMCSCache
1073 mov r9, [xBP + 028h] ; pVM
1074 call r11
1075
1076 leave
1077 ret
1078
1079ALIGNCODE(8)
1080.guest_fpu_state_active:
1081 ; Save the host XMM registers.
1082 movdqa [rsp + 040h + 000h], xmm6
1083 movdqa [rsp + 040h + 010h], xmm7
1084 movdqa [rsp + 040h + 020h], xmm8
1085 movdqa [rsp + 040h + 030h], xmm9
1086 movdqa [rsp + 040h + 040h], xmm10
1087 movdqa [rsp + 040h + 050h], xmm11
1088 movdqa [rsp + 040h + 060h], xmm12
1089 movdqa [rsp + 040h + 070h], xmm13
1090 movdqa [rsp + 040h + 080h], xmm14
1091 movdqa [rsp + 040h + 090h], xmm15
1092
1093 ; Load the full guest XMM register state.
1094 mov r10, [xBP + 018h] ; pCtx
1095 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1096 movdqa xmm0, [r10 + 000h]
1097 movdqa xmm1, [r10 + 010h]
1098 movdqa xmm2, [r10 + 020h]
1099 movdqa xmm3, [r10 + 030h]
1100 movdqa xmm4, [r10 + 040h]
1101 movdqa xmm5, [r10 + 050h]
1102 movdqa xmm6, [r10 + 060h]
1103 movdqa xmm7, [r10 + 070h]
1104 movdqa xmm8, [r10 + 080h]
1105 movdqa xmm9, [r10 + 090h]
1106 movdqa xmm10, [r10 + 0a0h]
1107 movdqa xmm11, [r10 + 0b0h]
1108 movdqa xmm12, [r10 + 0c0h]
1109 movdqa xmm13, [r10 + 0d0h]
1110 movdqa xmm14, [r10 + 0e0h]
1111 movdqa xmm15, [r10 + 0f0h]
1112
1113 ; Make the call (same as in the other case).
1114 mov r11, [xBP + 38h] ; pfnStartVM
1115 mov r10, [xBP + 30h] ; pVCpu
1116 mov [xSP + 020h], r10
1117 mov rcx, [xBP + 010h] ; fResumeVM
1118 mov rdx, [xBP + 018h] ; pCtx
1119 mov r8, [xBP + 020h] ; pVMCSCache
1120 mov r9, [xBP + 028h] ; pVM
1121 call r11
1122
1123 ; Save the guest XMM registers.
1124 mov r10, [xBP + 018h] ; pCtx
1125 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1126 movdqa [r10 + 000h], xmm0
1127 movdqa [r10 + 010h], xmm1
1128 movdqa [r10 + 020h], xmm2
1129 movdqa [r10 + 030h], xmm3
1130 movdqa [r10 + 040h], xmm4
1131 movdqa [r10 + 050h], xmm5
1132 movdqa [r10 + 060h], xmm6
1133 movdqa [r10 + 070h], xmm7
1134 movdqa [r10 + 080h], xmm8
1135 movdqa [r10 + 090h], xmm9
1136 movdqa [r10 + 0a0h], xmm10
1137 movdqa [r10 + 0b0h], xmm11
1138 movdqa [r10 + 0c0h], xmm12
1139 movdqa [r10 + 0d0h], xmm13
1140 movdqa [r10 + 0e0h], xmm14
1141 movdqa [r10 + 0f0h], xmm15
1142
1143 ; Load the host XMM registers.
1144 movdqa xmm6, [rsp + 040h + 000h]
1145 movdqa xmm7, [rsp + 040h + 010h]
1146 movdqa xmm8, [rsp + 040h + 020h]
1147 movdqa xmm9, [rsp + 040h + 030h]
1148 movdqa xmm10, [rsp + 040h + 040h]
1149 movdqa xmm11, [rsp + 040h + 050h]
1150 movdqa xmm12, [rsp + 040h + 060h]
1151 movdqa xmm13, [rsp + 040h + 070h]
1152 movdqa xmm14, [rsp + 040h + 080h]
1153 movdqa xmm15, [rsp + 040h + 090h]
1154 leave
1155 ret
1156ENDPROC hwaccmR0VMXStartVMWrapXMM
1157
1158;;
1159; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1160; loads the guest ones when necessary.
1161;
1162; @cproto DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
1163;
1164; @returns eax
1165;
1166; @param pVMCBHostPhys msc:rcx
1167; @param pVMCBPhys msc:rdx
1168; @param pCtx msc:r8
1169; @param pVM msc:r9
1170; @param pVCpu msc:[rbp+30h]
1171; @param pfnVMRun msc:[rbp+38h]
1172;
1173; @remarks This is essentially the same code as hwaccmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1174;
1175; ASSUMING 64-bit and Windows for now.
1176ALIGNCODE(16)
1177BEGINPROC hwaccmR0SVMRunWrapXMM
1178 push xBP
1179 mov xBP, xSP
1180 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1181
1182 ; spill input parameters.
1183 mov [xBP + 010h], rcx ; pVMCBHostPhys
1184 mov [xBP + 018h], rdx ; pVMCBPhys
1185 mov [xBP + 020h], r8 ; pCtx
1186 mov [xBP + 028h], r9 ; pVM
1187
1188 ; Ask CPUM whether we've started using the FPU yet.
1189 mov rcx, [xBP + 30h] ; pVCpu
1190 call NAME(CPUMIsGuestFPUStateActive)
1191 test al, al
1192 jnz .guest_fpu_state_active
1193
1194 ; No need to mess with XMM registers just call the start routine and return.
1195 mov r11, [xBP + 38h] ; pfnVMRun
1196 mov r10, [xBP + 30h] ; pVCpu
1197 mov [xSP + 020h], r10
1198 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1199 mov rdx, [xBP + 018h] ; pVMCBPhys
1200 mov r8, [xBP + 020h] ; pCtx
1201 mov r9, [xBP + 028h] ; pVM
1202 call r11
1203
1204 leave
1205 ret
1206
1207ALIGNCODE(8)
1208.guest_fpu_state_active:
1209 ; Save the host XMM registers.
1210 movdqa [rsp + 040h + 000h], xmm6
1211 movdqa [rsp + 040h + 010h], xmm7
1212 movdqa [rsp + 040h + 020h], xmm8
1213 movdqa [rsp + 040h + 030h], xmm9
1214 movdqa [rsp + 040h + 040h], xmm10
1215 movdqa [rsp + 040h + 050h], xmm11
1216 movdqa [rsp + 040h + 060h], xmm12
1217 movdqa [rsp + 040h + 070h], xmm13
1218 movdqa [rsp + 040h + 080h], xmm14
1219 movdqa [rsp + 040h + 090h], xmm15
1220
1221 ; Load the full guest XMM register state.
1222 mov r10, [xBP + 020h] ; pCtx
1223 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1224 movdqa xmm0, [r10 + 000h]
1225 movdqa xmm1, [r10 + 010h]
1226 movdqa xmm2, [r10 + 020h]
1227 movdqa xmm3, [r10 + 030h]
1228 movdqa xmm4, [r10 + 040h]
1229 movdqa xmm5, [r10 + 050h]
1230 movdqa xmm6, [r10 + 060h]
1231 movdqa xmm7, [r10 + 070h]
1232 movdqa xmm8, [r10 + 080h]
1233 movdqa xmm9, [r10 + 090h]
1234 movdqa xmm10, [r10 + 0a0h]
1235 movdqa xmm11, [r10 + 0b0h]
1236 movdqa xmm12, [r10 + 0c0h]
1237 movdqa xmm13, [r10 + 0d0h]
1238 movdqa xmm14, [r10 + 0e0h]
1239 movdqa xmm15, [r10 + 0f0h]
1240
1241 ; Make the call (same as in the other case).
1242 mov r11, [xBP + 38h] ; pfnVMRun
1243 mov r10, [xBP + 30h] ; pVCpu
1244 mov [xSP + 020h], r10
1245 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1246 mov rdx, [xBP + 018h] ; pVMCBPhys
1247 mov r8, [xBP + 020h] ; pCtx
1248 mov r9, [xBP + 028h] ; pVM
1249 call r11
1250
1251 ; Save the guest XMM registers.
1252 mov r10, [xBP + 020h] ; pCtx
1253 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1254 movdqa [r10 + 000h], xmm0
1255 movdqa [r10 + 010h], xmm1
1256 movdqa [r10 + 020h], xmm2
1257 movdqa [r10 + 030h], xmm3
1258 movdqa [r10 + 040h], xmm4
1259 movdqa [r10 + 050h], xmm5
1260 movdqa [r10 + 060h], xmm6
1261 movdqa [r10 + 070h], xmm7
1262 movdqa [r10 + 080h], xmm8
1263 movdqa [r10 + 090h], xmm9
1264 movdqa [r10 + 0a0h], xmm10
1265 movdqa [r10 + 0b0h], xmm11
1266 movdqa [r10 + 0c0h], xmm12
1267 movdqa [r10 + 0d0h], xmm13
1268 movdqa [r10 + 0e0h], xmm14
1269 movdqa [r10 + 0f0h], xmm15
1270
1271 ; Load the host XMM registers.
1272 movdqa xmm6, [rsp + 040h + 000h]
1273 movdqa xmm7, [rsp + 040h + 010h]
1274 movdqa xmm8, [rsp + 040h + 020h]
1275 movdqa xmm9, [rsp + 040h + 030h]
1276 movdqa xmm10, [rsp + 040h + 040h]
1277 movdqa xmm11, [rsp + 040h + 050h]
1278 movdqa xmm12, [rsp + 040h + 060h]
1279 movdqa xmm13, [rsp + 040h + 070h]
1280 movdqa xmm14, [rsp + 040h + 080h]
1281 movdqa xmm15, [rsp + 040h + 090h]
1282 leave
1283 ret
1284ENDPROC hwaccmR0SVMRunWrapXMM
1285
1286%endif ; VBOX_WITH_KERNEL_USING_XMM
1287
1288;
1289; The default setup of the StartVM routines.
1290;
1291%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1292 %define MY_NAME(name) name %+ _32
1293%else
1294 %define MY_NAME(name) name
1295%endif
1296%ifdef RT_ARCH_AMD64
1297 %define MYPUSHAD MYPUSHAD64
1298 %define MYPOPAD MYPOPAD64
1299 %define MYPUSHSEGS MYPUSHSEGS64
1300 %define MYPOPSEGS MYPOPSEGS64
1301%else
1302 %define MYPUSHAD MYPUSHAD32
1303 %define MYPOPAD MYPOPAD32
1304 %define MYPUSHSEGS MYPUSHSEGS32
1305 %define MYPOPSEGS MYPOPSEGS32
1306%endif
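;
; With these defaults in place, the include below emits the StartVM/VMRun
; bodies once. In the hybrid 32-bit kernel case MY_NAME decorates the symbols,
; and the file is included a second time at the very end of this file with a
; 64-bit setup, e.g. (illustrative expansion):
;   MY_NAME(VMXR0StartVM32)  ->  VMXR0StartVM32_32   ; first include, 32-bit host code
;   MY_NAME(VMXR0StartVM32)  ->  VMXR0StartVM32_64   ; second include, 64-bit host code
; The wrapper procedures further down then dispatch between the two variants
; based on g_fVMXIs64bitHost.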
1307
1308%include "HWACCMR0Mixed.mac"
1309
1310
1311%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1312 ;
1313 ; Write the wrapper procedures.
1314 ;
1315 ; These routines are probably being too paranoid about selector
1316 ; restoring, but better safe than sorry...
1317 ;
1318
1319; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1320ALIGNCODE(16)
1321BEGINPROC VMXR0StartVM32
1322 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1323 je near NAME(VMXR0StartVM32_32)
1324
1325 ; stack frame
1326 push esi
1327 push edi
1328 push fs
1329 push gs
1330
1331 ; jmp far .thunk64
1332 db 0xea
1333 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1334
1335ALIGNCODE(16)
1336BITS 64
1337.thunk64:
1338 sub esp, 20h
1339 mov edi, [rsp + 20h + 14h] ; fResume
1340 mov esi, [rsp + 20h + 18h] ; pCtx
1341 mov edx, [rsp + 20h + 1Ch] ; pCache
1342 call NAME(VMXR0StartVM32_64)
1343 add esp, 20h
1344 jmp far [.fpthunk32 wrt rip]
1345.fpthunk32: ; 16:32 Pointer to .thunk32.
1346 dd .thunk32, NAME(SUPR0AbsKernelCS)
1347
1348BITS 32
1349ALIGNCODE(16)
1350.thunk32:
1351 pop gs
1352 pop fs
1353 pop edi
1354 pop esi
1355 ret
1356ENDPROC VMXR0StartVM32
1357
1358
1359; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1360ALIGNCODE(16)
1361BEGINPROC VMXR0StartVM64
1362 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1363 je .not_in_long_mode
1364
1365 ; stack frame
1366 push esi
1367 push edi
1368 push fs
1369 push gs
1370
1371 ; jmp far .thunk64
1372 db 0xea
1373 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1374
1375ALIGNCODE(16)
1376BITS 64
1377.thunk64:
1378 sub esp, 20h
1379 mov edi, [rsp + 20h + 14h] ; fResume
1380 mov esi, [rsp + 20h + 18h] ; pCtx
1381 mov edx, [rsp + 20h + 1Ch] ; pCache
1382 call NAME(VMXR0StartVM64_64)
1383 add esp, 20h
1384 jmp far [.fpthunk32 wrt rip]
1385.fpthunk32: ; 16:32 Pointer to .thunk32.
1386 dd .thunk32, NAME(SUPR0AbsKernelCS)
1387
1388BITS 32
1389ALIGNCODE(16)
1390.thunk32:
1391 pop gs
1392 pop fs
1393 pop edi
1394 pop esi
1395 ret
1396
1397.not_in_long_mode:
1398 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1399 ret
1400ENDPROC VMXR0StartVM64
1401
1402;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1403ALIGNCODE(16)
1404BEGINPROC SVMR0VMRun
1405 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1406 je near NAME(SVMR0VMRun_32)
1407
1408 ; stack frame
1409 push esi
1410 push edi
1411 push fs
1412 push gs
1413
1414 ; jmp far .thunk64
1415 db 0xea
1416 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1417
1418ALIGNCODE(16)
1419BITS 64
1420.thunk64:
1421 sub esp, 20h
1422 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1423 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1424 mov edx, [rsp + 20h + 24h] ; pCtx
1425 call NAME(SVMR0VMRun_64)
1426 add esp, 20h
1427 jmp far [.fpthunk32 wrt rip]
1428.fpthunk32: ; 16:32 Pointer to .thunk32.
1429 dd .thunk32, NAME(SUPR0AbsKernelCS)
1430
1431BITS 32
1432ALIGNCODE(16)
1433.thunk32:
1434 pop gs
1435 pop fs
1436 pop edi
1437 pop esi
1438 ret
1439ENDPROC SVMR0VMRun
1440
1441
1442; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1443ALIGNCODE(16)
1444BEGINPROC SVMR0VMRun64
1445 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1446 je .not_in_long_mode
1447
1448 ; stack frame
1449 push esi
1450 push edi
1451 push fs
1452 push gs
1453
1454 ; jmp far .thunk64
1455 db 0xea
1456 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1457
1458ALIGNCODE(16)
1459BITS 64
1460.thunk64:
1461 sub esp, 20h
1462 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1463 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1464 mov edx, [rsp + 20h + 24h] ; pCtx
1465 call NAME(SVMR0VMRun64_64)
1466 add esp, 20h
1467 jmp far [.fpthunk32 wrt rip]
1468.fpthunk32: ; 16:32 Pointer to .thunk32.
1469 dd .thunk32, NAME(SUPR0AbsKernelCS)
1470
1471BITS 32
1472ALIGNCODE(16)
1473.thunk32:
1474 pop gs
1475 pop fs
1476 pop edi
1477 pop esi
1478 ret
1479
1480.not_in_long_mode:
1481 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1482 ret
1483ENDPROC SVMR0VMRun64
1484
1485 ;
1486 ; Do it a second time pretending we're a 64-bit host.
1487 ;
1488 ; This *HAS* to be done at the very end of the file to avoid restoring
1489 ; macros. So, add new code *BEFORE* this mess.
1490 ;
1491 BITS 64
1492 %undef RT_ARCH_X86
1493 %define RT_ARCH_AMD64
1494 %undef ASM_CALL64_MSC
1495 %define ASM_CALL64_GCC
1496 %define xS 8
1497 %define xSP rsp
1498 %define xBP rbp
1499 %define xAX rax
1500 %define xBX rbx
1501 %define xCX rcx
1502 %define xDX rdx
1503 %define xDI rdi
1504 %define xSI rsi
1505 %define MY_NAME(name) name %+ _64
1506 %define MYPUSHAD MYPUSHAD64
1507 %define MYPOPAD MYPOPAD64
1508 %define MYPUSHSEGS MYPUSHSEGS64
1509 %define MYPOPSEGS MYPOPSEGS64
1510
1511 %include "HWACCMR0Mixed.mac"
1512%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL