source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 15198

Last change on this file since 15198 was 15198, checked in by vboxsync, 16 years ago

HWACCMR0A.asm: retf -> far jmps for 64->32 thunking on darwin.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 31.0 KB
; $Id: HWACCMR0A.asm 15198 2008-12-09 19:35:36Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;*******************************************************************************
;* Defined Constants And Macros                                                *
;*******************************************************************************
%ifdef RT_ARCH_AMD64
 %define MAYBE_64_BIT
%endif
%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
 %define MAYBE_64_BIT
%endif


;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

%ifdef MAYBE_64_BIT
 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    rdmsr
    push    rdx
    push    rax
    mov     edx, dword [xSI + %2 + 4]
    mov     eax, dword [xSI + %2]
    wrmsr
 %endmacro

 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [xSI + %2], eax
    mov     dword [xSI + %2 + 4], edx
    pop     rax
    pop     rdx
    wrmsr
 %endmacro

 ; Load the corresponding host MSR (trashes rdx & rcx)
 %macro LOADHOSTMSR 1
    mov     rcx, %1
    pop     rax
    pop     rdx
    wrmsr
 %endmacro
%endif
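
; Usage note: LOADGUESTMSR pushes the host MSR value which LOADHOSTMSR or
; LOADHOSTMSREX later pops, so the restores must be done in the reverse
; order of the saves. A sketch (MSR/field names for illustration only):
;       LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR    ; save host, load guest
;       ; ... guest execution ...
;       LOADHOSTMSR  MSR_K8_LSTAR                      ; pop & restore host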

%ifdef ASM_CALL64_GCC
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
 %endmacro
 %macro MYPOPAD64 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro

%else ; ASM_CALL64_MSC
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
 %endmacro
 %macro MYPOPAD64 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro
%endif

; trashes rax, rdx & rcx
%macro MYPUSHSEGS64 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
    push    fs

    ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
    push    gs
%endmacro

; trashes rax, rdx & rcx
%macro MYPOPSEGS64 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

    pop     fs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
%endmacro
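
; (Rationale for the stepping warnings in MYPOPSEGS64: between the 'pop gs' /
; 'pop fs' and the wrmsr that follows, the hidden base registers are only
; half restored; a debug trap taken there enters the kernel, which juggles
; the GS base via swapgs and can leave a stale base behind.)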

%macro MYPUSHAD32 0
    pushad
%endmacro
%macro MYPOPAD32 0
    popad
%endmacro

%macro MYPUSHSEGS32 2
    push    ds
    push    es
    push    fs
    push    gs
%endmacro
%macro MYPOPSEGS32 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
%endmacro


;*******************************************************************************
;* External Symbols                                                            *
;*******************************************************************************
%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;* Global Variables                                                            *
;*******************************************************************************
%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fVMXIs64bitHost
    dd      NAME(SUPR0AbsIs64bit)
%endif
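
;;
; Pattern overview for the hybrid darwin case: each wrapper below tests
; g_fVMXIs64bitHost and, when set, enters a BITS 64 .sixtyfourbit_mode stub
; via a hand-assembled far jmp (db 0xea + 16:32 target in the 64-bit kernel
; CS); the stub returns with 'jmp far [.fpret wrt rip]', where .fpret holds
; a 16:32 far pointer back to the 32-bit .the_end label.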


BEGINCODE


;/**
; * Executes VMWRITE, 64-bit value.
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
; * @param   u64Data    x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    lea     edx, [esp + 8]              ; &u64Data
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
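    ; A 64-bit field is accessed as two 32-bit halves: setting bit 0 of the
    ; (even) field encoding selects the high dword, hence the 'inc ecx'
    ; before the second vmwrite. VMXReadVMCS64 uses the same scheme.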
    vmwrite ecx, [edx]                  ; low dword
    jz      .done
    jc      .done
    inc     ecx
    xor     eax, eax
    vmwrite ecx, [edx + 4]              ; high dword
.done:
%endif ; RT_ARCH_X86
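    ; VMX failure convention: CF=1 (VMfailInvalid) means an invalid/absent
    ; current VMCS, ZF=1 (VMfailValid) means the instruction failed with a
    ; valid VMCS, e.g. an unsupported field; hence the jnc/jnz checks below.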
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmwrite rcx, [rdx]
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXWriteVMCS64


;/**
; * Executes VMREAD, 64-bit value
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmread  [rsi], rdi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  [rdx], rcx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; pData
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    vmread  [edx], ecx                  ; low dword
    jz      .done
    jc      .done
    inc     ecx
    xor     eax, eax
    vmread  [edx + 4], ecx              ; high dword
.done:
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmread  [rdx], rcx
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXReadVMCS64


;/**
; * Executes VMREAD, 32-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pu32Data   Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
BEGINPROC VMXReadVMCS32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmread  r10, rdi
    mov     [rsi], r10d
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  r10, rcx
    mov     [rdx], r10d
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; pu32Data
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    xor     eax, eax
    vmread  [edx], ecx
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmread  r10, rcx
    mov     [rdx], r10d
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXReadVMCS32


;/**
; * Executes VMWRITE, 32-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   u32Data    VM field value
; */
;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
BEGINPROC VMXWriteVMCS32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    and     esi, 0ffffffffh
    xor     rax, rax
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    and     edx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; u32Data
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    xor     eax, eax
    vmwrite ecx, edx
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmwrite rcx, rdx
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXWriteVMCS32


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else ; RT_ARCH_X86
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    xor     eax, eax
    vmxon   [esp + 4]
%endif ; RT_ARCH_X86
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]              ; &HCPhysVMXOn.
    and     edx, 0ffffffffh
    xor     eax, eax
    vmxon   [rdx]
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    vmxoff
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    vmxoff
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else ; RT_ARCH_X86
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    xor     eax, eax
    vmclear [esp + 4]
%endif ; RT_ARCH_X86
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]              ; &HCPhysVMCS
    and     edx, 0ffffffffh
    xor     eax, eax
    vmclear [rdx]
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]              ; &HCPhysVMCS
    and     edx, 0ffffffffh
    xor     eax, eax
    vmptrld [rdx]
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXActivateVMCS


;/**
; * Executes VMPTRST
; *
; * @returns VBox status code
; * @param   pVMCS   x86: [esp + 04h]  gcc: rdi  msc: rcx   Address that will receive the current VMCS pointer
; */
;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivateVMCS
%ifdef RT_OS_OS2
    mov     eax, VERR_NOT_SUPPORTED
    ret
%else
 %ifdef RT_ARCH_AMD64
  %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
  %else
    vmptrst qword [rcx]
  %endif
 %else
  %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
  %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    vmptrst qword [esp+04h]
 %endif
    xor     eax, eax
.the_end:
    ret

 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     edx, [rsp + 4]              ; pVMCS - load the pointer argument, then store through it.
    vmptrst qword [rdx]
    xor     eax, eax
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
%endif
ENDPROC VMXGetActivateVMCS

;/**
; * Invalidate a page using invept
; @param   enmFlush     msc:ecx  gcc:edi  x86:[esp+04]  Type of flush
; @param   pDescriptor  msc:edx  gcc:esi  x86:[esp+08]  Descriptor pointer
; */
;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
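; Note: invept/invvpid are emitted as raw bytes (DB ...) below, presumably
; because the assemblers in use lacked these mnemonics at the time; the
; intended instruction is kept as a comment next to each encoding.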
BEGINPROC VMXR0InvEPT
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
;   invept  rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x80, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
;   invept  rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
 %endif
%else
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
;   invept  eax, qword [ecx]
    DB      0x66, 0x0F, 0x38, 0x80, 0x1
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]              ; enmFlush
    mov     edx, [rsp + 8]              ; pDescriptor
    xor     eax, eax
;   invept  rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXR0InvEPT


;/**
; * Invalidate a page using invvpid
; @param   enmFlush     msc:ecx  gcc:edi  x86:[esp+04]  Type of flush
; @param   pDescriptor  msc:edx  gcc:esi  x86:[esp+08]  Descriptor pointer
; */
;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvVPID
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
;   invvpid rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x81, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
;   invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
 %endif
%else
 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
;   invvpid eax, qword [ecx]
    DB      0x66, 0x0F, 0x38, 0x81, 0x1
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]              ; enmFlush
    mov     edx, [rsp + 8]              ; pDescriptor
    xor     eax, eax
;   invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
ENDPROC VMXR0InvVPID


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param   pPageGC  msc:rcx  gcc:rdi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID    msc:rdx  gcc:rsi  x86:[esp+0C]  Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%else ; GC_ARCH_BITS != 64
;;
; Executes INVLPGA
;
; @param   pPageGC  msc:ecx  gcc:edi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID    msc:edx  gcc:esi  x86:[esp+08]  Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%endif ; GC_ARCH_BITS != 64

%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL

;/**
; * Gets 64-bit GDTR and IDTR on darwin.
; * @param   pGdtr   Where to store the 64-bit GDTR.
; * @param   pIdtr   Where to store the 64-bit IDTR.
; */
;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
BEGINPROC hwaccmR0Get64bitGDTRandIDTR
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.the_end:
    ret

ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]              ; pGdtr
    mov     edx, [rsp + 8]              ; pIdtr
    sgdt    [rcx]
    sidt    [rdx]
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
ENDPROC hwaccmR0Get64bitGDTRandIDTR


;/**
; * Gets 64-bit CR3 on darwin.
; * @returns CR3
; */
;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
BEGINPROC hwaccmR0Get64bitCR3
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.the_end:
    ret

ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    mov     rax, cr3
    mov     rdx, rax
    shr     rdx, 32
    jmp far [.fpret wrt rip]
.fpret: ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
ENDPROC hwaccmR0Get64bitCR3

%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL



;
; The default setup of the StartVM routines.
;
%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
 %define MY_NAME(name)  name %+ _32
%else
 %define MY_NAME(name)  name
%endif
%ifdef RT_ARCH_AMD64
 %define MYPUSHAD       MYPUSHAD64
 %define MYPOPAD        MYPOPAD64
 %define MYPUSHSEGS     MYPUSHSEGS64
 %define MYPOPSEGS      MYPOPSEGS64
%else
 %define MYPUSHAD       MYPUSHAD32
 %define MYPOPAD        MYPOPAD32
 %define MYPUSHSEGS     MYPUSHSEGS32
 %define MYPOPSEGS      MYPOPSEGS32
%endif

%include "HWACCMR0Mixed.mac"
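
; HWACCMR0Mixed.mac acts as a template: the include above instantiates the
; StartVM/VMRun bodies under MY_NAME (plain names, or *_32 suffixes on hybrid
; darwin kernels), while the second include at the very end of this file
; re-instantiates them as *_64 variants for the wrappers below to thunk into
; when g_fVMXIs64bitHost is set.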


%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
 ;
 ; Write the wrapper procedures.
 ;
 ; These routines are probably being too paranoid about selector
 ; restoring, but better safe than sorry...
 ;

; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
BEGINPROC VMXR0StartVM32
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je near NAME(VMXR0StartVM32_32)

    ; stack frame.
    push    ebp
    mov     ebp, esp
    and     esp, 0fffffff0h
    push    esi
    push    edi
    push    ebx
    push    ds
    push    es
    push    fs
    push    gs
    push    ss

    ; retf frame (64 -> 32).
    push    0
    push    cs
    push    0
    push    .thunk32
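    ; (The four pushes above build a zero-padded CS:EIP far-return frame that
    ; the retf at the end of .thunk64 consumes to drop back into 32-bit code
    ; at .thunk32; the other wrappers below build the same frame.)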

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
BITS 64
.thunk64:
    and     esp, 0ffffffffh
    and     ebp, 0ffffffffh
    mov     edi, [rbp + 8]              ; fResume
    mov     esi, [rbp + 12]             ; pCtx
    sub     rsp, 20h
    call    NAME(VMXR0StartVM32_64)
    add     rsp, 20h
    retf
BITS 32
.thunk32:
    pop     ss
    pop     gs
    pop     fs
    pop     es
    pop     ds
    pop     ebx
    pop     edi
    pop     esi
    leave
    ret
ENDPROC VMXR0StartVM32

; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx)
BEGINPROC VMXR0StartVM64
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jne     .longmode
    mov     eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
    ret

.longmode:
    ; stack frame.
    push    ebp
    mov     ebp, esp
    and     esp, 0fffffff0h
    push    esi
    push    edi
    push    ebx
    push    ds
    push    es
    push    fs
    push    gs
    push    ss

    ; retf frame (64 -> 32).
    push    0
    push    cs
    push    0
    push    .thunk32

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
BITS 64
.thunk64:
    and     esp, 0ffffffffh
    and     ebp, 0ffffffffh
    mov     edi, [rbp + 8]              ; fResume
    mov     esi, [rbp + 12]             ; pCtx
    sub     rsp, 20h
    call    NAME(VMXR0StartVM64_64)
    add     rsp, 20h
    retf
BITS 32
.thunk32:
    pop     ss
    pop     gs
    pop     fs
    pop     es
    pop     ds
    pop     ebx
    pop     edi
    pop     esi
    leave
    ret
ENDPROC VMXR0StartVM64

;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
BEGINPROC SVMR0VMRun
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je near NAME(SVMR0VMRun_32)

    ; stack frame.
    push    ebp
    mov     ebp, esp
    and     esp, 0fffffff0h
    push    esi
    push    edi
    push    ebx
    push    ds
    push    es
    push    fs
    push    gs
    push    ss

    ; retf frame (64 -> 32).
    push    0
    push    cs
    push    0
    push    .thunk32

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
BITS 64
.thunk64:
    and     esp, 0ffffffffh
    and     ebp, 0ffffffffh
    mov     rdi, [rbp + 8]              ; pVMCBHostPhys
    mov     rsi, [rbp + 16]             ; pVMCBPhys
    mov     edx, [rbp + 24]             ; pCtx
    sub     rsp, 20h
    call    NAME(SVMR0VMRun_64)
    add     rsp, 20h
    retf
BITS 32
.thunk32:
    pop     ss
    pop     gs
    pop     fs
    pop     es
    pop     ds
    pop     ebx
    pop     edi
    pop     esi
    leave
    ret
ENDPROC SVMR0VMRun

; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
BEGINPROC SVMR0VMRun64
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jne     .longmode
    mov     eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
    ret

.longmode:
    ; stack frame.
    push    ebp
    mov     ebp, esp
    and     esp, 0fffffff0h
    push    esi
    push    edi
    push    ebx
    push    ds
    push    es
    push    fs
    push    gs
    push    ss

    ; retf frame (64 -> 32).
    push    0
    push    cs
    push    0
    push    .thunk32

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
BITS 64
.thunk64:
    and     esp, 0ffffffffh
    and     ebp, 0ffffffffh
    mov     rdi, [rbp + 8]              ; pVMCBHostPhys
    mov     rsi, [rbp + 16]             ; pVMCBPhys
    mov     edx, [rbp + 24]             ; pCtx
    sub     rsp, 20h
    call    NAME(SVMR0VMRun64_64)
    add     rsp, 20h
    retf
BITS 32
.thunk32:
    pop     ss
    pop     gs
    pop     fs
    pop     es
    pop     ds
    pop     ebx
    pop     edi
    pop     esi
    leave
    ret
ENDPROC SVMR0VMRun64

 ;
 ; Do it a second time pretending we're a 64-bit host.
 ;
 ; This *HAS* to be done at the very end of the file to avoid restoring
 ; macros. So, add new code *BEFORE* this mess.
 ;
 BITS 64
 %undef  RT_ARCH_X86
 %define RT_ARCH_AMD64
 %undef  ASM_CALL64_MSC
 %define ASM_CALL64_GCC
 %define xS             8
 %define xSP            rsp
 %define xBP            rbp
 %define xAX            rax
 %define xBX            rbx
 %define xCX            rcx
 %define xDX            rdx
 %define xDI            rdi
 %define xSI            rsi
 %define MY_NAME(name)  name %+ _64
 %define MYPUSHAD       MYPUSHAD64
 %define MYPOPAD        MYPOPAD64
 %define MYPUSHSEGS     MYPUSHSEGS64
 %define MYPOPSEGS      MYPOPSEGS64

 %include "HWACCMR0Mixed.mac"
%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL