VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 14853

Last change on this file since 14853 was 14804, checked in by vboxsync, 16 years ago

#3202: Got long mode darwin host running.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 31.5 KB
Line 
1; $Id: HWACCMR0A.asm 14804 2008-11-29 03:17:08Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
; OS/2 builds: the assembler's OMF output cannot encode the VMX/SVM opcodes
; below, so every one of them is stubbed out as an int3 breakpoint. Any
; attempt to actually run these paths on OS/2 will trap immediately.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37 %define vmsave int3
38 %define vmload int3
39 %define vmrun int3
40 %define clgi int3
41 %define stgi int3
42 %macro invlpga 2,
43 int3
44 %endmacro
45%endif
46
47;*******************************************************************************
48;* Defined Constants And Macros *
49;*******************************************************************************
50%ifdef RT_ARCH_AMD64
51 %define MAYBE_64_BIT
52%endif
53%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
54 %define MAYBE_64_BIT
55%endif
56
57
58;; This is too risky wrt. stability, performance and correctness.
59;%define VBOX_WITH_DR6_EXPERIMENT 1
60
61;; @def MYPUSHAD
62; Macro generating an equivalent to pushad
63
64;; @def MYPOPAD
65; Macro generating an equivalent to popad
66
67;; @def MYPUSHSEGS
68; Macro saving all segment registers on the stack.
69; @param 1 full width register name
70; @param 2 16-bit register name for \a 1.
71
72;; @def MYPOPSEGS
73; Macro restoring all segment registers on the stack
74; @param 1 full width register name
75; @param 2 16-bit register name for \a 1.
76
77%ifdef MAYBE_64_BIT
78 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
; %1 = MSR index, %2 = byte offset of the guest value in the structure xSI
; points at. The host value read by rdmsr is left on the stack (rdx then rax)
; for the matching LOADHOSTMSR/LOADHOSTMSREX pop below.
79 %macro LOADGUESTMSR 2
80 mov rcx, %1
81 rdmsr
82 push rdx
83 push rax
84 mov edx, dword [xSI + %2 + 4]
85 mov eax, dword [xSI + %2]
86 wrmsr
87 %endmacro
88
89 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
90 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
; Stores the current (guest) MSR value back into [xSI + %2], then restores the
; host value previously pushed by LOADGUESTMSR. Pop order (rax, rdx) mirrors
; the push order (rdx, rax) above.
91 %macro LOADHOSTMSREX 2
92 mov rcx, %1
93 rdmsr
94 mov dword [xSI + %2], eax
95 mov dword [xSI + %2 + 4], edx
96 pop rax
97 pop rdx
98 wrmsr
99 %endmacro
100
101 ; Load the corresponding host MSR (trashes rdx & rcx)
; Like LOADHOSTMSREX but discards the guest value instead of saving it.
102 %macro LOADHOSTMSR 1
103 mov rcx, %1
104 pop rax
105 pop rdx
106 wrmsr
107 %endmacro
108%endif
109
110%ifdef ASM_CALL64_GCC
; SysV AMD64: callee-saved integer registers are rbx, rbp, r12-r15; rbp is
; managed by the caller's frame code, so only the remaining five are saved.
111 %macro MYPUSHAD64 0
112 push r15
113 push r14
114 push r13
115 push r12
116 push rbx
117 %endmacro
118 %macro MYPOPAD64 0
119 pop rbx
120 pop r12
121 pop r13
122 pop r14
123 pop r15
124 %endmacro
125
126%else ; ASM_CALL64_MSC
; Microsoft x64 additionally treats rsi and rdi as callee-saved, so they are
; pushed/popped here on top of the SysV set. Pops are the exact reverse of
; the pushes.
127 %macro MYPUSHAD64 0
128 push r15
129 push r14
130 push r13
131 push r12
132 push rbx
133 push rsi
134 push rdi
135 %endmacro
136 %macro MYPOPAD64 0
137 pop rdi
138 pop rsi
139 pop rbx
140 pop r12
141 pop r13
142 pop r14
143 pop r15
144 %endmacro
145%endif
146
147; trashes rax, rdx & rcx
; Saves es/ds selectors plus fs/gs selectors AND their hidden base MSRs
; (MSR_K8_FS_BASE / MSR_K8_GS_BASE). %1 = full-width scratch reg used for the
; pushes, %2 = its 16-bit alias for the selector moves.
148%macro MYPUSHSEGS64 2
149 mov %2, es
150 push %1
151 mov %2, ds
152 push %1
153
154 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
155 mov ecx, MSR_K8_FS_BASE
156 rdmsr
157 push rdx
158 push rax
159 push fs
160
161 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
162 mov ecx, MSR_K8_GS_BASE
163 rdmsr
164 push rdx
165 push rax
166 push gs
167%endmacro
168
169; trashes rax, rdx & rcx
170%macro MYPOPSEGS64 2
171 ; Note: do not step through this code with a debugger!
; (Between 'pop gs' and the wrmsr the GS base is stale; a debugger trap in
; that window would run kernel code with a wrong GS base.)
172 pop gs
173 pop rax
174 pop rdx
175 mov ecx, MSR_K8_GS_BASE
176 wrmsr
177
178 pop fs
179 pop rax
180 pop rdx
181 mov ecx, MSR_K8_FS_BASE
182 wrmsr
183 ; Now it's safe to step again
184
185 pop %1
186 mov ds, %2
187 pop %1
188 mov es, %2
189%endmacro
190
; 32-bit variants: plain pushad/popad and straight selector pushes suffice,
; no hidden-base MSRs to preserve.
191%macro MYPUSHAD32 0
192 pushad
193%endmacro
194%macro MYPOPAD32 0
195 popad
196%endmacro
197
198%macro MYPUSHSEGS32 2
199 push ds
200 push es
201 push fs
202 push gs
203%endmacro
204%macro MYPOPSEGS32 2
205 pop gs
206 pop fs
207 pop es
208 pop ds
209%endmacro
210
211
212;*******************************************************************************
213;* External Symbols *
214;*******************************************************************************
215%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
216extern NAME(SUPR0AbsIs64bit)
217extern NAME(SUPR0Abs64bitKernelCS)
218extern NAME(SUPR0Abs64bitKernelSS)
219extern NAME(SUPR0Abs64bitKernelDS)
220%endif
221
222
223;*******************************************************************************
224;* Global Variables *
225;*******************************************************************************
226%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
227BEGINDATA
228;;
229; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
230; needing to clobber a register. (This trick doesn't quite work for PE btw.
231; but that's not relevant atm.)
232GLOBALNAME g_fVMXIs64bitHost
233 dd NAME(SUPR0AbsIs64bit)
234%endif
235
236
237BEGINCODE
238
239
240;/**
241; * Executes VMWRITE, 64-bit value.
242; *
243; * @returns VBox status code
244; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
245; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
246; */
247BEGINPROC VMXWriteVMCS64
; VMX flag convention used throughout: CF=1 => VMfailInvalid (bad current-VMCS
; pointer) -> VERR_VMX_INVALID_VMCS_PTR; ZF=1 => VMfailValid (bad field) ->
; VERR_VMX_INVALID_VMCS_FIELD; both clear => success, eax already zeroed.
248%ifdef RT_ARCH_AMD64
249 %ifdef ASM_CALL64_GCC
250 and edi, 0ffffffffh
251 xor rax, rax
252 vmwrite rdi, rsi
253 %else
254 and ecx, 0ffffffffh
255 xor rax, rax
256 vmwrite rcx, rdx
257 %endif
258%else ; RT_ARCH_X86
259 mov ecx, [esp + 4] ; idxField
260 lea edx, [esp + 8] ; &u64Data
261 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
262 cmp byte [NAME(g_fVMXIs64bitHost)], 0
263 jne .longmode
264 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
; 32-bit host: a 64-bit field is written as two dword VMWRITEs (index and
; index+1); bail to the shared flag check on the first failure.
265 vmwrite ecx, [edx] ; low dword
266 jz .done
267 jc .done
268 inc ecx
269 xor eax, eax
270 vmwrite ecx, [edx + 4] ; high dword
271.done:
272%endif ; RT_ARCH_X86
273 jnc .valid_vmcs
274 mov eax, VERR_VMX_INVALID_VMCS_PTR
275 ret
276.valid_vmcs:
277 jnz .the_end
278 mov eax, VERR_VMX_INVALID_VMCS_FIELD
279.the_end:
280 ret
281
282%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; 32-bit kernel on a 64-bit-capable host (darwin hybrid): far-jump into the
; 64-bit kernel CS, do a single 64-bit VMWRITE there, and retf back to the
; 32-bit caller using the frame built below.
283.longmode:
284 ; Convert return frame into a retf frame 64-bit -> 32-bit
285 xor eax, eax
286 xchg eax, [esp]
287 push cs
288 push 0
289 push eax ; original return address.
290 ; jmp far .thunk64
291 db 0xea
292 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
293BITS 64
294.thunk64:
295 and edx, 0ffffffffh
296 and ecx, 0ffffffffh
297 xor eax, eax
298 vmwrite rcx, [rdx]
299 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
300 cmovz eax, r8d
301 mov r9d, VERR_VMX_INVALID_VMCS_PTR
302 cmovc eax, r9d
303 retf ; return to caller
304BITS 32
305%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
306ENDPROC VMXWriteVMCS64
307
308
309;/**
310; * Executes VMREAD, 64-bit value
311; *
312; * @returns VBox status code
313; * @param idxField VMCS index
314; * @param pData Ptr to store VM field value
315; */
316;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
317BEGINPROC VMXReadVMCS64
; Mirror image of VMXWriteVMCS64: CF=1 -> VERR_VMX_INVALID_VMCS_PTR,
; ZF=1 -> VERR_VMX_INVALID_VMCS_FIELD, otherwise VINF_SUCCESS (eax = 0).
318%ifdef RT_ARCH_AMD64
319 %ifdef ASM_CALL64_GCC
320 and edi, 0ffffffffh
321 xor rax, rax
322 vmread [rsi], rdi
323 %else
324 and ecx, 0ffffffffh
325 xor rax, rax
326 vmread [rdx], rcx
327 %endif
328%else ; RT_ARCH_X86
329 mov ecx, [esp + 4] ; idxField
330 mov edx, [esp + 8] ; pData
331 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
332 cmp byte [NAME(g_fVMXIs64bitHost)], 0
333 jne .longmode
334 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
; 32-bit host: read the 64-bit field as two dword VMREADs (index, index+1).
335 vmread [edx], ecx ; low dword
336 jz .done
337 jc .done
338 inc ecx
339 xor eax, eax
340 vmread [edx + 4], ecx ; high dword
341.done:
342%endif ; RT_ARCH_X86
343 jnc .valid_vmcs
344 mov eax, VERR_VMX_INVALID_VMCS_PTR
345 ret
346.valid_vmcs:
347 jnz .the_end
348 mov eax, VERR_VMX_INVALID_VMCS_FIELD
349.the_end:
350 ret
351
352%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: switch to 64-bit CS, do one 64-bit VMREAD, map flags
; to status codes with cmovz/cmovc, then retf back to 32-bit code.
353.longmode:
354 ; Convert return frame into a retf frame 64-bit -> 32-bit
355 xor eax, eax
356 xchg eax, [esp]
357 push cs
358 push 0
359 push eax ; original return address.
360 ; jmp far .thunk64
361 db 0xea
362 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
363BITS 64
364.thunk64:
365 and edx, 0ffffffffh
366 and ecx, 0ffffffffh
367 xor eax, eax
368 vmread [rdx], rcx
369 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
370 cmovz eax, r8d
371 mov r9d, VERR_VMX_INVALID_VMCS_PTR
372 cmovc eax, r9d
373 retf ; return to caller
374BITS 32
375%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
376ENDPROC VMXReadVMCS64
377
378
379;/**
380; * Executes VMREAD, 32-bit value.
381; *
382; * @returns VBox status code
383; * @param idxField VMCS index
384; * @param pu32Data Ptr to store VM field value
385; */
386;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
387BEGINPROC VMXReadVMCS32
; On 64-bit hosts VMREAD always moves a full 64-bit value, so it is read into
; scratch r10 and only the low dword is stored to *pu32Data.
388%ifdef RT_ARCH_AMD64
389 %ifdef ASM_CALL64_GCC
390 and edi, 0ffffffffh
391 xor rax, rax
392 vmread r10, rdi
393 mov [rsi], r10d
394 %else
395 and ecx, 0ffffffffh
396 xor rax, rax
397 vmread r10, rcx
398 mov [rdx], r10d
399 %endif
400%else ; RT_ARCH_X86
401 mov ecx, [esp + 4] ; idxField
402 mov edx, [esp + 8] ; pu32Data
403 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
404 cmp byte [NAME(g_fVMXIs64bitHost)], 0
405 jne .longmode
406 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
407 xor eax, eax
408 vmread [edx], ecx
409%endif ; RT_ARCH_X86
; CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field, else success (eax=0).
410 jnc .valid_vmcs
411 mov eax, VERR_VMX_INVALID_VMCS_PTR
412 ret
413.valid_vmcs:
414 jnz .the_end
415 mov eax, VERR_VMX_INVALID_VMCS_FIELD
416.the_end:
417 ret
418
419%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: perform the VMREAD from 64-bit mode and retf back.
420.longmode:
421 ; Convert return frame into a retf frame 64-bit -> 32-bit
422 xor eax, eax
423 xchg eax, [esp]
424 push cs
425 push 0
426 push eax ; original return address.
427 ; jmp far .thunk64
428 db 0xea
429 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
430BITS 64
431.thunk64:
432 and edx, 0ffffffffh
433 and ecx, 0ffffffffh
434 xor eax, eax
435 vmread r10, rcx
436 mov [rdx], r10d
437 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
438 cmovz eax, r8d
439 mov r9d, VERR_VMX_INVALID_VMCS_PTR
440 cmovc eax, r9d
441 retf ; return to caller
442BITS 32
443%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
444ENDPROC VMXReadVMCS32
445
446
447;/**
448; * Executes VMWRITE, 32-bit value.
449; *
450; * @returns VBox status code
451; * @param idxField VMCS index
452; * @param u32Data Ptr to store VM field value
453; */
454;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
455BEGINPROC VMXWriteVMCS32
; Same flag-to-status mapping as the other VMCS accessors: CF -> bad VMCS
; pointer, ZF -> bad field, otherwise success with eax pre-zeroed.
456%ifdef RT_ARCH_AMD64
457 %ifdef ASM_CALL64_GCC
458 and edi, 0ffffffffh
459 and esi, 0ffffffffh
460 xor rax, rax
461 vmwrite rdi, rsi
462 %else
463 and ecx, 0ffffffffh
464 and edx, 0ffffffffh
465 xor rax, rax
466 vmwrite rcx, rdx
467 %endif
468%else ; RT_ARCH_X86
469 mov ecx, [esp + 4] ; idxField
470 mov edx, [esp + 8] ; u32Data
471 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
472 cmp byte [NAME(g_fVMXIs64bitHost)], 0
473 jne .longmode
474 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
475 xor eax, eax
476 vmwrite ecx, edx
477%endif ; RT_ARCH_X86
478 jnc .valid_vmcs
479 mov eax, VERR_VMX_INVALID_VMCS_PTR
480 ret
481.valid_vmcs:
482 jnz .the_end
483 mov eax, VERR_VMX_INVALID_VMCS_FIELD
484.the_end:
485 ret
486
487%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: execute the VMWRITE from 64-bit mode and retf back.
488.longmode:
489 ; Convert return frame into a retf frame 64-bit -> 32-bit
490 xor eax, eax
491 xchg eax, [esp]
492 push cs
493 push 0
494 push eax ; original return address.
495 ; jmp far .thunk64
496 db 0xea
497 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
498BITS 64
499.thunk64:
500 and edx, 0ffffffffh
501 and ecx, 0ffffffffh
502 xor eax, eax
503 vmwrite rcx, rdx
504 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
505 cmovz eax, r8d
506 mov r9d, VERR_VMX_INVALID_VMCS_PTR
507 cmovc eax, r9d
508 retf ; return to caller
509BITS 32
510%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
511ENDPROC VMXWriteVMCS32
512
513
514;/**
515; * Executes VMXON
516; *
517; * @returns VBox status code
518; * @param HCPhysVMXOn Physical address of VMXON structure
519; */
520;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
521BEGINPROC VMXEnable
522%ifdef RT_ARCH_AMD64
; 64-bit: VMXON takes a memory operand, so the physical address argument is
; pushed and referenced via [rsp]; the matching add rsp,8 is in .the_end.
523 xor rax, rax
524 %ifdef ASM_CALL64_GCC
525 push rdi
526 %else
527 push rcx
528 %endif
529 vmxon [rsp]
530%else ; RT_ARCH_X86
531 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
532 cmp byte [NAME(g_fVMXIs64bitHost)], 0
533 jne .longmode
534 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
535 xor eax, eax
536 vmxon [esp + 4]
537%endif ; RT_ARCH_X86
; CF=1 -> VMXON pointer rejected; ZF=1 -> VMfailValid -> generic VMX error.
538 jnc .good
539 mov eax, VERR_VMX_INVALID_VMXON_PTR
540 jmp .the_end
541
542.good:
543 jnz .the_end
544 mov eax, VERR_VMX_GENERIC
545
546.the_end:
547%ifdef RT_ARCH_AMD64
548 add rsp, 8
549%endif
550 ret
551
552%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: run VMXON from 64-bit mode; edx points at the 8-byte
; HCPhysVMXOn argument still sitting on the caller's stack.
; NOTE(review): this path maps ZF -> VERR_INVALID_PARAMETER and CF ->
; VERR_VMX_INVALID_VMCS_PTR, whereas the 32-bit path above uses
; VERR_VMX_GENERIC / VERR_VMX_INVALID_VMXON_PTR — verify callers treat these
; equivalently before relying on the specific code.
553.longmode:
554 lea edx, [esp + 4] ; &HCPhysVMXOn.
555 ; Convert return frame into a retf frame 64-bit -> 32-bit
556 xor eax, eax
557 xchg eax, [esp]
558 push cs
559 push 0
560 push eax ; original return address.
561 ; jmp far .thunk64
562 db 0xea
563 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
564BITS 64
565.thunk64:
566 and edx, 0ffffffffh
567 xor eax, eax
568 vmxon [rdx]
569 mov r8d, VERR_INVALID_PARAMETER
570 cmovz eax, r8d
571 mov r9d, VERR_VMX_INVALID_VMCS_PTR
572 cmovc eax, r9d
573 retf ; return to caller
574BITS 32
575%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
576ENDPROC VMXEnable
577
578
579;/**
580; * Executes VMXOFF
581; */
582;DECLASM(void) VMXDisable(void);
583BEGINPROC VMXDisable
584%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
585 cmp byte [NAME(g_fVMXIs64bitHost)], 0
586 jne .longmode
587%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
588 vmxoff
589 ret
590
591%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: no arguments and no return value, so the thunk only
; has to switch to 64-bit CS, execute VMXOFF, and retf back to 32-bit code.
592.longmode:
593 ; Convert return frame into a retf frame 64-bit -> 32-bit
594 xor eax, eax
595 xchg eax, [esp]
596 push cs
597 push 0
598 push eax ; original return address.
599 ; jmp far .thunk64
600 db 0xea
601 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
602BITS 64
603.thunk64:
604 vmxoff
605 retf ; return to caller
606BITS 32
607%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
608ENDPROC VMXDisable
609
610
611;/**
612; * Executes VMCLEAR
613; *
614; * @returns VBox status code
615; * @param HCPhysVMCS Physical address of VM control structure
616; */
617;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
618BEGINPROC VMXClearVMCS
619%ifdef RT_ARCH_AMD64
; 64-bit: push the physical address so VMCLEAR can take it as a memory
; operand; the stack is rebalanced in .the_end.
620 xor rax, rax
621 %ifdef ASM_CALL64_GCC
622 push rdi
623 %else
624 push rcx
625 %endif
626 vmclear [rsp]
627%else ; RT_ARCH_X86
628 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
629 cmp byte [NAME(g_fVMXIs64bitHost)], 0
630 jne .longmode
631 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
632 xor eax, eax
633 vmclear [esp + 4]
634%endif ; RT_ARCH_X86
; Only CF is checked: carry set means the VMCS pointer was rejected.
635 jnc .the_end
636 mov eax, VERR_VMX_INVALID_VMCS_PTR
637.the_end:
638%ifdef RT_ARCH_AMD64
639 add rsp, 8
640%endif
641 ret
642
643%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: VMCLEAR executed from 64-bit mode on the 8-byte
; argument still on the caller's stack.
644.longmode:
645 lea edx, [esp + 4] ; &HCPhysVMCS
646 ; Convert return frame into a retf frame 64-bit -> 32-bit
647 xor eax, eax
648 xchg eax, [esp]
649 push cs
650 push 0
651 push eax ; original return address.
652 ; jmp far .thunk64
653 db 0xea
654 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
655BITS 64
656.thunk64:
657 and edx, 0ffffffffh
658 xor eax, eax
659 vmclear [rdx]
660 mov r9d, VERR_VMX_INVALID_VMCS_PTR
661 cmovc eax, r9d
662 retf ; return to caller
663BITS 32
664%endif
665ENDPROC VMXClearVMCS
666
667
668;/**
669; * Executes VMPTRLD
670; *
671; * @returns VBox status code
672; * @param HCPhysVMCS Physical address of VMCS structure
673; */
674;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
675BEGINPROC VMXActivateVMCS
; Structurally identical to VMXClearVMCS, with VMPTRLD in place of VMCLEAR.
676%ifdef RT_ARCH_AMD64
677 xor rax, rax
678 %ifdef ASM_CALL64_GCC
679 push rdi
680 %else
681 push rcx
682 %endif
683 vmptrld [rsp]
684%else
685 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
686 cmp byte [NAME(g_fVMXIs64bitHost)], 0
687 jne .longmode
688 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
689 xor eax, eax
690 vmptrld [esp + 4]
691%endif
; CF set -> the supplied VMCS physical address was rejected.
692 jnc .the_end
693 mov eax, VERR_VMX_INVALID_VMCS_PTR
694.the_end:
695%ifdef RT_ARCH_AMD64
696 add rsp, 8
697%endif
698 ret
699
700%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: VMPTRLD executed from 64-bit mode.
701.longmode:
702 lea edx, [esp + 4] ; &HCPhysVMCS
703 ; Convert return frame into a retf frame 64-bit -> 32-bit
704 xor eax, eax
705 xchg eax, [esp]
706 push cs
707 push 0
708 push eax ; original return address.
709 ; jmp far .thunk64
710 db 0xea
711 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
712BITS 64
713.thunk64:
714 and edx, 0ffffffffh
715 xor eax, eax
716 vmptrld [rdx]
717 mov r9d, VERR_VMX_INVALID_VMCS_PTR
718 cmovc eax, r9d
719 retf ; return to caller
720BITS 32
721%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
722ENDPROC VMXActivateVMCS
723
724
725;/**
726; * Executes VMPTRST
727; *
728; * @returns VBox status code
729; * @param [esp + 04h] gcc:rdi msc:rcx Param 1 - First parameter - Address that will receive the current pointer
730; */
731;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
732BEGINPROC VMXGetActivateVMCS
; VMPTRST stores the current-VMCS physical address to *pVMCS; always returns
; VINF_SUCCESS (eax=0) except on OS/2, where the opcode is stubbed out.
733%ifdef RT_OS_OS2
734 mov eax, VERR_NOT_SUPPORTED
735 ret
736%else
737 %ifdef RT_ARCH_AMD64
738 %ifdef ASM_CALL64_GCC
739 vmptrst qword [rdi]
740 %else
741 vmptrst qword [rcx]
742 %endif
743 %else
744 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
745 cmp byte [NAME(g_fVMXIs64bitHost)], 0
746 jne .longmode
747 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
748 vmptrst qword [esp+04h]
749 %endif
750 xor eax, eax
751 ret
752
753 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: VMPTRST executed from 64-bit mode, storing through
; the pointer argument on the caller's stack.
754.longmode:
755 lea edx, [esp + 4] ; &HCPhysVMCS
756 ; Convert return frame into a retf frame 64-bit -> 32-bit
757 xor eax, eax
758 xchg eax, [esp]
759 push cs
760 push 0
761 push eax ; original return address.
762 ; jmp far .thunk64
763 db 0xea
764 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
765BITS 64
766.thunk64:
767 and edx, 0ffffffffh
768 vmptrst qword [rdx]
769 xor eax, eax
770 retf ; return to caller
771BITS 32
772 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
773%endif
774ENDPROC VMXGetActivateVMCS
775
776;/**
777; * Invalidate a page using invept
778; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
779; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
780; */
781;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
782BEGINPROC VMXR0InvEPT
; INVEPT is newer than the assembler in use, so it is hand-encoded via DB:
; 66 0F 38 80 /r, with the ModRM byte selecting reg=flush-type register,
; rm=descriptor pointer register (e.g. 0x3E = rdi,[rsi]; 0x0A = rcx,[rdx]).
783%ifdef RT_ARCH_AMD64
784 %ifdef ASM_CALL64_GCC
785 and edi, 0ffffffffh
786 xor rax, rax
787; invept rdi, qword [rsi]
788 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
789 %else
790 and ecx, 0ffffffffh
791 xor rax, rax
792; invept rcx, qword [rdx]
793 DB 0x66, 0x0F, 0x38, 0x80, 0xA
794 %endif
795%else
796 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
797 cmp byte [NAME(g_fVMXIs64bitHost)], 0
798 jne .longmode
799 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
800 mov eax, [esp + 4]
801 mov ecx, [esp + 8]
802; invept eax, qword [ecx]
803 DB 0x66, 0x0F, 0x38, 0x80, 0x1
804%endif
; CF -> invalid VMCS pointer, ZF -> bad flush type / descriptor (parameter).
805 jnc .valid_vmcs
806 mov eax, VERR_VMX_INVALID_VMCS_PTR
807 ret
808.valid_vmcs:
809 jnz .the_end
810 mov eax, VERR_INVALID_PARAMETER
811.the_end:
812 ret
813
814%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: INVEPT executed from 64-bit mode with rcx/rdx args.
815.longmode:
816 mov ecx, [esp + 4] ; enmFlush
817 mov edx, [esp + 8] ; pDescriptor
818 ; Convert return frame into a retf frame 64-bit -> 32-bit
819 xor eax, eax
820 xchg eax, [esp]
821 push cs
822 push 0
823 push eax ; original return address.
824 ; jmp far .thunk64
825 db 0xea
826 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
827BITS 64
828.thunk64:
829 and ecx, 0ffffffffh
830 and edx, 0ffffffffh
831 xor eax, eax
832; invept rcx, qword [rdx]
833 DB 0x66, 0x0F, 0x38, 0x80, 0xA
834 mov r8d, VERR_INVALID_PARAMETER
835 cmovz eax, r8d
836 mov r9d, VERR_VMX_INVALID_VMCS_PTR
837 cmovc eax, r9d
838 retf ; return to caller
839BITS 32
840%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
841ENDPROC VMXR0InvEPT
842
843
844;/**
845; * Invalidate a page using invvpid
846; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
847; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
848; */
849;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
850BEGINPROC VMXR0InvVPID
; Same hand-encoding scheme as VMXR0InvEPT but with opcode byte 0x81
; (INVVPID = 66 0F 38 81 /r) instead of 0x80 (INVEPT).
851%ifdef RT_ARCH_AMD64
852 %ifdef ASM_CALL64_GCC
853 and edi, 0ffffffffh
854 xor rax, rax
855 ;invvpid rdi, qword [rsi]
856 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
857 %else
858 and ecx, 0ffffffffh
859 xor rax, rax
860; invvpid rcx, qword [rdx]
861 DB 0x66, 0x0F, 0x38, 0x81, 0xA
862 %endif
863%else
864 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
865 cmp byte [NAME(g_fVMXIs64bitHost)], 0
866 jne .longmode
867 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
868 mov eax, [esp + 4]
869 mov ecx, [esp + 8]
870; invvpid eax, qword [ecx]
871 DB 0x66, 0x0F, 0x38, 0x81, 0x1
872%endif
873 jnc .valid_vmcs
874 mov eax, VERR_VMX_INVALID_VMCS_PTR
875 ret
876.valid_vmcs:
877 jnz .the_end
878 mov eax, VERR_INVALID_PARAMETER
879.the_end:
880 ret
881
882%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
; Hybrid 32-bit kernel: INVVPID executed from 64-bit mode with rcx/rdx args.
883.longmode:
884 mov ecx, [esp + 4] ; enmFlush
885 mov edx, [esp + 8] ; pDescriptor
886 ; Convert return frame into a retf frame 64-bit -> 32-bit
887 xor eax, eax
888 xchg eax, [esp]
889 push cs
890 push 0
891 push eax ; original return address.
892 ; jmp far .thunk64
893 db 0xea
894 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
895BITS 64
896.thunk64:
897 and ecx, 0ffffffffh
898 and edx, 0ffffffffh
899 xor eax, eax
900; invvpid rcx, qword [rdx]
901 DB 0x66, 0x0F, 0x38, 0x81, 0xA
902 mov r8d, VERR_INVALID_PARAMETER
903 cmovz eax, r8d
904 mov r9d, VERR_VMX_INVALID_VMCS_PTR
905 cmovc eax, r9d
906 retf ; return to caller
907BITS 32
908%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
909ENDPROC VMXR0InvVPID
910
911
912%if GC_ARCH_BITS == 64
913;;
914; Executes INVLPGA
915;
916; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
917; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
918;
919;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
920BEGINPROC SVMR0InvlpgA
; 64-bit guest-context pointers: pPageGC is a full 64-bit value, hence the
; 8-byte x86 stack layout (uASID at +0Ch). INVLPGA takes the address in
; rAX and the ASID in ecx.
921%ifdef RT_ARCH_AMD64
922 %ifdef ASM_CALL64_GCC
923 mov rax, rdi
924 mov rcx, rsi
925 %else
926 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
927 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
928 ; values also set the upper 32 bits of the register to zero. Consequently
929 ; there is no need for an instruction movzlq.''
930 mov eax, ecx
931 mov rcx, rdx
932 %endif
933%else
934 mov eax, [esp + 4]
935 mov ecx, [esp + 0Ch]
936%endif
937 invlpga [xAX], ecx
938 ret
939ENDPROC SVMR0InvlpgA
940
941%else ; GC_ARCH_BITS != 64
942;;
943; Executes INVLPGA
944;
945; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
946; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
947;
948;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
949BEGINPROC SVMR0InvlpgA
; 32-bit guest-context pointers: pPageGC is a dword (uASID at +8 on x86).
950%ifdef RT_ARCH_AMD64
951 %ifdef ASM_CALL64_GCC
; NOTE(review): movzx has no r64,r32 form; `mov eax, edi` already
; zero-extends into rax (as the MSC comment below notes) — verify this
; branch actually assembles in the configurations that build it.
952 movzx rax, edi
953 mov ecx, esi
954 %else
955 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
956 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
957 ; values also set the upper 32 bits of the register to zero. Consequently
958 ; there is no need for an instruction movzlq.''
959 mov eax, ecx
960 mov ecx, edx
961 %endif
962%else
963 mov eax, [esp + 4]
964 mov ecx, [esp + 8]
965%endif
966 invlpga [xAX], ecx
967 ret
968ENDPROC SVMR0InvlpgA
969
970%endif ; GC_ARCH_BITS != 64
971
972%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
973
974;/**
975; * Gets 64-bit GDTR and IDTR on darwin.
976; * @param pGdtr Where to store the 64-bit GDTR.
977; * @param pIdtr Where to store the 64-bit IDTR.
978; */
979;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
980BEGINPROC hwaccmR0Get64bitGDTRandIDTR
; Only meaningful on the hybrid 32-bit darwin kernel: SGDT/SIDT must run in
; 64-bit mode to store the full 10-byte (64-bit base) descriptor tables.
981.longmode:
982 mov ecx, [esp + 4] ; pGdtr
983 mov edx, [esp + 8] ; pIdtr
984 ; Convert return frame into a retf frame 64-bit -> 32-bit
985 xor eax, eax
986 xchg eax, [esp]
987 push cs
988 push 0
989 push eax ; original return address.
990 ; jmp far .thunk64
991 db 0xea
992 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
993BITS 64
994.thunk64:
995 and ecx, 0ffffffffh
996 and edx, 0ffffffffh
997 sgdt [rcx]
998 sidt [rdx]
999 retf
1000BITS 32
1001ENDPROC hwaccmR0Get64bitGDTRandIDTR
1002
1003
1004;/**
1005; * Gets 64-bit CR3 on darwin.
1006; * @returns CR3
1007; */
1008;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
1009BEGINPROC hwaccmR0Get64bitCR3
; Reads CR3 from 64-bit mode and returns it in the 32-bit convention for a
; 64-bit value: low dword in eax, high dword in edx (via the shr).
1010.longmode:
1011 ; Convert return frame into a retf frame 64-bit -> 32-bit
1012 xor eax, eax
1013 xchg eax, [esp]
1014 push cs
1015 push 0
1016 push eax ; original return address.
1017 ; jmp far .thunk64
1018 db 0xea
1019 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1020BITS 64
1021.thunk64:
1022 mov rax, cr3
1023 mov rdx, rax
1024 shr rdx, 32
1025 retf
1026BITS 32
1027ENDPROC hwaccmR0Get64bitCR3
1028
1029%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
1030
1031
1032
1033;
1034; The default setup of the StartVM routines.
1035;
1036%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1037 %define MY_NAME(name) name %+ _32
1038%else
1039 %define MY_NAME(name) name
1040%endif
1041%ifdef RT_ARCH_AMD64
1042 %define MYPUSHAD MYPUSHAD64
1043 %define MYPOPAD MYPOPAD64
1044 %define MYPUSHSEGS MYPUSHSEGS64
1045 %define MYPOPSEGS MYPOPSEGS64
1046%else
1047 %define MYPUSHAD MYPUSHAD32
1048 %define MYPOPAD MYPOPAD32
1049 %define MYPUSHSEGS MYPUSHSEGS32
1050 %define MYPOPSEGS MYPOPSEGS32
1051%endif
1052
1053%include "HWACCMR0Mixed.mac"
1054
1055
1056%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1057 ;
1058 ; Write the wrapper procedures.
1059 ;
1060 ; These routines are probably being too paranoid about selector
1061 ; restoring, but better safe than sorry...
1062 ;
1063
1064; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
1065BEGINPROC VMXR0StartVM32
; Hybrid-kernel dispatcher: on a 32-bit host falls through to the 32-bit
; implementation (VMXR0StartVM32_32); on a 64-bit-capable kernel it saves
; all segment registers, far-jumps into 64-bit mode, calls the _64 variant
; with GCC-ABI register args, and retf's back to the .thunk32 cleanup.
1066 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1067 je near NAME(VMXR0StartVM32_32)
1068
1069 ; stack frame.
1070 push ebp
1071 mov ebp, esp
1072 and esp, 0fffffff0h
1073 push esi
1074 push edi
1075 push ebx
1076 push ds
1077 push es
1078 push fs
1079 push gs
1080 push ss
1081
1082 ; retf frame (64 -> 32).
1083 push 0
1084 push cs
1085 push 0
1086 push .thunk32
1087
1088 ; jmp far .thunk64
1089 db 0xea
1090 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1091BITS 64
1092.thunk64:
1093 and esp, 0ffffffffh
1094 and ebp, 0ffffffffh
1095 mov edi, [rbp + 8] ; fResume
1096 mov esi, [rbp + 12] ; pCtx
1097 sub rsp, 20h
1098 call NAME(VMXR0StartVM32_64)
1099 add rsp, 20h
1100 retf
1101BITS 32
1102.thunk32:
1103 pop ss
1104 pop gs
1105 pop fs
1106 pop es
1107 pop ds
1108 pop ebx
1109 pop edi
1110 pop esi
1111 leave
1112 ret
1113ENDPROC VMXR0StartVM32
1114
1115; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx)
1116BEGINPROC VMXR0StartVM64
; 64-bit guests require a 64-bit-capable host kernel: bail out with
; VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE on a plain 32-bit host, otherwise
; thunk to 64-bit mode and call VMXR0StartVM64_64 (same frame/retf dance as
; VMXR0StartVM32 above).
1117 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1118 jne .longmode
1119 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1120 ret
1121
1122.longmode:
1123 ; stack frame.
1124 push ebp
1125 mov ebp, esp
1126 and esp, 0fffffff0h
1127 push esi
1128 push edi
1129 push ebx
1130 push ds
1131 push es
1132 push fs
1133 push gs
1134 push ss
1135
1136 ; retf frame (64 -> 32).
1137 push 0
1138 push cs
1139 push 0
1140 push .thunk32
1141
1142 ; jmp far .thunk64
1143 db 0xea
1144 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1145BITS 64
1146.thunk64:
1147 and esp, 0ffffffffh
1148 and ebp, 0ffffffffh
1149 mov edi, [rbp + 8] ; fResume
1150 mov esi, [rbp + 12] ; pCtx
1151 sub rsp, 20h
1152 call NAME(VMXR0StartVM64_64)
1153 add rsp, 20h
1154 retf
1155BITS 32
1156.thunk32:
1157 pop ss
1158 pop gs
1159 pop fs
1160 pop es
1161 pop ds
1162 pop ebx
1163 pop edi
1164 pop esi
1165 leave
1166 ret
1167ENDPROC VMXR0StartVM64
1168
1169;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
1170BEGINPROC SVMR0VMRun
; AMD-V counterpart of VMXR0StartVM32: dispatch to the 32-bit implementation
; or thunk into 64-bit mode and call SVMR0VMRun_64. The two RTHCPHYS stack
; args are 8 bytes each, hence the +8/+16/+24 offsets below.
1171 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1172 je near NAME(SVMR0VMRun_32)
1173
1174 ; stack frame.
1175 push ebp
1176 mov ebp, esp
1177 and esp, 0fffffff0h
1178 push esi
1179 push edi
1180 push ebx
1181 push ds
1182 push es
1183 push fs
1184 push gs
1185 push ss
1186
1187 ; retf frame (64 -> 32).
1188 push 0
1189 push cs
1190 push 0
1191 push .thunk32
1192
1193 ; jmp far .thunk64
1194 db 0xea
1195 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1196BITS 64
1197.thunk64:
1198 and esp, 0ffffffffh
1199 and ebp, 0ffffffffh
1200 mov rdi, [rbp + 8] ; pVMCBHostPhys
1201 mov rsi, [rbp + 16] ; pVMCBPhys
1202 mov edx, [rbp + 24] ; pCtx
1203 sub rsp, 20h
1204 call NAME(SVMR0VMRun_64)
1205 add rsp, 20h
1206 retf
1207BITS 32
1208.thunk32:
1209 pop ss
1210 pop gs
1211 pop fs
1212 pop es
1213 pop ds
1214 pop ebx
1215 pop edi
1216 pop esi
1217 leave
1218 ret
1219ENDPROC SVMR0VMRun
1220
1221; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
1222BEGINPROC SVMR0VMRun64
; 64-bit AMD-V guest entry: requires a 64-bit-capable kernel, otherwise
; returns VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE. Same thunk structure as
; SVMR0VMRun, calling SVMR0VMRun64_64 instead.
1223 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1224 jne .longmode
1225 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1226 ret
1227
1228.longmode:
1229 ; stack frame.
1230 push ebp
1231 mov ebp, esp
1232 and esp, 0fffffff0h
1233 push esi
1234 push edi
1235 push ebx
1236 push ds
1237 push es
1238 push fs
1239 push gs
1240 push ss
1241
1242 ; retf frame (64 -> 32).
1243 push 0
1244 push cs
1245 push 0
1246 push .thunk32
1247
1248 ; jmp far .thunk64
1249 db 0xea
1250 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1251BITS 64
1252.thunk64:
1253 and esp, 0ffffffffh
1254 and ebp, 0ffffffffh
1255 mov rdi, [rbp + 8] ; pVMCBHostPhys
1256 mov rsi, [rbp + 16] ; pVMCBPhys
1257 mov edx, [rbp + 24] ; pCtx
1258 sub rsp, 20h
1259 call NAME(SVMR0VMRun64_64)
1260 add rsp, 20h
1261 retf
1262BITS 32
1263.thunk32:
1264 pop ss
1265 pop gs
1266 pop fs
1267 pop es
1268 pop ds
1269 pop ebx
1270 pop edi
1271 pop esi
1272 leave
1273 ret
1274ENDPROC SVMR0VMRun64
1275
1276 ;
1277 ; Do it a second time pretending we're a 64-bit host.
1278 ;
1279 ; This *HAS* to be done at the very end of the file to avoid restoring
1280 ; macros. So, add new code *BEFORE* this mess.
1281 ;
1282 BITS 64
1283 %undef RT_ARCH_X86
1284 %define RT_ARCH_AMD64
1285 %undef ASM_CALL64_MSC
1286 %define ASM_CALL64_GCC
1287 %define xS 8
1288 %define xSP rsp
1289 %define xBP rbp
1290 %define xAX rax
1291 %define xBX rbx
1292 %define xCX rcx
1293 %define xDX rdx
1294 %define xDI rdi
1295 %define xSI rsi
1296 %define MY_NAME(name) name %+ _64
1297 %define MYPUSHAD MYPUSHAD64
1298 %define MYPOPAD MYPOPAD64
1299 %define MYPUSHSEGS MYPUSHSEGS64
1300 %define MYPOPSEGS MYPOPSEGS64
1301
1302 %include "HWACCMR0Mixed.mac"
1303%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette