VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 15255

Last change on this file was r15255, checked in by vboxsync, 16 years ago: Compile fixes

1; $Id: HWACCMR0A.asm 15255 2008-12-10 15:45:19Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30%include "../HWACCMInternal.mac"
31
32%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
33 %macro vmwrite 2,
34 int3
35 %endmacro
36 %define vmlaunch int3
37 %define vmresume int3
38 %define vmsave int3
39 %define vmload int3
40 %define vmrun int3
41 %define clgi int3
42 %define stgi int3
43 %macro invlpga 2,
44 int3
45 %endmacro
46%endif
47
48;*******************************************************************************
49;* Defined Constants And Macros *
50;*******************************************************************************
51%ifdef RT_ARCH_AMD64
52 %define MAYBE_64_BIT
53%endif
54%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
55 %define MAYBE_64_BIT
56%endif
57
58
59;; This is too risky wrt. stability, performance and correctness.
60;%define VBOX_WITH_DR6_EXPERIMENT 1
61
62;; @def MYPUSHAD
63; Macro generating an equivalent to pushad
64
65;; @def MYPOPAD
66; Macro generating an equivalent to popad
67
68;; @def MYPUSHSEGS
69; Macro saving all segment registers on the stack.
70; @param 1 full width register name
71; @param 2 16-bit register name for \a 1.
72
73;; @def MYPOPSEGS
74; Macro restoring all segment registers on the stack
75; @param 1 full width register name
76; @param 2 16-bit register name for \a 1.
77
78%ifdef MAYBE_64_BIT
79 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
80 %macro LOADGUESTMSR 2
81 mov rcx, %1
82 rdmsr
83 push rdx
84 push rax
85 mov edx, dword [xSI + %2 + 4]
86 mov eax, dword [xSI + %2]
87 wrmsr
88 %endmacro
89
90 ; Save a guest MSR and restore the corresponding host MSR (trashes rdx & rcx)
91 ; Only really useful for the GS kernel base MSR, as that one can be changed behind our back (swapgs)
92 %macro LOADHOSTMSREX 2
93 mov rcx, %1
94 rdmsr
95 mov dword [xSI + %2], eax
96 mov dword [xSI + %2 + 4], edx
97 pop rax
98 pop rdx
99 wrmsr
100 %endmacro
101
102 ; Load the corresponding host MSR (trashes rdx & rcx)
103 %macro LOADHOSTMSR 1
104 mov rcx, %1
105 pop rax
106 pop rdx
107 wrmsr
108 %endmacro
109%endif
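; Editor's sketch (not part of the original file): because LOADGUESTMSR pushes
; the host value onto the stack and LOADHOSTMSR/LOADHOSTMSREX pop it again,
; host restores must run in the reverse order of the guest loads. Assuming xSI
; points to the guest CPUMCTX and the usual names from x86.mac / cpum.mac:
;
;       LOADGUESTMSR  MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
;       LOADGUESTMSR  MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
;       ; ... enter and leave the guest (vmlaunch / vmresume) ...
;       LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
;       LOADHOSTMSR   MSR_K8_LSTAR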
110
111%ifdef ASM_CALL64_GCC
112 %macro MYPUSHAD64 0
113 push r15
114 push r14
115 push r13
116 push r12
117 push rbx
118 %endmacro
119 %macro MYPOPAD64 0
120 pop rbx
121 pop r12
122 pop r13
123 pop r14
124 pop r15
125 %endmacro
126
127%else ; ASM_CALL64_MSC
128 %macro MYPUSHAD64 0
129 push r15
130 push r14
131 push r13
132 push r12
133 push rbx
134 push rsi
135 push rdi
136 %endmacro
137 %macro MYPOPAD64 0
138 pop rdi
139 pop rsi
140 pop rbx
141 pop r12
142 pop r13
143 pop r14
144 pop r15
145 %endmacro
146%endif
147
148; trashes rax, rdx & rcx
149%macro MYPUSHSEGS64 2
150 mov %2, es
151 push %1
152 mov %2, ds
153 push %1
154
155 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save and restore it ourselves.
156 mov ecx, MSR_K8_FS_BASE
157 rdmsr
158 push rdx
159 push rax
160 push fs
161
162 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
163 mov ecx, MSR_K8_GS_BASE
164 rdmsr
165 push rdx
166 push rax
167 push gs
168%endmacro
169
170; trashes rax, rdx & rcx
171%macro MYPOPSEGS64 2
172 ; Note: do not step through this code with a debugger!
173 pop gs
174 pop rax
175 pop rdx
176 mov ecx, MSR_K8_GS_BASE
177 wrmsr
178
179 pop fs
180 pop rax
181 pop rdx
182 mov ecx, MSR_K8_FS_BASE
183 wrmsr
184 ; Now it's safe to step again
185
186 pop %1
187 mov ds, %2
188 pop %1
189 mov es, %2
190%endmacro
191
192%macro MYPUSHAD32 0
193 pushad
194%endmacro
195%macro MYPOPAD32 0
196 popad
197%endmacro
198
199%macro MYPUSHSEGS32 2
200 push ds
201 push es
202 push fs
203 push gs
204%endmacro
205%macro MYPOPSEGS32 2
206 pop gs
207 pop fs
208 pop es
209 pop ds
210%endmacro
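; Editor's sketch (illustrative only): MYPUSHSEGS/MYPOPSEGS take the same
; scratch register twice, once in its full width and once under its 16-bit
; name (the segment moves need the 16-bit form, the pushes the full-width
; one). A typical save/restore pair in a function body would be:
;
;       MYPUSHSEGS xAX, ax
;       ; ... code that may clobber ds/es/fs/gs and their base MSRs ...
;       MYPOPSEGS  xAX, ax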
211
212
213;*******************************************************************************
214;* External Symbols *
215;*******************************************************************************
216%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
217extern NAME(SUPR0AbsIs64bit)
218extern NAME(SUPR0Abs64bitKernelCS)
219extern NAME(SUPR0Abs64bitKernelSS)
220extern NAME(SUPR0Abs64bitKernelDS)
221extern NAME(SUPR0AbsKernelCS)
222%endif
223
224
225;*******************************************************************************
226;* Global Variables *
227;*******************************************************************************
228%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
229BEGINDATA
230;;
231; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
232; needing to clobber a register. (This trick doesn't quite work for PE btw.
233; but that's not relevant atm.)
234GLOBALNAME g_fVMXIs64bitHost
235 dd NAME(SUPR0AbsIs64bit)
236%endif
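; Editor's note: when VBOX_WITH_HYBIRD_32BIT_KERNEL is defined, the procedures
; below all use the same pattern to reach 64-bit mode from 32-bit code: test
; g_fVMXIs64bitHost, then take a hand-encoded far jump (db 0xea followed by
; dd offset, selector) through SUPR0Abs64bitKernelCS, and return via a far
; jump through a .fpret 16:32 pointer using SUPR0AbsKernelCS. The opcode is
; emitted by hand, presumably because the selectors are only known as absolute
; external symbols (assumption).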
237
238
239BEGINCODE
240
241
242;/**
243; * Executes VMWRITE, 64-bit value.
244; *
245; * @returns VBox status code
246; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
247; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
248; */
249ALIGNCODE(16)
250BEGINPROC VMXWriteVMCS64
251%ifdef RT_ARCH_AMD64
252 %ifdef ASM_CALL64_GCC
253 and edi, 0ffffffffh
254 xor rax, rax
255 vmwrite rdi, rsi
256 %else
257 and ecx, 0ffffffffh
258 xor rax, rax
259 vmwrite rcx, rdx
260 %endif
261%else ; RT_ARCH_X86
262 mov ecx, [esp + 4] ; idxField
263 lea edx, [esp + 8] ; &u64Data
264 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
265 cmp byte [NAME(g_fVMXIs64bitHost)], 0
266 jz .legacy_mode
267 db 0xea ; jmp far .sixtyfourbit_mode
268 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
269.legacy_mode:
270 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
271 vmwrite ecx, [edx] ; low dword
272 jz .done
273 jc .done
274 inc ecx
275 xor eax, eax
276 vmwrite ecx, [edx + 4] ; high dword
277.done:
278%endif ; RT_ARCH_X86
279 jnc .valid_vmcs
280 mov eax, VERR_VMX_INVALID_VMCS_PTR
281 ret
282.valid_vmcs:
283 jnz .the_end
284 mov eax, VERR_VMX_INVALID_VMCS_FIELD
285.the_end:
286 ret
287
288%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
289ALIGNCODE(16)
290BITS 64
291.sixtyfourbit_mode:
292 and edx, 0ffffffffh
293 and ecx, 0ffffffffh
294 xor eax, eax
295 vmwrite rcx, [rdx]
296 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
297 cmovz eax, r8d
298 mov r9d, VERR_VMX_INVALID_VMCS_PTR
299 cmovc eax, r9d
300 jmp far [.fpret wrt rip]
301.fpret: ; 16:32 Pointer to .the_end.
302 dd .the_end, NAME(SUPR0AbsKernelCS)
303BITS 32
304%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
305ENDPROC VMXWriteVMCS64
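; Editor's sketch (hypothetical 32-bit caller, not part of this file): with
; the cdecl layout documented above, the 64-bit value follows the index on the
; stack as two dwords. Using a field constant assumed from hwacc_vmx.mac and a
; hypothetical u64Val variable:
;
;       push    dword [u64Val + 4]          ; u64Data, high dword
;       push    dword [u64Val]              ; u64Data, low dword
;       push    VMX_VMCS_GUEST_RIP          ; idxField
;       call    NAME(VMXWriteVMCS64)
;       add     esp, 0ch                    ; cdecl: caller cleans the stack
;       test    eax, eax                    ; 0 = VINF_SUCCESS
;       jnz     .write_failed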
306
307
308;/**
309; * Executes VMREAD, 64-bit value
310; *
311; * @returns VBox status code
312; * @param idxField VMCS index
313; * @param pData Ptr to store VM field value
314; */
315;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
316ALIGNCODE(16)
317BEGINPROC VMXReadVMCS64
318%ifdef RT_ARCH_AMD64
319 %ifdef ASM_CALL64_GCC
320 and edi, 0ffffffffh
321 xor rax, rax
322 vmread [rsi], rdi
323 %else
324 and ecx, 0ffffffffh
325 xor rax, rax
326 vmread [rdx], rcx
327 %endif
328%else ; RT_ARCH_X86
329 mov ecx, [esp + 4] ; idxField
330 mov edx, [esp + 8] ; pData
331 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
332 cmp byte [NAME(g_fVMXIs64bitHost)], 0
333 jz .legacy_mode
334 db 0xea ; jmp far .sixtyfourbit_mode
335 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
336.legacy_mode:
337 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
338 vmread [edx], ecx ; low dword
339 jz .done
340 jc .done
341 inc ecx
342 xor eax, eax
343 vmread [edx + 4], ecx ; high dword
344.done:
345%endif ; RT_ARCH_X86
346 jnc .valid_vmcs
347 mov eax, VERR_VMX_INVALID_VMCS_PTR
348 ret
349.valid_vmcs:
350 jnz .the_end
351 mov eax, VERR_VMX_INVALID_VMCS_FIELD
352.the_end:
353 ret
354
355%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
356ALIGNCODE(16)
357BITS 64
358.sixtyfourbit_mode:
359 and edx, 0ffffffffh
360 and ecx, 0ffffffffh
361 xor eax, eax
362 vmread [rdx], rcx
363 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
364 cmovz eax, r8d
365 mov r9d, VERR_VMX_INVALID_VMCS_PTR
366 cmovc eax, r9d
367 jmp far [.fpret wrt rip]
368.fpret: ; 16:32 Pointer to .the_end.
369 dd .the_end, NAME(SUPR0AbsKernelCS)
370BITS 32
371%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
372ENDPROC VMXReadVMCS64
373
374
375;/**
376; * Executes VMREAD, 32-bit value.
377; *
378; * @returns VBox status code
379; * @param idxField VMCS index
380; * @param pu32Data Ptr to store VM field value
381; */
382;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
383ALIGNCODE(16)
384BEGINPROC VMXReadVMCS32
385%ifdef RT_ARCH_AMD64
386 %ifdef ASM_CALL64_GCC
387 and edi, 0ffffffffh
388 xor rax, rax
389 vmread r10, rdi
390 mov [rsi], r10d
391 %else
392 and ecx, 0ffffffffh
393 xor rax, rax
394 vmread r10, rcx
395 mov [rdx], r10d
396 %endif
397%else ; RT_ARCH_X86
398 mov ecx, [esp + 4] ; idxField
399 mov edx, [esp + 8] ; pu32Data
400 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
401 cmp byte [NAME(g_fVMXIs64bitHost)], 0
402 jz .legacy_mode
403 db 0xea ; jmp far .sixtyfourbit_mode
404 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
405.legacy_mode:
406 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
407 xor eax, eax
408 vmread [edx], ecx
409%endif ; RT_ARCH_X86
410 jnc .valid_vmcs
411 mov eax, VERR_VMX_INVALID_VMCS_PTR
412 ret
413.valid_vmcs:
414 jnz .the_end
415 mov eax, VERR_VMX_INVALID_VMCS_FIELD
416.the_end:
417 ret
418
419%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
420ALIGNCODE(16)
421BITS 64
422.sixtyfourbit_mode:
423 and edx, 0ffffffffh
424 and ecx, 0ffffffffh
425 xor eax, eax
426 vmread r10, rcx
427 mov [rdx], r10d
428 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
429 cmovz eax, r8d
430 mov r9d, VERR_VMX_INVALID_VMCS_PTR
431 cmovc eax, r9d
432 jmp far [.fpret wrt rip]
433.fpret: ; 16:32 Pointer to .the_end.
434 dd .the_end, NAME(SUPR0AbsKernelCS)
435BITS 32
436%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
437ENDPROC VMXReadVMCS32
438
439
440;/**
441; * Executes VMWRITE, 32-bit value.
442; *
443; * @returns VBox status code
444; * @param idxField VMCS index
445; * @param u32Data 32-bit VM field value to write
446; */
447;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
448ALIGNCODE(16)
449BEGINPROC VMXWriteVMCS32
450%ifdef RT_ARCH_AMD64
451 %ifdef ASM_CALL64_GCC
452 and edi, 0ffffffffh
453 and esi, 0ffffffffh
454 xor rax, rax
455 vmwrite rdi, rsi
456 %else
457 and ecx, 0ffffffffh
458 and edx, 0ffffffffh
459 xor rax, rax
460 vmwrite rcx, rdx
461 %endif
462%else ; RT_ARCH_X86
463 mov ecx, [esp + 4] ; idxField
464 mov edx, [esp + 8] ; u32Data
465 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
466 cmp byte [NAME(g_fVMXIs64bitHost)], 0
467 jz .legacy_mode
468 db 0xea ; jmp far .sixtyfourbit_mode
469 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
470.legacy_mode:
471 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
472 xor eax, eax
473 vmwrite ecx, edx
474%endif ; RT_ARCH_X86
475 jnc .valid_vmcs
476 mov eax, VERR_VMX_INVALID_VMCS_PTR
477 ret
478.valid_vmcs:
479 jnz .the_end
480 mov eax, VERR_VMX_INVALID_VMCS_FIELD
481.the_end:
482 ret
483
484%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
485ALIGNCODE(16)
486BITS 64
487.sixtyfourbit_mode:
488 and edx, 0ffffffffh
489 and ecx, 0ffffffffh
490 xor eax, eax
491 vmwrite rcx, rdx
492 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
493 cmovz eax, r8d
494 mov r9d, VERR_VMX_INVALID_VMCS_PTR
495 cmovc eax, r9d
496 jmp far [.fpret wrt rip]
497.fpret: ; 16:32 Pointer to .the_end.
498 dd .the_end, NAME(SUPR0AbsKernelCS)
499BITS 32
500%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
501ENDPROC VMXWriteVMCS32
502
503
504;/**
505; * Executes VMXON
506; *
507; * @returns VBox status code
508; * @param HCPhysVMXOn Physical address of VMXON structure
509; */
510;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
511BEGINPROC VMXEnable
512%ifdef RT_ARCH_AMD64
513 xor rax, rax
514 %ifdef ASM_CALL64_GCC
515 push rdi
516 %else
517 push rcx
518 %endif
519 vmxon [rsp]
520%else ; RT_ARCH_X86
521 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
522 cmp byte [NAME(g_fVMXIs64bitHost)], 0
523 jz .legacy_mode
524 db 0xea ; jmp far .sixtyfourbit_mode
525 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
526.legacy_mode:
527 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
528 xor eax, eax
529 vmxon [esp + 4]
530%endif ; RT_ARCH_X86
531 jnc .good
532 mov eax, VERR_VMX_INVALID_VMXON_PTR
533 jmp .the_end
534
535.good:
536 jnz .the_end
537 mov eax, VERR_VMX_GENERIC
538
539.the_end:
540%ifdef RT_ARCH_AMD64
541 add rsp, 8
542%endif
543 ret
544
545%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
546ALIGNCODE(16)
547BITS 64
548.sixtyfourbit_mode:
549 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
550 and edx, 0ffffffffh
551 xor eax, eax
552 vmxon [rdx]
553 mov r8d, VERR_INVALID_PARAMETER
554 cmovz eax, r8d
555 mov r9d, VERR_VMX_INVALID_VMCS_PTR
556 cmovc eax, r9d
557 jmp far [.fpret wrt rip]
558.fpret: ; 16:32 Pointer to .the_end.
559 dd .the_end, NAME(SUPR0AbsKernelCS)
560BITS 32
561%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
562ENDPROC VMXEnable
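; Editor's sketch (hypothetical 32-bit caller): the x86 path reads the VMXON
; region address as a full qword straight off the stack (vmxon [esp + 4]), so
; a 64-bit RTHCPHYS is passed as two dwords:
;
;       push    dword [HCPhysVMXOn + 4]     ; high dword
;       push    dword [HCPhysVMXOn]         ; low dword
;       call    NAME(VMXEnable)
;       add     esp, 8
;       test    eax, eax                    ; 0 = VINF_SUCCESS
;       jnz     .vmxon_failed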
563
564
565;/**
566; * Executes VMXOFF
567; */
568;DECLASM(void) VMXDisable(void);
569BEGINPROC VMXDisable
570%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
571 cmp byte [NAME(g_fVMXIs64bitHost)], 0
572 jz .legacy_mode
573 db 0xea ; jmp far .sixtyfourbit_mode
574 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
575.legacy_mode:
576%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
577 vmxoff
578.the_end:
579 ret
580
581%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
582ALIGNCODE(16)
583BITS 64
584.sixtyfourbit_mode:
585 vmxoff
586 jmp far [.fpret wrt rip]
587.fpret: ; 16:32 Pointer to .the_end.
588 dd .the_end, NAME(SUPR0AbsKernelCS)
589BITS 32
590%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
591ENDPROC VMXDisable
592
593
594;/**
595; * Executes VMCLEAR
596; *
597; * @returns VBox status code
598; * @param HCPhysVMCS Physical address of VM control structure
599; */
600;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
601ALIGNCODE(16)
602BEGINPROC VMXClearVMCS
603%ifdef RT_ARCH_AMD64
604 xor rax, rax
605 %ifdef ASM_CALL64_GCC
606 push rdi
607 %else
608 push rcx
609 %endif
610 vmclear [rsp]
611%else ; RT_ARCH_X86
612 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
613 cmp byte [NAME(g_fVMXIs64bitHost)], 0
614 jz .legacy_mode
615 db 0xea ; jmp far .sixtyfourbit_mode
616 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
617.legacy_mode:
618 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
619 xor eax, eax
620 vmclear [esp + 4]
621%endif ; RT_ARCH_X86
622 jnc .the_end
623 mov eax, VERR_VMX_INVALID_VMCS_PTR
624.the_end:
625%ifdef RT_ARCH_AMD64
626 add rsp, 8
627%endif
628 ret
629
630%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
631ALIGNCODE(16)
632BITS 64
633.sixtyfourbit_mode:
634 lea rdx, [rsp + 4] ; &HCPhysVMCS
635 and edx, 0ffffffffh
636 xor eax, eax
637 vmclear [rdx]
638 mov r9d, VERR_VMX_INVALID_VMCS_PTR
639 cmovc eax, r9d
640 jmp far [.fpret wrt rip]
641.fpret: ; 16:32 Pointer to .the_end.
642 dd .the_end, NAME(SUPR0AbsKernelCS)
643BITS 32
644%endif
645ENDPROC VMXClearVMCS
646
647
648;/**
649; * Executes VMPTRLD
650; *
651; * @returns VBox status code
652; * @param HCPhysVMCS Physical address of VMCS structure
653; */
654;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
655ALIGNCODE(16)
656BEGINPROC VMXActivateVMCS
657%ifdef RT_ARCH_AMD64
658 xor rax, rax
659 %ifdef ASM_CALL64_GCC
660 push rdi
661 %else
662 push rcx
663 %endif
664 vmptrld [rsp]
665%else
666 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
667 cmp byte [NAME(g_fVMXIs64bitHost)], 0
668 jz .legacy_mode
669 db 0xea ; jmp far .sixtyfourbit_mode
670 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
671.legacy_mode:
672 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
673 xor eax, eax
674 vmptrld [esp + 4]
675%endif
676 jnc .the_end
677 mov eax, VERR_VMX_INVALID_VMCS_PTR
678.the_end:
679%ifdef RT_ARCH_AMD64
680 add rsp, 8
681%endif
682 ret
683
684%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
685ALIGNCODE(16)
686BITS 64
687.sixtyfourbit_mode:
688 lea rdx, [rsp + 4] ; &HCPhysVMCS
689 and edx, 0ffffffffh
690 xor eax, eax
691 vmptrld [rdx]
692 mov r9d, VERR_VMX_INVALID_VMCS_PTR
693 cmovc eax, r9d
694 jmp far [.fpret wrt rip]
695.fpret: ; 16:32 Pointer to .the_end.
696 dd .the_end, NAME(SUPR0AbsKernelCS)
697BITS 32
698%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
699ENDPROC VMXActivateVMCS
700
701
702;/**
703; * Executes VMPTRST
704; *
705; * @returns VBox status code
706; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
707; */
708;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
709BEGINPROC VMXGetActivateVMCS
710%ifdef RT_OS_OS2
711 mov eax, VERR_NOT_SUPPORTED
712 ret
713%else
714 %ifdef RT_ARCH_AMD64
715 %ifdef ASM_CALL64_GCC
716 vmptrst qword [rdi]
717 %else
718 vmptrst qword [rcx]
719 %endif
720 %else
721 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
722 cmp byte [NAME(g_fVMXIs64bitHost)], 0
723 jz .legacy_mode
724 db 0xea ; jmp far .sixtyfourbit_mode
725 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
726.legacy_mode:
727 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
728 vmptrst qword [esp+04h]
729 %endif
730 xor eax, eax
731.the_end:
732 ret
733
734 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
735ALIGNCODE(16)
736BITS 64
737.sixtyfourbit_mode:
738 lea rdx, [rsp + 4] ; &HCPhysVMCS
739 and edx, 0ffffffffh
740 vmptrst qword [rdx]
741 xor eax, eax
742 jmp far [.fpret wrt rip]
743.fpret: ; 16:32 Pointer to .the_end.
744 dd .the_end, NAME(SUPR0AbsKernelCS)
745BITS 32
746 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
747%endif
748ENDPROC VMXGetActivateVMCS
749
750;/**
751; * Invalidates cached EPT mappings using invept
752; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
753; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
754; */
755;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
756BEGINPROC VMXR0InvEPT
757%ifdef RT_ARCH_AMD64
758 %ifdef ASM_CALL64_GCC
759 and edi, 0ffffffffh
760 xor rax, rax
761; invept rdi, qword [rsi]
762 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
763 %else
764 and ecx, 0ffffffffh
765 xor rax, rax
766; invept rcx, qword [rdx]
767 DB 0x66, 0x0F, 0x38, 0x80, 0xA
768 %endif
769%else
770 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
771 cmp byte [NAME(g_fVMXIs64bitHost)], 0
772 jz .legacy_mode
773 db 0xea ; jmp far .sixtyfourbit_mode
774 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
775.legacy_mode:
776 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
777 mov eax, [esp + 4]
778 mov ecx, [esp + 8]
779; invept eax, qword [ecx]
780 DB 0x66, 0x0F, 0x38, 0x80, 0x1
781%endif
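; Editor's note: the DB sequences above hand-assemble the instruction,
; presumably because the assemblers in use lack an invept mnemonic
; (assumption). 66 0F 38 80 /r is INVEPT (66 0F 38 81 /r is INVVPID); the
; last byte is the ModRM: 0x3E = invept rdi, [rsi], 0x0A = invept rcx, [rdx],
; 0x01 = invept eax, [ecx].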
782 jnc .valid_vmcs
783 mov eax, VERR_VMX_INVALID_VMCS_PTR
784 ret
785.valid_vmcs:
786 jnz .the_end
787 mov eax, VERR_INVALID_PARAMETER
788.the_end:
789 ret
790
791%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
792ALIGNCODE(16)
793BITS 64
794.sixtyfourbit_mode:
795 and esp, 0ffffffffh
796 mov ecx, [rsp + 4] ; enmFlush
797 mov edx, [rsp + 8] ; pDescriptor
798 xor eax, eax
799; invept rcx, qword [rdx]
800 DB 0x66, 0x0F, 0x38, 0x80, 0xA
801 mov r8d, VERR_INVALID_PARAMETER
802 cmovz eax, r8d
803 mov r9d, VERR_VMX_INVALID_VMCS_PTR
804 cmovc eax, r9d
805 jmp far [.fpret wrt rip]
806.fpret: ; 16:32 Pointer to .the_end.
807 dd .the_end, NAME(SUPR0AbsKernelCS)
808BITS 32
809%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
810ENDPROC VMXR0InvEPT
811
812
813;/**
814; * Invalidates cached VPID-tagged TLB entries using invvpid
815; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
816; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
817; */
818;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
819BEGINPROC VMXR0InvVPID
820%ifdef RT_ARCH_AMD64
821 %ifdef ASM_CALL64_GCC
822 and edi, 0ffffffffh
823 xor rax, rax
824 ; invvpid rdi, qword [rsi]
825 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
826 %else
827 and ecx, 0ffffffffh
828 xor rax, rax
829; invvpid rcx, qword [rdx]
830 DB 0x66, 0x0F, 0x38, 0x81, 0xA
831 %endif
832%else
833 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
834 cmp byte [NAME(g_fVMXIs64bitHost)], 0
835 jz .legacy_mode
836 db 0xea ; jmp far .sixtyfourbit_mode
837 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
838.legacy_mode:
839 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
840 mov eax, [esp + 4]
841 mov ecx, [esp + 8]
842; invvpid eax, qword [ecx]
843 DB 0x66, 0x0F, 0x38, 0x81, 0x1
844%endif
845 jnc .valid_vmcs
846 mov eax, VERR_VMX_INVALID_VMCS_PTR
847 ret
848.valid_vmcs:
849 jnz .the_end
850 mov eax, VERR_INVALID_PARAMETER
851.the_end:
852 ret
853
854%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
855ALIGNCODE(16)
856BITS 64
857.sixtyfourbit_mode:
858 and esp, 0ffffffffh
859 mov ecx, [rsp + 4] ; enmFlush
860 mov edx, [rsp + 8] ; pDescriptor
861 xor eax, eax
862; invvpid rcx, qword [rdx]
863 DB 0x66, 0x0F, 0x38, 0x81, 0xA
864 mov r8d, VERR_INVALID_PARAMETER
865 cmovz eax, r8d
866 mov r9d, VERR_VMX_INVALID_VMCS_PTR
867 cmovc eax, r9d
868 jmp far [.fpret wrt rip]
869.fpret: ; 16:32 Pointer to .the_end.
870 dd .the_end, NAME(SUPR0AbsKernelCS)
871BITS 32
872%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
873ENDPROC VMXR0InvVPID
874
875
876%if GC_ARCH_BITS == 64
877;;
878; Executes INVLPGA
879;
880; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
881; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
882;
883;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
884BEGINPROC SVMR0InvlpgA
885%ifdef RT_ARCH_AMD64
886 %ifdef ASM_CALL64_GCC
887 mov rax, rdi
888 mov rcx, rsi
889 %else
890 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
891 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
892 ; values also set the upper 32 bits of the register to zero. Consequently
893 ; there is no need for an instruction movzlq.''
894 mov eax, ecx
895 mov rcx, rdx
896 %endif
897%else
898 mov eax, [esp + 4]
899 mov ecx, [esp + 0Ch]
900%endif
901 invlpga [xAX], ecx
902 ret
903ENDPROC SVMR0InvlpgA
904
905%else ; GC_ARCH_BITS != 64
906;;
907; Executes INVLPGA
908;
909; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
910; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
911;
912;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
913BEGINPROC SVMR0InvlpgA
914%ifdef RT_ARCH_AMD64
915 %ifdef ASM_CALL64_GCC
916 movzx rax, edi
917 mov ecx, esi
918 %else
919 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
920 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
921 ; values also set the upper 32 bits of the register to zero. Consequently
922 ; there is no need for an instruction movzlq.''
923 mov eax, ecx
924 mov ecx, edx
925 %endif
926%else
927 mov eax, [esp + 4]
928 mov ecx, [esp + 8]
929%endif
930 invlpga [xAX], ecx
931 ret
932ENDPROC SVMR0InvlpgA
933
934%endif ; GC_ARCH_BITS != 64
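; Editor's note: invlpga takes its operands implicitly, the virtual address in
; rAX/eAX and the ASID in ECX, which is why both variants above only load xAX
; and ecx before executing the instruction.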
935
936%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
937
938;/**
939; * Gets 64-bit GDTR and IDTR on darwin.
940; * @param pGdtr Where to store the 64-bit GDTR.
941; * @param pIdtr Where to store the 64-bit IDTR.
942; */
943;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
944ALIGNCODE(16)
945BEGINPROC hwaccmR0Get64bitGDTRandIDTR
946 db 0xea ; jmp far .sixtyfourbit_mode
947 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
948.the_end:
949 ret
950
951ALIGNCODE(16)
952BITS 64
953.sixtyfourbit_mode:
954 and esp, 0ffffffffh
955 mov ecx, [rsp + 4] ; pGdtr
956 mov edx, [rsp + 8] ; pIdtr
957 sgdt [rcx]
958 sidt [rdx]
959 jmp far [.fpret wrt rip]
960.fpret: ; 16:32 Pointer to .the_end.
961 dd .the_end, NAME(SUPR0AbsKernelCS)
962BITS 32
963ENDPROC hwaccmR0Get64bitGDTRandIDTR
964
965
966;/**
967; * Gets 64-bit CR3 on darwin.
968; * @returns CR3
969; */
970;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
971ALIGNCODE(16)
972BEGINPROC hwaccmR0Get64bitCR3
973 db 0xea ; jmp far .sixtyfourbit_mode
974 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
975.the_end:
976 ret
977
978ALIGNCODE(16)
979BITS 64
980.sixtyfourbit_mode:
981 mov rax, cr3
982 mov rdx, rax
983 shr rdx, 32
984 jmp far [.fpret wrt rip]
985.fpret: ; 16:32 Pointer to .the_end.
986 dd .the_end, NAME(SUPR0AbsKernelCS)
987BITS 32
988ENDPROC hwaccmR0Get64bitCR3
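; Editor's note: the 32-bit caller of hwaccmR0Get64bitCR3 receives the 64-bit
; CR3 in the usual edx:eax pair; the 64-bit stub above arranges this with
; mov rdx, rax / shr rdx, 32 before jumping back to 32-bit code.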
989
990%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
991
992
993
994;
995; The default setup of the StartVM routines.
996;
997%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
998 %define MY_NAME(name) name %+ _32
999%else
1000 %define MY_NAME(name) name
1001%endif
1002%ifdef RT_ARCH_AMD64
1003 %define MYPUSHAD MYPUSHAD64
1004 %define MYPOPAD MYPOPAD64
1005 %define MYPUSHSEGS MYPUSHSEGS64
1006 %define MYPOPSEGS MYPOPSEGS64
1007%else
1008 %define MYPUSHAD MYPUSHAD32
1009 %define MYPOPAD MYPOPAD32
1010 %define MYPUSHSEGS MYPUSHSEGS32
1011 %define MYPOPSEGS MYPOPSEGS32
1012%endif
1013
1014%include "HWACCMR0Mixed.mac"
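; Editor's note (assumption about the included file): HWACCMR0Mixed.mac builds
; the actual world-switch bodies using the MY_NAME / MYPUSHAD / MYPUSHSEGS
; definitions above. A routine declared there as, e.g.,
;
;       BEGINPROC MY_NAME(VMXR0StartVM32)
;
; therefore assembles as VMXR0StartVM32 in the normal build, as
; VMXR0StartVM32_32 for the hybrid 32-bit kernel pass, and as
; VMXR0StartVM32_64 for the second, 64-bit pass at the end of this file.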
1015
1016
1017%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1018 ;
1019 ; Write the wrapper procedures.
1020 ;
1021 ; These routines are probably being too paranoid about selector
1022 ; restoring, but better safe than sorry...
1023 ;
1024
1025; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
1026ALIGNCODE(16)
1027BEGINPROC VMXR0StartVM32
1028 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1029 je near NAME(VMXR0StartVM32_32)
1030
1031 ; stack frame.
1032 push esi
1033 push edi
1034 push fs
1035 push gs
1036
1037 ; jmp far .thunk64
1038 db 0xea
1039 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1040
1041ALIGNCODE(16)
1042BITS 64
1043.thunk64:
1044 sub esp, 20h
1045 mov edi, [rsp + 20h + 14h] ; fResume
1046 mov esi, [rsp + 20h + 18h] ; pCtx
1047 mov edx, [rsp + 20h + 1Ch] ; pCache
1048 call NAME(VMXR0StartVM32_64)
1049 add esp, 20h
1050 jmp far [.fpthunk32 wrt rip]
1051.fpthunk32: ; 16:32 Pointer to .thunk32.
1052 dd .thunk32, NAME(SUPR0AbsKernelCS)
1053
1054BITS 32
1055ALIGNCODE(16)
1056.thunk32:
1057 pop gs
1058 pop fs
1059 pop edi
1060 pop esi
1061 ret
1062ENDPROC VMXR0StartVM32
1063
1064; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx)
1065ALIGNCODE(16)
1066BEGINPROC VMXR0StartVM64
1067 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1068 jne .longmode
1069 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1070 ret
1071
1072.longmode:
1073 ; stack frame.
1074 push ebp
1075 mov ebp, esp
1076 and esp, 0fffffff0h
1077 push esi
1078 push edi
1079 push ebx
1080 push ds
1081 push es
1082 push fs
1083 push gs
1084 push ss
1085
1086 ; retf frame (64 -> 32).
1087 push 0
1088 push cs
1089 push 0
1090 push .thunk32
1091
1092 ; jmp far .thunk64
1093 db 0xea
1094 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1095BITS 64
1096.thunk64:
1097 and esp, 0ffffffffh
1098 and ebp, 0ffffffffh
1099 mov edi, [rbp + 8] ; fResume
1100 mov esi, [rbp + 12] ; pCtx
1101 mov edx, [rbp + 16] ; pCache
1102 sub rsp, 20h
1103 call NAME(VMXR0StartVM64_64)
1104 add rsp, 20h
1105 retf
1106BITS 32
1107.thunk32:
1108 pop ss
1109 pop gs
1110 pop fs
1111 pop es
1112 pop ds
1113 pop ebx
1114 pop edi
1115 pop esi
1116 leave
1117 ret
1118ENDPROC VMXR0StartVM64
1119
1120;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
1121ALIGNCODE(16)
1122BEGINPROC SVMR0VMRun
1123 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1124 je near NAME(SVMR0VMRun_32)
1125
1126 ; stack frame.
1127 push ebp
1128 mov ebp, esp
1129 and esp, 0fffffff0h
1130 push esi
1131 push edi
1132 push ebx
1133 push ds
1134 push es
1135 push fs
1136 push gs
1137 push ss
1138
1139 ; retf frame (64 -> 32).
1140 push 0
1141 push cs
1142 push 0
1143 push .thunk32
1144
1145 ; jmp far .thunk64
1146 db 0xea
1147 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1148BITS 64
1149.thunk64:
1150 and esp, 0ffffffffh
1151 and ebp, 0ffffffffh
1152 mov rdi, [rbp + 8] ; pVMCBHostPhys
1153 mov rsi, [rbp + 16] ; pVMCBPhys
1154 mov edx, [rbp + 24] ; pCtx
1155 sub rsp, 20h
1156 call NAME(SVMR0VMRun_64)
1157 add rsp, 20h
1158 retf
1159BITS 32
1160.thunk32:
1161 pop ss
1162 pop gs
1163 pop fs
1164 pop es
1165 pop ds
1166 pop ebx
1167 pop edi
1168 pop esi
1169 leave
1170 ret
1171ENDPROC SVMR0VMRun
1172
1173; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
1174ALIGNCODE(16)
1175BEGINPROC SVMR0VMRun64
1176 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1177 jne .longmode
1178 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1179 ret
1180
1181.longmode:
1182 ; stack frame.
1183 push ebp
1184 mov ebp, esp
1185 and esp, 0fffffff0h
1186 push esi
1187 push edi
1188 push ebx
1189 push ds
1190 push es
1191 push fs
1192 push gs
1193 push ss
1194
1195 ; retf frame (64 -> 32).
1196 push 0
1197 push cs
1198 push 0
1199 push .thunk32
1200
1201 ; jmp far .thunk64
1202 db 0xea
1203 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1204BITS 64
1205.thunk64:
1206 and esp, 0ffffffffh
1207 and ebp, 0ffffffffh
1208 mov rdi, [rbp + 8] ; pVMCBHostPhys
1209 mov rsi, [rbp + 16] ; pVMCBPhys
1210 mov edx, [rbp + 24] ; pCtx
1211 sub rsp, 20h
1212 call NAME(SVMR0VMRun64_64)
1213 add rsp, 20h
1214 retf
1215BITS 32
1216.thunk32:
1217 pop ss
1218 pop gs
1219 pop fs
1220 pop es
1221 pop ds
1222 pop ebx
1223 pop edi
1224 pop esi
1225 leave
1226 ret
1227ENDPROC SVMR0VMRun64
1228
1229 ;
1230 ; Do it a second time pretending we're a 64-bit host.
1231 ;
1232 ; This *HAS* to be done at the very end of the file to avoid restoring
1233 ; macros. So, add new code *BEFORE* this mess.
1234 ;
1235 BITS 64
1236 %undef RT_ARCH_X86
1237 %define RT_ARCH_AMD64
1238 %undef ASM_CALL64_MSC
1239 %define ASM_CALL64_GCC
1240 %define xS 8
1241 %define xSP rsp
1242 %define xBP rbp
1243 %define xAX rax
1244 %define xBX rbx
1245 %define xCX rcx
1246 %define xDX rdx
1247 %define xDI rdi
1248 %define xSI rsi
1249 %define MY_NAME(name) name %+ _64
1250 %define MYPUSHAD MYPUSHAD64
1251 %define MYPOPAD MYPOPAD64
1252 %define MYPUSHSEGS MYPUSHSEGS64
1253 %define MYPOPSEGS MYPOPSEGS64
1254
1255 %include "HWACCMR0Mixed.mac"
1256%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL