VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 15213

Last change on this file since 15213 was 15213, checked in by vboxsync, 16 years ago

HWACCMA.asm: code alignment.

1; $Id: HWACCMR0A.asm 15213 2008-12-09 23:12:50Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37 %define vmsave int3
38 %define vmload int3
39 %define vmrun int3
40 %define clgi int3
41 %define stgi int3
42 %macro invlpga 2,
43 int3
44 %endmacro
45%endif
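; Added note: with the stubs above, any use of these VMX/SVM mnemonics in the
; OS/2 build assembles to an int3 breakpoint instead of the real instruction,
; for example:
;       vmwrite rcx, rdx        ; becomes: int3
;       invlpga [xAX], ecx      ; becomes: int3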
46
47;*******************************************************************************
48;* Defined Constants And Macros *
49;*******************************************************************************
50%ifdef RT_ARCH_AMD64
51 %define MAYBE_64_BIT
52%endif
53%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
54 %define MAYBE_64_BIT
55%endif
56
57
58;; This is too risky wrt. stability, performance and correctness.
59;%define VBOX_WITH_DR6_EXPERIMENT 1
60
61;; @def MYPUSHAD
62; Macro generating an equivalent to pushad
63
64;; @def MYPOPAD
65; Macro generating an equivalent to popad
66
67;; @def MYPUSHSEGS
68; Macro saving all segment registers on the stack.
69; @param 1 full width register name
70 ; @param 2 16-bit register name for \a 1.
71
72;; @def MYPOPSEGS
73; Macro restoring all segment registers on the stack
74; @param 1 full width register name
75 ; @param 2 16-bit register name for \a 1.
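; Minimal usage sketch (illustrative only; the real call sites are in the code
; included from HWACCMR0Mixed.mac below) - the macros are invoked with a
; full-width register and its 16-bit alias:
;       MYPUSHSEGS xAX, ax      ; save host segment registers (and FS/GS bases)
;       ; ... set up and run the guest ...
;       MYPOPSEGS  xAX, ax      ; restore them in reverse order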
76
77%ifdef MAYBE_64_BIT
78 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
79 %macro LOADGUESTMSR 2
80 mov rcx, %1
81 rdmsr
82 push rdx
83 push rax
84 mov edx, dword [xSI + %2 + 4]
85 mov eax, dword [xSI + %2]
86 wrmsr
87 %endmacro
88
89 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
90 ; Only really useful for the GS kernel base, as that one can be changed behind our back (swapgs)
91 %macro LOADHOSTMSREX 2
92 mov rcx, %1
93 rdmsr
94 mov dword [xSI + %2], eax
95 mov dword [xSI + %2 + 4], edx
96 pop rax
97 pop rdx
98 wrmsr
99 %endmacro
100
101 ; Restore the corresponding host MSR saved by LOADGUESTMSR (trashes rdx & rcx)
102 %macro LOADHOSTMSR 1
103 mov rcx, %1
104 pop rax
105 pop rdx
106 wrmsr
107 %endmacro
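 ; Pairing sketch (illustrative; the MSR and CPUMCTX field names here are
 ; assumptions based on x86.mac/cpum.mac): LOADGUESTMSR pushes the host value,
 ; so the restores must come in reverse (LIFO) order:
 ;       LOADGUESTMSR  MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 ;       LOADGUESTMSR  MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
 ;       ; ... run the guest ...
 ;       LOADHOSTMSREX MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
 ;       LOADHOSTMSR   MSR_K8_KERNEL_GS_BASE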
108%endif
109
110%ifdef ASM_CALL64_GCC
111 %macro MYPUSHAD64 0
112 push r15
113 push r14
114 push r13
115 push r12
116 push rbx
117 %endmacro
118 %macro MYPOPAD64 0
119 pop rbx
120 pop r12
121 pop r13
122 pop r14
123 pop r15
124 %endmacro
125
126%else ; ASM_CALL64_MSC
127 %macro MYPUSHAD64 0
128 push r15
129 push r14
130 push r13
131 push r12
132 push rbx
133 push rsi
134 push rdi
135 %endmacro
136 %macro MYPOPAD64 0
137 pop rdi
138 pop rsi
139 pop rbx
140 pop r12
141 pop r13
142 pop r14
143 pop r15
144 %endmacro
145%endif
146
147; trashes rax, rdx & rcx
148%macro MYPUSHSEGS64 2
149 mov %2, es
150 push %1
151 mov %2, ds
152 push %1
153
154 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris, OTOH, doesn't, so we must save and restore it ourselves.
155 mov ecx, MSR_K8_FS_BASE
156 rdmsr
157 push rdx
158 push rax
159 push fs
160
161 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
162 mov ecx, MSR_K8_GS_BASE
163 rdmsr
164 push rdx
165 push rax
166 push gs
167%endmacro
168
169; trashes rax, rdx & rcx
170%macro MYPOPSEGS64 2
171 ; Note: do not step through this code with a debugger!
172 pop gs
173 pop rax
174 pop rdx
175 mov ecx, MSR_K8_GS_BASE
176 wrmsr
177
178 pop fs
179 pop rax
180 pop rdx
181 mov ecx, MSR_K8_FS_BASE
182 wrmsr
183 ; Now it's safe to step again
184
185 pop %1
186 mov ds, %2
187 pop %1
188 mov es, %2
189%endmacro
190
191%macro MYPUSHAD32 0
192 pushad
193%endmacro
194%macro MYPOPAD32 0
195 popad
196%endmacro
197
198%macro MYPUSHSEGS32 2
199 push ds
200 push es
201 push fs
202 push gs
203%endmacro
204%macro MYPOPSEGS32 2
205 pop gs
206 pop fs
207 pop es
208 pop ds
209%endmacro
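; Added remark: the 32-bit variants take the same two parameters as their
; 64-bit counterparts so call sites can stay identical, but they do not
; actually use them.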
210
211
212;*******************************************************************************
213;* External Symbols *
214;*******************************************************************************
215%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
216extern NAME(SUPR0AbsIs64bit)
217extern NAME(SUPR0Abs64bitKernelCS)
218extern NAME(SUPR0Abs64bitKernelSS)
219extern NAME(SUPR0Abs64bitKernelDS)
220extern NAME(SUPR0AbsKernelCS)
221%endif
222
223
224;*******************************************************************************
225;* Global Variables *
226;*******************************************************************************
227%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
228BEGINDATA
229;;
230; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
231; needing to clobber a register. (This trick doesn't quite work for PE btw.
232; but that's not relevant atm.)
233GLOBALNAME g_fVMXIs64bitHost
234 dd NAME(SUPR0AbsIs64bit)
235%endif
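; Added sketch of how this flag is used below (the pattern recurs throughout
; the file; the far jump is hand-assembled, presumably because the target
; selector is only available as an absolute symbol):
;       cmp  byte [NAME(g_fVMXIs64bitHost)], 0
;       jz   .legacy_mode                ; plain 32-bit host kernel
;       db   0xea                        ; jmp far ptr16:32 ...
;       dd   .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) ; ... into the 64-bit kernel CS
; .legacy_mode: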
236
237
238BEGINCODE
239
240
241;/**
242; * Executes VMWRITE, 64-bit value.
243; *
244; * @returns VBox status code
245; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
246; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
247; */
248ALIGNCODE(16)
249BEGINPROC VMXWriteVMCS64
250%ifdef RT_ARCH_AMD64
251 %ifdef ASM_CALL64_GCC
252 and edi, 0ffffffffh
253 xor rax, rax
254 vmwrite rdi, rsi
255 %else
256 and ecx, 0ffffffffh
257 xor rax, rax
258 vmwrite rcx, rdx
259 %endif
260%else ; RT_ARCH_X86
261 mov ecx, [esp + 4] ; idxField
262 lea edx, [esp + 8] ; &u64Data
263 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
264 cmp byte [NAME(g_fVMXIs64bitHost)], 0
265 jz .legacy_mode
266 db 0xea ; jmp far .sixtyfourbit_mode
267 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
268.legacy_mode:
269 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
270 vmwrite ecx, [edx] ; low dword
271 jz .done
272 jc .done
273 inc ecx
274 xor eax, eax
275 vmwrite ecx, [edx + 4] ; high dword
276.done:
277%endif ; RT_ARCH_X86
278 jnc .valid_vmcs
279 mov eax, VERR_VMX_INVALID_VMCS_PTR
280 ret
281.valid_vmcs:
282 jnz .the_end
283 mov eax, VERR_VMX_INVALID_VMCS_FIELD
284.the_end:
285 ret
286
287%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
288ALIGNCODE(16)
289BITS 64
290.sixtyfourbit_mode:
291 and edx, 0ffffffffh
292 and ecx, 0ffffffffh
293 xor eax, eax
294 vmwrite rcx, [rdx]
295 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
296 cmovz eax, r8d
297 mov r9d, VERR_VMX_INVALID_VMCS_PTR
298 cmovc eax, r9d
299 jmp far [.fpret wrt rip]
300.fpret: ; 16:32 Pointer to .the_end.
301 dd .the_end, NAME(SUPR0AbsKernelCS)
302BITS 32
303%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
304ENDPROC VMXWriteVMCS64
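; Added note on the flag convention checked above and in the routines below
; (the VERR_* mapping mirrors the code itself):
;       vmwrite rcx, rdx
;       jc      .vmfail_invalid  ; CF=1: VMfailInvalid -> VERR_VMX_INVALID_VMCS_PTR
;       jz      .vmfail_valid    ; ZF=1: VMfailValid   -> VERR_VMX_INVALID_VMCS_FIELD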
305
306
307;/**
308; * Executes VMREAD, 64-bit value
309; *
310; * @returns VBox status code
311; * @param idxField VMCS index
312; * @param pData Ptr to store VM field value
313; */
314;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
315ALIGNCODE(16)
316BEGINPROC VMXReadVMCS64
317%ifdef RT_ARCH_AMD64
318 %ifdef ASM_CALL64_GCC
319 and edi, 0ffffffffh
320 xor rax, rax
321 vmread [rsi], rdi
322 %else
323 and ecx, 0ffffffffh
324 xor rax, rax
325 vmread [rdx], rcx
326 %endif
327%else ; RT_ARCH_X86
328 mov ecx, [esp + 4] ; idxField
329 mov edx, [esp + 8] ; pData
330 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
331 cmp byte [NAME(g_fVMXIs64bitHost)], 0
332 jz .legacy_mode
333 db 0xea ; jmp far .sixtyfourbit_mode
334 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
335.legacy_mode:
336 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
337 vmread [edx], ecx ; low dword
338 jz .done
339 jc .done
340 inc ecx
341 xor eax, eax
342 vmread [edx + 4], ecx ; high dword
343.done:
344%endif ; RT_ARCH_X86
345 jnc .valid_vmcs
346 mov eax, VERR_VMX_INVALID_VMCS_PTR
347 ret
348.valid_vmcs:
349 jnz .the_end
350 mov eax, VERR_VMX_INVALID_VMCS_FIELD
351.the_end:
352 ret
353
354%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
355ALIGNCODE(16)
356BITS 64
357.sixtyfourbit_mode:
358 and edx, 0ffffffffh
359 and ecx, 0ffffffffh
360 xor eax, eax
361 vmread [rdx], rcx
362 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
363 cmovz eax, r8d
364 mov r9d, VERR_VMX_INVALID_VMCS_PTR
365 cmovc eax, r9d
366 jmp far [.fpret wrt rip]
367.fpret: ; 16:32 Pointer to .the_end.
368 dd .the_end, NAME(SUPR0AbsKernelCS)
369BITS 32
370%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
371ENDPROC VMXReadVMCS64
372
373
374;/**
375; * Executes VMREAD, 32-bit value.
376; *
377; * @returns VBox status code
378; * @param idxField VMCS index
379; * @param pu32Data Ptr to store VM field value
380; */
381;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
382ALIGNCODE(16)
383BEGINPROC VMXReadVMCS32
384%ifdef RT_ARCH_AMD64
385 %ifdef ASM_CALL64_GCC
386 and edi, 0ffffffffh
387 xor rax, rax
388 vmread r10, rdi
389 mov [rsi], r10d
390 %else
391 and ecx, 0ffffffffh
392 xor rax, rax
393 vmread r10, rcx
394 mov [rdx], r10d
395 %endif
396%else ; RT_ARCH_X86
397 mov ecx, [esp + 4] ; idxField
398 mov edx, [esp + 8] ; pu32Data
399 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
400 cmp byte [NAME(g_fVMXIs64bitHost)], 0
401 jz .legacy_mode
402 db 0xea ; jmp far .sixtyfourbit_mode
403 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
404.legacy_mode:
405 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
406 xor eax, eax
407 vmread [edx], ecx
408%endif ; RT_ARCH_X86
409 jnc .valid_vmcs
410 mov eax, VERR_VMX_INVALID_VMCS_PTR
411 ret
412.valid_vmcs:
413 jnz .the_end
414 mov eax, VERR_VMX_INVALID_VMCS_FIELD
415.the_end:
416 ret
417
418%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
419ALIGNCODE(16)
420BITS 64
421.sixtyfourbit_mode:
422 and edx, 0ffffffffh
423 and ecx, 0ffffffffh
424 xor eax, eax
425 vmread r10, rcx
426 mov [rdx], r10d
427 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
428 cmovz eax, r8d
429 mov r9d, VERR_VMX_INVALID_VMCS_PTR
430 cmovc eax, r9d
431 jmp far [.fpret wrt rip]
432.fpret: ; 16:32 Pointer to .the_end.
433 dd .the_end, NAME(SUPR0AbsKernelCS)
434BITS 32
435%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
436ENDPROC VMXReadVMCS32
437
438
439;/**
440; * Executes VMWRITE, 32-bit value.
441; *
442; * @returns VBox status code
443; * @param idxField VMCS index
444; * @param u32Data VM field value
445; */
446;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
447ALIGNCODE(16)
448BEGINPROC VMXWriteVMCS32
449%ifdef RT_ARCH_AMD64
450 %ifdef ASM_CALL64_GCC
451 and edi, 0ffffffffh
452 and esi, 0ffffffffh
453 xor rax, rax
454 vmwrite rdi, rsi
455 %else
456 and ecx, 0ffffffffh
457 and edx, 0ffffffffh
458 xor rax, rax
459 vmwrite rcx, rdx
460 %endif
461%else ; RT_ARCH_X86
462 mov ecx, [esp + 4] ; idxField
463 mov edx, [esp + 8] ; u32Data
464 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
465 cmp byte [NAME(g_fVMXIs64bitHost)], 0
466 jz .legacy_mode
467 db 0xea ; jmp far .sixtyfourbit_mode
468 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
469.legacy_mode:
470 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
471 xor eax, eax
472 vmwrite ecx, edx
473%endif ; RT_ARCH_X86
474 jnc .valid_vmcs
475 mov eax, VERR_VMX_INVALID_VMCS_PTR
476 ret
477.valid_vmcs:
478 jnz .the_end
479 mov eax, VERR_VMX_INVALID_VMCS_FIELD
480.the_end:
481 ret
482
483%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
484ALIGNCODE(16)
485BITS 64
486.sixtyfourbit_mode:
487 and edx, 0ffffffffh
488 and ecx, 0ffffffffh
489 xor eax, eax
490 vmwrite rcx, rdx
491 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
492 cmovz eax, r8d
493 mov r9d, VERR_VMX_INVALID_VMCS_PTR
494 cmovc eax, r9d
495 jmp far [.fpret wrt rip]
496.fpret: ; 16:32 Pointer to .the_end.
497 dd .the_end, NAME(SUPR0AbsKernelCS)
498BITS 32
499%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
500ENDPROC VMXWriteVMCS32
501
502
503;/**
504; * Executes VMXON
505; *
506; * @returns VBox status code
507; * @param HCPhysVMXOn Physical address of VMXON structure
508; */
509;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
510BEGINPROC VMXEnable
511%ifdef RT_ARCH_AMD64
512 xor rax, rax
513 %ifdef ASM_CALL64_GCC
514 push rdi
515 %else
516 push rcx
517 %endif
518 vmxon [rsp]
519%else ; RT_ARCH_X86
520 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
521 cmp byte [NAME(g_fVMXIs64bitHost)], 0
522 jz .legacy_mode
523 db 0xea ; jmp far .sixtyfourbit_mode
524 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
525.legacy_mode:
526 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
527 xor eax, eax
528 vmxon [esp + 4]
529%endif ; RT_ARCH_X86
530 jnc .good
531 mov eax, VERR_VMX_INVALID_VMXON_PTR
532 jmp .the_end
533
534.good:
535 jnz .the_end
536 mov eax, VERR_VMX_GENERIC
537
538.the_end:
539%ifdef RT_ARCH_AMD64
540 add rsp, 8
541%endif
542 ret
543
544%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
545ALIGNCODE(16)
546BITS 64
547.sixtyfourbit_mode:
548 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
549 and edx, 0ffffffffh
550 xor eax, eax
551 vmxon [rdx]
552 mov r8d, VERR_INVALID_PARAMETER
553 cmovz eax, r8d
554 mov r9d, VERR_VMX_INVALID_VMCS_PTR
555 cmovc eax, r9d
556 jmp far [.fpret wrt rip]
557.fpret: ; 16:32 Pointer to .the_end.
558 dd .the_end, NAME(SUPR0AbsKernelCS)
559BITS 32
560%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
561ENDPROC VMXEnable
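; Added remark on the VMXON operand: VMXON takes a 64-bit memory operand. On
; AMD64 the physical address arrives in a register and is pushed so that a
; memory operand exists (hence the 'add rsp, 8' cleanup above); on x86 the
; 8-byte RTHCPHYS is already on the caller's stack at [esp + 4].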
562
563
564;/**
565; * Executes VMXOFF
566; */
567;DECLASM(void) VMXDisable(void);
568BEGINPROC VMXDisable
569%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
570 cmp byte [NAME(g_fVMXIs64bitHost)], 0
571 jz .legacy_mode
572 db 0xea ; jmp far .sixtyfourbit_mode
573 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
574.legacy_mode:
575%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
576 vmxoff
577.the_end:
578 ret
579
580%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
581ALIGNCODE(16)
582BITS 64
583.sixtyfourbit_mode:
584 vmxoff
585 jmp far [.fpret wrt rip]
586.fpret: ; 16:32 Pointer to .the_end.
587 dd .the_end, NAME(SUPR0AbsKernelCS)
588BITS 32
589%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
590ENDPROC VMXDisable
591
592
593;/**
594; * Executes VMCLEAR
595; *
596; * @returns VBox status code
597; * @param HCPhysVMCS Physical address of VM control structure
598; */
599;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
600ALIGNCODE(16)
601BEGINPROC VMXClearVMCS
602%ifdef RT_ARCH_AMD64
603 xor rax, rax
604 %ifdef ASM_CALL64_GCC
605 push rdi
606 %else
607 push rcx
608 %endif
609 vmclear [rsp]
610%else ; RT_ARCH_X86
611 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
612 cmp byte [NAME(g_fVMXIs64bitHost)], 0
613 jz .legacy_mode
614 db 0xea ; jmp far .sixtyfourbit_mode
615 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
616.legacy_mode:
617 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
618 xor eax, eax
619 vmclear [esp + 4]
620%endif ; RT_ARCH_X86
621 jnc .the_end
622 mov eax, VERR_VMX_INVALID_VMCS_PTR
623.the_end:
624%ifdef RT_ARCH_AMD64
625 add rsp, 8
626%endif
627 ret
628
629%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
630ALIGNCODE(16)
631BITS 64
632.sixtyfourbit_mode:
633 lea rdx, [rsp + 4] ; &HCPhysVMCS
634 and edx, 0ffffffffh
635 xor eax, eax
636 vmclear [rdx]
637 mov r9d, VERR_VMX_INVALID_VMCS_PTR
638 cmovc eax, r9d
639 jmp far [.fpret wrt rip]
640.fpret: ; 16:32 Pointer to .the_end.
641 dd .the_end, NAME(SUPR0AbsKernelCS)
642BITS 32
643%endif
644ENDPROC VMXClearVMCS
645
646
647;/**
648; * Executes VMPTRLD
649; *
650; * @returns VBox status code
651; * @param HCPhysVMCS Physical address of VMCS structure
652; */
653;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
654ALIGNCODE(16)
655BEGINPROC VMXActivateVMCS
656%ifdef RT_ARCH_AMD64
657 xor rax, rax
658 %ifdef ASM_CALL64_GCC
659 push rdi
660 %else
661 push rcx
662 %endif
663 vmptrld [rsp]
664%else
665 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
666 cmp byte [NAME(g_fVMXIs64bitHost)], 0
667 jz .legacy_mode
668 db 0xea ; jmp far .sixtyfourbit_mode
669 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
670.legacy_mode:
671 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
672 xor eax, eax
673 vmptrld [esp + 4]
674%endif
675 jnc .the_end
676 mov eax, VERR_VMX_INVALID_VMCS_PTR
677.the_end:
678%ifdef RT_ARCH_AMD64
679 add rsp, 8
680%endif
681 ret
682
683%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
684ALIGNCODE(16)
685BITS 64
686.sixtyfourbit_mode:
687 lea rdx, [rsp + 4] ; &HCPhysVMCS
688 and edx, 0ffffffffh
689 xor eax, eax
690 vmptrld [rdx]
691 mov r9d, VERR_VMX_INVALID_VMCS_PTR
692 cmovc eax, r9d
693 jmp far [.fpret wrt rip]
694.fpret: ; 16:32 Pointer to .the_end.
695 dd .the_end, NAME(SUPR0AbsKernelCS)
696BITS 32
697%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
698ENDPROC VMXActivateVMCS
699
700
701;/**
702; * Executes VMPTRST
703; *
704; * @returns VBox status code
705; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
706; */
707;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
708BEGINPROC VMXGetActivateVMCS
709%ifdef RT_OS_OS2
710 mov eax, VERR_NOT_SUPPORTED
711 ret
712%else
713 %ifdef RT_ARCH_AMD64
714 %ifdef ASM_CALL64_GCC
715 vmptrst qword [rdi]
716 %else
717 vmptrst qword [rcx]
718 %endif
719 %else
720 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
721 cmp byte [NAME(g_fVMXIs64bitHost)], 0
722 jz .legacy_mode
723 db 0xea ; jmp far .sixtyfourbit_mode
724 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
725.legacy_mode:
726 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
727 vmptrst qword [esp+04h]
728 %endif
729 xor eax, eax
730.the_end:
731 ret
732
733 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
734ALIGNCODE(16)
735BITS 64
736.sixtyfourbit_mode:
737 lea rdx, [rsp + 4] ; &HCPhysVMCS
738 and edx, 0ffffffffh
739 vmptrst qword [rdx]
740 xor eax, eax
741 jmp far [.fpret wrt rip]
742.fpret: ; 16:32 Pointer to .the_end.
743 dd .the_end, NAME(SUPR0AbsKernelCS)
744BITS 32
745 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
746%endif
747ENDPROC VMXGetActivateVMCS
748
749;/**
750; * Invalidate a page using invept
751; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
752; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
753; */
754;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
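; Added note: invept/invvpid are emitted as raw bytes below, presumably
; because the assemblers targeted at the time lacked the mnemonics; the
; commented mnemonic above each DB line is the intended instruction, e.g.:
;       DB 0x66, 0x0F, 0x38, 0x80, 0x3E  ; 66 0F 38 80 /r = invept, ModRM 3Eh = rdi, [rsi]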
755BEGINPROC VMXR0InvEPT
756%ifdef RT_ARCH_AMD64
757 %ifdef ASM_CALL64_GCC
758 and edi, 0ffffffffh
759 xor rax, rax
760; invept rdi, qword [rsi]
761 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
762 %else
763 and ecx, 0ffffffffh
764 xor rax, rax
765; invept rcx, qword [rdx]
766 DB 0x66, 0x0F, 0x38, 0x80, 0xA
767 %endif
768%else
769 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
770 cmp byte [NAME(g_fVMXIs64bitHost)], 0
771 jz .legacy_mode
772 db 0xea ; jmp far .sixtyfourbit_mode
773 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
774.legacy_mode:
775 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
776 mov eax, [esp + 4]
777 mov ecx, [esp + 8]
778; invept eax, qword [ecx]
779 DB 0x66, 0x0F, 0x38, 0x80, 0x1
780%endif
781 jnc .valid_vmcs
782 mov eax, VERR_VMX_INVALID_VMCS_PTR
783 ret
784.valid_vmcs:
785 jnz .the_end
786 mov eax, VERR_INVALID_PARAMETER
787.the_end:
788 ret
789
790%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
791ALIGNCODE(16)
792BITS 64
793.sixtyfourbit_mode:
794 and esp, 0ffffffffh
795 mov ecx, [rsp + 4] ; enmFlush
796 mov edx, [rsp + 8] ; pDescriptor
797 xor eax, eax
798; invept rcx, qword [rdx]
799 DB 0x66, 0x0F, 0x38, 0x80, 0xA
800 mov r8d, VERR_INVALID_PARAMETER
801 cmovz eax, r8d
802 mov r9d, VERR_VMX_INVALID_VMCS_PTR
803 cmovc eax, r9d
804 jmp far [.fpret wrt rip]
805.fpret: ; 16:32 Pointer to .the_end.
806 dd .the_end, NAME(SUPR0AbsKernelCS)
807BITS 32
808%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
809ENDPROC VMXR0InvEPT
810
811
812;/**
813; * Invalidate a page using invvpid
814; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
815; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
816; */
817;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
818BEGINPROC VMXR0InvVPID
819%ifdef RT_ARCH_AMD64
820 %ifdef ASM_CALL64_GCC
821 and edi, 0ffffffffh
822 xor rax, rax
823 ;invvpid rdi, qword [rsi]
824 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
825 %else
826 and ecx, 0ffffffffh
827 xor rax, rax
828; invvpid rcx, qword [rdx]
829 DB 0x66, 0x0F, 0x38, 0x81, 0xA
830 %endif
831%else
832 %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
833 cmp byte [NAME(g_fVMXIs64bitHost)], 0
834 jz .legacy_mode
835 db 0xea ; jmp far .sixtyfourbit_mode
836 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
837.legacy_mode:
838 %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
839 mov eax, [esp + 4]
840 mov ecx, [esp + 8]
841; invvpid eax, qword [ecx]
842 DB 0x66, 0x0F, 0x38, 0x81, 0x1
843%endif
844 jnc .valid_vmcs
845 mov eax, VERR_VMX_INVALID_VMCS_PTR
846 ret
847.valid_vmcs:
848 jnz .the_end
849 mov eax, VERR_INVALID_PARAMETER
850.the_end:
851 ret
852
853%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
854ALIGNCODE(16)
855BITS 64
856.sixtyfourbit_mode:
857 and esp, 0ffffffffh
858 mov ecx, [rsp + 4] ; enmFlush
859 mov edx, [rsp + 8] ; pDescriptor
860 xor eax, eax
861; invvpid rcx, qword [rdx]
862 DB 0x66, 0x0F, 0x38, 0x81, 0xA
863 mov r8d, VERR_INVALID_PARAMETER
864 cmovz eax, r8d
865 mov r9d, VERR_VMX_INVALID_VMCS_PTR
866 cmovc eax, r9d
867 jmp far [.fpret wrt rip]
868.fpret: ; 16:32 Pointer to .the_end.
869 dd .the_end, NAME(SUPR0AbsKernelCS)
870BITS 32
871%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
872ENDPROC VMXR0InvVPID
873
874
875%if GC_ARCH_BITS == 64
876;;
877; Executes INVLPGA
878;
879; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
880; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
881;
882;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
883BEGINPROC SVMR0InvlpgA
884%ifdef RT_ARCH_AMD64
885 %ifdef ASM_CALL64_GCC
886 mov rax, rdi
887 mov rcx, rsi
888 %else
889 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
890 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
891 ; values also set the upper 32 bits of the register to zero. Consequently
892 ; there is no need for an instruction movzlq.''
893 mov eax, ecx
894 mov rcx, rdx
895 %endif
896%else
897 mov eax, [esp + 4]
898 mov ecx, [esp + 0Ch]
899%endif
900 invlpga [xAX], ecx
901 ret
902ENDPROC SVMR0InvlpgA
903
904%else ; GC_ARCH_BITS != 64
905;;
906; Executes INVLPGA
907;
908; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
909; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
910;
911;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
912BEGINPROC SVMR0InvlpgA
913%ifdef RT_ARCH_AMD64
914 %ifdef ASM_CALL64_GCC
915 movzx rax, edi
916 mov ecx, esi
917 %else
918 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
919 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
920 ; values also set the upper 32 bits of the register to zero. Consequently
921 ; there is no need for an instruction movzlq.''
922 mov eax, ecx
923 mov ecx, edx
924 %endif
925%else
926 mov eax, [esp + 4]
927 mov ecx, [esp + 8]
928%endif
929 invlpga [xAX], ecx
930 ret
931ENDPROC SVMR0InvlpgA
932
933%endif ; GC_ARCH_BITS != 64
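; Added remark on INVLPGA: the instruction takes its operands implicitly
; (rAX/eAX holds the virtual address, ECX the ASID), which is why both
; variants above simply load those registers and issue 'invlpga [xAX], ecx'.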
934
935%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
936
937;/**
938; * Gets 64-bit GDTR and IDTR on darwin.
939; * @param pGdtr Where to store the 64-bit GDTR.
940; * @param pIdtr Where to store the 64-bit IDTR.
941; */
942;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
943ALIGNCODE(16)
944BEGINPROC hwaccmR0Get64bitGDTRandIDTR
945 db 0xea ; jmp far .sixtyfourbit_mode
946 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
947.the_end:
948 ret
949
950ALIGNCODE(16)
951BITS 64
952.sixtyfourbit_mode:
953 and esp, 0ffffffffh
954 mov ecx, [rsp + 4] ; pGdtr
955 mov edx, [rsp + 8] ; pIdtr
956 sgdt [rcx]
957 sidt [rdx]
958 jmp far [.fpret wrt rip]
959.fpret: ; 16:32 Pointer to .the_end.
960 dd .the_end, NAME(SUPR0AbsKernelCS)
961BITS 32
962ENDPROC hwaccmR0Get64bitGDTRandIDTR
963
964
965;/**
966; * Gets 64-bit CR3 on darwin.
967; * @returns CR3
968; */
969;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
970ALIGNCODE(16)
971BEGINPROC hwaccmR0Get64bitCR3
972 db 0xea ; jmp far .sixtyfourbit_mode
973 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
974.the_end:
975 ret
976
977ALIGNCODE(16)
978BITS 64
979.sixtyfourbit_mode:
980 mov rax, cr3
981 mov rdx, rax
982 shr rdx, 32
983 jmp far [.fpret wrt rip]
984.fpret: ; 16:32 Pointer to .the_end.
985 dd .the_end, NAME(SUPR0AbsKernelCS)
986BITS 32
987ENDPROC hwaccmR0Get64bitCR3
988
989%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
990
991
992
993;
994; The default setup of the StartVM routines.
995;
996%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
997 %define MY_NAME(name) name %+ _32
998%else
999 %define MY_NAME(name) name
1000%endif
1001%ifdef RT_ARCH_AMD64
1002 %define MYPUSHAD MYPUSHAD64
1003 %define MYPOPAD MYPOPAD64
1004 %define MYPUSHSEGS MYPUSHSEGS64
1005 %define MYPOPSEGS MYPOPSEGS64
1006%else
1007 %define MYPUSHAD MYPUSHAD32
1008 %define MYPOPAD MYPOPAD32
1009 %define MYPUSHSEGS MYPUSHSEGS32
1010 %define MYPOPSEGS MYPOPSEGS32
1011%endif
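; Token-pasting sketch (illustrative; assumes HWACCMR0Mixed.mac declares its
; procedures via MY_NAME): with the hybrid define active,
;       BEGINPROC MY_NAME(VMXR0StartVM32)   ; expands to VMXR0StartVM32_32
; while the second pass at the end of this file re-includes the same code with
; MY_NAME(name) mapping to name %+ _64.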
1012
1013%include "HWACCMR0Mixed.mac"
1014
1015
1016%ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1017 ;
1018 ; Write the wrapper procedures.
1019 ;
1020 ; These routines are probably being too paranoid about selector
1021 ; restoring, but better safe than sorry...
1022 ;
1023
1024; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
1025ALIGNCODE(16)
1026BEGINPROC VMXR0StartVM32
1027 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1028 je near NAME(VMXR0StartVM32_32)
1029
1030 ; stack frame.
1031 push esi
1032 push edi
1033 push fs
1034 push gs
1035
1036 ; jmp far .thunk64
1037 db 0xea
1038 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1039
1040ALIGNCODE(16)
1041BITS 64
1042.thunk64:
1043 sub esp, 20h
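        ; Added stack sketch (offsets derived from the pushes above; we are
        ; still on the 32-bit caller's stack):
        ;   [rsp+00h..1Fh]   scratch reserved by the sub above
        ;   [rsp+20h+00h]    saved gs, fs, edi, esi (four dwords)
        ;   [rsp+20h+10h]    caller's 32-bit return address
        ;   [rsp+20h+14h]    fResume
        ;   [rsp+20h+18h]    pCtx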
1044 mov edi, [rsp + 20h + 14h] ; fResume
1045 mov esi, [rsp + 20h + 18h] ; pCtx
1046 call NAME(VMXR0StartVM32_64)
1047 add esp, 20h
1048 jmp far [.fpthunk32 wrt rip]
1049.fpthunk32: ; 16:32 Pointer to .thunk32.
1050 dd .thunk32, NAME(SUPR0AbsKernelCS)
1051
1052BITS 32
1053ALIGNCODE(16)
1054.thunk32:
1055 pop gs
1056 pop fs
1057 pop edi
1058 pop esi
1059 ret
1060ENDPROC VMXR0StartVM32
1061
1062; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx)
1063ALIGNCODE(16)
1064BEGINPROC VMXR0StartVM64
1065 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1066 jne .longmode
1067 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1068 ret
1069
1070.longmode:
1071 ; stack frame.
1072 push ebp
1073 mov ebp, esp
1074 and esp, 0fffffff0h
1075 push esi
1076 push edi
1077 push ebx
1078 push ds
1079 push es
1080 push fs
1081 push gs
1082 push ss
1083
1084 ; retf frame (64 -> 32).
1085 push 0
1086 push cs
1087 push 0
1088 push .thunk32
1089
1090 ; jmp far .thunk64
1091 db 0xea
1092 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1093BITS 64
1094.thunk64:
1095 and esp, 0ffffffffh
1096 and ebp, 0ffffffffh
1097 mov edi, [rbp + 8] ; fResume
1098 mov esi, [rbp + 12] ; pCtx
1099 sub rsp, 20h
1100 call NAME(VMXR0StartVM64_64)
1101 add rsp, 20h
1102 retf
1103BITS 32
1104.thunk32:
1105 pop ss
1106 pop gs
1107 pop fs
1108 pop es
1109 pop ds
1110 pop ebx
1111 pop edi
1112 pop esi
1113 leave
1114 ret
1115ENDPROC VMXR0StartVM64
1116
1117;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
1118ALIGNCODE(16)
1119BEGINPROC SVMR0VMRun
1120 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1121 je near NAME(SVMR0VMRun_32)
1122
1123 ; stack frame.
1124 push ebp
1125 mov ebp, esp
1126 and esp, 0fffffff0h
1127 push esi
1128 push edi
1129 push ebx
1130 push ds
1131 push es
1132 push fs
1133 push gs
1134 push ss
1135
1136 ; retf frame (64 -> 32).
1137 push 0
1138 push cs
1139 push 0
1140 push .thunk32
1141
1142 ; jmp far .thunk64
1143 db 0xea
1144 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1145BITS 64
1146.thunk64:
1147 and esp, 0ffffffffh
1148 and ebp, 0ffffffffh
1149 mov rdi, [rbp + 8] ; pVMCBHostPhys
1150 mov rsi, [rbp + 16] ; pVMCBPhys
1151 mov edx, [rbp + 24] ; pCtx
1152 sub rsp, 20h
1153 call NAME(SVMR0VMRun_64)
1154 add rsp, 20h
1155 retf
1156BITS 32
1157.thunk32:
1158 pop ss
1159 pop gs
1160 pop fs
1161 pop es
1162 pop ds
1163 pop ebx
1164 pop edi
1165 pop esi
1166 leave
1167 ret
1168ENDPROC SVMR0VMRun
1169
1170; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
1171ALIGNCODE(16)
1172BEGINPROC SVMR0VMRun64
1173 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1174 jne .longmode
1175 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1176 ret
1177
1178.longmode:
1179 ; stack frame.
1180 push ebp
1181 mov ebp, esp
1182 and esp, 0fffffff0h
1183 push esi
1184 push edi
1185 push ebx
1186 push ds
1187 push es
1188 push fs
1189 push gs
1190 push ss
1191
1192 ; retf frame (64 -> 32).
1193 push 0
1194 push cs
1195 push 0
1196 push .thunk32
1197
1198 ; jmp far .thunk64
1199 db 0xea
1200 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1201BITS 64
1202.thunk64:
1203 and esp, 0ffffffffh
1204 and ebp, 0ffffffffh
1205 mov rdi, [rbp + 8] ; pVMCBHostPhys
1206 mov rsi, [rbp + 16] ; pVMCBPhys
1207 mov edx, [rbp + 24] ; pCtx
1208 sub rsp, 20h
1209 call NAME(SVMR0VMRun64_64)
1210 add rsp, 20h
1211 retf
1212BITS 32
1213.thunk32:
1214 pop ss
1215 pop gs
1216 pop fs
1217 pop es
1218 pop ds
1219 pop ebx
1220 pop edi
1221 pop esi
1222 leave
1223 ret
1224ENDPROC SVMR0VMRun64
1225
1226 ;
1227 ; Do it a second time pretending we're a 64-bit host.
1228 ;
1229 ; This *HAS* to be done at the very end of the file to avoid restoring
1230 ; macros. So, add new code *BEFORE* this mess.
1231 ;
1232 BITS 64
1233 %undef RT_ARCH_X86
1234 %define RT_ARCH_AMD64
1235 %undef ASM_CALL64_MSC
1236 %define ASM_CALL64_GCC
1237 %define xS 8
1238 %define xSP rsp
1239 %define xBP rbp
1240 %define xAX rax
1241 %define xBX rbx
1242 %define xCX rcx
1243 %define xDX rdx
1244 %define xDI rdi
1245 %define xSI rsi
1246 %define MY_NAME(name) name %+ _64
1247 %define MYPUSHAD MYPUSHAD64
1248 %define MYPOPAD MYPOPAD64
1249 %define MYPUSHSEGS MYPUSHSEGS64
1250 %define MYPOPSEGS MYPOPSEGS64
1251
1252 %include "HWACCMR0Mixed.mac"
1253%endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL