1; $Id: HMR0A.asm 47439 2013-07-27 20:03:21Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "iprt/x86.mac"
26%include "HMInternal.mac"
27
28%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
29 %macro vmwrite 2,
30 int3
31 %endmacro
32 %define vmlaunch int3
33 %define vmresume int3
34 %define vmsave int3
35 %define vmload int3
36 %define vmrun int3
37 %define clgi int3
38 %define stgi int3
39 %macro invlpga 2,
40 int3
41 %endmacro
42%endif
43
44;*******************************************************************************
45;* Defined Constants And Macros *
46;*******************************************************************************
47%ifdef RT_ARCH_AMD64
48 %define MAYBE_64_BIT
49%endif
50%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
51 %define MAYBE_64_BIT
52%else
53 %ifdef RT_OS_DARWIN
54 %ifdef RT_ARCH_AMD64
55 ;;
56 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
57 ; risk loading a stale LDT value or something invalid.
58 %define HM_64_BIT_USE_NULL_SEL
59 %endif
60 %endif
61%endif
62
63%ifndef VBOX_WITH_OLD_VTX_CODE
64 %ifdef RT_ARCH_AMD64
65 %define VBOX_SKIP_RESTORE_SEG
66 %endif
67%endif
68
69;; The offset of the XMM registers in X86FXSTATE.
70; Use define because I'm too lazy to convert the struct.
71%define XMM_OFF_IN_X86FXSTATE 160
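; Illustrative compile-time check (not part of this file): in C this assumption
; could be pinned down with IPRT's compile-time assertions, assuming the XMM
; register array in X86FXSTATE is named aXMM:
;     AssertCompileMemberOffset(X86FXSTATE, aXMM, 160);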
72
73;; @def MYPUSHAD
74; Macro generating an equivalent to pushad
75
76;; @def MYPOPAD
77; Macro generating an equivalent to popad
78
79;; @def MYPUSHSEGS
80; Macro saving all segment registers on the stack.
81; @param 1 full width register name
82; @param 2 16-bit register name for \a 1.
83
84;; @def MYPOPSEGS
85; Macro restoring all segment registers from the stack.
86; @param 1 full width register name
87; @param 2 16-bit register name for \a 1.
88
89%ifdef MAYBE_64_BIT
90 ; Save a host MSR and load the corresponding guest MSR (trashes rax, rdx & rcx).
91 %macro LOADGUESTMSR 2
92 mov rcx, %1
93 rdmsr
94 push rdx
95 push rax
96 mov edx, dword [xSI + %2 + 4]
97 mov eax, dword [xSI + %2]
98 wrmsr
99 %endmacro
100
101 ; Save a guest MSR and load the corresponding host MSR (trashes rax, rdx & rcx).
102 ; Only really needed for the GS kernel base MSR, as that one can be changed behind our back (swapgs).
103 %macro LOADHOSTMSREX 2
104 mov rcx, %1
105 rdmsr
106 mov dword [xSI + %2], eax
107 mov dword [xSI + %2 + 4], edx
108 pop rax
109 pop rdx
110 wrmsr
111 %endmacro
112
113 ; Load the corresponding host MSR saved on the stack by LOADGUESTMSR (trashes rax, rdx & rcx).
114 %macro LOADHOSTMSR 1
115 mov rcx, %1
116 pop rax
117 pop rdx
118 wrmsr
119 %endmacro
120%endif
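; Illustrative pairing sketch (not from this file; the MSR and CPUMCTX field
; names are only examples): each LOADGUESTMSR pushes the host value, so the
; restores must run in reverse order to pop the matching pair:
;     LOADGUESTMSR  MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
;     LOADGUESTMSR  MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
;     ; ... run the guest ...
;     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
;     LOADHOSTMSR   MSR_K8_LSTAR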
121
122%ifdef ASM_CALL64_GCC
123 %macro MYPUSHAD64 0
124 push r15
125 push r14
126 push r13
127 push r12
128 push rbx
129 %endmacro
130 %macro MYPOPAD64 0
131 pop rbx
132 pop r12
133 pop r13
134 pop r14
135 pop r15
136 %endmacro
137
138%else ; ASM_CALL64_MSC
139 %macro MYPUSHAD64 0
140 push r15
141 push r14
142 push r13
143 push r12
144 push rbx
145 push rsi
146 push rdi
147 %endmacro
148 %macro MYPOPAD64 0
149 pop rdi
150 pop rsi
151 pop rbx
152 pop r12
153 pop r13
154 pop r14
155 pop r15
156 %endmacro
157%endif
158
159%ifdef VBOX_SKIP_RESTORE_SEG
160%macro MYPUSHSEGS64 2
161%endmacro
162
163%macro MYPOPSEGS64 2
164%endmacro
165%else ; !VBOX_SKIP_RESTORE_SEG
166; Trashes rax, rdx & rcx.
167%macro MYPUSHSEGS64 2
168 %ifndef HM_64_BIT_USE_NULL_SEL
169 mov %2, es
170 push %1
171 mov %2, ds
172 push %1
173 %endif
174
175 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save and restore it ourselves.
176 mov ecx, MSR_K8_FS_BASE
177 rdmsr
178 push rdx
179 push rax
180 %ifndef HM_64_BIT_USE_NULL_SEL
181 push fs
182 %endif
183
184 ; Special case for GS: OSes typically use swapgs to reset the hidden GS base register on entry into the kernel; the same happens on exit.
185 mov ecx, MSR_K8_GS_BASE
186 rdmsr
187 push rdx
188 push rax
189 %ifndef HM_64_BIT_USE_NULL_SEL
190 push gs
191 %endif
192%endmacro
193
194; Trashes rax, rdx & rcx.
195%macro MYPOPSEGS64 2
196 ; Note: do not step through this code with a debugger!
197 %ifndef HM_64_BIT_USE_NULL_SEL
198 xor eax, eax
199 mov ds, ax
200 mov es, ax
201 mov fs, ax
202 mov gs, ax
203 %endif
204
205 %ifndef HM_64_BIT_USE_NULL_SEL
206 pop gs
207 %endif
208 pop rax
209 pop rdx
210 mov ecx, MSR_K8_GS_BASE
211 wrmsr
212
213 %ifndef HM_64_BIT_USE_NULL_SEL
214 pop fs
215 %endif
216 pop rax
217 pop rdx
218 mov ecx, MSR_K8_FS_BASE
219 wrmsr
220 ; Now it's safe to step again
221
222 %ifndef HM_64_BIT_USE_NULL_SEL
223 pop %1
224 mov ds, %2
225 pop %1
226 mov es, %2
227 %endif
228%endmacro
229%endif ; VBOX_SKIP_RESTORE_SEG
230
231%macro MYPUSHAD32 0
232 pushad
233%endmacro
234%macro MYPOPAD32 0
235 popad
236%endmacro
237
238%macro MYPUSHSEGS32 2
239 push ds
240 push es
241 push fs
242 push gs
243%endmacro
244%macro MYPOPSEGS32 2
245 pop gs
246 pop fs
247 pop es
248 pop ds
249%endmacro
250
251
252;*******************************************************************************
253;* External Symbols *
254;*******************************************************************************
255%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
256extern NAME(SUPR0AbsIs64bit)
257extern NAME(SUPR0Abs64bitKernelCS)
258extern NAME(SUPR0Abs64bitKernelSS)
259extern NAME(SUPR0Abs64bitKernelDS)
260extern NAME(SUPR0AbsKernelCS)
261%endif
262%ifdef VBOX_WITH_KERNEL_USING_XMM
263extern NAME(CPUMIsGuestFPUStateActive)
264%endif
265
266
267;*******************************************************************************
268;* Global Variables *
269;*******************************************************************************
270%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
271BEGINDATA
272;;
273; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
274; needing to clobber a register. (This trick doesn't quite work for PE btw.
275; but that's not relevant atm.)
276GLOBALNAME g_fVMXIs64bitHost
277 dd NAME(SUPR0AbsIs64bit)
278%endif
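;
; Note on the hybrid 32-bit kernel thunks used throughout this file: when
; g_fVMXIs64bitHost is non-zero (32-bit darwin kernel on a 64-bit capable
; host), each helper far-jumps (db 0xea + 16:32 target) into
; SUPR0Abs64bitKernelCS to run its .sixtyfourbit_mode body with BITS 64, then
; returns to 32-bit code via a far jmp through its .fpret pointer.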
279
280
281BEGINCODE
282
283
284;/**
285; * Restores host-state fields.
286; *
287; * @returns VBox status code
288; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
289; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
290; */
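; Illustrative caller sketch (not from this file; how the flags and struct are
; filled in is decided by the VMX code that records the host state before
; VM-entry):
;     VMXRESTOREHOST RestoreHost;   /* host GDTR/IDTR/selectors/bases recorded here */
;     uint32_t       fWhat = VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_GDTR;
;     int rc = VMXRestoreHostState(fWhat, &RestoreHost);
;     AssertRC(rc);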
291ALIGNCODE(16)
292BEGINPROC VMXRestoreHostState
293%ifdef RT_ARCH_AMD64
294 %ifndef ASM_CALL64_GCC
295 ; Use GCC's input registers since we'll be needing both rcx and rdx further
296 ; down with the wrmsr instruction. Use R10 and R11 for saving RDI and RSI
297 ; since the MSC calling convention requires us to preserve those two, while R10 and R11 are volatile.
298 mov r10, rdi
299 mov r11, rsi
300 mov rdi, rcx
301 mov rsi, rdx
302 %endif
303
304 test edi, VMX_RESTORE_HOST_GDTR
305 jz .test_idtr
306 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
307
308.test_idtr:
309 test edi, VMX_RESTORE_HOST_IDTR
310 jz .test_ds
311 lidt [rsi + VMXRESTOREHOST.HostIdtr]
312
313.test_ds:
314 test edi, VMX_RESTORE_HOST_SEL_DS
315 jz .test_es
316 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
317 mov ds, ax
318
319.test_es:
320 test edi, VMX_RESTORE_HOST_SEL_ES
321 jz .test_tr
322 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
323 mov es, ax
324
325.test_tr:
326 test edi, VMX_RESTORE_HOST_SEL_TR
327 jz .test_fs
328 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
329 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
330 mov ax, dx
331 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
332 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
333 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
334 ltr dx
335
336.test_fs:
337 ;
338 ; When restoring the selector values for FS and GS, we'll temporarily trash
339 ; the base address (at least the high 32 bits, but quite possibly the
340 ; whole base address); the wrmsr below restores it correctly. (VT-x actually
341 ; restores the base correctly when leaving guest mode, but not the selector
342 ; value, so there is little problem with interrupts being enabled prior to
343 ; this restore job.)
344 ; We'll disable ints once for both FS and GS as that's probably faster.
345 ;
346 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
347 jz .restore_success
348 pushfq
349 cli ; (see above)
350
351 test edi, VMX_RESTORE_HOST_SEL_FS
352 jz .test_gs
353 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
354 mov fs, ax
355 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
356 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
357 mov ecx, MSR_K8_FS_BASE
358 wrmsr
359
360 test edi, VMX_RESTORE_HOST_SEL_GS
361 jz .restore_flags
362.test_gs:
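 ; Note: if FS didn't need restoring we jump straight here; the combined FS|GS
 ; test above guarantees GS needs restoring in that case, so no separate GS
 ; flag check is required on this path.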
363 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
364 mov gs, ax
365 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
366 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
367 mov ecx, MSR_K8_GS_BASE
368 wrmsr
369
370.restore_flags:
371 popfq
372
373.restore_success:
374 mov eax, VINF_SUCCESS
375 %ifndef ASM_CALL64_GCC
376 ; Restore RDI and RSI on MSC.
377 mov rdi, r10
378 mov rsi, r11
379 %endif
380%else ; RT_ARCH_X86
381 mov eax, VERR_NOT_IMPLEMENTED
382%endif
383 ret
384ENDPROC VMXRestoreHostState
385
386
387;/**
388; * Dispatches an NMI to the host.
389; */
390ALIGNCODE(16)
391BEGINPROC VMXDispatchHostNmi
392 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
393 ret
394ENDPROC VMXDispatchHostNmi
395
396
397;/**
398; * Executes VMWRITE, 64-bit value.
399; *
400; * @returns VBox status code
401; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
402; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
403; */
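; Illustrative usage sketch (not from this file; the field constant and pCtx
; are example names). A set carry flag maps to VERR_VMX_INVALID_VMCS_PTR and a
; set zero flag to VERR_VMX_INVALID_VMCS_FIELD in the code below:
;     int rc = VMXWriteVmcs64(VMX_VMCS_GUEST_CR3, pCtx->cr3);
;     if (RT_FAILURE(rc)) { /* invalid VMCS pointer or field encoding */ }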
404ALIGNCODE(16)
405BEGINPROC VMXWriteVmcs64
406%ifdef RT_ARCH_AMD64
407 %ifdef ASM_CALL64_GCC
408 and edi, 0ffffffffh
409 xor rax, rax
410 vmwrite rdi, rsi
411 %else
412 and ecx, 0ffffffffh
413 xor rax, rax
414 vmwrite rcx, rdx
415 %endif
416%else ; RT_ARCH_X86
417 mov ecx, [esp + 4] ; idxField
418 lea edx, [esp + 8] ; &u64Data
419 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
420 cmp byte [NAME(g_fVMXIs64bitHost)], 0
421 jz .legacy_mode
422 db 0xea ; jmp far .sixtyfourbit_mode
423 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
424.legacy_mode:
425 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
426 vmwrite ecx, [edx] ; low dword
427 jz .done
428 jc .done
429 inc ecx
430 xor eax, eax
431 vmwrite ecx, [edx + 4] ; high dword
432.done:
433%endif ; RT_ARCH_X86
434 jnc .valid_vmcs
435 mov eax, VERR_VMX_INVALID_VMCS_PTR
436 ret
437.valid_vmcs:
438 jnz .the_end
439 mov eax, VERR_VMX_INVALID_VMCS_FIELD
440.the_end:
441 ret
442
443%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
444ALIGNCODE(16)
445BITS 64
446.sixtyfourbit_mode:
447 and edx, 0ffffffffh
448 and ecx, 0ffffffffh
449 xor eax, eax
450 vmwrite rcx, [rdx]
451 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
452 cmovz eax, r8d
453 mov r9d, VERR_VMX_INVALID_VMCS_PTR
454 cmovc eax, r9d
455 jmp far [.fpret wrt rip]
456.fpret: ; 16:32 Pointer to .the_end.
457 dd .the_end, NAME(SUPR0AbsKernelCS)
458BITS 32
459%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
460ENDPROC VMXWriteVmcs64
461
462
463;/**
464; * Executes VMREAD, 64-bit value
465; *
466; * @returns VBox status code
467; * @param idxField VMCS index
468; * @param pData Ptr to store VM field value
469; */
470;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
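; Illustrative usage sketch (not from this file; the field constant is only an
; example):
;     uint64_t u64GuestRip = 0;
;     int rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64GuestRip);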
471ALIGNCODE(16)
472BEGINPROC VMXReadVmcs64
473%ifdef RT_ARCH_AMD64
474 %ifdef ASM_CALL64_GCC
475 and edi, 0ffffffffh
476 xor rax, rax
477 vmread [rsi], rdi
478 %else
479 and ecx, 0ffffffffh
480 xor rax, rax
481 vmread [rdx], rcx
482 %endif
483%else ; RT_ARCH_X86
484 mov ecx, [esp + 4] ; idxField
485 mov edx, [esp + 8] ; pData
486 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
487 cmp byte [NAME(g_fVMXIs64bitHost)], 0
488 jz .legacy_mode
489 db 0xea ; jmp far .sixtyfourbit_mode
490 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
491.legacy_mode:
492 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
493 vmread [edx], ecx ; low dword
494 jz .done
495 jc .done
496 inc ecx
497 xor eax, eax
498 vmread [edx + 4], ecx ; high dword
499.done:
500%endif ; RT_ARCH_X86
501 jnc .valid_vmcs
502 mov eax, VERR_VMX_INVALID_VMCS_PTR
503 ret
504.valid_vmcs:
505 jnz .the_end
506 mov eax, VERR_VMX_INVALID_VMCS_FIELD
507.the_end:
508 ret
509
510%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
511ALIGNCODE(16)
512BITS 64
513.sixtyfourbit_mode:
514 and edx, 0ffffffffh
515 and ecx, 0ffffffffh
516 xor eax, eax
517 vmread [rdx], rcx
518 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
519 cmovz eax, r8d
520 mov r9d, VERR_VMX_INVALID_VMCS_PTR
521 cmovc eax, r9d
522 jmp far [.fpret wrt rip]
523.fpret: ; 16:32 Pointer to .the_end.
524 dd .the_end, NAME(SUPR0AbsKernelCS)
525BITS 32
526%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
527ENDPROC VMXReadVmcs64
528
529
530;/**
531; * Executes VMREAD, 32-bit value.
532; *
533; * @returns VBox status code
534; * @param idxField VMCS index
535; * @param pu32Data Ptr to store VM field value
536; */
537;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
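; Illustrative usage sketch (not from this file; the field constant is only an
; example). The 32-bit accessors share the carry/zero-flag-to-status-code
; convention of the 64-bit ones above:
;     uint32_t uExitReason = 0;
;     int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);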
538ALIGNCODE(16)
539BEGINPROC VMXReadVmcs32
540%ifdef RT_ARCH_AMD64
541 %ifdef ASM_CALL64_GCC
542 and edi, 0ffffffffh
543 xor rax, rax
544 vmread r10, rdi
545 mov [rsi], r10d
546 %else
547 and ecx, 0ffffffffh
548 xor rax, rax
549 vmread r10, rcx
550 mov [rdx], r10d
551 %endif
552%else ; RT_ARCH_X86
553 mov ecx, [esp + 4] ; idxField
554 mov edx, [esp + 8] ; pu32Data
555 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
556 cmp byte [NAME(g_fVMXIs64bitHost)], 0
557 jz .legacy_mode
558 db 0xea ; jmp far .sixtyfourbit_mode
559 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
560.legacy_mode:
561 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
562 xor eax, eax
563 vmread [edx], ecx
564%endif ; RT_ARCH_X86
565 jnc .valid_vmcs
566 mov eax, VERR_VMX_INVALID_VMCS_PTR
567 ret
568.valid_vmcs:
569 jnz .the_end
570 mov eax, VERR_VMX_INVALID_VMCS_FIELD
571.the_end:
572 ret
573
574%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
575ALIGNCODE(16)
576BITS 64
577.sixtyfourbit_mode:
578 and edx, 0ffffffffh
579 and ecx, 0ffffffffh
580 xor eax, eax
581 vmread r10, rcx
582 mov [rdx], r10d
583 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
584 cmovz eax, r8d
585 mov r9d, VERR_VMX_INVALID_VMCS_PTR
586 cmovc eax, r9d
587 jmp far [.fpret wrt rip]
588.fpret: ; 16:32 Pointer to .the_end.
589 dd .the_end, NAME(SUPR0AbsKernelCS)
590BITS 32
591%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
592ENDPROC VMXReadVmcs32
593
594
595;/**
596; * Executes VMWRITE, 32-bit value.
597; *
598; * @returns VBox status code
599; * @param idxField VMCS index
600; * @param u32Data The 32-bit value to write.
601; */
602;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
603ALIGNCODE(16)
604BEGINPROC VMXWriteVmcs32
605%ifdef RT_ARCH_AMD64
606 %ifdef ASM_CALL64_GCC
607 and edi, 0ffffffffh
608 and esi, 0ffffffffh
609 xor rax, rax
610 vmwrite rdi, rsi
611 %else
612 and ecx, 0ffffffffh
613 and edx, 0ffffffffh
614 xor rax, rax
615 vmwrite rcx, rdx
616 %endif
617%else ; RT_ARCH_X86
618 mov ecx, [esp + 4] ; idxField
619 mov edx, [esp + 8] ; u32Data
620 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
621 cmp byte [NAME(g_fVMXIs64bitHost)], 0
622 jz .legacy_mode
623 db 0xea ; jmp far .sixtyfourbit_mode
624 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
625.legacy_mode:
626 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
627 xor eax, eax
628 vmwrite ecx, edx
629%endif ; RT_ARCH_X86
630 jnc .valid_vmcs
631 mov eax, VERR_VMX_INVALID_VMCS_PTR
632 ret
633.valid_vmcs:
634 jnz .the_end
635 mov eax, VERR_VMX_INVALID_VMCS_FIELD
636.the_end:
637 ret
638
639%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
640ALIGNCODE(16)
641BITS 64
642.sixtyfourbit_mode:
643 and edx, 0ffffffffh
644 and ecx, 0ffffffffh
645 xor eax, eax
646 vmwrite rcx, rdx
647 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
648 cmovz eax, r8d
649 mov r9d, VERR_VMX_INVALID_VMCS_PTR
650 cmovc eax, r9d
651 jmp far [.fpret wrt rip]
652.fpret: ; 16:32 Pointer to .the_end.
653 dd .the_end, NAME(SUPR0AbsKernelCS)
654BITS 32
655%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
656ENDPROC VMXWriteVmcs32
657
658
659;/**
660; * Executes VMXON
661; *
662; * @returns VBox status code
663; * @param HCPhysVMXOn Physical address of VMXON structure
664; */
665;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
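; Illustrative setup sketch (not from this file; the variable names are
; hypothetical): the VMXON region is a page-aligned 4KB block whose first
; dword must hold the VMCS revision id from the IA32_VMX_BASIC MSR, and
; CR4.VMXE must already be set when this is called:
;     *(uint32_t *)pvVmxonPage = uVmcsRevisionId;
;     int rc = VMXEnable(HCPhysVmxonPage);   /* VERR_VMX_INVALID_VMXON_PTR or
;                                               VERR_VMX_VMXON_FAILED on error */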
666BEGINPROC VMXEnable
667%ifdef RT_ARCH_AMD64
668 xor rax, rax
669 %ifdef ASM_CALL64_GCC
670 push rdi
671 %else
672 push rcx
673 %endif
674 vmxon [rsp]
675%else ; RT_ARCH_X86
676 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
677 cmp byte [NAME(g_fVMXIs64bitHost)], 0
678 jz .legacy_mode
679 db 0xea ; jmp far .sixtyfourbit_mode
680 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
681.legacy_mode:
682 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
683 xor eax, eax
684 vmxon [esp + 4]
685%endif ; RT_ARCH_X86
686 jnc .good
687 mov eax, VERR_VMX_INVALID_VMXON_PTR
688 jmp .the_end
689
690.good:
691 jnz .the_end
692 mov eax, VERR_VMX_VMXON_FAILED
693
694.the_end:
695%ifdef RT_ARCH_AMD64
696 add rsp, 8
697%endif
698 ret
699
700%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
701ALIGNCODE(16)
702BITS 64
703.sixtyfourbit_mode:
704 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
705 and edx, 0ffffffffh
706 xor eax, eax
707 vmxon [rdx]
708 mov r8d, VERR_VMX_VMXON_FAILED
709 cmovz eax, r8d
710 mov r9d, VERR_VMX_INVALID_VMXON_PTR
711 cmovc eax, r9d
712 jmp far [.fpret wrt rip]
713.fpret: ; 16:32 Pointer to .the_end.
714 dd .the_end, NAME(SUPR0AbsKernelCS)
715BITS 32
716%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
717ENDPROC VMXEnable
718
719
720;/**
721; * Executes VMXOFF
722; */
723;DECLASM(void) VMXDisable(void);
724BEGINPROC VMXDisable
725%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
726 cmp byte [NAME(g_fVMXIs64bitHost)], 0
727 jz .legacy_mode
728 db 0xea ; jmp far .sixtyfourbit_mode
729 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
730.legacy_mode:
731%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
732 vmxoff
733.the_end:
734 ret
735
736%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
737ALIGNCODE(16)
738BITS 64
739.sixtyfourbit_mode:
740 vmxoff
741 jmp far [.fpret wrt rip]
742.fpret: ; 16:32 Pointer to .the_end.
743 dd .the_end, NAME(SUPR0AbsKernelCS)
744BITS 32
745%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
746ENDPROC VMXDisable
747
748
749;/**
750; * Executes VMCLEAR
751; *
752; * @returns VBox status code
753; * @param HCPhysVmcs Physical address of VM control structure
754; */
755;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVmcs);
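; Illustrative pairing sketch (not from this file): a VMCS is typically cleared
; once with VMXClearVMCS and then made current with VMXActivateVMCS (below)
; before any VMWRITEs and the actual VMLAUNCH/VMRESUME:
;     rc = VMXClearVMCS(HCPhysVmcs);
;     rc = VMXActivateVMCS(HCPhysVmcs);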
756ALIGNCODE(16)
757BEGINPROC VMXClearVMCS
758%ifdef RT_ARCH_AMD64
759 xor rax, rax
760 %ifdef ASM_CALL64_GCC
761 push rdi
762 %else
763 push rcx
764 %endif
765 vmclear [rsp]
766%else ; RT_ARCH_X86
767 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
768 cmp byte [NAME(g_fVMXIs64bitHost)], 0
769 jz .legacy_mode
770 db 0xea ; jmp far .sixtyfourbit_mode
771 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
772.legacy_mode:
773 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
774 xor eax, eax
775 vmclear [esp + 4]
776%endif ; RT_ARCH_X86
777 jnc .the_end
778 mov eax, VERR_VMX_INVALID_VMCS_PTR
779.the_end:
780%ifdef RT_ARCH_AMD64
781 add rsp, 8
782%endif
783 ret
784
785%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
786ALIGNCODE(16)
787BITS 64
788.sixtyfourbit_mode:
789 lea rdx, [rsp + 4] ; &HCPhysVmcs
790 and edx, 0ffffffffh
791 xor eax, eax
792 vmclear [rdx]
793 mov r9d, VERR_VMX_INVALID_VMCS_PTR
794 cmovc eax, r9d
795 jmp far [.fpret wrt rip]
796.fpret: ; 16:32 Pointer to .the_end.
797 dd .the_end, NAME(SUPR0AbsKernelCS)
798BITS 32
799%endif
800ENDPROC VMXClearVMCS
801
802
803;/**
804; * Executes VMPTRLD
805; *
806; * @returns VBox status code
807; * @param HCPhysVmcs Physical address of VMCS structure
808; */
809;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVmcs);
810ALIGNCODE(16)
811BEGINPROC VMXActivateVMCS
812%ifdef RT_ARCH_AMD64
813 xor rax, rax
814 %ifdef ASM_CALL64_GCC
815 push rdi
816 %else
817 push rcx
818 %endif
819 vmptrld [rsp]
820%else
821 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
822 cmp byte [NAME(g_fVMXIs64bitHost)], 0
823 jz .legacy_mode
824 db 0xea ; jmp far .sixtyfourbit_mode
825 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
826.legacy_mode:
827 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
828 xor eax, eax
829 vmptrld [esp + 4]
830%endif
831 jnc .the_end
832 mov eax, VERR_VMX_INVALID_VMCS_PTR
833.the_end:
834%ifdef RT_ARCH_AMD64
835 add rsp, 8
836%endif
837 ret
838
839%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
840ALIGNCODE(16)
841BITS 64
842.sixtyfourbit_mode:
843 lea rdx, [rsp + 4] ; &HCPhysVmcs
844 and edx, 0ffffffffh
845 xor eax, eax
846 vmptrld [rdx]
847 mov r9d, VERR_VMX_INVALID_VMCS_PTR
848 cmovc eax, r9d
849 jmp far [.fpret wrt rip]
850.fpret: ; 16:32 Pointer to .the_end.
851 dd .the_end, NAME(SUPR0AbsKernelCS)
852BITS 32
853%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
854ENDPROC VMXActivateVMCS
855
856
857;/**
858; * Executes VMPTRST
859; *
860; * @returns VBox status code
861; * @param pVMCS x86: [esp + 04h] msc: rcx gcc: rdi Where to store the current VMCS physical address.
862; */
863;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
864BEGINPROC VMXGetActivateVMCS
865%ifdef RT_OS_OS2
866 mov eax, VERR_NOT_SUPPORTED
867 ret
868%else
869 %ifdef RT_ARCH_AMD64
870 %ifdef ASM_CALL64_GCC
871 vmptrst qword [rdi]
872 %else
873 vmptrst qword [rcx]
874 %endif
875 %else
876 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
877 cmp byte [NAME(g_fVMXIs64bitHost)], 0
878 jz .legacy_mode
879 db 0xea ; jmp far .sixtyfourbit_mode
880 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
881.legacy_mode:
882 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
883 vmptrst qword [esp+04h]
884 %endif
885 xor eax, eax
886.the_end:
887 ret
888
889 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
890ALIGNCODE(16)
891BITS 64
892.sixtyfourbit_mode:
893 lea rdx, [rsp + 4] ; &HCPhysVmcs
894 and edx, 0ffffffffh
895 vmptrst qword [rdx]
896 xor eax, eax
897 jmp far [.fpret wrt rip]
898.fpret: ; 16:32 Pointer to .the_end.
899 dd .the_end, NAME(SUPR0AbsKernelCS)
900BITS 32
901 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
902%endif
903ENDPROC VMXGetActivateVMCS
904
905;/**
906; * Invalidates EPT translations using INVEPT.
907; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
908; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
909; */
910;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
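; Note on the DB-encoded instructions below: presumably because the assemblers
; used at the time lacked the invept mnemonic, the opcode (66 0F 38 80 /r) is
; emitted by hand; the trailing ModRM byte selects the operands (0x3E = rdi,
; [rsi]; 0x0A = rcx/ecx, [rdx/edx]). The 128-bit descriptor holds the EPTP in
; the low quadword and a reserved (zero) high quadword.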
911BEGINPROC VMXR0InvEPT
912%ifdef RT_ARCH_AMD64
913 %ifdef ASM_CALL64_GCC
914 and edi, 0ffffffffh
915 xor rax, rax
916; invept rdi, qword [rsi]
917 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
918 %else
919 and ecx, 0ffffffffh
920 xor rax, rax
921; invept rcx, qword [rdx]
922 DB 0x66, 0x0F, 0x38, 0x80, 0xA
923 %endif
924%else
925 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
926 cmp byte [NAME(g_fVMXIs64bitHost)], 0
927 jz .legacy_mode
928 db 0xea ; jmp far .sixtyfourbit_mode
929 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
930.legacy_mode:
931 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
932 mov ecx, [esp + 4]
933 mov edx, [esp + 8]
934 xor eax, eax
935; invept ecx, qword [edx]
936 DB 0x66, 0x0F, 0x38, 0x80, 0xA
937%endif
938 jnc .valid_vmcs
939 mov eax, VERR_VMX_INVALID_VMCS_PTR
940 ret
941.valid_vmcs:
942 jnz .the_end
943 mov eax, VERR_INVALID_PARAMETER
944.the_end:
945 ret
946
947%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
948ALIGNCODE(16)
949BITS 64
950.sixtyfourbit_mode:
951 and esp, 0ffffffffh
952 mov ecx, [rsp + 4] ; enmFlush
953 mov edx, [rsp + 8] ; pDescriptor
954 xor eax, eax
955; invept rcx, qword [rdx]
956 DB 0x66, 0x0F, 0x38, 0x80, 0xA
957 mov r8d, VERR_INVALID_PARAMETER
958 cmovz eax, r8d
959 mov r9d, VERR_VMX_INVALID_VMCS_PTR
960 cmovc eax, r9d
961 jmp far [.fpret wrt rip]
962.fpret: ; 16:32 Pointer to .the_end.
963 dd .the_end, NAME(SUPR0AbsKernelCS)
964BITS 32
965%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
966ENDPROC VMXR0InvEPT
967
968
969;/**
970; * Invalidates TLB entries tagged with a VPID using INVVPID.
971; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
972; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
973; */
974;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
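; As with invept above, invvpid is emitted by hand (opcode 66 0F 38 81 /r with
; the same ModRM encodings). Its 128-bit descriptor holds the VPID in bits
; 15:0, reserved bits up to bit 63, and the linear address in bits 127:64.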
975BEGINPROC VMXR0InvVPID
976%ifdef RT_ARCH_AMD64
977 %ifdef ASM_CALL64_GCC
978 and edi, 0ffffffffh
979 xor rax, rax
980; invvpid rdi, qword [rsi]
981 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
982 %else
983 and ecx, 0ffffffffh
984 xor rax, rax
985; invvpid rcx, qword [rdx]
986 DB 0x66, 0x0F, 0x38, 0x81, 0xA
987 %endif
988%else
989 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
990 cmp byte [NAME(g_fVMXIs64bitHost)], 0
991 jz .legacy_mode
992 db 0xea ; jmp far .sixtyfourbit_mode
993 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
994.legacy_mode:
995 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
996 mov ecx, [esp + 4]
997 mov edx, [esp + 8]
998 xor eax, eax
999; invvpid ecx, qword [edx]
1000 DB 0x66, 0x0F, 0x38, 0x81, 0xA
1001%endif
1002 jnc .valid_vmcs
1003 mov eax, VERR_VMX_INVALID_VMCS_PTR
1004 ret
1005.valid_vmcs:
1006 jnz .the_end
1007 mov eax, VERR_INVALID_PARAMETER
1008.the_end:
1009 ret
1010
1011%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1012ALIGNCODE(16)
1013BITS 64
1014.sixtyfourbit_mode:
1015 and esp, 0ffffffffh
1016 mov ecx, [rsp + 4] ; enmFlush
1017 mov edx, [rsp + 8] ; pDescriptor
1018 xor eax, eax
1019; invvpid rcx, qword [rdx]
1020 DB 0x66, 0x0F, 0x38, 0x81, 0xA
1021 mov r8d, VERR_INVALID_PARAMETER
1022 cmovz eax, r8d
1023 mov r9d, VERR_VMX_INVALID_VMCS_PTR
1024 cmovc eax, r9d
1025 jmp far [.fpret wrt rip]
1026.fpret: ; 16:32 Pointer to .the_end.
1027 dd .the_end, NAME(SUPR0AbsKernelCS)
1028BITS 32
1029%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1030ENDPROC VMXR0InvVPID
1031
1032
1033%if GC_ARCH_BITS == 64
1034;;
1035; Executes INVLPGA
1036;
1037; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
1038; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
1039;
1040;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1041BEGINPROC SVMR0InvlpgA
1042%ifdef RT_ARCH_AMD64
1043 %ifdef ASM_CALL64_GCC
1044 mov rax, rdi
1045 mov rcx, rsi
1046 %else
1047 mov rax, rcx
1048 mov rcx, rdx
1049 %endif
1050%else
1051 mov eax, [esp + 4]
1052 mov ecx, [esp + 0Ch]
1053%endif
1054 invlpga [xAX], ecx
1055 ret
1056ENDPROC SVMR0InvlpgA
1057
1058%else ; GC_ARCH_BITS != 64
1059;;
1060; Executes INVLPGA
1061;
1062; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
1063; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
1064;
1065;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1066BEGINPROC SVMR0InvlpgA
1067%ifdef RT_ARCH_AMD64
1068 %ifdef ASM_CALL64_GCC
1069 movzx rax, edi
1070 mov ecx, esi
1071 %else
1072 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1073 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1074 ; values also set the upper 32 bits of the register to zero. Consequently
1075 ; there is no need for an instruction movzlq.''
1076 mov eax, ecx
1077 mov ecx, edx
1078 %endif
1079%else
1080 mov eax, [esp + 4]
1081 mov ecx, [esp + 8]
1082%endif
1083 invlpga [xAX], ecx
1084 ret
1085ENDPROC SVMR0InvlpgA
1086
1087%endif ; GC_ARCH_BITS != 64
1088
1089%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1090
1091;/**
1092; * Gets 64-bit GDTR and IDTR on darwin.
1093; * @param pGdtr Where to store the 64-bit GDTR.
1094; * @param pIdtr Where to store the 64-bit IDTR.
1095; */
1096;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
1097ALIGNCODE(16)
1098BEGINPROC HMR0Get64bitGdtrAndIdtr
1099 db 0xea ; jmp far .sixtyfourbit_mode
1100 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1101.the_end:
1102 ret
1103
1104ALIGNCODE(16)
1105BITS 64
1106.sixtyfourbit_mode:
1107 and esp, 0ffffffffh
1108 mov ecx, [rsp + 4] ; pGdtr
1109 mov edx, [rsp + 8] ; pIdtr
1110 sgdt [rcx]
1111 sidt [rdx]
1112 jmp far [.fpret wrt rip]
1113.fpret: ; 16:32 Pointer to .the_end.
1114 dd .the_end, NAME(SUPR0AbsKernelCS)
1115BITS 32
1116ENDPROC HMR0Get64bitGdtrAndIdtr
1117
1118
1119;/**
1120; * Gets 64-bit CR3 on darwin.
1121; * @returns CR3
1122; */
1123;DECLASM(uint64_t) HMR0Get64bitCR3(void);
1124ALIGNCODE(16)
1125BEGINPROC HMR0Get64bitCR3
1126 db 0xea ; jmp far .sixtyfourbit_mode
1127 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1128.the_end:
1129 ret
1130
1131ALIGNCODE(16)
1132BITS 64
1133.sixtyfourbit_mode:
1134 mov rax, cr3
1135 mov rdx, rax
1136 shr rdx, 32
1137 jmp far [.fpret wrt rip]
1138.fpret: ; 16:32 Pointer to .the_end.
1139 dd .the_end, NAME(SUPR0AbsKernelCS)
1140BITS 32
1141ENDPROC HMR0Get64bitCR3
1142
1143%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1144
1145%ifdef VBOX_WITH_KERNEL_USING_XMM
1146
1147;;
1148; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1149; loads the guest ones when necessary.
1150;
1151; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
1152;
1153; @returns eax
1154;
1155; @param fResumeVM msc:rcx
1156; @param pCtx msc:rdx
1157; @param pVMCSCache msc:r8
1158; @param pVM msc:r9
1159; @param pVCpu msc:[rbp+30h]
1160; @param pfnStartVM msc:[rbp+38h]
1161;
1162; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
1163;
1164; ASSUMING 64-bit Windows for now.
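;
; Note on the register split below: the Windows x64 calling convention treats
; XMM0-XMM5 as volatile and XMM6-XMM15 as callee-saved, so only XMM6-XMM15 are
; spilled for the host, while the full XMM0-XMM15 guest set is loaded and
; saved around the call.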
1165ALIGNCODE(16)
1166BEGINPROC HMR0VMXStartVMWrapXMM
1167 push xBP
1168 mov xBP, xSP
1169 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1170
1171 ; spill input parameters.
1172 mov [xBP + 010h], rcx ; fResumeVM
1173 mov [xBP + 018h], rdx ; pCtx
1174 mov [xBP + 020h], r8 ; pVMCSCache
1175 mov [xBP + 028h], r9 ; pVM
1176
1177 ; Ask CPUM whether we've started using the FPU yet.
1178 mov rcx, [xBP + 30h] ; pVCpu
1179 call NAME(CPUMIsGuestFPUStateActive)
1180 test al, al
1181 jnz .guest_fpu_state_active
1182
1183 ; No need to mess with XMM registers, just call the start routine and return.
1184 mov r11, [xBP + 38h] ; pfnStartVM
1185 mov r10, [xBP + 30h] ; pVCpu
1186 mov [xSP + 020h], r10
1187 mov rcx, [xBP + 010h] ; fResumeVM
1188 mov rdx, [xBP + 018h] ; pCtx
1189 mov r8, [xBP + 020h] ; pVMCSCache
1190 mov r9, [xBP + 028h] ; pVM
1191 call r11
1192
1193 leave
1194 ret
1195
1196ALIGNCODE(8)
1197.guest_fpu_state_active:
1198 ; Save the host XMM registers.
1199 movdqa [rsp + 040h + 000h], xmm6
1200 movdqa [rsp + 040h + 010h], xmm7
1201 movdqa [rsp + 040h + 020h], xmm8
1202 movdqa [rsp + 040h + 030h], xmm9
1203 movdqa [rsp + 040h + 040h], xmm10
1204 movdqa [rsp + 040h + 050h], xmm11
1205 movdqa [rsp + 040h + 060h], xmm12
1206 movdqa [rsp + 040h + 070h], xmm13
1207 movdqa [rsp + 040h + 080h], xmm14
1208 movdqa [rsp + 040h + 090h], xmm15
1209
1210 ; Load the full guest XMM register state.
1211 mov r10, [xBP + 018h] ; pCtx
1212 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1213 movdqa xmm0, [r10 + 000h]
1214 movdqa xmm1, [r10 + 010h]
1215 movdqa xmm2, [r10 + 020h]
1216 movdqa xmm3, [r10 + 030h]
1217 movdqa xmm4, [r10 + 040h]
1218 movdqa xmm5, [r10 + 050h]
1219 movdqa xmm6, [r10 + 060h]
1220 movdqa xmm7, [r10 + 070h]
1221 movdqa xmm8, [r10 + 080h]
1222 movdqa xmm9, [r10 + 090h]
1223 movdqa xmm10, [r10 + 0a0h]
1224 movdqa xmm11, [r10 + 0b0h]
1225 movdqa xmm12, [r10 + 0c0h]
1226 movdqa xmm13, [r10 + 0d0h]
1227 movdqa xmm14, [r10 + 0e0h]
1228 movdqa xmm15, [r10 + 0f0h]
1229
1230 ; Make the call (same as in the other case).
1231 mov r11, [xBP + 38h] ; pfnStartVM
1232 mov r10, [xBP + 30h] ; pVCpu
1233 mov [xSP + 020h], r10
1234 mov rcx, [xBP + 010h] ; fResumeVM
1235 mov rdx, [xBP + 018h] ; pCtx
1236 mov r8, [xBP + 020h] ; pVMCSCache
1237 mov r9, [xBP + 028h] ; pVM
1238 call r11
1239
1240 ; Save the guest XMM registers.
1241 mov r10, [xBP + 018h] ; pCtx
1242 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1243 movdqa [r10 + 000h], xmm0
1244 movdqa [r10 + 010h], xmm1
1245 movdqa [r10 + 020h], xmm2
1246 movdqa [r10 + 030h], xmm3
1247 movdqa [r10 + 040h], xmm4
1248 movdqa [r10 + 050h], xmm5
1249 movdqa [r10 + 060h], xmm6
1250 movdqa [r10 + 070h], xmm7
1251 movdqa [r10 + 080h], xmm8
1252 movdqa [r10 + 090h], xmm9
1253 movdqa [r10 + 0a0h], xmm10
1254 movdqa [r10 + 0b0h], xmm11
1255 movdqa [r10 + 0c0h], xmm12
1256 movdqa [r10 + 0d0h], xmm13
1257 movdqa [r10 + 0e0h], xmm14
1258 movdqa [r10 + 0f0h], xmm15
1259
1260 ; Load the host XMM registers.
1261 movdqa xmm6, [rsp + 040h + 000h]
1262 movdqa xmm7, [rsp + 040h + 010h]
1263 movdqa xmm8, [rsp + 040h + 020h]
1264 movdqa xmm9, [rsp + 040h + 030h]
1265 movdqa xmm10, [rsp + 040h + 040h]
1266 movdqa xmm11, [rsp + 040h + 050h]
1267 movdqa xmm12, [rsp + 040h + 060h]
1268 movdqa xmm13, [rsp + 040h + 070h]
1269 movdqa xmm14, [rsp + 040h + 080h]
1270 movdqa xmm15, [rsp + 040h + 090h]
1271 leave
1272 ret
1273ENDPROC HMR0VMXStartVMWrapXMM
1274
1275;;
1276; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1277; loads the guest ones when necessary.
1278;
1279; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
1280;
1281; @returns eax
1282;
1283; @param pVMCBHostPhys msc:rcx
1284; @param pVMCBPhys msc:rdx
1285; @param pCtx msc:r8
1286; @param pVM msc:r9
1287; @param pVCpu msc:[rbp+30h]
1288; @param pfnVMRun msc:[rbp+38h]
1289;
1290; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1291;
1292; ASSUMING 64-bit Windows for now.
1293ALIGNCODE(16)
1294BEGINPROC HMR0SVMRunWrapXMM
1295 push xBP
1296 mov xBP, xSP
1297 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1298
1299 ; spill input parameters.
1300 mov [xBP + 010h], rcx ; pVMCBHostPhys
1301 mov [xBP + 018h], rdx ; pVMCBPhys
1302 mov [xBP + 020h], r8 ; pCtx
1303 mov [xBP + 028h], r9 ; pVM
1304
1305 ; Ask CPUM whether we've started using the FPU yet.
1306 mov rcx, [xBP + 30h] ; pVCpu
1307 call NAME(CPUMIsGuestFPUStateActive)
1308 test al, al
1309 jnz .guest_fpu_state_active
1310
1311 ; No need to mess with XMM registers, just call the start routine and return.
1312 mov r11, [xBP + 38h] ; pfnVMRun
1313 mov r10, [xBP + 30h] ; pVCpu
1314 mov [xSP + 020h], r10
1315 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1316 mov rdx, [xBP + 018h] ; pVMCBPhys
1317 mov r8, [xBP + 020h] ; pCtx
1318 mov r9, [xBP + 028h] ; pVM
1319 call r11
1320
1321 leave
1322 ret
1323
1324ALIGNCODE(8)
1325.guest_fpu_state_active:
1326 ; Save the host XMM registers.
1327 movdqa [rsp + 040h + 000h], xmm6
1328 movdqa [rsp + 040h + 010h], xmm7
1329 movdqa [rsp + 040h + 020h], xmm8
1330 movdqa [rsp + 040h + 030h], xmm9
1331 movdqa [rsp + 040h + 040h], xmm10
1332 movdqa [rsp + 040h + 050h], xmm11
1333 movdqa [rsp + 040h + 060h], xmm12
1334 movdqa [rsp + 040h + 070h], xmm13
1335 movdqa [rsp + 040h + 080h], xmm14
1336 movdqa [rsp + 040h + 090h], xmm15
1337
1338 ; Load the full guest XMM register state.
1339 mov r10, [xBP + 020h] ; pCtx
1340 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1341 movdqa xmm0, [r10 + 000h]
1342 movdqa xmm1, [r10 + 010h]
1343 movdqa xmm2, [r10 + 020h]
1344 movdqa xmm3, [r10 + 030h]
1345 movdqa xmm4, [r10 + 040h]
1346 movdqa xmm5, [r10 + 050h]
1347 movdqa xmm6, [r10 + 060h]
1348 movdqa xmm7, [r10 + 070h]
1349 movdqa xmm8, [r10 + 080h]
1350 movdqa xmm9, [r10 + 090h]
1351 movdqa xmm10, [r10 + 0a0h]
1352 movdqa xmm11, [r10 + 0b0h]
1353 movdqa xmm12, [r10 + 0c0h]
1354 movdqa xmm13, [r10 + 0d0h]
1355 movdqa xmm14, [r10 + 0e0h]
1356 movdqa xmm15, [r10 + 0f0h]
1357
1358 ; Make the call (same as in the other case).
1359 mov r11, [xBP + 38h] ; pfnVMRun
1360 mov r10, [xBP + 30h] ; pVCpu
1361 mov [xSP + 020h], r10
1362 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1363 mov rdx, [xBP + 018h] ; pVMCBPhys
1364 mov r8, [xBP + 020h] ; pCtx
1365 mov r9, [xBP + 028h] ; pVM
1366 call r11
1367
1368 ; Save the guest XMM registers.
1369 mov r10, [xBP + 020h] ; pCtx
1370 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1371 movdqa [r10 + 000h], xmm0
1372 movdqa [r10 + 010h], xmm1
1373 movdqa [r10 + 020h], xmm2
1374 movdqa [r10 + 030h], xmm3
1375 movdqa [r10 + 040h], xmm4
1376 movdqa [r10 + 050h], xmm5
1377 movdqa [r10 + 060h], xmm6
1378 movdqa [r10 + 070h], xmm7
1379 movdqa [r10 + 080h], xmm8
1380 movdqa [r10 + 090h], xmm9
1381 movdqa [r10 + 0a0h], xmm10
1382 movdqa [r10 + 0b0h], xmm11
1383 movdqa [r10 + 0c0h], xmm12
1384 movdqa [r10 + 0d0h], xmm13
1385 movdqa [r10 + 0e0h], xmm14
1386 movdqa [r10 + 0f0h], xmm15
1387
1388 ; Load the host XMM registers.
1389 movdqa xmm6, [rsp + 040h + 000h]
1390 movdqa xmm7, [rsp + 040h + 010h]
1391 movdqa xmm8, [rsp + 040h + 020h]
1392 movdqa xmm9, [rsp + 040h + 030h]
1393 movdqa xmm10, [rsp + 040h + 040h]
1394 movdqa xmm11, [rsp + 040h + 050h]
1395 movdqa xmm12, [rsp + 040h + 060h]
1396 movdqa xmm13, [rsp + 040h + 070h]
1397 movdqa xmm14, [rsp + 040h + 080h]
1398 movdqa xmm15, [rsp + 040h + 090h]
1399 leave
1400 ret
1401ENDPROC HMR0SVMRunWrapXMM
1402
1403%endif ; VBOX_WITH_KERNEL_USING_XMM
1404
1405;
1406; The default setup of the StartVM routines.
1407;
1408%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1409 %define MY_NAME(name) name %+ _32
1410%else
1411 %define MY_NAME(name) name
1412%endif
1413%ifdef RT_ARCH_AMD64
1414 %define MYPUSHAD MYPUSHAD64
1415 %define MYPOPAD MYPOPAD64
1416 %define MYPUSHSEGS MYPUSHSEGS64
1417 %define MYPOPSEGS MYPOPSEGS64
1418%else
1419 %define MYPUSHAD MYPUSHAD32
1420 %define MYPOPAD MYPOPAD32
1421 %define MYPUSHSEGS MYPUSHSEGS32
1422 %define MYPOPSEGS MYPOPSEGS32
1423%endif
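;
; HMR0Mixed.mac below emits the actual VMXR0StartVM32/VMXR0StartVM64 and
; SVMR0VMRun/SVMR0VMRun64 bodies using the MY* selections above. With
; VBOX_WITH_HYBRID_32BIT_KERNEL it is included a second time at the very end
; of this file to generate the _64 variants targeted by the wrapper thunks.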
1424
1425%include "HMR0Mixed.mac"
1426
1427
1428%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1429 ;
1430 ; Write the wrapper procedures.
1431 ;
1432 ; These routines are probably being too paranoid about selector
1433 ; restoring, but better safe than sorry...
1434 ;
1435
1436; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1437ALIGNCODE(16)
1438BEGINPROC VMXR0StartVM32
1439 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1440 je near NAME(VMXR0StartVM32_32)
1441
1442 ; stack frame
1443 push esi
1444 push edi
1445 push fs
1446 push gs
1447
1448 ; jmp far .thunk64
1449 db 0xea
1450 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1451
1452ALIGNCODE(16)
1453BITS 64
1454.thunk64:
1455 sub esp, 20h
1456 mov edi, [rsp + 20h + 14h] ; fResume
1457 mov esi, [rsp + 20h + 18h] ; pCtx
1458 mov edx, [rsp + 20h + 1Ch] ; pCache
1459 call NAME(VMXR0StartVM32_64)
1460 add esp, 20h
1461 jmp far [.fpthunk32 wrt rip]
1462.fpthunk32: ; 16:32 Pointer to .thunk32.
1463 dd .thunk32, NAME(SUPR0AbsKernelCS)
1464
1465BITS 32
1466ALIGNCODE(16)
1467.thunk32:
1468 pop gs
1469 pop fs
1470 pop edi
1471 pop esi
1472 ret
1473ENDPROC VMXR0StartVM32
1474
1475
1476; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1477ALIGNCODE(16)
1478BEGINPROC VMXR0StartVM64
1479 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1480 je .not_in_long_mode
1481
1482 ; stack frame
1483 push esi
1484 push edi
1485 push fs
1486 push gs
1487
1488 ; jmp far .thunk64
1489 db 0xea
1490 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1491
1492ALIGNCODE(16)
1493BITS 64
1494.thunk64:
1495 sub esp, 20h
1496 mov edi, [rsp + 20h + 14h] ; fResume
1497 mov esi, [rsp + 20h + 18h] ; pCtx
1498 mov edx, [rsp + 20h + 1Ch] ; pCache
1499 call NAME(VMXR0StartVM64_64)
1500 add esp, 20h
1501 jmp far [.fpthunk32 wrt rip]
1502.fpthunk32: ; 16:32 Pointer to .thunk32.
1503 dd .thunk32, NAME(SUPR0AbsKernelCS)
1504
1505BITS 32
1506ALIGNCODE(16)
1507.thunk32:
1508 pop gs
1509 pop fs
1510 pop edi
1511 pop esi
1512 ret
1513
1514.not_in_long_mode:
1515 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1516 ret
1517ENDPROC VMXR0StartVM64
1518
1519;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1520ALIGNCODE(16)
1521BEGINPROC SVMR0VMRun
1522 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1523 je near NAME(SVMR0VMRun_32)
1524
1525 ; stack frame
1526 push esi
1527 push edi
1528 push fs
1529 push gs
1530
1531 ; jmp far .thunk64
1532 db 0xea
1533 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1534
1535ALIGNCODE(16)
1536BITS 64
1537.thunk64:
1538 sub esp, 20h
1539 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1540 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1541 mov edx, [rsp + 20h + 24h] ; pCtx
1542 call NAME(SVMR0VMRun_64)
1543 add esp, 20h
1544 jmp far [.fpthunk32 wrt rip]
1545.fpthunk32: ; 16:32 Pointer to .thunk32.
1546 dd .thunk32, NAME(SUPR0AbsKernelCS)
1547
1548BITS 32
1549ALIGNCODE(16)
1550.thunk32:
1551 pop gs
1552 pop fs
1553 pop edi
1554 pop esi
1555 ret
1556ENDPROC SVMR0VMRun
1557
1558
1559; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1560ALIGNCODE(16)
1561BEGINPROC SVMR0VMRun64
1562 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1563 je .not_in_long_mode
1564
1565 ; stack frame
1566 push esi
1567 push edi
1568 push fs
1569 push gs
1570
1571 ; jmp far .thunk64
1572 db 0xea
1573 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1574
1575ALIGNCODE(16)
1576BITS 64
1577.thunk64:
1578 sub esp, 20h
1579 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1580 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1581 mov edx, [rsp + 20h + 24h] ; pCtx
1582 call NAME(SVMR0VMRun64_64)
1583 add esp, 20h
1584 jmp far [.fpthunk32 wrt rip]
1585.fpthunk32: ; 16:32 Pointer to .thunk32.
1586 dd .thunk32, NAME(SUPR0AbsKernelCS)
1587
1588BITS 32
1589ALIGNCODE(16)
1590.thunk32:
1591 pop gs
1592 pop fs
1593 pop edi
1594 pop esi
1595 ret
1596
1597.not_in_long_mode:
1598 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1599 ret
1600ENDPROC SVMR0VMRun64
1601
1602 ;
1603 ; Do it a second time pretending we're a 64-bit host.
1604 ;
1605 ; This *HAS* to be done at the very end of the file to avoid restoring
1606 ; macros. So, add new code *BEFORE* this mess.
1607 ;
1608 BITS 64
1609 %undef RT_ARCH_X86
1610 %define RT_ARCH_AMD64
1611 %undef ASM_CALL64_MSC
1612 %define ASM_CALL64_GCC
1613 %define xCB 8
1614 %define xSP rsp
1615 %define xBP rbp
1616 %define xAX rax
1617 %define xBX rbx
1618 %define xCX rcx
1619 %define xDX rdx
1620 %define xDI rdi
1621 %define xSI rsi
1622 %define MY_NAME(name) name %+ _64
1623 %define MYPUSHAD MYPUSHAD64
1624 %define MYPOPAD MYPOPAD64
1625 %define MYPUSHSEGS MYPUSHSEGS64
1626 %define MYPOPSEGS MYPOPSEGS64
1627
1628 %include "HMR0Mixed.mac"
1629%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL