VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@55715

Last change on this file since 55715 was 55301, checked in by vboxsync, 10 years ago

HMR0A.asm: Fixed some unused code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 45.8 KB
1; $Id: HMR0A.asm 55301 2015-04-16 11:00:14Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
30 %macro vmwrite 2,
31 int3
32 %endmacro
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*******************************************************************************
46;* Defined Constants And Macros *
47;*******************************************************************************
48%ifdef RT_ARCH_AMD64
49 %define MAYBE_64_BIT
50%endif
51%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
52 %define MAYBE_64_BIT
53%else
54 %ifdef RT_OS_DARWIN
55 %ifdef RT_ARCH_AMD64
56 ;;
57 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
58 ; risk loading a stale LDT value or something invalid.
59 %define HM_64_BIT_USE_NULL_SEL
60 %endif
61 %endif
62%endif
63
64%ifdef RT_ARCH_AMD64
65 %define VBOX_SKIP_RESTORE_SEG
66%endif
67
68;; The offset of the XMM registers in X86FXSTATE.
69; Use define because I'm too lazy to convert the struct.
70%define XMM_OFF_IN_X86FXSTATE 160
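; For reference: in the FXSAVE/X86FXSTATE image XMM0 starts at byte 160 and
; XMMn lives at 160 + n*10h, which is why the XMM save/restore wrappers further
; down use addressing like:
;       movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]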
71
72;; @def MYPUSHAD
73; Macro generating an equivalent to pushad
74
75;; @def MYPOPAD
76; Macro generating an equivalent to popad
77
78;; @def MYPUSHSEGS
79; Macro saving all segment registers on the stack.
80; @param 1 full width register name
81; @param 2 16-bit register name for \a 1.
82
83;; @def MYPOPSEGS
84; Macro restoring all segment registers on the stack
85; @param 1 full width register name
86; @param 2 16-bit register name for \a 1.
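;
; Illustrative invocation (a sketch of how the include file below is expected
; to use these; the real call sites are in HMR0Mixed.mac):
;       MYPUSHSEGS xAX, ax
;       ...
;       MYPOPSEGS  xAX, ax      ; must mirror the push order exactly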
87
88%ifdef ASM_CALL64_GCC
89 %macro MYPUSHAD64 0
90 push r15
91 push r14
92 push r13
93 push r12
94 push rbx
95 %endmacro
96 %macro MYPOPAD64 0
97 pop rbx
98 pop r12
99 pop r13
100 pop r14
101 pop r15
102 %endmacro
103
104%else ; ASM_CALL64_MSC
105 %macro MYPUSHAD64 0
106 push r15
107 push r14
108 push r13
109 push r12
110 push rbx
111 push rsi
112 push rdi
113 %endmacro
114 %macro MYPOPAD64 0
115 pop rdi
116 pop rsi
117 pop rbx
118 pop r12
119 pop r13
120 pop r14
121 pop r15
122 %endmacro
123%endif
124
125%ifdef VBOX_SKIP_RESTORE_SEG
126 %macro MYPUSHSEGS64 2
127 %endmacro
128
129 %macro MYPOPSEGS64 2
130 %endmacro
131%else ; !VBOX_SKIP_RESTORE_SEG
132 ; trashes rax, rdx & rcx
133 %macro MYPUSHSEGS64 2
134 %ifndef HM_64_BIT_USE_NULL_SEL
135 mov %2, es
136 push %1
137 mov %2, ds
138 push %1
139 %endif
140
141 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't restore it, so we must save and restore it ourselves.
142 mov ecx, MSR_K8_FS_BASE
143 rdmsr
144 push rdx
145 push rax
146 %ifndef HM_64_BIT_USE_NULL_SEL
147 push fs
148 %endif
149
150 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
151 mov ecx, MSR_K8_GS_BASE
152 rdmsr
153 push rdx
154 push rax
155 %ifndef HM_64_BIT_USE_NULL_SEL
156 push gs
157 %endif
158 %endmacro
159
160 ; trashes rax, rdx & rcx
161 %macro MYPOPSEGS64 2
162 ; Note: do not step through this code with a debugger!
163 %ifndef HM_64_BIT_USE_NULL_SEL
164 xor eax, eax
165 mov ds, ax
166 mov es, ax
167 mov fs, ax
168 mov gs, ax
169 %endif
170
171 %ifndef HM_64_BIT_USE_NULL_SEL
172 pop gs
173 %endif
174 pop rax
175 pop rdx
176 mov ecx, MSR_K8_GS_BASE
177 wrmsr
178
179 %ifndef HM_64_BIT_USE_NULL_SEL
180 pop fs
181 %endif
182 pop rax
183 pop rdx
184 mov ecx, MSR_K8_FS_BASE
185 wrmsr
186 ; Now it's safe to step again
187
188 %ifndef HM_64_BIT_USE_NULL_SEL
189 pop %1
190 mov ds, %2
191 pop %1
192 mov es, %2
193 %endif
194 %endmacro
195%endif ; VBOX_SKIP_RESTORE_SEG
196
197%macro MYPUSHAD32 0
198 pushad
199%endmacro
200%macro MYPOPAD32 0
201 popad
202%endmacro
203
204%macro MYPUSHSEGS32 2
205 push ds
206 push es
207 push fs
208 push gs
209%endmacro
210%macro MYPOPSEGS32 2
211 pop gs
212 pop fs
213 pop es
214 pop ds
215%endmacro
216
217
218;*******************************************************************************
219;* External Symbols *
220;*******************************************************************************
221%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
222extern NAME(SUPR0AbsIs64bit)
223extern NAME(SUPR0Abs64bitKernelCS)
224extern NAME(SUPR0Abs64bitKernelSS)
225extern NAME(SUPR0Abs64bitKernelDS)
226extern NAME(SUPR0AbsKernelCS)
227%endif
228%ifdef VBOX_WITH_KERNEL_USING_XMM
229extern NAME(CPUMIsGuestFPUStateActive)
230%endif
231
232
233;*******************************************************************************
234;* Global Variables *
235;*******************************************************************************
236%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
237BEGINDATA
238;;
239; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
240; needing to clobber a register. (This trick doesn't quite work for PE btw.
241; but that's not relevant atm.)
242GLOBALNAME g_fVMXIs64bitHost
243 dd NAME(SUPR0AbsIs64bit)
244%endif
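;;
; Note on the hybrid 32-bit kernel thunks used throughout this file: each
; helper below tests this flag and, when the host kernel is actually in long
; mode, performs a far jump (db 0xea + 16:32 target) into the 64-bit kernel
; code segment, executes the VMX/SVM instruction under BITS 64, and far-jumps
; back through its .fpret pointer to resume 32-bit execution.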
245
246
247BEGINCODE
248
249
250;/**
251; * Restores host-state fields.
252; *
253; * @returns VBox status code
254; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
255; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
256; */
257ALIGNCODE(16)
258BEGINPROC VMXRestoreHostState
259%ifdef RT_ARCH_AMD64
260 %ifndef ASM_CALL64_GCC
261 ; Use GCC's input registers since we'll be needing both rcx and rdx further
262 ; down with the wrmsr instruction. Use R10 and R11 for saving RDI and RSI
263 ; since the MSC calling convention requires the latter two to be preserved.
264 mov r10, rdi
265 mov r11, rsi
266 mov rdi, rcx
267 mov rsi, rdx
268 %endif
269
270 test edi, VMX_RESTORE_HOST_GDTR
271 jz .test_idtr
272 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
273
274.test_idtr:
275 test edi, VMX_RESTORE_HOST_IDTR
276 jz .test_ds
277 lidt [rsi + VMXRESTOREHOST.HostIdtr]
278
279.test_ds:
280 test edi, VMX_RESTORE_HOST_SEL_DS
281 jz .test_es
282 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
283 mov ds, eax
284
285.test_es:
286 test edi, VMX_RESTORE_HOST_SEL_ES
287 jz .test_tr
288 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
289 mov es, eax
290
291.test_tr:
292 test edi, VMX_RESTORE_HOST_SEL_TR
293 jz .test_fs
294 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
295 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
296 mov ax, dx
297 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
298 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
299 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY
300 jnz .gdt_readonly
301 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
302 ltr dx
303 jmp short .test_fs
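 ; The host GDT may be mapped read-only (e.g. on newer darwin kernels), in
 ; which case we must temporarily clear CR0.WP to be able to clear the TSS
 ; descriptor's busy bit.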
304.gdt_readonly:
305 mov rcx, cr0
306 mov r9, rcx
307 and rcx, ~X86_CR0_WP
308 mov cr0, rcx
309 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
310 ltr dx
311 mov cr0, r9
312
313.test_fs:
314 ;
315 ; When restoring the selector values for FS and GS, we'll temporarily trash
316 ; the base address (at least the high 32 bits, but quite possibly the
317 ; whole base address); the wrmsr will restore it correctly. (VT-x actually
318 ; restores the base correctly when leaving guest mode, but not the selector
319 ; value, so there is little problem with interrupts being enabled prior to
320 ; this restore job.)
321 ; We'll disable ints once for both FS and GS as that's probably faster.
322 ;
323 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
324 jz .restore_success
325 pushfq
326 cli ; (see above)
327
328 test edi, VMX_RESTORE_HOST_SEL_FS
329 jz .test_gs
330 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
331 mov fs, eax
332 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
333 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
334 mov ecx, MSR_K8_FS_BASE
335 wrmsr
336
337.test_gs:
338 test edi, VMX_RESTORE_HOST_SEL_GS
339 jz .restore_flags
340 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
341 mov gs, eax
342 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
343 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
344 mov ecx, MSR_K8_GS_BASE
345 wrmsr
346
347.restore_flags:
348 popfq
349
350.restore_success:
351 mov eax, VINF_SUCCESS
352 %ifndef ASM_CALL64_GCC
353 ; Restore RDI and RSI on MSC.
354 mov rdi, r10
355 mov rsi, r11
356 %endif
357%else ; RT_ARCH_X86
358 mov eax, VERR_NOT_IMPLEMENTED
359%endif
360 ret
361ENDPROC VMXRestoreHostState
362
363
364;/**
365; * Dispatches an NMI to the host.
366; */
367ALIGNCODE(16)
368BEGINPROC VMXDispatchHostNmi
369 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
370 ret
371ENDPROC VMXDispatchHostNmi
372
373
374;/**
375; * Executes VMWRITE, 64-bit value.
376; *
377; * @returns VBox status code
378; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
379; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
380; */
381ALIGNCODE(16)
382BEGINPROC VMXWriteVmcs64
383%ifdef RT_ARCH_AMD64
384 %ifdef ASM_CALL64_GCC
385 and edi, 0ffffffffh
386 xor rax, rax
387 vmwrite rdi, rsi
388 %else
389 and ecx, 0ffffffffh
390 xor rax, rax
391 vmwrite rcx, rdx
392 %endif
393%else ; RT_ARCH_X86
394 mov ecx, [esp + 4] ; idxField
395 lea edx, [esp + 8] ; &u64Data
396 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
397 cmp byte [NAME(g_fVMXIs64bitHost)], 0
398 jz .legacy_mode
399 db 0xea ; jmp far .sixtyfourbit_mode
400 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
401.legacy_mode:
402 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
403 vmwrite ecx, [edx] ; low dword
404 jz .done
405 jc .done
406 inc ecx
407 xor eax, eax
408 vmwrite ecx, [edx + 4] ; high dword
409.done:
410%endif ; RT_ARCH_X86
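 ; VMX status convention: CF=1 (VMfailInvalid) means the current VMCS pointer
 ; is invalid, ZF=1 (VMfailValid) means the VMCS is fine but the operation
 ; failed, e.g. an unsupported field encoding.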
411 jnc .valid_vmcs
412 mov eax, VERR_VMX_INVALID_VMCS_PTR
413 ret
414.valid_vmcs:
415 jnz .the_end
416 mov eax, VERR_VMX_INVALID_VMCS_FIELD
417.the_end:
418 ret
419
420%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
421ALIGNCODE(16)
422BITS 64
423.sixtyfourbit_mode:
424 and edx, 0ffffffffh
425 and ecx, 0ffffffffh
426 xor eax, eax
427 vmwrite rcx, [rdx]
428 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
429 cmovz eax, r8d
430 mov r9d, VERR_VMX_INVALID_VMCS_PTR
431 cmovc eax, r9d
432 jmp far [.fpret wrt rip]
433.fpret: ; 16:32 Pointer to .the_end.
434 dd .the_end, NAME(SUPR0AbsKernelCS)
435BITS 32
436%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
437ENDPROC VMXWriteVmcs64
438
439
440;/**
441; * Executes VMREAD, 64-bit value
442; *
443; * @returns VBox status code
444; * @param idxField VMCS index
445; * @param pData Ptr to store VM field value
446; */
447;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
448ALIGNCODE(16)
449BEGINPROC VMXReadVmcs64
450%ifdef RT_ARCH_AMD64
451 %ifdef ASM_CALL64_GCC
452 and edi, 0ffffffffh
453 xor rax, rax
454 vmread [rsi], rdi
455 %else
456 and ecx, 0ffffffffh
457 xor rax, rax
458 vmread [rdx], rcx
459 %endif
460%else ; RT_ARCH_X86
461 mov ecx, [esp + 4] ; idxField
462 mov edx, [esp + 8] ; pData
463 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
464 cmp byte [NAME(g_fVMXIs64bitHost)], 0
465 jz .legacy_mode
466 db 0xea ; jmp far .sixtyfourbit_mode
467 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
468.legacy_mode:
469 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
470 vmread [edx], ecx ; low dword
471 jz .done
472 jc .done
473 inc ecx
474 xor eax, eax
475 vmread [edx + 4], ecx ; high dword
476.done:
477%endif ; RT_ARCH_X86
478 jnc .valid_vmcs
479 mov eax, VERR_VMX_INVALID_VMCS_PTR
480 ret
481.valid_vmcs:
482 jnz .the_end
483 mov eax, VERR_VMX_INVALID_VMCS_FIELD
484.the_end:
485 ret
486
487%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
488ALIGNCODE(16)
489BITS 64
490.sixtyfourbit_mode:
491 and edx, 0ffffffffh
492 and ecx, 0ffffffffh
493 xor eax, eax
494 vmread [rdx], rcx
495 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
496 cmovz eax, r8d
497 mov r9d, VERR_VMX_INVALID_VMCS_PTR
498 cmovc eax, r9d
499 jmp far [.fpret wrt rip]
500.fpret: ; 16:32 Pointer to .the_end.
501 dd .the_end, NAME(SUPR0AbsKernelCS)
502BITS 32
503%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
504ENDPROC VMXReadVmcs64
505
506
507;/**
508; * Executes VMREAD, 32-bit value.
509; *
510; * @returns VBox status code
511; * @param idxField VMCS index
512; * @param pu32Data Ptr to store VM field value
513; */
514;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
515ALIGNCODE(16)
516BEGINPROC VMXReadVmcs32
517%ifdef RT_ARCH_AMD64
518 %ifdef ASM_CALL64_GCC
519 and edi, 0ffffffffh
520 xor rax, rax
521 vmread r10, rdi
522 mov [rsi], r10d
523 %else
524 and ecx, 0ffffffffh
525 xor rax, rax
526 vmread r10, rcx
527 mov [rdx], r10d
528 %endif
529%else ; RT_ARCH_X86
530 mov ecx, [esp + 4] ; idxField
531 mov edx, [esp + 8] ; pu32Data
532 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
533 cmp byte [NAME(g_fVMXIs64bitHost)], 0
534 jz .legacy_mode
535 db 0xea ; jmp far .sixtyfourbit_mode
536 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
537.legacy_mode:
538 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
539 xor eax, eax
540 vmread [edx], ecx
541%endif ; RT_ARCH_X86
542 jnc .valid_vmcs
543 mov eax, VERR_VMX_INVALID_VMCS_PTR
544 ret
545.valid_vmcs:
546 jnz .the_end
547 mov eax, VERR_VMX_INVALID_VMCS_FIELD
548.the_end:
549 ret
550
551%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
552ALIGNCODE(16)
553BITS 64
554.sixtyfourbit_mode:
555 and edx, 0ffffffffh
556 and ecx, 0ffffffffh
557 xor eax, eax
558 vmread r10, rcx
559 mov [rdx], r10d
560 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
561 cmovz eax, r8d
562 mov r9d, VERR_VMX_INVALID_VMCS_PTR
563 cmovc eax, r9d
564 jmp far [.fpret wrt rip]
565.fpret: ; 16:32 Pointer to .the_end.
566 dd .the_end, NAME(SUPR0AbsKernelCS)
567BITS 32
568%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
569ENDPROC VMXReadVmcs32
570
571
572;/**
573; * Executes VMWRITE, 32-bit value.
574; *
575; * @returns VBox status code
576; * @param idxField VMCS index
577 ; * @param u32Data The 32-bit value to write to the VMCS field.
578; */
579;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
580ALIGNCODE(16)
581BEGINPROC VMXWriteVmcs32
582%ifdef RT_ARCH_AMD64
583 %ifdef ASM_CALL64_GCC
584 and edi, 0ffffffffh
585 and esi, 0ffffffffh
586 xor rax, rax
587 vmwrite rdi, rsi
588 %else
589 and ecx, 0ffffffffh
590 and edx, 0ffffffffh
591 xor rax, rax
592 vmwrite rcx, rdx
593 %endif
594%else ; RT_ARCH_X86
595 mov ecx, [esp + 4] ; idxField
596 mov edx, [esp + 8] ; u32Data
597 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
598 cmp byte [NAME(g_fVMXIs64bitHost)], 0
599 jz .legacy_mode
600 db 0xea ; jmp far .sixtyfourbit_mode
601 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
602.legacy_mode:
603 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
604 xor eax, eax
605 vmwrite ecx, edx
606%endif ; RT_ARCH_X86
607 jnc .valid_vmcs
608 mov eax, VERR_VMX_INVALID_VMCS_PTR
609 ret
610.valid_vmcs:
611 jnz .the_end
612 mov eax, VERR_VMX_INVALID_VMCS_FIELD
613.the_end:
614 ret
615
616%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
617ALIGNCODE(16)
618BITS 64
619.sixtyfourbit_mode:
620 and edx, 0ffffffffh
621 and ecx, 0ffffffffh
622 xor eax, eax
623 vmwrite rcx, rdx
624 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
625 cmovz eax, r8d
626 mov r9d, VERR_VMX_INVALID_VMCS_PTR
627 cmovc eax, r9d
628 jmp far [.fpret wrt rip]
629.fpret: ; 16:32 Pointer to .the_end.
630 dd .the_end, NAME(SUPR0AbsKernelCS)
631BITS 32
632%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
633ENDPROC VMXWriteVmcs32
634
635
636;/**
637; * Executes VMXON
638; *
639; * @returns VBox status code
640; * @param HCPhysVMXOn Physical address of VMXON structure
641; */
642;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
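; Illustrative usage (a sketch; the variable name is an assumption):
;     rc = VMXEnable(HCPhysVmxonRegion);
; CR4.VMXE must already be set and the VMXON region must hold the VMCS
; revision identifier, otherwise the instruction raises #UD or fails.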
643BEGINPROC VMXEnable
644%ifdef RT_ARCH_AMD64
645 xor rax, rax
646 %ifdef ASM_CALL64_GCC
647 push rdi
648 %else
649 push rcx
650 %endif
651 vmxon [rsp]
652%else ; RT_ARCH_X86
653 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
654 cmp byte [NAME(g_fVMXIs64bitHost)], 0
655 jz .legacy_mode
656 db 0xea ; jmp far .sixtyfourbit_mode
657 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
658.legacy_mode:
659 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
660 xor eax, eax
661 vmxon [esp + 4]
662%endif ; RT_ARCH_X86
663 jnc .good
664 mov eax, VERR_VMX_INVALID_VMXON_PTR
665 jmp .the_end
666
667.good:
668 jnz .the_end
669 mov eax, VERR_VMX_VMXON_FAILED
670
671.the_end:
672%ifdef RT_ARCH_AMD64
673 add rsp, 8
674%endif
675 ret
676
677%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
678ALIGNCODE(16)
679BITS 64
680.sixtyfourbit_mode:
681 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
682 and edx, 0ffffffffh
683 xor eax, eax
684 vmxon [rdx]
685 mov r8d, VERR_VMX_VMXON_FAILED
686 cmovz eax, r8d
687 mov r9d, VERR_VMX_INVALID_VMXON_PTR
688 cmovc eax, r9d
689 jmp far [.fpret wrt rip]
690.fpret: ; 16:32 Pointer to .the_end.
691 dd .the_end, NAME(SUPR0AbsKernelCS)
692BITS 32
693%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
694ENDPROC VMXEnable
695
696
697;/**
698; * Executes VMXOFF
699; */
700;DECLASM(void) VMXDisable(void);
701BEGINPROC VMXDisable
702%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
703 cmp byte [NAME(g_fVMXIs64bitHost)], 0
704 jz .legacy_mode
705 db 0xea ; jmp far .sixtyfourbit_mode
706 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
707.legacy_mode:
708%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
709 vmxoff
710.the_end:
711 ret
712
713%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
714ALIGNCODE(16)
715BITS 64
716.sixtyfourbit_mode:
717 vmxoff
718 jmp far [.fpret wrt rip]
719.fpret: ; 16:32 Pointer to .the_end.
720 dd .the_end, NAME(SUPR0AbsKernelCS)
721BITS 32
722%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
723ENDPROC VMXDisable
724
725
726;/**
727; * Executes VMCLEAR
728; *
729; * @returns VBox status code
730; * @param HCPhysVmcs Physical address of VM control structure
731; */
732;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
733ALIGNCODE(16)
734BEGINPROC VMXClearVmcs
735%ifdef RT_ARCH_AMD64
736 xor rax, rax
737 %ifdef ASM_CALL64_GCC
738 push rdi
739 %else
740 push rcx
741 %endif
742 vmclear [rsp]
743%else ; RT_ARCH_X86
744 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
745 cmp byte [NAME(g_fVMXIs64bitHost)], 0
746 jz .legacy_mode
747 db 0xea ; jmp far .sixtyfourbit_mode
748 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
749.legacy_mode:
750 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
751 xor eax, eax
752 vmclear [esp + 4]
753%endif ; RT_ARCH_X86
754 jnc .the_end
755 mov eax, VERR_VMX_INVALID_VMCS_PTR
756.the_end:
757%ifdef RT_ARCH_AMD64
758 add rsp, 8
759%endif
760 ret
761
762%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
763ALIGNCODE(16)
764BITS 64
765.sixtyfourbit_mode:
766 lea rdx, [rsp + 4] ; &HCPhysVmcs
767 and edx, 0ffffffffh
768 xor eax, eax
769 vmclear [rdx]
770 mov r9d, VERR_VMX_INVALID_VMCS_PTR
771 cmovc eax, r9d
772 jmp far [.fpret wrt rip]
773.fpret: ; 16:32 Pointer to .the_end.
774 dd .the_end, NAME(SUPR0AbsKernelCS)
775BITS 32
776%endif
777ENDPROC VMXClearVmcs
778
779
780;/**
781; * Executes VMPTRLD
782; *
783; * @returns VBox status code
784; * @param HCPhysVmcs Physical address of VMCS structure
785; */
786;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
787ALIGNCODE(16)
788BEGINPROC VMXActivateVmcs
789%ifdef RT_ARCH_AMD64
790 xor rax, rax
791 %ifdef ASM_CALL64_GCC
792 push rdi
793 %else
794 push rcx
795 %endif
796 vmptrld [rsp]
797%else
798 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
799 cmp byte [NAME(g_fVMXIs64bitHost)], 0
800 jz .legacy_mode
801 db 0xea ; jmp far .sixtyfourbit_mode
802 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
803.legacy_mode:
804 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
805 xor eax, eax
806 vmptrld [esp + 4]
807%endif
808 jnc .the_end
809 mov eax, VERR_VMX_INVALID_VMCS_PTR
810.the_end:
811%ifdef RT_ARCH_AMD64
812 add rsp, 8
813%endif
814 ret
815
816%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
817ALIGNCODE(16)
818BITS 64
819.sixtyfourbit_mode:
820 lea rdx, [rsp + 4] ; &HCPhysVmcs
821 and edx, 0ffffffffh
822 xor eax, eax
823 vmptrld [rdx]
824 mov r9d, VERR_VMX_INVALID_VMCS_PTR
825 cmovc eax, r9d
826 jmp far [.fpret wrt rip]
827.fpret: ; 16:32 Pointer to .the_end.
828 dd .the_end, NAME(SUPR0AbsKernelCS)
829BITS 32
830%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
831ENDPROC VMXActivateVmcs
832
833
834;/**
835; * Executes VMPTRST
836; *
837; * @returns VBox status code
838 ; * @param [esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
839; */
840;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
841BEGINPROC VMXGetActivatedVmcs
842%ifdef RT_OS_OS2
843 mov eax, VERR_NOT_SUPPORTED
844 ret
845%else
846 %ifdef RT_ARCH_AMD64
847 %ifdef ASM_CALL64_GCC
848 vmptrst qword [rdi]
849 %else
850 vmptrst qword [rcx]
851 %endif
852 %else
853 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
854 cmp byte [NAME(g_fVMXIs64bitHost)], 0
855 jz .legacy_mode
856 db 0xea ; jmp far .sixtyfourbit_mode
857 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
858.legacy_mode:
859 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
860 vmptrst qword [esp+04h]
861 %endif
862 xor eax, eax
863.the_end:
864 ret
865
866 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
867ALIGNCODE(16)
868BITS 64
869.sixtyfourbit_mode:
870 lea rdx, [rsp + 4] ; &HCPhysVmcs
871 and edx, 0ffffffffh
872 vmptrst qword [rdx]
873 xor eax, eax
874 jmp far [.fpret wrt rip]
875.fpret: ; 16:32 Pointer to .the_end.
876 dd .the_end, NAME(SUPR0AbsKernelCS)
877BITS 32
878 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
879%endif
880ENDPROC VMXGetActivatedVmcs
881
882;/**
883 ; * Invalidates EPT-derived translations using INVEPT.
884; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
885; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
886; */
887;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
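; Note: invept is emitted as raw bytes below (66 0F 38 80 /r, the trailing byte
; being the ModRM that selects the operands), presumably because the assemblers
; in use did not know the mnemonic; invvpid (66 0F 38 81 /r) is handled the
; same way further down.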
888BEGINPROC VMXR0InvEPT
889%ifdef RT_ARCH_AMD64
890 %ifdef ASM_CALL64_GCC
891 and edi, 0ffffffffh
892 xor rax, rax
893; invept rdi, qword [rsi]
894 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
895 %else
896 and ecx, 0ffffffffh
897 xor rax, rax
898; invept rcx, qword [rdx]
899 DB 0x66, 0x0F, 0x38, 0x80, 0xA
900 %endif
901%else
902 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
903 cmp byte [NAME(g_fVMXIs64bitHost)], 0
904 jz .legacy_mode
905 db 0xea ; jmp far .sixtyfourbit_mode
906 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
907.legacy_mode:
908 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
909 mov ecx, [esp + 4]
910 mov edx, [esp + 8]
911 xor eax, eax
912; invept ecx, qword [edx]
913 DB 0x66, 0x0F, 0x38, 0x80, 0xA
914%endif
915 jnc .valid_vmcs
916 mov eax, VERR_VMX_INVALID_VMCS_PTR
917 ret
918.valid_vmcs:
919 jnz .the_end
920 mov eax, VERR_INVALID_PARAMETER
921.the_end:
922 ret
923
924%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
925ALIGNCODE(16)
926BITS 64
927.sixtyfourbit_mode:
928 and esp, 0ffffffffh
929 mov ecx, [rsp + 4] ; enmFlush
930 mov edx, [rsp + 8] ; pDescriptor
931 xor eax, eax
932; invept rcx, qword [rdx]
933 DB 0x66, 0x0F, 0x38, 0x80, 0xA
934 mov r8d, VERR_INVALID_PARAMETER
935 cmovz eax, r8d
936 mov r9d, VERR_VMX_INVALID_VMCS_PTR
937 cmovc eax, r9d
938 jmp far [.fpret wrt rip]
939.fpret: ; 16:32 Pointer to .the_end.
940 dd .the_end, NAME(SUPR0AbsKernelCS)
941BITS 32
942%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
943ENDPROC VMXR0InvEPT
944
945
946;/**
947 ; * Invalidates TLB mappings tagged with a VPID using INVVPID.
948; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
949; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
950; */
951;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
952BEGINPROC VMXR0InvVPID
953%ifdef RT_ARCH_AMD64
954 %ifdef ASM_CALL64_GCC
955 and edi, 0ffffffffh
956 xor rax, rax
957; invvpid rdi, qword [rsi]
958 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
959 %else
960 and ecx, 0ffffffffh
961 xor rax, rax
962; invvpid rcx, qword [rdx]
963 DB 0x66, 0x0F, 0x38, 0x81, 0xA
964 %endif
965%else
966 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
967 cmp byte [NAME(g_fVMXIs64bitHost)], 0
968 jz .legacy_mode
969 db 0xea ; jmp far .sixtyfourbit_mode
970 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
971.legacy_mode:
972 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
973 mov ecx, [esp + 4]
974 mov edx, [esp + 8]
975 xor eax, eax
976; invvpid ecx, qword [edx]
977 DB 0x66, 0x0F, 0x38, 0x81, 0xA
978%endif
979 jnc .valid_vmcs
980 mov eax, VERR_VMX_INVALID_VMCS_PTR
981 ret
982.valid_vmcs:
983 jnz .the_end
984 mov eax, VERR_INVALID_PARAMETER
985.the_end:
986 ret
987
988%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
989ALIGNCODE(16)
990BITS 64
991.sixtyfourbit_mode:
992 and esp, 0ffffffffh
993 mov ecx, [rsp + 4] ; enmFlush
994 mov edx, [rsp + 8] ; pDescriptor
995 xor eax, eax
996; invvpid rcx, qword [rdx]
997 DB 0x66, 0x0F, 0x38, 0x81, 0xA
998 mov r8d, VERR_INVALID_PARAMETER
999 cmovz eax, r8d
1000 mov r9d, VERR_VMX_INVALID_VMCS_PTR
1001 cmovc eax, r9d
1002 jmp far [.fpret wrt rip]
1003.fpret: ; 16:32 Pointer to .the_end.
1004 dd .the_end, NAME(SUPR0AbsKernelCS)
1005BITS 32
1006%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1007ENDPROC VMXR0InvVPID
1008
1009
1010%if GC_ARCH_BITS == 64
1011;;
1012; Executes INVLPGA
1013;
1014; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
1015; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
1016;
1017;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
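; INVLPGA takes the virtual address in rAX/eAX and the ASID in ECX, which is
; what the register shuffling below sets up.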
1018BEGINPROC SVMR0InvlpgA
1019%ifdef RT_ARCH_AMD64
1020 %ifdef ASM_CALL64_GCC
1021 mov rax, rdi
1022 mov rcx, rsi
1023 %else
1024 mov rax, rcx
1025 mov rcx, rdx
1026 %endif
1027%else
1028 mov eax, [esp + 4]
1029 mov ecx, [esp + 0Ch]
1030%endif
1031 invlpga [xAX], ecx
1032 ret
1033ENDPROC SVMR0InvlpgA
1034
1035%else ; GC_ARCH_BITS != 64
1036;;
1037; Executes INVLPGA
1038;
1039; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
1040; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
1041;
1042;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1043BEGINPROC SVMR0InvlpgA
1044%ifdef RT_ARCH_AMD64
1045 %ifdef ASM_CALL64_GCC
1046 movzx rax, edi
1047 mov ecx, esi
1048 %else
1049 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1050 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1051 ; values also set the upper 32 bits of the register to zero. Consequently
1052 ; there is no need for an instruction movzlq.''
1053 mov eax, ecx
1054 mov ecx, edx
1055 %endif
1056%else
1057 mov eax, [esp + 4]
1058 mov ecx, [esp + 8]
1059%endif
1060 invlpga [xAX], ecx
1061 ret
1062ENDPROC SVMR0InvlpgA
1063
1064%endif ; GC_ARCH_BITS != 64
1065
1066%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1067
1068;/**
1069; * Gets 64-bit GDTR and IDTR on darwin.
1070; * @param pGdtr Where to store the 64-bit GDTR.
1071; * @param pIdtr Where to store the 64-bit IDTR.
1072; */
1073;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
1074ALIGNCODE(16)
1075BEGINPROC HMR0Get64bitGdtrAndIdtr
1076 db 0xea ; jmp far .sixtyfourbit_mode
1077 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1078.the_end:
1079 ret
1080
1081ALIGNCODE(16)
1082BITS 64
1083.sixtyfourbit_mode:
1084 and esp, 0ffffffffh
1085 mov ecx, [rsp + 4] ; pGdtr
1086 mov edx, [rsp + 8] ; pIdtr
1087 sgdt [rcx]
1088 sidt [rdx]
1089 jmp far [.fpret wrt rip]
1090.fpret: ; 16:32 Pointer to .the_end.
1091 dd .the_end, NAME(SUPR0AbsKernelCS)
1092BITS 32
1093ENDPROC HMR0Get64bitGdtrAndIdtr
1094
1095
1096;/**
1097; * Gets 64-bit CR3 on darwin.
1098; * @returns CR3
1099; */
1100;DECLASM(uint64_t) HMR0Get64bitCR3(void);
1101ALIGNCODE(16)
1102BEGINPROC HMR0Get64bitCR3
1103 db 0xea ; jmp far .sixtyfourbit_mode
1104 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1105.the_end:
1106 ret
1107
1108ALIGNCODE(16)
1109BITS 64
1110.sixtyfourbit_mode:
1111 mov rax, cr3
1112 mov rdx, rax
1113 shr rdx, 32
1114 jmp far [.fpret wrt rip]
1115.fpret: ; 16:32 Pointer to .the_end.
1116 dd .the_end, NAME(SUPR0AbsKernelCS)
1117BITS 32
1118ENDPROC HMR0Get64bitCR3
1119
1120%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1121
1122%ifdef VBOX_WITH_KERNEL_USING_XMM
1123
1124;;
1125; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1126 ; loads the guest ones when necessary.
1127;
1128; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
1129;
1130; @returns eax
1131;
1132; @param fResumeVM msc:rcx
1133; @param pCtx msc:rdx
1134; @param pVMCSCache msc:r8
1135; @param pVM msc:r9
1136; @param pVCpu msc:[rbp+30h]
1137; @param pfnStartVM msc:[rbp+38h]
1138;
1139; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
1140;
1141; ASSUMING 64-bit and windows for now.
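; Only xmm6-xmm15 are saved around the call because those are the non-volatile
; (callee-saved) XMM registers in the Windows x64 calling convention, while
; xmm0-xmm5 are volatile. The [xSP + 020h] store below places the 5th argument
; (pVCpu) just past the 32-byte shadow space that convention requires.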
1142ALIGNCODE(16)
1143BEGINPROC HMR0VMXStartVMWrapXMM
1144 push xBP
1145 mov xBP, xSP
1146 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1147
1148 ; spill input parameters.
1149 mov [xBP + 010h], rcx ; fResumeVM
1150 mov [xBP + 018h], rdx ; pCtx
1151 mov [xBP + 020h], r8 ; pVMCSCache
1152 mov [xBP + 028h], r9 ; pVM
1153
1154 ; Ask CPUM whether we've started using the FPU yet.
1155 mov rcx, [xBP + 30h] ; pVCpu
1156 call NAME(CPUMIsGuestFPUStateActive)
1157 test al, al
1158 jnz .guest_fpu_state_active
1159
1160 ; No need to mess with XMM registers; just call the start routine and return.
1161 mov r11, [xBP + 38h] ; pfnStartVM
1162 mov r10, [xBP + 30h] ; pVCpu
1163 mov [xSP + 020h], r10
1164 mov rcx, [xBP + 010h] ; fResumeVM
1165 mov rdx, [xBP + 018h] ; pCtx
1166 mov r8, [xBP + 020h] ; pVMCSCache
1167 mov r9, [xBP + 028h] ; pVM
1168 call r11
1169
1170 leave
1171 ret
1172
1173ALIGNCODE(8)
1174.guest_fpu_state_active:
1175 ; Save the host XMM registers.
1176 movdqa [rsp + 040h + 000h], xmm6
1177 movdqa [rsp + 040h + 010h], xmm7
1178 movdqa [rsp + 040h + 020h], xmm8
1179 movdqa [rsp + 040h + 030h], xmm9
1180 movdqa [rsp + 040h + 040h], xmm10
1181 movdqa [rsp + 040h + 050h], xmm11
1182 movdqa [rsp + 040h + 060h], xmm12
1183 movdqa [rsp + 040h + 070h], xmm13
1184 movdqa [rsp + 040h + 080h], xmm14
1185 movdqa [rsp + 040h + 090h], xmm15
1186
1187 ; Load the full guest XMM register state.
1188 mov r10, [xBP + 018h] ; pCtx
1189 mov r10, [r10 + CPUMCTX.pXStateR0]
1190 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1191 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1192 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1193 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1194 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1195 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1196 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1197 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1198 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1199 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1200 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1201 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1202 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1203 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1204 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1205 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1206
1207 ; Make the call (same as in the other case).
1208 mov r11, [xBP + 38h] ; pfnStartVM
1209 mov r10, [xBP + 30h] ; pVCpu
1210 mov [xSP + 020h], r10
1211 mov rcx, [xBP + 010h] ; fResumeVM
1212 mov rdx, [xBP + 018h] ; pCtx
1213 mov r8, [xBP + 020h] ; pVMCSCache
1214 mov r9, [xBP + 028h] ; pVM
1215 call r11
1216
1217 ; Save the guest XMM registers.
1218 mov r10, [xBP + 018h] ; pCtx
1219 mov r10, [r10 + CPUMCTX.pXStateR0]
1220 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1221 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1222 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1223 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1224 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1225 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1226 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1227 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1228 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1229 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1230 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1231 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1232 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1233 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1234 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1235 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1236
1237 ; Load the host XMM registers.
1238 movdqa xmm6, [rsp + 040h + 000h]
1239 movdqa xmm7, [rsp + 040h + 010h]
1240 movdqa xmm8, [rsp + 040h + 020h]
1241 movdqa xmm9, [rsp + 040h + 030h]
1242 movdqa xmm10, [rsp + 040h + 040h]
1243 movdqa xmm11, [rsp + 040h + 050h]
1244 movdqa xmm12, [rsp + 040h + 060h]
1245 movdqa xmm13, [rsp + 040h + 070h]
1246 movdqa xmm14, [rsp + 040h + 080h]
1247 movdqa xmm15, [rsp + 040h + 090h]
1248 leave
1249 ret
1250ENDPROC HMR0VMXStartVMWrapXMM
1251
1252;;
1253; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1254 ; loads the guest ones when necessary.
1255;
1256; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
1257;
1258; @returns eax
1259;
1260; @param pVMCBHostPhys msc:rcx
1261; @param pVMCBPhys msc:rdx
1262; @param pCtx msc:r8
1263; @param pVM msc:r9
1264; @param pVCpu msc:[rbp+30h]
1265; @param pfnVMRun msc:[rbp+38h]
1266;
1267; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1268;
1269; ASSUMING 64-bit and windows for now.
1270ALIGNCODE(16)
1271BEGINPROC HMR0SVMRunWrapXMM
1272 push xBP
1273 mov xBP, xSP
1274 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1275
1276 ; spill input parameters.
1277 mov [xBP + 010h], rcx ; pVMCBHostPhys
1278 mov [xBP + 018h], rdx ; pVMCBPhys
1279 mov [xBP + 020h], r8 ; pCtx
1280 mov [xBP + 028h], r9 ; pVM
1281
1282 ; Ask CPUM whether we've started using the FPU yet.
1283 mov rcx, [xBP + 30h] ; pVCpu
1284 call NAME(CPUMIsGuestFPUStateActive)
1285 test al, al
1286 jnz .guest_fpu_state_active
1287
1288 ; No need to mess with XMM registers; just call the start routine and return.
1289 mov r11, [xBP + 38h] ; pfnVMRun
1290 mov r10, [xBP + 30h] ; pVCpu
1291 mov [xSP + 020h], r10
1292 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1293 mov rdx, [xBP + 018h] ; pVMCBPhys
1294 mov r8, [xBP + 020h] ; pCtx
1295 mov r9, [xBP + 028h] ; pVM
1296 call r11
1297
1298 leave
1299 ret
1300
1301ALIGNCODE(8)
1302.guest_fpu_state_active:
1303 ; Save the host XMM registers.
1304 movdqa [rsp + 040h + 000h], xmm6
1305 movdqa [rsp + 040h + 010h], xmm7
1306 movdqa [rsp + 040h + 020h], xmm8
1307 movdqa [rsp + 040h + 030h], xmm9
1308 movdqa [rsp + 040h + 040h], xmm10
1309 movdqa [rsp + 040h + 050h], xmm11
1310 movdqa [rsp + 040h + 060h], xmm12
1311 movdqa [rsp + 040h + 070h], xmm13
1312 movdqa [rsp + 040h + 080h], xmm14
1313 movdqa [rsp + 040h + 090h], xmm15
1314
1315 ; Load the full guest XMM register state.
1316 mov r10, [xBP + 020h] ; pCtx
1317 mov r10, [r10 + CPUMCTX.pXStateR0]
1318 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1319 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1320 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1321 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1322 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1323 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1324 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1325 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1326 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1327 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1328 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1329 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1330 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1331 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1332 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1333 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1334
1335 ; Make the call (same as in the other case).
1336 mov r11, [xBP + 38h] ; pfnVMRun
1337 mov r10, [xBP + 30h] ; pVCpu
1338 mov [xSP + 020h], r10
1339 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1340 mov rdx, [xBP + 018h] ; pVMCBPhys
1341 mov r8, [xBP + 020h] ; pCtx
1342 mov r9, [xBP + 028h] ; pVM
1343 call r11
1344
1345 ; Save the guest XMM registers.
1346 mov r10, [xBP + 020h] ; pCtx
1347 mov r10, [r10 + CPUMCTX.pXStateR0]
1348 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1349 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1350 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1351 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1352 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1353 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1354 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1355 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1356 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1357 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1358 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1359 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1360 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1361 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1362 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1363 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1364
1365 ; Load the host XMM registers.
1366 movdqa xmm6, [rsp + 040h + 000h]
1367 movdqa xmm7, [rsp + 040h + 010h]
1368 movdqa xmm8, [rsp + 040h + 020h]
1369 movdqa xmm9, [rsp + 040h + 030h]
1370 movdqa xmm10, [rsp + 040h + 040h]
1371 movdqa xmm11, [rsp + 040h + 050h]
1372 movdqa xmm12, [rsp + 040h + 060h]
1373 movdqa xmm13, [rsp + 040h + 070h]
1374 movdqa xmm14, [rsp + 040h + 080h]
1375 movdqa xmm15, [rsp + 040h + 090h]
1376 leave
1377 ret
1378ENDPROC HMR0SVMRunWrapXMM
1379
1380%endif ; VBOX_WITH_KERNEL_USING_XMM
1381
1382;
1383; The default setup of the StartVM routines.
1384;
1385%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1386 %define MY_NAME(name) name %+ _32
1387%else
1388 %define MY_NAME(name) name
1389%endif
1390%ifdef RT_ARCH_AMD64
1391 %define MYPUSHAD MYPUSHAD64
1392 %define MYPOPAD MYPOPAD64
1393 %define MYPUSHSEGS MYPUSHSEGS64
1394 %define MYPOPSEGS MYPOPSEGS64
1395%else
1396 %define MYPUSHAD MYPUSHAD32
1397 %define MYPOPAD MYPOPAD32
1398 %define MYPUSHSEGS MYPUSHSEGS32
1399 %define MYPOPSEGS MYPOPSEGS32
1400%endif
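; HMR0Mixed.mac supplies the actual VMXR0StartVM32/VMXR0StartVM64 and
; SVMR0VMRun/SVMR0VMRun64 bodies, expanded with the MY_NAME/MYPUSHAD/MYPUSHSEGS
; aliases set up above; for hybrid 32-bit kernels it is included a second time
; at the end of this file to generate the _64 variants.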
1401
1402%include "HMR0Mixed.mac"
1403
1404
1405%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1406 ;
1407 ; Write the wrapper procedures.
1408 ;
1409 ; These routines are probably being too paranoid about selector
1410 ; restoring, but better safe than sorry...
1411 ;
1412
1413; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
1414ALIGNCODE(16)
1415BEGINPROC VMXR0StartVM32
1416 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1417 je near NAME(VMXR0StartVM32_32)
1418
1419 ; stack frame
1420 push esi
1421 push edi
1422 push fs
1423 push gs
1424
1425 ; jmp far .thunk64
1426 db 0xea
1427 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1428
1429ALIGNCODE(16)
1430BITS 64
1431.thunk64:
1432 sub esp, 20h
1433 mov edi, [rsp + 20h + 14h] ; fResume
1434 mov esi, [rsp + 20h + 18h] ; pCtx
1435 mov edx, [rsp + 20h + 1Ch] ; pCache
1436 mov ecx, [rsp + 20h + 20h] ; pVM
1437 mov r8, [rsp + 20h + 24h] ; pVCpu
1438 call NAME(VMXR0StartVM32_64)
1439 add esp, 20h
1440 jmp far [.fpthunk32 wrt rip]
1441.fpthunk32: ; 16:32 Pointer to .thunk32.
1442 dd .thunk32, NAME(SUPR0AbsKernelCS)
1443
1444BITS 32
1445ALIGNCODE(16)
1446.thunk32:
1447 pop gs
1448 pop fs
1449 pop edi
1450 pop esi
1451 ret
1452ENDPROC VMXR0StartVM32
1453
1454
1455; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
1456ALIGNCODE(16)
1457BEGINPROC VMXR0StartVM64
1458 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1459 je .not_in_long_mode
1460
1461 ; stack frame
1462 push esi
1463 push edi
1464 push fs
1465 push gs
1466
1467 ; jmp far .thunk64
1468 db 0xea
1469 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1470
1471ALIGNCODE(16)
1472BITS 64
1473.thunk64:
1474 sub esp, 20h
1475 mov edi, [rsp + 20h + 14h] ; fResume
1476 mov esi, [rsp + 20h + 18h] ; pCtx
1477 mov edx, [rsp + 20h + 1Ch] ; pCache
1478 mov ecx, [rsp + 20h + 20h] ; pVM
1479 mov r8, [rsp + 20h + 24h] ; pVCpu
1480 call NAME(VMXR0StartVM64_64)
1481 add esp, 20h
1482 jmp far [.fpthunk32 wrt rip]
1483.fpthunk32: ; 16:32 Pointer to .thunk32.
1484 dd .thunk32, NAME(SUPR0AbsKernelCS)
1485
1486BITS 32
1487ALIGNCODE(16)
1488.thunk32:
1489 pop gs
1490 pop fs
1491 pop edi
1492 pop esi
1493 ret
1494
1495.not_in_long_mode:
1496 mov eax, VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE
1497 ret
1498ENDPROC VMXR0StartVM64
1499
1500;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
1501ALIGNCODE(16)
1502BEGINPROC SVMR0VMRun
1503 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1504 je near NAME(SVMR0VMRun_32)
1505
1506 ; stack frame
1507 push esi
1508 push edi
1509 push fs
1510 push gs
1511
1512 ; jmp far .thunk64
1513 db 0xea
1514 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1515
1516ALIGNCODE(16)
1517BITS 64
1518.thunk64:
1519 sub esp, 20h
1520 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1521 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1522 mov edx, [rsp + 20h + 24h] ; pCtx
1523 mov ecx, [rsp + 20h + 28h] ; pVM
1524 mov r8d, [rsp + 20h + 2Ch] ; pVCpu
1525 call NAME(SVMR0VMRun_64)
1526 add esp, 20h
1527 jmp far [.fpthunk32 wrt rip]
1528.fpthunk32: ; 16:32 Pointer to .thunk32.
1529 dd .thunk32, NAME(SUPR0AbsKernelCS)
1530
1531BITS 32
1532ALIGNCODE(16)
1533.thunk32:
1534 pop gs
1535 pop fs
1536 pop edi
1537 pop esi
1538 ret
1539ENDPROC SVMR0VMRun
1540
1541
1542; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
1543ALIGNCODE(16)
1544BEGINPROC SVMR0VMRun64
1545 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1546 je .not_in_long_mode
1547
1548 ; stack frame
1549 push esi
1550 push edi
1551 push fs
1552 push gs
1553
1554 ; jmp far .thunk64
1555 db 0xea
1556 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1557
1558ALIGNCODE(16)
1559BITS 64
1560.thunk64:
1561 sub esp, 20h
1562 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1563 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1564 mov edx, [rsp + 20h + 24h] ; pCtx
1565 mov ecx, [rsp + 20h + 28h] ; pVM
1566 mov r8d, [rsp + 20h + 2Ch] ; pVCpu
1567 call NAME(SVMR0VMRun64_64)
1568 add esp, 20h
1569 jmp far [.fpthunk32 wrt rip]
1570.fpthunk32: ; 16:32 Pointer to .thunk32.
1571 dd .thunk32, NAME(SUPR0AbsKernelCS)
1572
1573BITS 32
1574ALIGNCODE(16)
1575.thunk32:
1576 pop gs
1577 pop fs
1578 pop edi
1579 pop esi
1580 ret
1581
1582.not_in_long_mode:
1583 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1584 ret
1585ENDPROC SVMR0VMRun64
1586
1587 ;
1588 ; Do it a second time pretending we're a 64-bit host.
1589 ;
1590 ; This *HAS* to be done at the very end of the file to avoid restoring
1591 ; macros. So, add new code *BEFORE* this mess.
1592 ;
1593 BITS 64
1594 %undef RT_ARCH_X86
1595 %define RT_ARCH_AMD64
1596 %undef ASM_CALL64_MSC
1597 %define ASM_CALL64_GCC
1598 %define xCB 8
1599 %define xSP rsp
1600 %define xBP rbp
1601 %define xAX rax
1602 %define xBX rbx
1603 %define xCX rcx
1604 %define xDX rdx
1605 %define xDI rdi
1606 %define xSI rsi
1607 %define MY_NAME(name) name %+ _64
1608 %define MYPUSHAD MYPUSHAD64
1609 %define MYPOPAD MYPOPAD64
1610 %define MYPUSHSEGS MYPUSHSEGS64
1611 %define MYPOPSEGS MYPOPSEGS64
1612
1613 %include "HMR0Mixed.mac"
1614%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1615