VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@ 61605

Last change on this file since 61605 was 58123, checked in by vboxsync, 9 years ago

VMM: Made @param pVCpu more uniform and to the point.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 59.0 KB
Line 
1; $Id: HMR0A.asm 58123 2015-10-08 18:09:45Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 ; The OS/2 (OMF) toolchain cannot encode the VT-x/AMD-V instructions used in
 ; this file, so each of them is stubbed out with an int3 breakpoint.  Hitting
 ; any of these at runtime means a build/configuration error, not real support.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160

;;
; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation.
; VT-x restores these on VM-exit on 64-bit hosts, so reloading them manually
; can be skipped (except where noted below).
;
%ifdef RT_ARCH_AMD64
 %define VMX_SKIP_GDTR
 %define VMX_SKIP_TR
 %define VBOX_SKIP_RESTORE_SEG
 %ifdef RT_OS_DARWIN
  ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
  ; risk loading a stale LDT value or something invalid.
  %define HM_64_BIT_USE_NULL_SEL
  ; Darwin (Mavericks) uses IDTR limit to store the CPU Id so we need to restore it always.
  ; See @bugref{6875}.
 %else
  %define VMX_SKIP_IDTR
 %endif
%endif
69
70;; @def MYPUSHAD
71; Macro generating an equivalent to pushad
72
73;; @def MYPOPAD
74; Macro generating an equivalent to popad
75
76;; @def MYPUSHSEGS
77; Macro saving all segment registers on the stack.
78; @param 1 full width register name
79; @param 2 16-bit register name for \a 1.
80
;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.
85
%ifdef ASM_CALL64_GCC
 ; SysV AMD64: save/restore the callee-saved GPRs only (r15-r12, rbx).
 ; rsi/rdi are volatile in this ABI and need no preservation.
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
 %endmacro
 %macro MYPOPAD64 0
    ; Pops must mirror MYPUSHAD64 exactly (reverse order).
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro

%else ; ASM_CALL64_MSC
 ; Microsoft x64: rsi and rdi are callee-saved as well, so they are included.
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
 %endmacro
 %macro MYPOPAD64 0
    ; Pops must mirror MYPUSHAD64 exactly (reverse order).
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro
%endif
122
%ifdef VBOX_SKIP_RESTORE_SEG
 ; Segment save/restore is skipped entirely in this configuration (see the
 ; VBOX_SKIP_RESTORE_SEG define above); the macros expand to nothing.
 %macro MYPUSHSEGS64 2
 %endmacro

 %macro MYPOPSEGS64 2
 %endmacro
%else ; !VBOX_SKIP_RESTORE_SEG
 ; Trashes rax, rdx and rcx.
 ; @param 1 full width register name (scratch for selector pushes)
 ; @param 2 16-bit register name for \a 1.
 %macro MYPUSHSEGS64 2
  %ifndef HM_64_BIT_USE_NULL_SEL
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
  %endif

    ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx                         ; Save the 64-bit FS base as edx:eax.
    push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
    push    fs
  %endif

    ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx                         ; Save the 64-bit GS base as edx:eax.
    push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
    push    gs
  %endif
 %endmacro

 ; Trashes rax, rdx and rcx.
 %macro MYPOPSEGS64 2
    ; Note: do not step through this code with a debugger!
  %ifndef HM_64_BIT_USE_NULL_SEL
    ; Load null selectors first so the selector loads below cannot clobber the
    ; hidden base registers after the wrmsr restores them.
    xor     eax, eax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     gs
  %endif
    pop     rax                         ; Restore GS base from the saved edx:eax pair.
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     fs
  %endif
    pop     rax                         ; Restore FS base from the saved edx:eax pair.
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
  %endif
 %endmacro
%endif ; VBOX_SKIP_RESTORE_SEG
194
; 32-bit variants: the hardware pushad/popad and plain segment pushes suffice.
%macro MYPUSHAD32 0
    pushad
%endmacro
%macro MYPOPAD32 0
    popad
%endmacro

; Parameters unused in the 32-bit variants; kept for signature compatibility
; with the 64-bit macros.
%macro MYPUSHSEGS32 2
    push    ds
    push    es
    push    fs
    push    gs
%endmacro
%macro MYPOPSEGS32 2
    ; Pops mirror MYPUSHSEGS32 in reverse order.
    pop     gs
    pop     fs
    pop     es
    pop     ds
%endmacro

; Select the arch-appropriate implementations for the generic names.
%ifdef RT_ARCH_AMD64
 %define MYPUSHAD   MYPUSHAD64
 %define MYPOPAD    MYPOPAD64
 %define MYPUSHSEGS MYPUSHSEGS64
 %define MYPOPSEGS  MYPOPSEGS64
%else
 %define MYPUSHAD   MYPUSHAD32
 %define MYPOPAD    MYPOPAD32
 %define MYPUSHSEGS MYPUSHSEGS32
 %define MYPOPSEGS  MYPOPSEGS32
%endif
226
227
228;*********************************************************************************************************************************
229;* External Symbols *
230;*********************************************************************************************************************************
231%ifdef VBOX_WITH_KERNEL_USING_XMM
232extern NAME(CPUMIsGuestFPUStateActive)
233%endif
234
235
236BEGINCODE
237
238
;/**
; * Restores host-state fields that VT-x does not restore on VM-exit,
; * as indicated by the VMX_RESTORE_HOST_XXX flags.
; *
; * @returns VBox status code (VINF_SUCCESS on AMD64, VERR_NOT_IMPLEMENTED on x86).
; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
; */
ALIGNCODE(16)
BEGINPROC VMXRestoreHostState
%ifdef RT_ARCH_AMD64
 %ifndef ASM_CALL64_GCC
    ; Use GCC's input registers since we'll be needing both rcx and rdx further
    ; down with the wrmsr instruction. Use the R10 and R11 register for saving
    ; RDI and RSI since MSC preserve the two latter registers.
    mov     r10, rdi
    mov     r11, rsi
    mov     rdi, rcx
    mov     rsi, rdx
 %endif

    ; Test each restore flag in turn; skip the corresponding reload when clear.
    test    edi, VMX_RESTORE_HOST_GDTR
    jz      .test_idtr
    lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.test_idtr:
    test    edi, VMX_RESTORE_HOST_IDTR
    jz      .test_ds
    lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.test_ds:
    test    edi, VMX_RESTORE_HOST_SEL_DS
    jz      .test_es
    mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
    mov     ds, eax

.test_es:
    test    edi, VMX_RESTORE_HOST_SEL_ES
    jz      .test_tr
    mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
    mov     es, eax

.test_tr:
    test    edi, VMX_RESTORE_HOST_SEL_TR
    jz      .test_fs
    ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
    mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
    mov     ax, dx
    and     eax, X86_SEL_MASK_OFF_RPL                       ; Mask away TI and RPL bits leaving only the descriptor offset.
    add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
    test    edi, VMX_RESTORE_HOST_GDT_READ_ONLY             ; Read-only GDT requires temporarily clearing CR0.WP.
    jnz     .gdt_readonly
    and     dword [rax + 4], ~RT_BIT(9)                     ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr     dx
    jmp     short .test_fs
.gdt_readonly:
    mov     rcx, cr0
    mov     r9, rcx                                         ; Preserve original CR0 in r9.
    and     rcx, ~X86_CR0_WP                                ; Disable write protection so we can touch the GDT.
    mov     cr0, rcx
    and     dword [rax + 4], ~RT_BIT(9)                     ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr     dx
    mov     cr0, r9                                         ; Restore CR0 (re-enables WP if it was set).

.test_fs:
    ;
    ; When restoring the selector values for FS and GS, we'll temporarily trash
    ; the base address (at least the high 32-bit bits, but quite possibly the
    ; whole base address), the wrmsr will restore it correctly. (VT-x actually
    ; restores the base correctly when leaving guest mode, but not the selector
    ; value, so there is little problem with interrupts being enabled prior to
    ; this restore job.)
    ; We'll disable ints once for both FS and GS as that's probably faster.
    ;
    test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
    jz      .restore_success
    pushfq
    cli                                 ; (see above)

    test    edi, VMX_RESTORE_HOST_SEL_FS
    jz      .test_gs
    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
    mov     fs, eax
    mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]       ; uHostFSBase - Lo
    mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h]  ; uHostFSBase - Hi
    mov     ecx, MSR_K8_FS_BASE
    wrmsr

.test_gs:
    test    edi, VMX_RESTORE_HOST_SEL_GS
    jz      .restore_flags
    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
    mov     gs, eax
    mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]       ; uHostGSBase - Lo
    mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h]  ; uHostGSBase - Hi
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

.restore_flags:
    popfq                               ; Restore the interrupt flag saved by pushfq above.

.restore_success:
    mov     eax, VINF_SUCCESS
 %ifndef ASM_CALL64_GCC
    ; Restore RDI and RSI on MSC.
    mov     rdi, r10
    mov     rsi, r11
 %endif
%else ; RT_ARCH_X86
    mov     eax, VERR_NOT_IMPLEMENTED
%endif
    ret
ENDPROC VMXRestoreHostState
351
352
;/**
; * Dispatches an NMI to the host by raising the NMI vector directly.
; */
ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
    int 2   ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
    ret
ENDPROC VMXDispatchHostNmi
361
362
;/**
; * Executes VMWRITE, 64-bit value.
; *
; * @returns VBox status code.
; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
; */
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; Zero-extend the 32-bit field index.
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    ; 32-bit host: write the 64-bit value as two dwords using consecutive
    ; field indices (idxField and idxField + 1).
    mov     ecx, [esp + 4]              ; idxField
    lea     edx, [esp + 8]              ; &u64Data
    vmwrite ecx, [edx]                  ; low dword
    jz      .done                       ; ZF=1: VMfailValid, handled below.
    jc      .done                       ; CF=1: VMfailInvalid, handled below.
    inc     ecx
    xor     eax, eax
    vmwrite ecx, [edx + 4]              ; high dword
.done:
%endif ; RT_ARCH_X86
    ; VMX status convention: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; ZF=0: success, eax already VINF_SUCCESS.
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVmcs64
402
403
;/**
; * Executes VMREAD, 64-bit value.
; *
; * @returns VBox status code.
; * @param idxField VMCS index.
; * @param pData Where to store VM field value.
; */
;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; Zero-extend the 32-bit field index.
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    vmread  [rsi], rdi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  [rdx], rcx
 %endif
%else ; RT_ARCH_X86
    ; 32-bit host: read the 64-bit value as two dwords using consecutive
    ; field indices (idxField and idxField + 1).
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; pData
    vmread  [edx], ecx                  ; low dword
    jz      .done                       ; ZF=1: VMfailValid, handled below.
    jc      .done                       ; CF=1: VMfailInvalid, handled below.
    inc     ecx
    xor     eax, eax
    vmread  [edx + 4], ecx              ; high dword
.done:
%endif ; RT_ARCH_X86
    ; VMX status convention: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; ZF=0: success, eax already VINF_SUCCESS.
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVmcs64
444
445
;/**
; * Executes VMREAD, 32-bit value.
; *
; * @returns VBox status code.
; * @param idxField VMCS index.
; * @param pu32Data Where to store VM field value.
; */
;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; Zero-extend the 32-bit field index.
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    vmread  r10, rdi                    ; vmread needs a 64-bit operand here;
    mov     [rsi], r10d                 ; only the low dword is stored.
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  r10, rcx
    mov     [rdx], r10d
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; pu32Data
    xor     eax, eax
    vmread  [edx], ecx
%endif ; RT_ARCH_X86
    ; VMX status convention: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; ZF=0: success, eax already VINF_SUCCESS.
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVmcs32
483
484
;/**
; * Executes VMWRITE, 32-bit value.
; *
; * @returns VBox status code.
; * @param idxField VMCS index.
; * @param u32Data The 32-bit value to write.
; */
;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; Zero-extend the 32-bit field index.
    and     esi, 0ffffffffh             ; Zero-extend the 32-bit value.
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    and     edx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; u32Data
    xor     eax, eax
    vmwrite ecx, edx
%endif ; RT_ARCH_X86
    ; VMX status convention: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; ZF=0: success, eax already VINF_SUCCESS.
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVmcs32
522
523
;/**
; * Executes VMXON to enter VMX root operation on the current CPU.
; *
; * @returns VBox status code.
; * @param HCPhysVMXOn Physical address of VMXON structure.
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    %ifdef ASM_CALL64_GCC
    push    rdi                         ; vmxon takes a memory operand, so park
    %else                               ; the physical address on the stack.
    push    rcx
    %endif
    vmxon   [rsp]
%else ; RT_ARCH_X86
    xor     eax, eax
    vmxon   [esp + 4]                   ; 64-bit argument already on the caller's stack.
%endif ; RT_ARCH_X86
    jnc     .good                       ; CF=1 -> VMfailInvalid.
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end                    ; ZF=1 -> VMfailValid (vmxon failed).
    mov     eax, VERR_VMX_VMXON_FAILED

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8                      ; Drop the pushed vmxon operand.
%endif
    ret
ENDPROC VMXEnable
558
559
;/**
; * Executes VMXOFF, leaving VMX root operation on the current CPU.
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
.the_end:
    ret
ENDPROC VMXDisable
569
570
;/**
; * Executes VMCLEAR.
; *
; * @returns VBox status code.
; * @param HCPhysVmcs Physical address of VM control structure.
; */
;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXClearVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    %ifdef ASM_CALL64_GCC
    push    rdi                         ; vmclear takes a memory operand, so park
    %else                               ; the physical address on the stack.
    push    rcx
    %endif
    vmclear [rsp]
%else ; RT_ARCH_X86
    xor     eax, eax
    vmclear [esp + 4]                   ; 64-bit argument already on the caller's stack.
%endif ; RT_ARCH_X86
    jnc     .the_end                    ; CF=1 -> VMfailInvalid.
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8                      ; Drop the pushed vmclear operand.
%endif
    ret
ENDPROC VMXClearVmcs
600
601
;/**
; * Executes VMPTRLD, making the given VMCS current and active.
; *
; * @returns VBox status code.
; * @param HCPhysVmcs Physical address of VMCS structure.
; */
;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXActivateVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
    %ifdef ASM_CALL64_GCC
    push    rdi                         ; vmptrld takes a memory operand, so park
    %else                               ; the physical address on the stack.
    push    rcx
    %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]                   ; 64-bit argument already on the caller's stack.
%endif
    jnc     .the_end                    ; CF=1 -> VMfailInvalid.
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8                      ; Drop the pushed vmptrld operand.
%endif
    ret
ENDPROC VMXActivateVmcs
631
632
;/**
; * Executes VMPTRST, storing the current-VMCS pointer.
; *
; * @returns VBox status code.
; * @param [esp + 04h] gcc:rdi msc:rcx Param 1 - First parameter - Address that will receive the current pointer.
; */
;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivatedVmcs
%ifdef RT_OS_OS2
    ; VMX instructions cannot be assembled by the OS/2 toolchain (see the
    ; instruction stubs at the top of the file).
    mov     eax, VERR_NOT_SUPPORTED
    ret
%else
 %ifdef RT_ARCH_AMD64
  %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
  %else
    vmptrst qword [rcx]
  %endif
 %else
    vmptrst qword [esp+04h]
 %endif
    xor     eax, eax                    ; VINF_SUCCESS.
.the_end:
    ret
%endif
ENDPROC VMXGetActivatedVmcs
659
;/**
; * Invalidate EPT translations using INVEPT.
; *
; * The instruction is emitted as raw bytes (DB) because the assemblers used
; * do not all know the INVEPT mnemonic; the commented mnemonic above each DB
; * documents the intended encoding.  Do not alter the byte sequences.
; *
; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
; */
;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvEPT
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; Zero-extend the flush type.
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
; invept rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x80, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
; invept rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
 %endif
%else
    mov     ecx, [esp + 4]              ; enmFlush
    mov     edx, [esp + 8]              ; pDescriptor
    xor     eax, eax
; invept ecx, qword [edx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
%endif
    ; VMX status convention: CF=1 -> VMfailInvalid, ZF=1 -> VMfailValid.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; ZF=0: success, eax already VINF_SUCCESS.
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret
ENDPROC VMXR0InvEPT
695
696
;/**
; * Invalidate VPID-tagged TLB entries using INVVPID.
; *
; * The instruction is emitted as raw bytes (DB) because the assemblers used
; * do not all know the INVVPID mnemonic; the commented mnemonic above each DB
; * documents the intended encoding.  Do not alter the byte sequences.
; *
; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
; */
;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvVPID
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; Zero-extend the flush type.
    xor     rax, rax                    ; Pre-load VINF_SUCCESS (0).
; invvpid rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x81, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
; invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
 %endif
%else
    mov     ecx, [esp + 4]              ; enmFlush
    mov     edx, [esp + 8]              ; pDescriptor
    xor     eax, eax
; invvpid ecx, qword [edx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
%endif
    ; VMX status convention: CF=1 -> VMfailInvalid, ZF=1 -> VMfailValid.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; ZF=0: success, eax already VINF_SUCCESS.
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret
ENDPROC VMXR0InvVPID
732
733
%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA (AMD-V: invalidate the TLB mapping for a guest virtual page
; in the given ASID).  64-bit guest variant: the full pointer is used.
;
; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi                    ; invlpga implicitly takes the address in rAX
    mov     rcx, rsi                    ; ... and the ASID in ecx.
 %else
    mov     rax, rcx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%else ; GC_ARCH_BITS != 64
;;
; Executes INVLPGA (AMD-V: invalidate the TLB mapping for a guest virtual page
; in the given ASID).  32-bit guest variant: the pointer is zero-extended.
;
; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi                    ; Zero-extend the 32-bit guest pointer.
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ; values also set the upper 32 bits of the register to zero. Consequently
    ; there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%endif ; GC_ARCH_BITS != 64
789
790
791%ifdef VBOX_WITH_KERNEL_USING_XMM
792
;;
; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
; load the guest ones when necessary.
;
; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
;
; @returns eax
;
; @param fResumeVM msc:rcx
; @param pCtx msc:rdx
; @param pVMCSCache msc:r8
; @param pVM msc:r9
; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
; @param pfnStartVM msc:[rbp+38h]
;
; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
;
; ASSUMING 64-bit and windows for now.
ALIGNCODE(16)
BEGINPROC HMR0VMXStartVMWrapXMM
    push    xBP
    mov     xBP, xSP
    sub     xSP, 0a0h + 040h            ; Don't bother optimizing the frame size.
                                        ; 0a0h = 10 XMM save slots, 040h = shadow space + 5th arg.

    ; Spill input parameters into the home area above the frame.
    mov     [xBP + 010h], rcx           ; fResumeVM
    mov     [xBP + 018h], rdx           ; pCtx
    mov     [xBP + 020h], r8            ; pVMCSCache
    mov     [xBP + 028h], r9            ; pVM

    ; Ask CPUM whether we've started using the FPU yet.
    mov     rcx, [xBP + 30h]            ; pVCpu
    call    NAME(CPUMIsGuestFPUStateActive)
    test    al, al
    jnz     .guest_fpu_state_active

    ; No need to mess with XMM registers just call the start routine and return.
    mov     r11, [xBP + 38h]            ; pfnStartVM
    mov     r10, [xBP + 30h]            ; pVCpu
    mov     [xSP + 020h], r10           ; 5th argument goes on the stack (MSC x64).
    mov     rcx, [xBP + 010h]           ; fResumeVM
    mov     rdx, [xBP + 018h]           ; pCtx
    mov     r8,  [xBP + 020h]           ; pVMCSCache
    mov     r9,  [xBP + 028h]           ; pVM
    call    r11

    leave
    ret

ALIGNCODE(8)
.guest_fpu_state_active:
    ; Save the non-volatile host XMM registers.
    ; (xmm6-xmm15 are callee-saved in the MSC x64 ABI.)
    movdqa  [rsp + 040h + 000h], xmm6
    movdqa  [rsp + 040h + 010h], xmm7
    movdqa  [rsp + 040h + 020h], xmm8
    movdqa  [rsp + 040h + 030h], xmm9
    movdqa  [rsp + 040h + 040h], xmm10
    movdqa  [rsp + 040h + 050h], xmm11
    movdqa  [rsp + 040h + 060h], xmm12
    movdqa  [rsp + 040h + 070h], xmm13
    movdqa  [rsp + 040h + 080h], xmm14
    movdqa  [rsp + 040h + 090h], xmm15

    mov     r10, [xBP + 018h]           ; pCtx
    mov     eax, [r10 + CPUMCTX.fXStateMask]
    test    eax, eax
    jz      .guest_fpu_state_manually

    ;
    ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
    ;
    and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
    xor     edx, edx                    ; xrstor mask is edx:eax; high half is zero.
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    xrstor  [r10]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]            ; pfnStartVM
    mov     r10, [xBP + 30h]            ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]           ; fResumeVM
    mov     rdx, [xBP + 018h]           ; pCtx
    mov     r8,  [xBP + 020h]           ; pVMCSCache
    mov     r9,  [xBP + 028h]           ; pVM
    call    r11

    mov     r11d, eax                   ; save return value (xsave below uses eax)

    ; Save the guest XMM registers.
    mov     r10, [xBP + 018h]           ; pCtx
    mov     eax, [r10 + CPUMCTX.fXStateMask]
    and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
    xor     edx, edx
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    xsave   [r10]

    mov     eax, r11d                   ; restore return value.

.restore_non_volatile_host_xmm_regs:
    ; Load the non-volatile host XMM registers.
    movdqa  xmm6,  [rsp + 040h + 000h]
    movdqa  xmm7,  [rsp + 040h + 010h]
    movdqa  xmm8,  [rsp + 040h + 020h]
    movdqa  xmm9,  [rsp + 040h + 030h]
    movdqa  xmm10, [rsp + 040h + 040h]
    movdqa  xmm11, [rsp + 040h + 050h]
    movdqa  xmm12, [rsp + 040h + 060h]
    movdqa  xmm13, [rsp + 040h + 070h]
    movdqa  xmm14, [rsp + 040h + 080h]
    movdqa  xmm15, [rsp + 040h + 090h]
    leave
    ret

    ;
    ; No XSAVE, load and save the guest XMM registers manually.
    ;
.guest_fpu_state_manually:
    ; Load the full guest XMM register state.
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    movdqa  xmm0,  [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
    movdqa  xmm1,  [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
    movdqa  xmm2,  [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
    movdqa  xmm3,  [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
    movdqa  xmm4,  [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
    movdqa  xmm5,  [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
    movdqa  xmm6,  [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
    movdqa  xmm7,  [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
    movdqa  xmm8,  [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
    movdqa  xmm9,  [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
    movdqa  xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
    movdqa  xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
    movdqa  xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
    movdqa  xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
    movdqa  xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
    movdqa  xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]            ; pfnStartVM
    mov     r10, [xBP + 30h]            ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]           ; fResumeVM
    mov     rdx, [xBP + 018h]           ; pCtx
    mov     r8,  [xBP + 020h]           ; pVMCSCache
    mov     r9,  [xBP + 028h]           ; pVM
    call    r11

    ; Save the guest XMM registers.
    mov     r10, [xBP + 018h]           ; pCtx
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
    jmp     .restore_non_volatile_host_xmm_regs
ENDPROC HMR0VMXStartVMWrapXMM
960
;;
; Wrapper around svm.pfnVMRun that preserves host XMM registers and
; load the guest ones when necessary.
;
; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
;
; @returns eax
;
; @param pVMCBHostPhys msc:rcx
; @param pVMCBPhys msc:rdx
; @param pCtx msc:r8
; @param pVM msc:r9
; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
; @param pfnVMRun msc:[rbp+38h]
;
; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
;
; ASSUMING 64-bit and windows for now.
ALIGNCODE(16)
BEGINPROC HMR0SVMRunWrapXMM
    push    xBP
    mov     xBP, xSP
    sub     xSP, 0a0h + 040h            ; Don't bother optimizing the frame size.
                                        ; 0a0h = 10 XMM save slots, 040h = shadow space + 5th arg.

    ; Spill input parameters into the home area above the frame.
    mov     [xBP + 010h], rcx           ; pVMCBHostPhys
    mov     [xBP + 018h], rdx           ; pVMCBPhys
    mov     [xBP + 020h], r8            ; pCtx
    mov     [xBP + 028h], r9            ; pVM

    ; Ask CPUM whether we've started using the FPU yet.
    mov     rcx, [xBP + 30h]            ; pVCpu
    call    NAME(CPUMIsGuestFPUStateActive)
    test    al, al
    jnz     .guest_fpu_state_active

    ; No need to mess with XMM registers just call the start routine and return.
    mov     r11, [xBP + 38h]            ; pfnVMRun
    mov     r10, [xBP + 30h]            ; pVCpu
    mov     [xSP + 020h], r10           ; 5th argument goes on the stack (MSC x64).
    mov     rcx, [xBP + 010h]           ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]           ; pVMCBPhys
    mov     r8,  [xBP + 020h]           ; pCtx
    mov     r9,  [xBP + 028h]           ; pVM
    call    r11

    leave
    ret

ALIGNCODE(8)
.guest_fpu_state_active:
    ; Save the non-volatile host XMM registers.
    ; (xmm6-xmm15 are callee-saved in the MSC x64 ABI.)
    movdqa  [rsp + 040h + 000h], xmm6
    movdqa  [rsp + 040h + 010h], xmm7
    movdqa  [rsp + 040h + 020h], xmm8
    movdqa  [rsp + 040h + 030h], xmm9
    movdqa  [rsp + 040h + 040h], xmm10
    movdqa  [rsp + 040h + 050h], xmm11
    movdqa  [rsp + 040h + 060h], xmm12
    movdqa  [rsp + 040h + 070h], xmm13
    movdqa  [rsp + 040h + 080h], xmm14
    movdqa  [rsp + 040h + 090h], xmm15

    mov     r10, [xBP + 020h]           ; pCtx
    mov     eax, [r10 + CPUMCTX.fXStateMask]
    test    eax, eax
    jz      .guest_fpu_state_manually

    ;
    ; Using XSAVE.
    ;
    and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
    xor     edx, edx                    ; xrstor mask is edx:eax; high half is zero.
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    xrstor  [r10]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]            ; pfnVMRun
    mov     r10, [xBP + 30h]            ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]           ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]           ; pVMCBPhys
    mov     r8,  [xBP + 020h]           ; pCtx
    mov     r9,  [xBP + 028h]           ; pVM
    call    r11

    mov     r11d, eax                   ; save return value (xsave below uses eax)

    ; Save the guest XMM registers.
    mov     r10, [xBP + 020h]           ; pCtx
    mov     eax, [r10 + CPUMCTX.fXStateMask]
    and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
    xor     edx, edx
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    xsave   [r10]

    mov     eax, r11d                   ; restore return value.

.restore_non_volatile_host_xmm_regs:
    ; Load the non-volatile host XMM registers.
    movdqa  xmm6,  [rsp + 040h + 000h]
    movdqa  xmm7,  [rsp + 040h + 010h]
    movdqa  xmm8,  [rsp + 040h + 020h]
    movdqa  xmm9,  [rsp + 040h + 030h]
    movdqa  xmm10, [rsp + 040h + 040h]
    movdqa  xmm11, [rsp + 040h + 050h]
    movdqa  xmm12, [rsp + 040h + 060h]
    movdqa  xmm13, [rsp + 040h + 070h]
    movdqa  xmm14, [rsp + 040h + 080h]
    movdqa  xmm15, [rsp + 040h + 090h]
    leave
    ret

    ;
    ; No XSAVE, load and save the guest XMM registers manually.
    ;
.guest_fpu_state_manually:
    ; Load the full guest XMM register state.
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    movdqa  xmm0,  [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
    movdqa  xmm1,  [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
    movdqa  xmm2,  [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
    movdqa  xmm3,  [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
    movdqa  xmm4,  [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
    movdqa  xmm5,  [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
    movdqa  xmm6,  [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
    movdqa  xmm7,  [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
    movdqa  xmm8,  [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
    movdqa  xmm9,  [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
    movdqa  xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
    movdqa  xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
    movdqa  xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
    movdqa  xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
    movdqa  xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
    movdqa  xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]            ; pfnVMRun
    mov     r10, [xBP + 30h]            ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]           ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]           ; pVMCBPhys
    mov     r8,  [xBP + 020h]           ; pCtx
    mov     r9,  [xBP + 028h]           ; pVM
    call    r11

    ; Save the guest XMM registers.
    mov     r10, [xBP + 020h]           ; pCtx
    mov     r10, [r10 + CPUMCTX.pXStateR0]
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
    movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
    jmp     .restore_non_volatile_host_xmm_regs
ENDPROC HMR0SVMRunWrapXMM
1128
1129%endif ; VBOX_WITH_KERNEL_USING_XMM
1130
1131
1132;; @def RESTORE_STATE_VM32
1133; Macro restoring essential host state and updating guest state
1134; for common host, 32-bit guest for VT-x.
; Note(review): assumes the stack layout set up by VMXR0StartVM32 at the
; point of VM-exit (top of stack downwards): IDTR image, GDTR image, saved
; TR, saved LDTR, pCtx, (pCache when VMX_USE_CACHED_VMCS_ACCESSES,) saved
; segment regs, XCR0-restore marker, saved host GPRs; and that the GPRs
; still hold the guest values on entry. Ends by restoring the host GPRs
; via MYPOPAD, so everything trashed in between is recovered there.
1135%macro RESTORE_STATE_VM32 0
1136 ; Restore base and limit of the IDTR & GDTR.
1137 %ifndef VMX_SKIP_IDTR
1138 lidt [xSP]
1139 add xSP, xCB * 2
1140 %endif
1141 %ifndef VMX_SKIP_GDTR
1142 lgdt [xSP]
1143 add xSP, xCB * 2
1144 %endif
1145
1146 push xDI
1147 %ifndef VMX_SKIP_TR
1148 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
1149 %else
1150 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
1151 %endif
1152
; Write the guest GPRs back into the CPU context structure.
1153 mov [ss:xDI + CPUMCTX.eax], eax
1154 mov [ss:xDI + CPUMCTX.ebx], ebx
1155 mov [ss:xDI + CPUMCTX.ecx], ecx
1156 mov [ss:xDI + CPUMCTX.edx], edx
1157 mov [ss:xDI + CPUMCTX.esi], esi
1158 mov [ss:xDI + CPUMCTX.ebp], ebp
1159 mov xAX, cr2
1160 mov [ss:xDI + CPUMCTX.cr2], xAX
1161
1162 %ifdef RT_ARCH_AMD64
1163 pop xAX ; The guest edi we pushed above.
1164 mov dword [ss:xDI + CPUMCTX.edi], eax
1165 %else
1166 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
1167 %endif
1168
1169 %ifndef VMX_SKIP_TR
1170 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1171 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1172 ; @todo get rid of sgdt
1173 pop xBX ; Saved TR
1174 sub xSP, xCB * 2
1175 sgdt [xSP]
1176 mov xAX, xBX
1177 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1178 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1179 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1180 ltr bx
1181 add xSP, xCB * 2
1182 %endif
1183
1184 pop xAX ; Saved LDTR
1185 %ifdef RT_ARCH_AMD64
1186 cmp eax, 0
1187 je %%skip_ldt_write32
1188 %endif
1189 lldt ax
1190
1191%%skip_ldt_write32:
1192 add xSP, xCB ; pCtx
1193
1194 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1195 pop xDX ; Saved pCache
1196
1197 ; Note! If we get here as a result of invalid VMCS pointer, all the following
1198 ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
1199 ; trouble only just less efficient.
1200 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
1201 cmp ecx, 0 ; Can't happen
1202 je %%no_cached_read32
1203 jmp %%cached_read32
1204
1205ALIGN(16)
1206%%cached_read32:
; Read the cached VMCS fields back from the VMCS, iterating downwards
; from cValidEntries-1 to 0.
1207 dec xCX
1208 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
1209 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1210 cmp xCX, 0
1211 jnz %%cached_read32
1212%%no_cached_read32:
1213 %endif
1214
1215 ; Restore segment registers.
1216 MYPOPSEGS xAX, ax
1217
1218 ; Restore the host XCR0 if necessary.
; (A zero marker was pushed earlier to mean "restore"; in that case the
; saved host edx:eax XCR0 value follows the marker on the stack.)
1219 pop xCX
1220 test ecx, ecx
1221 jnz %%xcr0_after_skip
1222 pop xAX
1223 pop xDX
1224 xsetbv ; ecx is already zero.
1225%%xcr0_after_skip:
1226
1227 ; Restore general purpose registers.
1228 MYPOPAD
1229%endmacro
1230
1231
1232;;
1233; Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
1234;
1235; @returns VBox status code
1236; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlauch/vmresume.
1237; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
1238; @param pCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
1239; @param pVM x86:[ebp+14],msc:r9, gcc:rcx The cross context VM structure.
1240; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
;
; Note(review): interrupts are disabled (cli) right after the pushf and the
; original flags are only restored by the popf on exit; the guest is
; entered/exited in between. All host GPRs are preserved via
; MYPUSHAD/MYPOPAD. The stack layout built here must match what
; RESTORE_STATE_VM32 unwinds.
1241;
1242ALIGNCODE(16)
1243BEGINPROC VMXR0StartVM32
1244 push xBP
1245 mov xBP, xSP
1246
1247 pushf
1248 cli
1249
1250 ;
1251 ; Save all general purpose host registers.
1252 ;
1253 MYPUSHAD
1254
1255 ;
1256 ; First we have to write some final guest CPU context registers.
1257 ;
; Store the address of the .vmlaunch_done label as HOST_RIP so a VM-exit
; resumes execution there.
1258 mov eax, VMX_VMCS_HOST_RIP
1259%ifdef RT_ARCH_AMD64
1260 lea r10, [.vmlaunch_done wrt rip]
1261 vmwrite rax, r10
1262%else
1263 mov ecx, .vmlaunch_done
1264 vmwrite eax, ecx
1265%endif
1266 ; Note: assumes success!
1267
1268 ;
1269 ; Unify input parameter registers.
1270 ;
1271%ifdef RT_ARCH_AMD64
1272 %ifdef ASM_CALL64_GCC
1273 ; fResume already in rdi
1274 ; pCtx already in rsi
1275 mov rbx, rdx ; pCache
1276 %else
1277 mov rdi, rcx ; fResume
1278 mov rsi, rdx ; pCtx
1279 mov rbx, r8 ; pCache
1280 %endif
1281%else
1282 mov edi, [ebp + 8] ; fResume
1283 mov esi, [ebp + 12] ; pCtx
1284 mov ebx, [ebp + 16] ; pCache
1285%endif
1286
1287 ;
1288 ; Save the host XCR0 and load the guest one if necessary.
1289 ; Note! Trashes rdx and rcx.
1290 ;
1291%ifdef ASM_CALL64_MSC
1292 mov rax, [xBP + 30h] ; pVCpu
1293%elifdef ASM_CALL64_GCC
1294 mov rax, r8 ; pVCpu
1295%else
1296 mov eax, [xBP + 18h] ; pVCpu
1297%endif
1298 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1299 jz .xcr0_before_skip
1300
1301 xor ecx, ecx
1302 xgetbv ; Save the host one on the stack.
1303 push xDX
1304 push xAX
1305
1306 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1307 mov edx, [xSI + CPUMCTX.aXcr + 4]
1308 xor ecx, ecx ; paranoia
1309 xsetbv
1310
1311 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1312 jmp .xcr0_before_done
1313
1314.xcr0_before_skip:
1315 push 3fh ; indicate that we need not.
1316.xcr0_before_done:
1317
1318 ;
1319 ; Save segment registers.
1320 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1321 ;
1322 MYPUSHSEGS xAX, ax
1323
1324%ifdef VMX_USE_CACHED_VMCS_ACCESSES
; Flush all pending cached VMCS field writes to the VMCS before entering
; the guest, then mark the write cache empty.
1325 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
1326 cmp ecx, 0
1327 je .no_cached_writes
1328 mov edx, ecx
1329 mov ecx, 0
1330 jmp .cached_write
1331
1332ALIGN(16)
1333.cached_write:
1334 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
1335 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
1336 inc xCX
1337 cmp xCX, xDX
1338 jl .cached_write
1339
1340 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
1341.no_cached_writes:
1342
1343 ; Save the pCache pointer.
1344 push xBX
1345%endif
1346
1347 ; Save the pCtx pointer.
1348 push xSI
1349
1350 ; Save host LDTR.
1351 xor eax, eax
1352 sldt ax
1353 push xAX
1354
1355%ifndef VMX_SKIP_TR
1356 ; The host TR limit is reset to 0x67; save & restore it manually.
1357 str eax
1358 push xAX
1359%endif
1360
1361%ifndef VMX_SKIP_GDTR
1362 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1363 sub xSP, xCB * 2
1364 sgdt [xSP]
1365%endif
1366%ifndef VMX_SKIP_IDTR
1367 sub xSP, xCB * 2
1368 sidt [xSP]
1369%endif
1370
1371 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1372 mov xBX, [xSI + CPUMCTX.cr2]
1373 mov xDX, cr2
1374 cmp xBX, xDX
1375 je .skip_cr2_write32
1376 mov cr2, xBX
1377
1378.skip_cr2_write32:
; Record the final stack pointer as HOST_RSP so the VM-exit lands with
; exactly this layout for RESTORE_STATE_VM32.
1379 mov eax, VMX_VMCS_HOST_RSP
1380 vmwrite xAX, xSP
1381 ; Note: assumes success!
1382 ; Don't mess with ESP anymore!!!
1383
1384 ; Load guest general purpose registers.
1385 mov eax, [xSI + CPUMCTX.eax]
1386 mov ebx, [xSI + CPUMCTX.ebx]
1387 mov ecx, [xSI + CPUMCTX.ecx]
1388 mov edx, [xSI + CPUMCTX.edx]
1389 mov ebp, [xSI + CPUMCTX.ebp]
1390
1391 ; Resume or start VM?
1392 cmp xDI, 0 ; fResume
1393
; Note: the mov instructions below do not modify eflags, so the result of
; the cmp above is still valid at the je.
1394 ; Load guest edi & esi.
1395 mov edi, [xSI + CPUMCTX.edi]
1396 mov esi, [xSI + CPUMCTX.esi]
1397
1398 je .vmlaunch_launch
1399
1400 vmresume
1401 jc near .vmxstart_invalid_vmcs_ptr
1402 jz near .vmxstart_start_failed
1403 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
1404
1405.vmlaunch_launch:
1406 vmlaunch
1407 jc near .vmxstart_invalid_vmcs_ptr
1408 jz near .vmxstart_start_failed
1409 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
1410
1411ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
1412.vmlaunch_done:
1413 RESTORE_STATE_VM32
1414 mov eax, VINF_SUCCESS
1415
1416.vmstart_end:
1417 popf
1418 pop xBP
1419 ret
1420
1421.vmxstart_invalid_vmcs_ptr:
1422 RESTORE_STATE_VM32
1423 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1424 jmp .vmstart_end
1425
1426.vmxstart_start_failed:
1427 RESTORE_STATE_VM32
1428 mov eax, VERR_VMX_UNABLE_TO_START_VM
1429 jmp .vmstart_end
1430
1431ENDPROC VMXR0StartVM32
1432
1433
1434%ifdef RT_ARCH_AMD64
1435;; @def RESTORE_STATE_VM64
1436; Macro restoring essential host state and updating guest state
1437; for 64-bit host, 64-bit guest for VT-x.
1438;
; Note(review): assumes the stack layout set up by VMXR0StartVM64 at the
; point of VM-exit (top of stack downwards): IDTR image, GDTR image, saved
; TR, saved LDTR, pCtx, (pCache when VMX_USE_CACHED_VMCS_ACCESSES,) saved
; segment regs, XCR0-restore marker, saved host GPRs; and that the GPRs
; still hold the guest values on entry. Ends by restoring the host GPRs
; via MYPOPAD.
1439%macro RESTORE_STATE_VM64 0
1440 ; Restore base and limit of the IDTR & GDTR
1441 %ifndef VMX_SKIP_IDTR
1442 lidt [xSP]
1443 add xSP, xCB * 2
1444 %endif
1445 %ifndef VMX_SKIP_GDTR
1446 lgdt [xSP]
1447 add xSP, xCB * 2
1448 %endif
1449
1450 push xDI
1451 %ifndef VMX_SKIP_TR
1452 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1453 %else
1454 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1455 %endif
1456
; Write the guest GPRs back into the CPU context structure.
1457 mov qword [xDI + CPUMCTX.eax], rax
1458 mov qword [xDI + CPUMCTX.ebx], rbx
1459 mov qword [xDI + CPUMCTX.ecx], rcx
1460 mov qword [xDI + CPUMCTX.edx], rdx
1461 mov qword [xDI + CPUMCTX.esi], rsi
1462 mov qword [xDI + CPUMCTX.ebp], rbp
1463 mov qword [xDI + CPUMCTX.r8], r8
1464 mov qword [xDI + CPUMCTX.r9], r9
1465 mov qword [xDI + CPUMCTX.r10], r10
1466 mov qword [xDI + CPUMCTX.r11], r11
1467 mov qword [xDI + CPUMCTX.r12], r12
1468 mov qword [xDI + CPUMCTX.r13], r13
1469 mov qword [xDI + CPUMCTX.r14], r14
1470 mov qword [xDI + CPUMCTX.r15], r15
1471 mov rax, cr2
1472 mov qword [xDI + CPUMCTX.cr2], rax
1473
1474 pop xAX ; The guest rdi we pushed above
1475 mov qword [xDI + CPUMCTX.edi], rax
1476
1477 %ifndef VMX_SKIP_TR
1478 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1479 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
1480 ; @todo get rid of sgdt
1481 pop xBX ; Saved TR
1482 sub xSP, xCB * 2
1483 sgdt [xSP]
1484 mov xAX, xBX
1485 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1486 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1487 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1488 ltr bx
1489 add xSP, xCB * 2
1490 %endif
1491
1492 pop xAX ; Saved LDTR
1493 cmp eax, 0
1494 je %%skip_ldt_write64
1495 lldt ax
1496
1497%%skip_ldt_write64:
1498 pop xSI ; pCtx (needed in rsi by the macros below)
1499
1500 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1501 pop xDX ; Saved pCache
1502
1503 ; Note! If we get here as a result of invalid VMCS pointer, all the following
1504 ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
1505 ; trouble only just less efficient.
1506 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
1507 cmp ecx, 0 ; Can't happen
1508 je %%no_cached_read64
1509 jmp %%cached_read64
1510
1511ALIGN(16)
1512%%cached_read64:
; Read the cached VMCS fields back from the VMCS, iterating downwards
; from cValidEntries-1 to 0.
1513 dec xCX
1514 mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
1515 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1516 cmp xCX, 0
1517 jnz %%cached_read64
1518%%no_cached_read64:
1519 %endif
1520
1521 ; Restore segment registers.
1522 MYPOPSEGS xAX, ax
1523
1524 ; Restore the host XCR0 if necessary.
; (A zero marker was pushed earlier to mean "restore"; in that case the
; saved host edx:eax XCR0 value follows the marker on the stack.)
1525 pop xCX
1526 test ecx, ecx
1527 jnz %%xcr0_after_skip
1528 pop xAX
1529 pop xDX
1530 xsetbv ; ecx is already zero.
1531%%xcr0_after_skip:
1532
1533 ; Restore general purpose registers.
1534 MYPOPAD
1535%endmacro
1536
1537
1538;;
1539; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
1540;
1541; @returns VBox status code
1542; @param fResume msc:rcx, gcc:rdi Whether to use vmlauch/vmresume.
1543; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1544; @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
1545; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1546; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.
;
; Note(review): interrupts are disabled (cli) right after the pushf and the
; original flags are only restored by the popf on exit; the guest is
; entered/exited in between. All host GPRs are preserved via
; MYPUSHAD/MYPOPAD. The stack layout built here must match what
; RESTORE_STATE_VM64 unwinds.
1547;
1548ALIGNCODE(16)
1549BEGINPROC VMXR0StartVM64
1550 push xBP
1551 mov xBP, xSP
1552
1553 pushf
1554 cli
1555
1556 ; Save all general purpose host registers.
1557 MYPUSHAD
1558
1559 ; First we have to save some final CPU context registers.
; Store the address of the .vmlaunch64_done label as HOST_RIP so a
; VM-exit resumes execution there.
1560 lea r10, [.vmlaunch64_done wrt rip]
1561 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
1562 vmwrite rax, r10
1563 ; Note: assumes success!
1564
1565 ;
1566 ; Unify the input parameter registers.
1567 ;
1568%ifdef ASM_CALL64_GCC
1569 ; fResume already in rdi
1570 ; pCtx already in rsi
1571 mov rbx, rdx ; pCache
1572%else
1573 mov rdi, rcx ; fResume
1574 mov rsi, rdx ; pCtx
1575 mov rbx, r8 ; pCache
1576%endif
1577
1578 ;
1579 ; Save the host XCR0 and load the guest one if necessary.
1580 ; Note! Trashes rdx and rcx.
1581 ;
1582%ifdef ASM_CALL64_MSC
1583 mov rax, [xBP + 30h] ; pVCpu
1584%else
1585 mov rax, r8 ; pVCpu
1586%endif
1587 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1588 jz .xcr0_before_skip
1589
1590 xor ecx, ecx
1591 xgetbv ; Save the host one on the stack.
1592 push xDX
1593 push xAX
1594
1595 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1596 mov edx, [xSI + CPUMCTX.aXcr + 4]
1597 xor ecx, ecx ; paranoia
1598 xsetbv
1599
1600 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1601 jmp .xcr0_before_done
1602
1603.xcr0_before_skip:
1604 push 3fh ; indicate that we need not.
1605.xcr0_before_done:
1606
1607 ;
1608 ; Save segment registers.
1609 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1610 ;
1611 MYPUSHSEGS xAX, ax
1612
1613%ifdef VMX_USE_CACHED_VMCS_ACCESSES
; Flush all pending cached VMCS field writes to the VMCS before entering
; the guest, then mark the write cache empty.
1614 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
1615 cmp ecx, 0
1616 je .no_cached_writes
1617 mov edx, ecx
1618 mov ecx, 0
1619 jmp .cached_write
1620
1621ALIGN(16)
1622.cached_write:
1623 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
1624 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
1625 inc xCX
1626 cmp xCX, xDX
1627 jl .cached_write
1628
1629 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
1630.no_cached_writes:
1631
1632 ; Save the pCache pointer.
1633 push xBX
1634%endif
1635
1636 ; Save the pCtx pointer.
1637 push xSI
1638
1639 ; Save host LDTR.
1640 xor eax, eax
1641 sldt ax
1642 push xAX
1643
1644%ifndef VMX_SKIP_TR
1645 ; The host TR limit is reset to 0x67; save & restore it manually.
1646 str eax
1647 push xAX
1648%endif
1649
1650%ifndef VMX_SKIP_GDTR
1651 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1652 sub xSP, xCB * 2
1653 sgdt [xSP]
1654%endif
1655%ifndef VMX_SKIP_IDTR
1656 sub xSP, xCB * 2
1657 sidt [xSP]
1658%endif
1659
1660 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1661 mov rbx, qword [xSI + CPUMCTX.cr2]
1662 mov rdx, cr2
1663 cmp rbx, rdx
1664 je .skip_cr2_write
1665 mov cr2, rbx
1666
1667.skip_cr2_write:
; Record the final stack pointer as HOST_RSP so the VM-exit lands with
; exactly this layout for RESTORE_STATE_VM64.
1668 mov eax, VMX_VMCS_HOST_RSP
1669 vmwrite xAX, xSP
1670 ; Note: assumes success!
1671 ; Don't mess with ESP anymore!!!
1672
1673 ; Load guest general purpose registers.
1674 mov rax, qword [xSI + CPUMCTX.eax]
1675 mov rbx, qword [xSI + CPUMCTX.ebx]
1676 mov rcx, qword [xSI + CPUMCTX.ecx]
1677 mov rdx, qword [xSI + CPUMCTX.edx]
1678 mov rbp, qword [xSI + CPUMCTX.ebp]
1679 mov r8, qword [xSI + CPUMCTX.r8]
1680 mov r9, qword [xSI + CPUMCTX.r9]
1681 mov r10, qword [xSI + CPUMCTX.r10]
1682 mov r11, qword [xSI + CPUMCTX.r11]
1683 mov r12, qword [xSI + CPUMCTX.r12]
1684 mov r13, qword [xSI + CPUMCTX.r13]
1685 mov r14, qword [xSI + CPUMCTX.r14]
1686 mov r15, qword [xSI + CPUMCTX.r15]
1687
1688 ; Resume or start VM?
1689 cmp xDI, 0 ; fResume
1690
; Note: the mov instructions below do not modify rflags, so the result of
; the cmp above is still valid at the je.
1691 ; Load guest rdi & rsi.
1692 mov rdi, qword [xSI + CPUMCTX.edi]
1693 mov rsi, qword [xSI + CPUMCTX.esi]
1694
1695 je .vmlaunch64_launch
1696
1697 vmresume
1698 jc near .vmxstart64_invalid_vmcs_ptr
1699 jz near .vmxstart64_start_failed
1700 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
1701
1702.vmlaunch64_launch:
1703 vmlaunch
1704 jc near .vmxstart64_invalid_vmcs_ptr
1705 jz near .vmxstart64_start_failed
1706 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1707
1708ALIGNCODE(16)
1709.vmlaunch64_done:
1710 RESTORE_STATE_VM64
1711 mov eax, VINF_SUCCESS
1712
1713.vmstart64_end:
1714 popf
1715 pop xBP
1716 ret
1717
1718.vmxstart64_invalid_vmcs_ptr:
1719 RESTORE_STATE_VM64
1720 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1721 jmp .vmstart64_end
1722
1723.vmxstart64_start_failed:
1724 RESTORE_STATE_VM64
1725 mov eax, VERR_VMX_UNABLE_TO_START_VM
1726 jmp .vmstart64_end
1727ENDPROC VMXR0StartVM64
1728%endif ; RT_ARCH_AMD64
1729
1730
1731;;
1732; Prepares for and executes VMRUN (32 bits guests)
1733;
1734; @returns VBox status code
1735; @param HCPhysHostVMCB msc:rcx, gcc:rdi Physical address of host VMCB.
1736; @param HCPhysGuestVMCB msc:rdx, gcc:rsi Physical address of guest VMCB.
1737; @param pCtx msc:r8, gcc:rdx Pointer to the guest CPU-context.
1738; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1739; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
;
; Note(review): on 64-bit hosts the register parameters are first pushed
; into a fake cdecl stack frame so that the same [xBP + ...] offsets work
; for all host calling conventions (and match the native x86 cdecl frame).
1740;
1741ALIGNCODE(16)
1742BEGINPROC SVMR0VMRun
1743%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
1744 %ifdef ASM_CALL64_GCC
1745 push r8
1746 push rcx
1747 push rdx
1748 push rsi
1749 push rdi
1750 %else
1751 mov rax, [rsp + 28h]
1752 push rax ; pVCpu
1753 push r9 ; pVM
1754 push r8 ; pCtx
1755 push rdx ; HCPHYSGuestVMCB
1756 push rcx ; HCPhysHostVMCB
1757 %endif
1758 push 0
1759%endif
1760 push xBP
1761 mov xBP, xSP
1762 pushf
1763
1764 ;
1765 ; Save all general purpose host registers.
1766 ;
1767 MYPUSHAD
1768
1769 ;
1770 ; Load pCtx into xSI.
1771 ;
1772 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1773
1774 ;
1775 ; Save the host XCR0 and load the guest one if necessary.
1776 ;
1777 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
1778 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1779 jz .xcr0_before_skip
1780
1781 xor ecx, ecx
1782 xgetbv ; Save the host one on the stack.
1783 push xDX
1784 push xAX
1785
; Reload pCtx; xSI was not touched above, but the reload keeps this
; sequence self-contained.
1786 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1787 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1788 mov edx, [xSI + CPUMCTX.aXcr + 4]
1789 xor ecx, ecx ; paranoia
1790 xsetbv
1791
1792 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1793 jmp .xcr0_before_done
1794
1795.xcr0_before_skip:
1796 push 3fh ; indicate that we need not.
1797.xcr0_before_done:
1798
1799 ;
1800 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1801 ;
1802 push xSI
1803
1804 ; Save host fs, gs, sysenter msr etc.
1805 mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
1806 push xAX ; save for the vmload after vmrun
1807 vmsave
1808
1809 ; Setup eax for VMLOAD.
1810 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
1811
1812 ; Load guest general purpose registers.
1813 ; eax is loaded from the VMCB by VMRUN.
1814 mov ebx, [xSI + CPUMCTX.ebx]
1815 mov ecx, [xSI + CPUMCTX.ecx]
1816 mov edx, [xSI + CPUMCTX.edx]
1817 mov edi, [xSI + CPUMCTX.edi]
1818 mov ebp, [xSI + CPUMCTX.ebp]
; esi is loaded last since xSI held the pCtx pointer until here.
1819 mov esi, [xSI + CPUMCTX.esi]
1820
1821 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1822 clgi
1823 sti
1824
1825 ; Load guest fs, gs, sysenter msr etc.
1826 vmload
1827 ; Run the VM.
1828 vmrun
1829
1830 ; eax is in the VMCB already; we can use it here.
1831
1832 ; Save guest fs, gs, sysenter msr etc.
1833 vmsave
1834
1835 ; Load host fs, gs, sysenter msr etc.
1836 pop xAX ; Pushed above
1837 vmload
1838
1839 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1840 cli
1841 stgi
1842
1843 ;
1844 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1845 ;
1846 pop xAX
1847
1848 mov [ss:xAX + CPUMCTX.ebx], ebx
1849 mov [ss:xAX + CPUMCTX.ecx], ecx
1850 mov [ss:xAX + CPUMCTX.edx], edx
1851 mov [ss:xAX + CPUMCTX.esi], esi
1852 mov [ss:xAX + CPUMCTX.edi], edi
1853 mov [ss:xAX + CPUMCTX.ebp], ebp
1854
1855 ;
1856 ; Restore the host xcr0 if necessary.
1857 ;
; (A zero marker pushed earlier means "restore"; the saved host edx:eax
; XCR0 value follows the marker on the stack.)
1858 pop xCX
1859 test ecx, ecx
1860 jnz .xcr0_after_skip
1861 pop xAX
1862 pop xDX
1863 xsetbv ; ecx is already zero.
1864.xcr0_after_skip:
1865
1866 ;
1867 ; Restore host general purpose registers.
1868 ;
1869 MYPOPAD
1870
1871 mov eax, VINF_SUCCESS
1872
1873 popf
1874 pop xBP
1875%ifdef RT_ARCH_AMD64
; Drop the fake cdecl frame again (5 parameters + fake return address).
1876 add xSP, 6*xCB
1877%endif
1878 ret
1879ENDPROC SVMR0VMRun
1880
1881
1882%ifdef RT_ARCH_AMD64
1883;;
1884; Prepares for and executes VMRUN (64 bits guests)
1885;
1886; @returns VBox status code
1887; @param HCPhysHostVMCB msc:rcx, gcc:rdi Physical address of host VMCB.
1888; @param HCPhysGuestVMCB msc:rdx, gcc:rsi Physical address of guest VMCB.
1889; @param pCtx msc:r8, gcc:rdx Pointer to the guest-CPU context.
1890; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1891; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1892;
1893ALIGNCODE(16)
1894BEGINPROC SVMR0VMRun64
; Push the register parameters into a fake cdecl stack frame so the same
; [rbp + ...] offsets work for both 64-bit host calling conventions.
1895 ; Fake a cdecl stack frame
1896 %ifdef ASM_CALL64_GCC
1897 push r8
1898 push rcx
1899 push rdx
1900 push rsi
1901 push rdi
1902 %else
1903 mov rax, [rsp + 28h]
1904 push rax ; rbp + 30h pVCpu
1905 push r9 ; rbp + 28h pVM
1906 push r8 ; rbp + 20h pCtx
1907 push rdx ; rbp + 18h HCPHYSGuestVMCB
1908 push rcx ; rbp + 10h HCPhysHostVMCB
1909 %endif
1910 push 0 ; rbp + 08h "fake ret addr"
1911 push rbp ; rbp + 00h
1912 mov rbp, rsp
1913 pushf
1914
1915 ; Manual save and restore:
1916 ; - General purpose registers except RIP, RSP, RAX
1917 ;
1918 ; Trashed:
1919 ; - CR2 (we don't care)
1920 ; - LDTR (reset to 0)
1921 ; - DRx (presumably not changed at all)
1922 ; - DR7 (reset to 0x400)
1923 ;
1924
1925 ;
1926 ; Save all general purpose host registers.
1927 ;
1928 MYPUSHAD
1929
1930 ;
1931 ; Load pCtx into xSI.
1932 ;
1933 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
1934
1935 ;
1936 ; Save the host XCR0 and load the guest one if necessary.
1937 ;
1938 mov rax, [xBP + 30h] ; pVCpu
1939 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1940 jz .xcr0_before_skip
1941
1942 xor ecx, ecx
1943 xgetbv ; Save the host one on the stack.
1944 push xDX
1945 push xAX
1946
; Reload pCtx; xSI was not touched above, but the reload keeps this
; sequence self-contained.
1947 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1948 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1949 mov edx, [xSI + CPUMCTX.aXcr + 4]
1950 xor ecx, ecx ; paranoia
1951 xsetbv
1952
1953 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1954 jmp .xcr0_before_done
1955
1956.xcr0_before_skip:
1957 push 3fh ; indicate that we need not.
1958.xcr0_before_done:
1959
1960 ;
1961 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1962 ;
1963 push rsi
1964
1965 ;
1966 ; Save host fs, gs, sysenter msr etc.
1967 ;
1968 mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
1969 push rax ; Save for the vmload after vmrun
1970 vmsave
1971
1972 ; Setup eax for VMLOAD.
1973 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
1974
1975 ; Load guest general purpose registers.
1976 ; rax is loaded from the VMCB by VMRUN.
1977 mov rbx, qword [xSI + CPUMCTX.ebx]
1978 mov rcx, qword [xSI + CPUMCTX.ecx]
1979 mov rdx, qword [xSI + CPUMCTX.edx]
1980 mov rdi, qword [xSI + CPUMCTX.edi]
; Note: rbp now holds the guest value; there are no [rbp + ...] accesses
; below this point before rbp is restored again on the way out.
1981 mov rbp, qword [xSI + CPUMCTX.ebp]
1982 mov r8, qword [xSI + CPUMCTX.r8]
1983 mov r9, qword [xSI + CPUMCTX.r9]
1984 mov r10, qword [xSI + CPUMCTX.r10]
1985 mov r11, qword [xSI + CPUMCTX.r11]
1986 mov r12, qword [xSI + CPUMCTX.r12]
1987 mov r13, qword [xSI + CPUMCTX.r13]
1988 mov r14, qword [xSI + CPUMCTX.r14]
1989 mov r15, qword [xSI + CPUMCTX.r15]
; rsi is loaded last since xSI held the pCtx pointer until here.
1990 mov rsi, qword [xSI + CPUMCTX.esi]
1991
1992 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1993 clgi
1994 sti
1995
1996 ; Load guest fs, gs, sysenter msr etc.
1997 vmload
1998 ; Run the VM.
1999 vmrun
2000
2001 ; rax is in the VMCB already; we can use it here.
2002
2003 ; Save guest fs, gs, sysenter msr etc.
2004 vmsave
2005
2006 ;
2007 ; Load host fs, gs, sysenter msr etc.
2008 ;
2009 pop rax ; pushed above
2010 vmload
2011
2012 ;
2013 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
2014 ;
2015 cli
2016 stgi
2017
2018 ;
2019 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
2020 ;
2021 pop rax
2022
2023 mov qword [rax + CPUMCTX.ebx], rbx
2024 mov qword [rax + CPUMCTX.ecx], rcx
2025 mov qword [rax + CPUMCTX.edx], rdx
2026 mov qword [rax + CPUMCTX.esi], rsi
2027 mov qword [rax + CPUMCTX.edi], rdi
2028 mov qword [rax + CPUMCTX.ebp], rbp
2029 mov qword [rax + CPUMCTX.r8], r8
2030 mov qword [rax + CPUMCTX.r9], r9
2031 mov qword [rax + CPUMCTX.r10], r10
2032 mov qword [rax + CPUMCTX.r11], r11
2033 mov qword [rax + CPUMCTX.r12], r12
2034 mov qword [rax + CPUMCTX.r13], r13
2035 mov qword [rax + CPUMCTX.r14], r14
2036 mov qword [rax + CPUMCTX.r15], r15
2037
2038 ;
2039 ; Restore the host xcr0 if necessary.
2040 ;
; (A zero marker pushed earlier means "restore"; the saved host edx:eax
; XCR0 value follows the marker on the stack.)
2041 pop xCX
2042 test ecx, ecx
2043 jnz .xcr0_after_skip
2044 pop xAX
2045 pop xDX
2046 xsetbv ; ecx is already zero.
2047.xcr0_after_skip:
2048
2049 ;
2050 ; Restore host general purpose registers.
2051 ;
2052 MYPOPAD
2053
2054 mov eax, VINF_SUCCESS
2055
2056 popf
2057 pop rbp
; Drop the fake cdecl frame again (5 parameters + fake return address).
2058 add rsp, 6 * xCB
2059 ret
2060ENDPROC SVMR0VMRun64
2061%endif ; RT_ARCH_AMD64
2062
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette