VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm @ 9815

Last change on this file was revision 9815, checked in by vboxsync on 2008-06-19:

Removed unnecessary guest msr saving.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.3 KB
; $Id: HWACCMR0A.asm 9815 2008-06-19 11:14:38Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
 int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
 int3
 %endmacro
%endif

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

%ifdef RT_ARCH_AMD64
 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
 mov rcx, %1
 rdmsr
 push rdx
 push rax
 xor rdx, rdx
 mov rax, qword [xSI + %2]
 wrmsr
 %endmacro

 ; Restore a host MSR previously saved by LOADGUESTMSR (trashes rdx & rcx)
 %macro LOADHOSTMSR 1
 mov rcx, %1
 pop rax
 pop rdx
 wrmsr
 %endmacro
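
 ; Illustration: an invocation such as
 ;     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 ; (see VMXR0StartVM64 below) expands to roughly:
 ;     mov rcx, MSR_K8_LSTAR
 ;     rdmsr                               ; read the host value into edx:eax
 ;     push rdx                            ; stash the host value on the stack
 ;     push rax
 ;     xor rdx, rdx
 ;     mov rax, qword [xSI + CPUMCTX.msrLSTAR]
 ;     wrmsr                               ; load the guest value
 ; The matching LOADHOSTMSR MSR_K8_LSTAR pops those two qwords back into rax/rdx and
 ; writes them with wrmsr, so LOADHOSTMSR invocations must unwind in the reverse
 ; order of the LOADGUESTMSR ones (as done in VMXR0StartVM64).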

 %ifdef ASM_CALL64_GCC
 %macro MYPUSHAD 0
 push r15
 push r14
 push r13
 push r12
 push rbx
 %endmacro
 %macro MYPOPAD 0
 pop rbx
 pop r12
 pop r13
 pop r14
 pop r15
 %endmacro

 %else ; ASM_CALL64_MSC
 %macro MYPUSHAD 0
 push r15
 push r14
 push r13
 push r12
 push rbx
 push rsi
 push rdi
 %endmacro
 %macro MYPOPAD 0
 pop rdi
 pop rsi
 pop rbx
 pop r12
 pop r13
 pop r14
 pop r15
 %endmacro
 %endif

; trashes rax, rdx & rcx
 %macro MYPUSHSEGS 2
 mov %2, es
 push %1
 mov %2, ds
 push %1

 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it.
 mov ecx, MSR_K8_FS_BASE
 rdmsr
 push rdx
 push rax
 push fs

 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
 mov ecx, MSR_K8_GS_BASE
 rdmsr
 push rdx
 push rax
 push gs
 %endmacro

; trashes rax, rdx & rcx
 %macro MYPOPSEGS 2
 ; Note: do not step through this code with a debugger!
 pop gs
 pop rax
 pop rdx
 mov ecx, MSR_K8_GS_BASE
 wrmsr

 pop fs
 pop rax
 pop rdx
 mov ecx, MSR_K8_FS_BASE
 wrmsr
 ; Now it's safe to step again

 pop %1
 mov ds, %2
 pop %1
 mov es, %2
 %endmacro

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
 pushad
 %endmacro
 %macro MYPOPAD 0
 popad
 %endmacro

 %macro MYPUSHSEGS 2
 push ds
 push es
 push fs
 push gs
 %endmacro
 %macro MYPOPSEGS 2
 pop gs
 pop fs
 pop es
 pop ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume vmlaunch/vmresume
; * @param pCtx Guest context
; */
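; Probable C prototype, following the DECLASM comments used for the other helpers in
; this file (the parameter types are an assumption; the authoritative declaration
; lives in the HWACCM R0 headers):
;DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);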
BEGINPROC VMXR0StartVM32
 push xBP
 mov xBP, xSP

 pushf
 cli

 ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
 mov rax, qword .vmlaunch_done
 push rax
%else
 push .vmlaunch_done
%endif
 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
 vmwrite xAX, [xSP]
 ;/* Note: assumes success... */
 add xSP, xS

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
 ; *
 ; */

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
 ; fResume already in rdi
 ; pCtx already in rsi
 %else
 mov rdi, rcx ; fResume
 mov rsi, rdx ; pCtx
 %endif
%else
 mov edi, [ebp + 8] ; fResume
 mov esi, [ebp + 12] ; pCtx
%endif

 ;/* Save segment registers */
 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
 MYPUSHSEGS xAX, ax

 ; Save the pCtx pointer
 push xSI

 ; Save LDTR
 xor eax, eax
 sldt ax
 push xAX

 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
 sub xSP, xS*2
 sgdt [xSP]

 sub xSP, xS*2
 sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Restore DR6 - experiment, not safe!
 mov xBX, [xSI + CPUMCTX.dr6]
 mov dr6, xBX
%endif

 ; Restore CR2
 mov ebx, [xSI + CPUMCTX.cr2]
 mov cr2, xBX

 mov eax, VMX_VMCS_HOST_RSP
 vmwrite xAX, xSP
 ;/* Note: assumes success... */
 ;/* Don't mess with ESP anymore!! */

 ;/* Restore Guest's general purpose registers. */
 mov eax, [xSI + CPUMCTX.eax]
 mov ebx, [xSI + CPUMCTX.ebx]
 mov ecx, [xSI + CPUMCTX.ecx]
 mov edx, [xSI + CPUMCTX.edx]
 mov ebp, [xSI + CPUMCTX.ebp]

 ; resume or start?
 cmp xDI, 0 ; fResume
 je .vmlauch_lauch

 ;/* Restore edi & esi. */
 mov edi, [xSI + CPUMCTX.edi]
 mov esi, [xSI + CPUMCTX.esi]

 vmresume
 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
 ;/* Restore edi & esi. */
 mov edi, [xSI + CPUMCTX.edi]
 mov esi, [xSI + CPUMCTX.esi]

 vmlaunch
 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
 jc near .vmxstart_invalid_vmxon_ptr
 jz near .vmxstart_start_failed

 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 push xDI
 mov xDI, [xSP + xS * 2] ; pCtx

 mov [ss:xDI + CPUMCTX.eax], eax
 mov [ss:xDI + CPUMCTX.ebx], ebx
 mov [ss:xDI + CPUMCTX.ecx], ecx
 mov [ss:xDI + CPUMCTX.edx], edx
 mov [ss:xDI + CPUMCTX.esi], esi
 mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
 pop xAX ; the guest edi we pushed above
 mov dword [ss:xDI + CPUMCTX.edi], eax
%else
 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Save DR6 - experiment, not safe!
 mov xAX, dr6
 mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

 pop xAX ; saved LDTR
 lldt ax

 add xSP, xS ; pCtx

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

.vmstart_end:
 popf
 pop xBP
 ret


.vmxstart_invalid_vmxon_ptr:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

 add xSP, xS ; pCtx

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_INVALID_VMXON_PTR
 jmp .vmstart_end

.vmxstart_start_failed:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

 add xSP, xS ; pCtx

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_UNABLE_TO_START_VM
 jmp .vmstart_end

ENDPROC VMXR0StartVM32

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume vmlaunch/vmresume
; * @param pCtx Guest context
; */
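; Probable C prototype, following the same DECLASM comment convention (parameter
; types are an assumption; see the HWACCM R0 headers for the real declaration):
;DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);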
BEGINPROC VMXR0StartVM64
 push xBP
 mov xBP, xSP

 pushf
 cli

 ;/* First we have to save some final CPU context registers. */
 mov rax, qword .vmlaunch64_done
 push rax
 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
 vmwrite rax, [xSP]
 ;/* Note: assumes success... */
 add xSP, xS

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
 ; *
 ; */

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
 ; fResume already in rdi
 ; pCtx already in rsi
%else
 mov rdi, rcx ; fResume
 mov rsi, rdx ; pCtx
%endif

 ;/* Save segment registers */
 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
 MYPUSHSEGS xAX, ax

 ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
 ; @todo use the automatic load feature for MSRs
 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

 ; Load the guest MSRs for FS & GS base (saved in MYPUSHSEGS)
 mov rcx, MSR_K8_FS_BASE
 mov rax, qword [xSI + CPUMCTX.msrFSBASE]
 wrmsr

 mov rcx, MSR_K8_GS_BASE
 mov rax, qword [xSI + CPUMCTX.msrGSBASE]
 wrmsr

 ; Save the pCtx pointer
 push xSI

 ; Save LDTR
 xor eax, eax
 sldt ax
 push xAX

 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
 sub xSP, xS*2
 sgdt [xSP]

 sub xSP, xS*2
 sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Restore DR6 - experiment, not safe!
 mov xBX, [xSI + CPUMCTX.dr6]
 mov dr6, xBX
%endif

 ; Restore CR2
 mov rbx, qword [xSI + CPUMCTX.cr2]
 mov cr2, rbx

 mov eax, VMX_VMCS_HOST_RSP
 vmwrite xAX, xSP
 ;/* Note: assumes success... */
 ;/* Don't mess with ESP anymore!! */

 ;/* Restore Guest's general purpose registers. */
 mov rax, qword [xSI + CPUMCTX.eax]
 mov rbx, qword [xSI + CPUMCTX.ebx]
 mov rcx, qword [xSI + CPUMCTX.ecx]
 mov rdx, qword [xSI + CPUMCTX.edx]
 mov rbp, qword [xSI + CPUMCTX.ebp]
 mov r8, qword [xSI + CPUMCTX.r8]
 mov r9, qword [xSI + CPUMCTX.r9]
 mov r10, qword [xSI + CPUMCTX.r10]
 mov r11, qword [xSI + CPUMCTX.r11]
 mov r12, qword [xSI + CPUMCTX.r12]
 mov r13, qword [xSI + CPUMCTX.r13]
 mov r14, qword [xSI + CPUMCTX.r14]
 mov r15, qword [xSI + CPUMCTX.r15]

 ; resume or start?
 cmp xDI, 0 ; fResume
 je .vmlauch64_lauch

 ;/* Restore rdi & rsi. */
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rsi, qword [xSI + CPUMCTX.esi]

 vmresume
 jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
 ;/* Restore rdi & rsi. */
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rsi, qword [xSI + CPUMCTX.esi]

 vmlaunch
 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
 jc near .vmxstart64_invalid_vmxon_ptr
 jz near .vmxstart64_start_failed

 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 push xDI
 mov xDI, [xSP + xS * 2] ; pCtx

 mov qword [xDI + CPUMCTX.eax], rax
 mov qword [xDI + CPUMCTX.ebx], rbx
 mov qword [xDI + CPUMCTX.ecx], rcx
 mov qword [xDI + CPUMCTX.edx], rdx
 mov qword [xDI + CPUMCTX.esi], rsi
 mov qword [xDI + CPUMCTX.ebp], rbp
 mov qword [xDI + CPUMCTX.r8], r8
 mov qword [xDI + CPUMCTX.r9], r9
 mov qword [xDI + CPUMCTX.r10], r10
 mov qword [xDI + CPUMCTX.r11], r11
 mov qword [xDI + CPUMCTX.r12], r12
 mov qword [xDI + CPUMCTX.r13], r13
 mov qword [xDI + CPUMCTX.r14], r14
 mov qword [xDI + CPUMCTX.r15], r15

 pop xAX ; the guest edi we pushed above
 mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Save DR6 - experiment, not safe!
 mov xAX, dr6
 mov [xDI + CPUMCTX.dr6], xAX
%endif

 pop xAX ; saved LDTR
 lldt ax

 pop xSI ; pCtx (needed in rsi by the macros below)

 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
 ; @todo use the automatic load feature for MSRs
 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
 LOADHOSTMSR MSR_K8_SF_MASK
 LOADHOSTMSR MSR_K8_CSTAR
 LOADHOSTMSR MSR_K8_LSTAR

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

.vmstart64_end:
 popf
 pop xBP
 ret


.vmxstart64_invalid_vmxon_ptr:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

 pop xSI ; pCtx (needed in rsi by the macros below)

 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
 ; @todo use the automatic load feature for MSRs
 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
 LOADHOSTMSR MSR_K8_SF_MASK
 LOADHOSTMSR MSR_K8_CSTAR
 LOADHOSTMSR MSR_K8_LSTAR

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_INVALID_VMXON_PTR
 jmp .vmstart64_end

.vmxstart64_start_failed:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

 pop xSI ; pCtx (needed in rsi by the macros below)

 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
 ; @todo use the automatic load feature for MSRs
 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
 LOADHOSTMSR MSR_K8_SF_MASK
 LOADHOSTMSR MSR_K8_CSTAR
 LOADHOSTMSR MSR_K8_LSTAR

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_UNABLE_TO_START_VM
 jmp .vmstart64_end
ENDPROC VMXR0StartVM64

;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
; */
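; Plausible C prototype, mirroring the VMXReadVMCS64 comment below (names taken from
; the @param list above; here the second parameter is the 64-bit value itself, not a
; pointer):
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t pData);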
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
 mov eax, 0ffffffffh
 and rdi, rax
 xor rax, rax
 vmwrite rdi, rsi
%else
 mov eax, 0ffffffffh
 and rcx, rax
 xor rax, rax
 vmwrite rcx, rdx
%endif
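 ; VMX status convention: on failure the CPU sets CF (VMfailInvalid, no current VMCS)
 ; or ZF (VMfailValid, error number in the VM-instruction error field); both flags
 ; clear means success. Hence the jnc/jnz checks below (same pattern in VMXReadVMCS64,
 ; VMXEnable, VMXClearVMCS and VMXActivateVMCS).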
 jnc .valid_vmcs
 mov eax, VERR_VMX_INVALID_VMCS_PTR
 ret
.valid_vmcs:
 jnz .the_end
 mov eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
 ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param idxField VMCS index
; * @param pData Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
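; Illustrative caller-side usage (hypothetical variable name, for orientation only):
;     uint64_t u64HostRip;
;     int rc = VMXReadVMCS64(VMX_VMCS_HOST_RIP, &u64HostRip);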
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
 mov eax, 0ffffffffh
 and rdi, rax
 xor rax, rax
 vmread [rsi], rdi
%else
 mov eax, 0ffffffffh
 and rcx, rax
 xor rax, rax
 vmread [rdx], rcx
%endif
 jnc .valid_vmcs
 mov eax, VERR_VMX_INVALID_VMCS_PTR
 ret
.valid_vmcs:
 jnz .the_end
 mov eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
 ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param HCPhysVMXOn Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
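; Note: the caller is expected to have set CR4.VMXE beforehand; with CR4.VMXE clear,
; VMXON raises #UD rather than returning a failure status through the flags.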
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
 xor rax, rax
 %ifdef ASM_CALL64_GCC
 push rdi
 %else
 push rcx
 %endif
 vmxon [rsp]
%else
 xor eax, eax
 vmxon [esp + 4]
%endif
 jnc .good
 mov eax, VERR_VMX_INVALID_VMXON_PTR
 jmp .the_end

.good:
 jnz .the_end
 mov eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
 add rsp, 8
%endif
 ret
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
 vmxoff
 ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
 xor rax, rax
 %ifdef ASM_CALL64_GCC
 push rdi
 %else
 push rcx
 %endif
 vmclear [rsp]
%else
 xor eax, eax
 vmclear [esp + 4]
%endif
 jnc .the_end
 mov eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
 add rsp, 8
%endif
 ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
 xor rax, rax
 %ifdef ASM_CALL64_GCC
 push rdi
 %else
 push rcx
 %endif
 vmptrld [rsp]
%else
 xor eax, eax
 vmptrld [esp + 4]
%endif
 jnc .the_end
 mov eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
 add rsp, 8
%endif
 ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
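; Probable C prototype (parameter names taken from the comments in the body below;
; the authoritative declaration lives in the HWACCM R0 headers):
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);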
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, so sue me.
 %ifdef ASM_CALL64_GCC
 push rdx
 push rsi
 push rdi
 %else
 push r8
 push rdx
 push rcx
 %endif
 push 0
%endif
 push xBP
 mov xBP, xSP
 pushf

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP, RAX
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; */

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* Save the Guest CPU context pointer. */
 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
 push xSI ; push for saving the state at the end

 ; Restore CR2
 mov ebx, [xSI + CPUMCTX.cr2]
 mov cr2, xBX

 ; save host fs, gs, sysenter msr etc
 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
 push xAX ; save for the vmload after vmrun
 vmsave

 ; setup eax for VMLOAD
 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

 ;/* Restore Guest's general purpose registers. */
 ;/* EAX is loaded from the VMCB by VMRUN */
 mov ebx, [xSI + CPUMCTX.ebx]
 mov ecx, [xSI + CPUMCTX.ecx]
 mov edx, [xSI + CPUMCTX.edx]
 mov edi, [xSI + CPUMCTX.edi]
 mov ebp, [xSI + CPUMCTX.ebp]
 mov esi, [xSI + CPUMCTX.esi]

 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
 clgi
 sti

 ; load guest fs, gs, sysenter msr etc
 vmload
 ; run the VM
 vmrun

 ;/* EAX is in the VMCB already; we can use it here. */

 ; save guest fs, gs, sysenter msr etc
 vmsave

 ; load host fs, gs, sysenter msr etc
 pop xAX ; pushed above
 vmload

 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
 cli
 stgi

 pop xAX ; pCtx

 mov [ss:xAX + CPUMCTX.ebx], ebx
 mov [ss:xAX + CPUMCTX.ecx], ecx
 mov [ss:xAX + CPUMCTX.edx], edx
 mov [ss:xAX + CPUMCTX.esi], esi
 mov [ss:xAX + CPUMCTX.edi], edi
 mov [ss:xAX + CPUMCTX.ebp], ebp

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

 popf
 pop xBP
%ifdef RT_ARCH_AMD64
 add xSP, 4*xS
%endif
 ret
ENDPROC SVMVMRun


;;
; Executes INVLPGA
;
; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
 mov eax, edi ;; @todo 64-bit guest.
 mov ecx, esi
 %else
 mov eax, ecx ;; @todo 64-bit guest.
 mov ecx, edx
 %endif
%else
 mov eax, [esp + 4]
 mov ecx, [esp + 8]
%endif
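 ; invlpga invalidates the TLB entry for the guest virtual address in rAX, tagged
 ; with the ASID in ecx.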
 invlpga [xAX], ecx
 ret
ENDPROC SVMInvlpgA
