VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 9817

Last change on this file since 9817 was 9817, checked in by vboxsync, 17 years ago

fs & gs base cleanup

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.1 KB
Line 
1; $Id: HWACCMR0A.asm 9817 2008-06-19 11:47:38Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37 %define vmsave int3
38 %define vmload int3
39 %define vmrun int3
40 %define clgi int3
41 %define stgi int3
42 %macro invlpga 2,
43 int3
44 %endmacro
45%endif
46
47;; This is too risky wrt. stability, performance and correctness.
48;%define VBOX_WITH_DR6_EXPERIMENT 1
49
50;; @def MYPUSHAD
51; Macro generating an equivalent to pushad
52
53;; @def MYPOPAD
54; Macro generating an equivalent to popad
55
56;; @def MYPUSHSEGS
57; Macro saving all segment registers on the stack.
58; @param 1 full width register name
59; @param 2 16-bit register name for \a 1.
60
61;; @def MYPOPSEGS
62; Macro restoring all segment registers from the stack
63; @param 1 full width register name
64; @param 2 16-bit register name for \a 1.
65
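;; Usage sketch: the save/restore macros above are always invoked as matched pairs
;; with a full width register and its 16-bit alias, exactly as done in the function
;; bodies further down, e.g.:
;;   MYPUSHSEGS xAX, ax    ; save segment registers (and the FS/GS base MSRs on AMD64)
;;   ...
;;   MYPOPSEGS  xAX, ax    ; restore them again in reverse order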
66%ifdef RT_ARCH_AMD64
67 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
68 %macro LOADGUESTMSR 2
69 mov rcx, %1
70 rdmsr
71 push rdx
72 push rax
73 xor rdx, rdx
74 mov rax, qword [xSI + %2]
75 wrmsr
76 %endmacro
77
78 ; Restore the corresponding host MSR saved by LOADGUESTMSR (trashes rdx & rcx)
79 %macro LOADHOSTMSR 1
80 mov rcx, %1
81 pop rax
82 pop rdx
83 wrmsr
84 %endmacro
85
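 ; Note on pairing: LOADGUESTMSR leaves the saved host MSR value (edx:eax) on the
 ; stack, so each LOADGUESTMSR must later be undone by a LOADHOSTMSR for the same
 ; MSR in strict reverse (LIFO) order, as the VMXR0StartVM64 paths below do:
 ;   LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
 ;   ...
 ;   LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 ;   ; ... VM execution ...
 ;   LOADHOSTMSR  MSR_K8_KERNEL_GS_BASE
 ;   ...
 ;   LOADHOSTMSR  MSR_K8_LSTAR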
86 %ifdef ASM_CALL64_GCC
87 %macro MYPUSHAD 0
88 push r15
89 push r14
90 push r13
91 push r12
92 push rbx
93 %endmacro
94 %macro MYPOPAD 0
95 pop rbx
96 pop r12
97 pop r13
98 pop r14
99 pop r15
100 %endmacro
101
102 %else ; ASM_CALL64_MSC
103 %macro MYPUSHAD 0
104 push r15
105 push r14
106 push r13
107 push r12
108 push rbx
109 push rsi
110 push rdi
111 %endmacro
112 %macro MYPOPAD 0
113 pop rdi
114 pop rsi
115 pop rbx
116 pop r12
117 pop r13
118 pop r14
119 pop r15
120 %endmacro
121 %endif
122
123; trashes rax, rdx & rcx
124 %macro MYPUSHSEGS 2
125 mov %2, es
126 push %1
127 mov %2, ds
128 push %1
129
130 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it ourselves.
131 mov ecx, MSR_K8_FS_BASE
132 rdmsr
133 push rdx
134 push rax
135 push fs
136
137 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
138 mov ecx, MSR_K8_GS_BASE
139 rdmsr
140 push rdx
141 push rax
142 push gs
143 %endmacro
144
145; trashes rax, rdx & rcx
146 %macro MYPOPSEGS 2
147 ; Note: do not step through this code with a debugger!
148 pop gs
149 pop rax
150 pop rdx
151 mov ecx, MSR_K8_GS_BASE
152 wrmsr
153
154 pop fs
155 pop rax
156 pop rdx
157 mov ecx, MSR_K8_FS_BASE
158 wrmsr
159 ; Now it's safe to step again
160
161 pop %1
162 mov ds, %2
163 pop %1
164 mov es, %2
165 %endmacro
166
167%else ; RT_ARCH_X86
168 %macro MYPUSHAD 0
169 pushad
170 %endmacro
171 %macro MYPOPAD 0
172 popad
173 %endmacro
174
175 %macro MYPUSHSEGS 2
176 push ds
177 push es
178 push fs
179 push gs
180 %endmacro
181 %macro MYPOPSEGS 2
182 pop gs
183 pop fs
184 pop es
185 pop ds
186 %endmacro
187%endif
188
189
190BEGINCODE
191
192;/**
193; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
194; *
195; * @returns VBox status code
196; * @param fResume vmlaunch/vmresume
197; * @param pCtx Guest context
198; */
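;; Assumed C-level prototype, written in the same style as the DECLASM comments on
;; the other entry points in this file (the exact parameter types live in the C headers):
;DECLASM(int) VMXR0StartVM32(uint32_t fResume, PCPUMCTX pCtx);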
199BEGINPROC VMXR0StartVM32
200 push xBP
201 mov xBP, xSP
202
203 pushf
204 cli
205
206 ;/* First we have to save some final CPU context registers. */
207%ifdef RT_ARCH_AMD64
208 mov rax, qword .vmlaunch_done
209 push rax
210%else
211 push .vmlaunch_done
212%endif
213 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
214 vmwrite xAX, [xSP]
215 ;/* Note: assumes success... */
216 add xSP, xS
217
218 ;/* Manual save and restore:
219 ; * - General purpose registers except RIP, RSP
220 ; *
221 ; * Trashed:
222 ; * - CR2 (we don't care)
223 ; * - LDTR (reset to 0)
224 ; * - DRx (presumably not changed at all)
225 ; * - DR7 (reset to 0x400)
226 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
227 ; *
228 ; */
229
230 ;/* Save all general purpose host registers. */
231 MYPUSHAD
232
233 ;/* Save the Guest CPU context pointer. */
234%ifdef RT_ARCH_AMD64
235 %ifdef ASM_CALL64_GCC
236 ; fResume already in rdi
237 ; pCtx already in rsi
238 %else
239 mov rdi, rcx ; fResume
240 mov rsi, rdx ; pCtx
241 %endif
242%else
243 mov edi, [ebp + 8] ; fResume
244 mov esi, [ebp + 12] ; pCtx
245%endif
246
247 ;/* Save segment registers */
248 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
249 MYPUSHSEGS xAX, ax
250
251 ; Save the pCtx pointer
252 push xSI
253
254 ; Save LDTR
255 xor eax, eax
256 sldt ax
257 push xAX
258
259 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
260 sub xSP, xS*2
261 sgdt [xSP]
262
263 sub xSP, xS*2
264 sidt [xSP]
265
266%ifdef VBOX_WITH_DR6_EXPERIMENT
267 ; Restore DR6 - experiment, not safe!
268 mov xBX, [xSI + CPUMCTX.dr6]
269 mov dr6, xBX
270%endif
271
272 ; Restore CR2
273 mov ebx, [xSI + CPUMCTX.cr2]
274 mov cr2, xBX
275
276 mov eax, VMX_VMCS_HOST_RSP
277 vmwrite xAX, xSP
278 ;/* Note: assumes success... */
279 ;/* Don't mess with ESP anymore!! */
280
281 ;/* Restore Guest's general purpose registers. */
282 mov eax, [xSI + CPUMCTX.eax]
283 mov ebx, [xSI + CPUMCTX.ebx]
284 mov ecx, [xSI + CPUMCTX.ecx]
285 mov edx, [xSI + CPUMCTX.edx]
286 mov ebp, [xSI + CPUMCTX.ebp]
287
288 ; resume or start?
289 cmp xDI, 0 ; fResume
290 je .vmlauch_lauch
291
292 ;/* Restore edi & esi. */
293 mov edi, [xSI + CPUMCTX.edi]
294 mov esi, [xSI + CPUMCTX.esi]
295
296 vmresume
297 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */
298
299.vmlauch_lauch:
300 ;/* Restore edi & esi. */
301 mov edi, [xSI + CPUMCTX.edi]
302 mov esi, [xSI + CPUMCTX.esi]
303
304 vmlaunch
305 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */
306
307ALIGNCODE(16)
308.vmlaunch_done:
309 jc near .vmxstart_invalid_vmxon_ptr
310 jz near .vmxstart_start_failed
311
312 ; Restore base and limit of the IDTR & GDTR
313 lidt [xSP]
314 add xSP, xS*2
315 lgdt [xSP]
316 add xSP, xS*2
317
318 push xDI
319 mov xDI, [xSP + xS * 2] ; pCtx
320
321 mov [ss:xDI + CPUMCTX.eax], eax
322 mov [ss:xDI + CPUMCTX.ebx], ebx
323 mov [ss:xDI + CPUMCTX.ecx], ecx
324 mov [ss:xDI + CPUMCTX.edx], edx
325 mov [ss:xDI + CPUMCTX.esi], esi
326 mov [ss:xDI + CPUMCTX.ebp], ebp
327%ifdef RT_ARCH_AMD64
328 pop xAX ; the guest edi we pushed above
329 mov dword [ss:xDI + CPUMCTX.edi], eax
330%else
331 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
332%endif
333
334%ifdef VBOX_WITH_DR6_EXPERIMENT
335 ; Save DR6 - experiment, not safe!
336 mov xAX, dr6
337 mov [ss:xDI + CPUMCTX.dr6], xAX
338%endif
339
340 pop xAX ; saved LDTR
341 lldt ax
342
343 add xSP, xS ; pCtx
344
345 ; Restore segment registers
346 MYPOPSEGS xAX, ax
347
348 ; Restore general purpose registers
349 MYPOPAD
350
351 mov eax, VINF_SUCCESS
352
353.vmstart_end:
354 popf
355 pop xBP
356 ret
357
358
359.vmxstart_invalid_vmxon_ptr:
360 ; Restore base and limit of the IDTR & GDTR
361 lidt [xSP]
362 add xSP, xS*2
363 lgdt [xSP]
364 add xSP, xS*2
365
366 pop xAX ; saved LDTR
367 lldt ax
368
369 add xSP, xS ; pCtx
370
371 ; Restore segment registers
372 MYPOPSEGS xAX, ax
373
374 ; Restore all general purpose host registers.
375 MYPOPAD
376 mov eax, VERR_VMX_INVALID_VMXON_PTR
377 jmp .vmstart_end
378
379.vmxstart_start_failed:
380 ; Restore base and limit of the IDTR & GDTR
381 lidt [xSP]
382 add xSP, xS*2
383 lgdt [xSP]
384 add xSP, xS*2
385
386 pop xAX ; saved LDTR
387 lldt ax
388
389 add xSP, xS ; pCtx
390
391 ; Restore segment registers
392 MYPOPSEGS xAX, ax
393
394 ; Restore all general purpose host registers.
395 MYPOPAD
396 mov eax, VERR_VMX_UNABLE_TO_START_VM
397 jmp .vmstart_end
398
399ENDPROC VMXR0StartVM32
400
401%ifdef RT_ARCH_AMD64
402;/**
403; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
404; *
405; * @returns VBox status code
406; * @param fResume vmlaunch/vmresume
407; * @param pCtx Guest context
408; */
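;; Assumed C-level prototype, as for VMXR0StartVM32 above (exact types are in the C headers):
;DECLASM(int) VMXR0StartVM64(uint32_t fResume, PCPUMCTX pCtx);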
409BEGINPROC VMXR0StartVM64
410 push xBP
411 mov xBP, xSP
412
413 pushf
414 cli
415
416 ;/* First we have to save some final CPU context registers. */
417 mov rax, qword .vmlaunch64_done
418 push rax
419 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
420 vmwrite rax, [xSP]
421 ;/* Note: assumes success... */
422 add xSP, xS
423
424 ;/* Manual save and restore:
425 ; * - General purpose registers except RIP, RSP
426 ; *
427 ; * Trashed:
428 ; * - CR2 (we don't care)
429 ; * - LDTR (reset to 0)
430 ; * - DRx (presumably not changed at all)
431 ; * - DR7 (reset to 0x400)
432 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
433 ; *
434 ; */
435
436 ;/* Save all general purpose host registers. */
437 MYPUSHAD
438
439 ;/* Save the Guest CPU context pointer. */
440%ifdef ASM_CALL64_GCC
441 ; fResume already in rdi
442 ; pCtx already in rsi
443%else
444 mov rdi, rcx ; fResume
445 mov rsi, rdx ; pCtx
446%endif
447
448 ;/* Save segment registers */
449 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
450 MYPUSHSEGS xAX, ax
451
452 ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
453 ; @todo use the automatic load feature for MSRs
454 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
455 LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
456 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
457 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
458
459 ; Save the pCtx pointer
460 push xSI
461
462 ; Save LDTR
463 xor eax, eax
464 sldt ax
465 push xAX
466
467 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
468 sub xSP, xS*2
469 sgdt [xSP]
470
471 sub xSP, xS*2
472 sidt [xSP]
473
474%ifdef VBOX_WITH_DR6_EXPERIMENT
475 ; Restore DR6 - experiment, not safe!
476 mov xBX, [xSI + CPUMCTX.dr6]
477 mov dr6, xBX
478%endif
479
480 ; Restore CR2
481 mov rbx, qword [xSI + CPUMCTX.cr2]
482 mov cr2, rbx
483
484 mov eax, VMX_VMCS_HOST_RSP
485 vmwrite xAX, xSP
486 ;/* Note: assumes success... */
487 ;/* Don't mess with ESP anymore!! */
488
489 ;/* Restore Guest's general purpose registers. */
490 mov rax, qword [xSI + CPUMCTX.eax]
491 mov rbx, qword [xSI + CPUMCTX.ebx]
492 mov rcx, qword [xSI + CPUMCTX.ecx]
493 mov rdx, qword [xSI + CPUMCTX.edx]
494 mov rbp, qword [xSI + CPUMCTX.ebp]
495 mov r8, qword [xSI + CPUMCTX.r8]
496 mov r9, qword [xSI + CPUMCTX.r9]
497 mov r10, qword [xSI + CPUMCTX.r10]
498 mov r11, qword [xSI + CPUMCTX.r11]
499 mov r12, qword [xSI + CPUMCTX.r12]
500 mov r13, qword [xSI + CPUMCTX.r13]
501 mov r14, qword [xSI + CPUMCTX.r14]
502 mov r15, qword [xSI + CPUMCTX.r15]
503
504 ; resume or start?
505 cmp xDI, 0 ; fResume
506 je .vmlauch64_lauch
507
508 ;/* Restore rdi & rsi. */
509 mov rdi, qword [xSI + CPUMCTX.edi]
510 mov rsi, qword [xSI + CPUMCTX.esi]
511
512 vmresume
513 jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */
514
515.vmlauch64_lauch:
516 ;/* Restore rdi & rsi. */
517 mov rdi, qword [xSI + CPUMCTX.edi]
518 mov rsi, qword [xSI + CPUMCTX.esi]
519
520 vmlaunch
521 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */
522
523ALIGNCODE(16)
524.vmlaunch64_done:
525 jc near .vmxstart64_invalid_vmxon_ptr
526 jz near .vmxstart64_start_failed
527
528 ; Restore base and limit of the IDTR & GDTR
529 lidt [xSP]
530 add xSP, xS*2
531 lgdt [xSP]
532 add xSP, xS*2
533
534 push xDI
535 mov xDI, [xSP + xS * 2] ; pCtx
536
537 mov qword [xDI + CPUMCTX.eax], rax
538 mov qword [xDI + CPUMCTX.ebx], rbx
539 mov qword [xDI + CPUMCTX.ecx], rcx
540 mov qword [xDI + CPUMCTX.edx], rdx
541 mov qword [xDI + CPUMCTX.esi], rsi
542 mov qword [xDI + CPUMCTX.ebp], rbp
543 mov qword [xDI + CPUMCTX.r8], r8
544 mov qword [xDI + CPUMCTX.r9], r9
545 mov qword [xDI + CPUMCTX.r10], r10
546 mov qword [xDI + CPUMCTX.r11], r11
547 mov qword [xDI + CPUMCTX.r12], r12
548 mov qword [xDI + CPUMCTX.r13], r13
549 mov qword [xDI + CPUMCTX.r14], r14
550 mov qword [xDI + CPUMCTX.r15], r15
551
552 pop xAX ; the guest edi we pushed above
553 mov qword [xDI + CPUMCTX.edi], rax
554
555%ifdef VBOX_WITH_DR6_EXPERIMENT
556 ; Save DR6 - experiment, not safe!
557 mov xAX, dr6
558 mov [xDI + CPUMCTX.dr6], xAX
559%endif
560
561 pop xAX ; saved LDTR
562 lldt ax
563
564 pop xSI ; pCtx (needed in rsi by the macros below)
565
566 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
567 ; @todo use the automatic load feature for MSRs
568 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
569 LOADHOSTMSR MSR_K8_SF_MASK
570 LOADHOSTMSR MSR_K8_CSTAR
571 LOADHOSTMSR MSR_K8_LSTAR
572
573 ; Restore segment registers
574 MYPOPSEGS xAX, ax
575
576 ; Restore general purpose registers
577 MYPOPAD
578
579 mov eax, VINF_SUCCESS
580
581.vmstart64_end:
582 popf
583 pop xBP
584 ret
585
586
587.vmxstart64_invalid_vmxon_ptr:
588 ; Restore base and limit of the IDTR & GDTR
589 lidt [xSP]
590 add xSP, xS*2
591 lgdt [xSP]
592 add xSP, xS*2
593
594 pop xAX ; saved LDTR
595 lldt ax
596
597 pop xSI ; pCtx (needed in rsi by the macros below)
598
599 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
600 ; @todo use the automatic load feature for MSRs
601 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
602 LOADHOSTMSR MSR_K8_SF_MASK
603 LOADHOSTMSR MSR_K8_CSTAR
604 LOADHOSTMSR MSR_K8_LSTAR
605
606 ; Restore segment registers
607 MYPOPSEGS xAX, ax
608
609 ; Restore all general purpose host registers.
610 MYPOPAD
611 mov eax, VERR_VMX_INVALID_VMXON_PTR
612 jmp .vmstart64_end
613
614.vmxstart64_start_failed:
615 ; Restore base and limit of the IDTR & GDTR
616 lidt [xSP]
617 add xSP, xS*2
618 lgdt [xSP]
619 add xSP, xS*2
620
621 pop xAX ; saved LDTR
622 lldt ax
623
624 pop xSI ; pCtx (needed in rsi by the macros below)
625
626 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
627 ; @todo use the automatic load feature for MSRs
628 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
629 LOADHOSTMSR MSR_K8_SF_MASK
630 LOADHOSTMSR MSR_K8_CSTAR
631 LOADHOSTMSR MSR_K8_LSTAR
632
633 ; Restore segment registers
634 MYPOPSEGS xAX, ax
635
636 ; Restore all general purpose host registers.
637 MYPOPAD
638 mov eax, VERR_VMX_UNABLE_TO_START_VM
639 jmp .vmstart64_end
640ENDPROC VMXR0StartVM64
641
642;/**
643; * Executes VMWRITE
644; *
645; * @returns VBox status code
646; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
647; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
648; */
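;; Assumed C-level prototype and call, in the style of this file's other DECLASM
;; comments; the second argument is passed by value, as the vmwrite below shows
;; (names are illustrative, the real prototype is in the C headers):
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data);
;;   e.g. rc = VMXWriteVMCS64(VMX_VMCS_HOST_RSP, uHostRsp);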
649BEGINPROC VMXWriteVMCS64
650%ifdef ASM_CALL64_GCC
651 mov eax, 0ffffffffh
652 and rdi, rax
653 xor rax, rax
654 vmwrite rdi, rsi
655%else
656 mov eax, 0ffffffffh
657 and rcx, rax
658 xor rax, rax
659 vmwrite rcx, rdx
660%endif
661 jnc .valid_vmcs
662 mov eax, VERR_VMX_INVALID_VMCS_PTR
663 ret
664.valid_vmcs:
665 jnz .the_end
666 mov eax, VERR_VMX_INVALID_VMCS_FIELD
667.the_end:
668 ret
669ENDPROC VMXWriteVMCS64
670
671;/**
672; * Executes VMREAD
673; *
674; * @returns VBox status code
675; * @param idxField VMCS index
676; * @param pData Ptr to store VM field value
677; */
678;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
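;; Illustrative call (idxField is one of the VMX_VMCS_* indices, u64Val a local variable):
;;   uint64_t u64Val;
;;   rc = VMXReadVMCS64(idxField, &u64Val);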
679BEGINPROC VMXReadVMCS64
680%ifdef ASM_CALL64_GCC
681 mov eax, 0ffffffffh
682 and rdi, rax
683 xor rax, rax
684 vmread [rsi], rdi
685%else
686 mov eax, 0ffffffffh
687 and rcx, rax
688 xor rax, rax
689 vmread [rdx], rcx
690%endif
691 jnc .valid_vmcs
692 mov eax, VERR_VMX_INVALID_VMCS_PTR
693 ret
694.valid_vmcs:
695 jnz .the_end
696 mov eax, VERR_VMX_INVALID_VMCS_FIELD
697.the_end:
698 ret
699ENDPROC VMXReadVMCS64
700
701
702;/**
703; * Executes VMXON
704; *
705; * @returns VBox status code
706; * @param HCPhysVMXOn Physical address of VMXON structure
707; */
708;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
709BEGINPROC VMXEnable
710%ifdef RT_ARCH_AMD64
711 xor rax, rax
712 %ifdef ASM_CALL64_GCC
713 push rdi
714 %else
715 push rcx
716 %endif
717 vmxon [rsp]
718%else
719 xor eax, eax
720 vmxon [esp + 4]
721%endif
722 jnc .good
723 mov eax, VERR_VMX_INVALID_VMXON_PTR
724 jmp .the_end
725
726.good:
727 jnz .the_end
728 mov eax, VERR_VMX_GENERIC
729
730.the_end:
731%ifdef RT_ARCH_AMD64
732 add rsp, 8
733%endif
734 ret
735ENDPROC VMXEnable
736
737
738;/**
739; * Executes VMXOFF
740; */
741;DECLASM(void) VMXDisable(void);
742BEGINPROC VMXDisable
743 vmxoff
744 ret
745ENDPROC VMXDisable
746
747
748;/**
749; * Executes VMCLEAR
750; *
751; * @returns VBox status code
752; * @param HCPhysVMCS Physical address of VM control structure
753; */
754;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
755BEGINPROC VMXClearVMCS
756%ifdef RT_ARCH_AMD64
757 xor rax, rax
758 %ifdef ASM_CALL64_GCC
759 push rdi
760 %else
761 push rcx
762 %endif
763 vmclear [rsp]
764%else
765 xor eax, eax
766 vmclear [esp + 4]
767%endif
768 jnc .the_end
769 mov eax, VERR_VMX_INVALID_VMCS_PTR
770.the_end:
771%ifdef RT_ARCH_AMD64
772 add rsp, 8
773%endif
774 ret
775ENDPROC VMXClearVMCS
776
777
778;/**
779; * Executes VMPTRLD
780; *
781; * @returns VBox status code
782; * @param HCPhysVMCS Physical address of VMCS structure
783; */
784;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
785BEGINPROC VMXActivateVMCS
786%ifdef RT_ARCH_AMD64
787 xor rax, rax
788 %ifdef ASM_CALL64_GCC
789 push rdi
790 %else
791 push rcx
792 %endif
793 vmptrld [rsp]
794%else
795 xor eax, eax
796 vmptrld [esp + 4]
797%endif
798 jnc .the_end
799 mov eax, VERR_VMX_INVALID_VMCS_PTR
800.the_end:
801%ifdef RT_ARCH_AMD64
802 add rsp, 8
803%endif
804 ret
805ENDPROC VMXActivateVMCS
806
807%endif ; RT_ARCH_AMD64
808
809
810;/**
811; * Prepares for and executes VMRUN
812; *
813; * @returns VBox status code
814; * @param pVMCBHostPhys Physical address of host VMCB
815; * @param pVMCBPhys Physical address of guest VMCB
816; * @param pCtx Guest context
817; */
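;; Assumed C-level prototype, in the style of this file's other DECLASM comments;
;; the parameter names follow the comments in the body below (the real prototype is
;; in the C headers):
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);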
818BEGINPROC SVMVMRun
819%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, sosume.
820 %ifdef ASM_CALL64_GCC
821 push rdx
822 push rsi
823 push rdi
824 %else
825 push r8
826 push rdx
827 push rcx
828 %endif
829 push 0
830%endif
831 push xBP
832 mov xBP, xSP
833 pushf
834
835 ;/* Manual save and restore:
836 ; * - General purpose registers except RIP, RSP, RAX
837 ; *
838 ; * Trashed:
839 ; * - CR2 (we don't care)
840 ; * - LDTR (reset to 0)
841 ; * - DRx (presumably not changed at all)
842 ; * - DR7 (reset to 0x400)
843 ; */
844
845 ;/* Save all general purpose host registers. */
846 MYPUSHAD
847
848 ;/* Save the Guest CPU context pointer. */
849 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
850 push xSI ; push for saving the state at the end
851
852 ; Restore CR2
853 mov ebx, [xSI + CPUMCTX.cr2]
854 mov cr2, xBX
855
856 ; save host fs, gs, sysenter msr etc
857 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
858 push xAX ; save for the vmload after vmrun
859 vmsave
860
861 ; setup eax for VMLOAD
862 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
863
864 ;/* Restore Guest's general purpose registers. */
865 ;/* EAX is loaded from the VMCB by VMRUN */
866 mov ebx, [xSI + CPUMCTX.ebx]
867 mov ecx, [xSI + CPUMCTX.ecx]
868 mov edx, [xSI + CPUMCTX.edx]
869 mov edi, [xSI + CPUMCTX.edi]
870 mov ebp, [xSI + CPUMCTX.ebp]
871 mov esi, [xSI + CPUMCTX.esi]
872
873 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
874 clgi
875 sti
876
877 ; load guest fs, gs, sysenter msr etc
878 vmload
879 ; run the VM
880 vmrun
881
882 ;/* EAX is in the VMCB already; we can use it here. */
883
884 ; save guest fs, gs, sysenter msr etc
885 vmsave
886
887 ; load host fs, gs, sysenter msr etc
888 pop xAX ; pushed above
889 vmload
890
891 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
892 cli
893 stgi
894
895 pop xAX ; pCtx
896
897 mov [ss:xAX + CPUMCTX.ebx], ebx
898 mov [ss:xAX + CPUMCTX.ecx], ecx
899 mov [ss:xAX + CPUMCTX.edx], edx
900 mov [ss:xAX + CPUMCTX.esi], esi
901 mov [ss:xAX + CPUMCTX.edi], edi
902 mov [ss:xAX + CPUMCTX.ebp], ebp
903
904 ; Restore general purpose registers
905 MYPOPAD
906
907 mov eax, VINF_SUCCESS
908
909 popf
910 pop xBP
911%ifdef RT_ARCH_AMD64
912 add xSP, 4*xS
913%endif
914 ret
915ENDPROC SVMVMRun
916
917
918;;
919; Executes INVLPGA
920;
921; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
922; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
923;
924;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
925BEGINPROC SVMInvlpgA
926%ifdef RT_ARCH_AMD64
927 %ifdef ASM_CALL64_GCC
928 mov eax, edi ;; @todo 64-bit guest.
929 mov ecx, esi
930 %else
931 mov eax, ecx ;; @todo 64-bit guest.
932 mov ecx, edx
933 %endif
934%else
935 mov eax, [esp + 4]
936 mov ecx, [esp + 8]
937%endif
938 invlpga [xAX], ecx
939 ret
940ENDPROC SVMInvlpgA
941