VirtualBox — source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm @ 10507
Last change on this file since 10507 was 9915, checked in by vboxsync, 16 years ago: fixed build breaks

; $Id: HWACCMR0A.asm 9915 2008-06-25 12:29:49Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

%ifdef RT_ARCH_AMD64
 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    rdmsr
    push    rdx
    push    rax
    mov     edx, dword [xSI + %2 + 4]
    mov     eax, dword [xSI + %2]
    wrmsr
 %endmacro
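 ;
 ; For example, VMXR0StartVM64 below uses this as:
 ;     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 ; i.e. the host MSR value ends up on the stack and the guest value from the
 ; CPUMCTX structure (pointed to by xSI) is loaded in its place.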

 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [xSI + %2], eax
    mov     dword [xSI + %2 + 4], edx
    pop     rax
    pop     rdx
    wrmsr
 %endmacro

 ; Load the corresponding host MSR (trashes rdx & rcx)
 %macro LOADHOSTMSR 1
    mov     rcx, %1
    pop     rax
    pop     rdx
    wrmsr
 %endmacro
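 ;
 ; Note that LOADHOSTMSR/LOADHOSTMSREX pop the host value that the matching
 ; LOADGUESTMSR pushed, so they must be invoked in the reverse order of the
 ; LOADGUESTMSR invocations (see the entry/exit paths of VMXR0StartVM64 below).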

 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif

 ; trashes rax, rdx & rcx
 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris, OTOH, doesn't restore it, so we must save it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
    push    fs

    ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
    push    gs
 %endmacro

 ; trashes rax, rdx & rcx
 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

    pop     fs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch/vmresume
; * @param   pCtx       Guest context
; */
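;
; Arguments (see the context-pointer saves below):
;   x86:        fResume [ebp + 8],  pCtx [ebp + 12]
;   amd64 msc:  fResume rcx,        pCtx rdx
;   amd64 gcc:  fResume rdi,        pCtx rsi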
BEGINPROC VMXR0StartVM32
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlaunch_launch

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done  ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]  ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]  ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXR0StartVM32

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch/vmresume
; * @param   pCtx       Guest context
; */
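;
; Arguments (see the context-pointer saves below):
;   msc: fResume rcx, pCtx rdx;  gcc: fResume rdi, pCtx rsi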
BEGINPROC VMXR0StartVM64
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
    mov     rax, qword .vmlaunch64_done
    push    rax
    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
%else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     rbx, qword [xSI + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [xSI + CPUMCTX.eax]
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlaunch64_launch

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch64_done  ;/* here if vmresume detected a failure. */

.vmlaunch64_launch:
    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmxstart64_invalid_vmxon_ptr
    jz      near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]  ; pCtx

    mov     qword [xDI + CPUMCTX.eax], rax
    mov     qword [xDI + CPUMCTX.ebx], rbx
    mov     qword [xDI + CPUMCTX.ecx], rcx
    mov     qword [xDI + CPUMCTX.edx], rdx
    mov     qword [xDI + CPUMCTX.esi], rsi
    mov     qword [xDI + CPUMCTX.ebp], rbp
    mov     qword [xDI + CPUMCTX.r8],  r8
    mov     qword [xDI + CPUMCTX.r9],  r9
    mov     qword [xDI + CPUMCTX.r10], r10
    mov     qword [xDI + CPUMCTX.r11], r11
    mov     qword [xDI + CPUMCTX.r12], r12
    mov     qword [xDI + CPUMCTX.r13], r13
    mov     qword [xDI + CPUMCTX.r14], r14
    mov     qword [xDI + CPUMCTX.r15], r15

    pop     xAX             ; the guest edi we pushed above
    mov     qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K8_CSTAR
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K8_CSTAR
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K8_CSTAR
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXR0StartVM64

;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
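    ; VT-x reports failure through the flags: CF=1 means VMfailInvalid (no
    ; current VMCS), ZF=1 means VMfailValid (invalid VMCS field index).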
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
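    ; Same failure reporting as vmwrite above: CF=1 => VMfailInvalid, ZF=1 => VMfailValid.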
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
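    ; vmxon reports failure the same way: CF=1 => VMfailInvalid (e.g. a bad
    ; VMXON region address), ZF=1 => VMfailValid.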
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32 bits guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
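;
; Arguments: amd64 msc: rcx, rdx, r8;  amd64 gcc: rdi, rsi, rdx;  x86: on the stack.
; The amd64 entry code below pushes them to fake a cdecl stack frame, so the
; function body reads all three parameters via xBP on both architectures.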
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]  ; pCtx
    push    xSI             ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]  ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX             ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]  ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX             ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
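;
; Arguments: msc: rcx, rdx, r8;  gcc: rdi, rsi, rdx (pushed below to fake the
; same cdecl stack frame as SVMVMRun above).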
BEGINPROC SVMVMRun64
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]  ; pCtx
    push    rsi             ; push for saving the state at the end

    ; Restore CR2
    mov     rbx, [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + xS*2]  ; pVMCBHostPhys (64 bits physical address)
    push    rax             ; save for the vmload after vmrun
    vmsave

    ; setup rax for VMLOAD
    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]  ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax             ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    add     rsp, 4*xS
    ret
ENDPROC SVMVMRun64
%endif ; RT_ARCH_AMD64


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:rcx  gcc:rdi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID     msc:rdx  gcc:rsi  x86:[esp+0C]  Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
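    ; invlpga invalidates the TLB entry for the virtual address in rAX,
    ; tagged with the ASID given in ECX.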
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%else
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:ecx  gcc:edi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID     msc:edx  gcc:esi  x86:[esp+08]  Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi
    mov     ecx, esi
 %else
    movzx   rax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%endif ; GC_ARCH_BITS != 64