VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 11985

Last change on this file since 11985 was 11516, checked in by vboxsync, 16 years ago

Forgot IA32_MSR_STAR syncing.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 27.1 KB
1; $Id: HWACCMR0A.asm 11516 2008-08-20 14:21:18Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37 %define vmsave int3
38 %define vmload int3
39 %define vmrun int3
40 %define clgi int3
41 %define stgi int3
42 %macro invlpga 2,
43 int3
44 %endmacro
45%endif
46
47;; This is too risky wrt. stability, performance and correctness.
48;%define VBOX_WITH_DR6_EXPERIMENT 1
49
50;; @def MYPUSHAD
51; Macro generating an equivalent to pushad
52
53;; @def MYPOPAD
54; Macro generating an equivalent to popad
55
56;; @def MYPUSHSEGS
57; Macro saving all segment registers on the stack.
58; @param 1 full width register name
59; @param 2 16-bit register name for \a 1.
60
61;; @def MYPOPSEGS
62; Macro restoring all segment registers from the stack
63; @param 1 full width register name
64; @param 2 16-bit register name for \a 1.
65
66%ifdef RT_ARCH_AMD64
67 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
68 %macro LOADGUESTMSR 2
69 mov rcx, %1
70 rdmsr
71 push rdx
72 push rax
73 mov edx, dword [xSI + %2 + 4]
74 mov eax, dword [xSI + %2]
75 wrmsr
76 %endmacro
77
78 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
79 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
80 %macro LOADHOSTMSREX 2
81 mov rcx, %1
82 rdmsr
83 mov dword [xSI + %2], eax
84 mov dword [xSI + %2 + 4], edx
85 pop rax
86 pop rdx
87 wrmsr
88 %endmacro
89
90 ; Load the corresponding host MSR (trashes rdx & rcx)
91 %macro LOADHOSTMSR 1
92 mov rcx, %1
93 pop rax
94 pop rdx
95 wrmsr
96 %endmacro
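For illustration, this is what one save/restore pair expands to, using MSR_K8_LSTAR and the CPUMCTX.msrLSTAR field exactly as the 64-bit start code below does. Because the host values are pushed, the LOADHOSTMSR/LOADHOSTMSREX invocations must be made in the reverse order of the LOADGUESTMSR invocations, which is what the code below does:

 ; LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR expands to:
 mov rcx, MSR_K8_LSTAR
 rdmsr                                      ; read the host value into edx:eax
 push rdx                                   ; stash the host value on the stack
 push rax
 mov edx, dword [xSI + CPUMCTX.msrLSTAR + 4]
 mov eax, dword [xSI + CPUMCTX.msrLSTAR]
 wrmsr                                      ; load the guest value

 ; ... world switch ...

 ; LOADHOSTMSR MSR_K8_LSTAR expands to:
 mov rcx, MSR_K8_LSTAR
 pop rax                                    ; pop the host value saved above
 pop rdx
 wrmsr                                      ; restore the host value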
97
98 %ifdef ASM_CALL64_GCC
99 %macro MYPUSHAD 0
100 push r15
101 push r14
102 push r13
103 push r12
104 push rbx
105 %endmacro
106 %macro MYPOPAD 0
107 pop rbx
108 pop r12
109 pop r13
110 pop r14
111 pop r15
112 %endmacro
113
114 %else ; ASM_CALL64_MSC
115 %macro MYPUSHAD 0
116 push r15
117 push r14
118 push r13
119 push r12
120 push rbx
121 push rsi
122 push rdi
123 %endmacro
124 %macro MYPOPAD 0
125 pop rdi
126 pop rsi
127 pop rbx
128 pop r12
129 pop r13
130 pop r14
131 pop r15
132 %endmacro
133 %endif
134
135; trashes rax, rdx & rcx
136 %macro MYPUSHSEGS 2
137 mov %2, es
138 push %1
139 mov %2, ds
140 push %1
141
142 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, but Solaris doesn't, so we must save and restore it ourselves.
143 mov ecx, MSR_K8_FS_BASE
144 rdmsr
145 push rdx
146 push rax
147 push fs
148
149 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
150 mov ecx, MSR_K8_GS_BASE
151 rdmsr
152 push rdx
153 push rax
154 push gs
155 %endmacro
156
157; trashes rax, rdx & rcx
158 %macro MYPOPSEGS 2
159 ; Note: do not step through this code with a debugger!
160 pop gs
161 pop rax
162 pop rdx
163 mov ecx, MSR_K8_GS_BASE
164 wrmsr
165
166 pop fs
167 pop rax
168 pop rdx
169 mov ecx, MSR_K8_FS_BASE
170 wrmsr
171 ; Now it's safe to step again
172
173 pop %1
174 mov ds, %2
175 pop %1
176 mov es, %2
177 %endmacro
178
179%else ; RT_ARCH_X86
180 %macro MYPUSHAD 0
181 pushad
182 %endmacro
183 %macro MYPOPAD 0
184 popad
185 %endmacro
186
187 %macro MYPUSHSEGS 2
188 push ds
189 push es
190 push fs
191 push gs
192 %endmacro
193 %macro MYPOPSEGS 2
194 pop gs
195 pop fs
196 pop es
197 pop ds
198 %endmacro
199%endif
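Taken together, the macros above are meant to be used as strictly nested pairs around a world switch, which is exactly how the VMX and SVM entry points below use them. A minimal sketch of that bracketing:

 MYPUSHAD                 ; save host general purpose registers
 MYPUSHSEGS xAX, ax       ; save segment registers (and, on AMD64, the FS/GS base MSRs)
 ; ... load guest state, vmlaunch/vmresume or vmrun, save guest state ...
 MYPOPSEGS xAX, ax        ; restore segment registers (reverse order of the pushes)
 MYPOPAD                  ; restore host general purpose registers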
200
201
202BEGINCODE
203
204;/**
205; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
206; *
207; * @returns VBox status code
208; * @param fResume Whether to use vmresume (non-zero) or vmlaunch (zero)
209; * @param pCtx Guest context
210; */
211BEGINPROC VMXR0StartVM32
212 push xBP
213 mov xBP, xSP
214
215 pushf
216 cli
217
218 ;/* First we have to save some final CPU context registers. */
219%ifdef RT_ARCH_AMD64
220 mov rax, qword .vmlaunch_done
221 push rax
222%else
223 push .vmlaunch_done
224%endif
225 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
226 vmwrite xAX, [xSP]
227 ;/* Note: assumes success... */
228 add xSP, xS
229
230 ;/* Manual save and restore:
231 ; * - General purpose registers except RIP, RSP
232 ; *
233 ; * Trashed:
234 ; * - CR2 (we don't care)
235 ; * - LDTR (reset to 0)
236 ; * - DRx (presumably not changed at all)
237 ; * - DR7 (reset to 0x400)
238 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
239 ; *
240 ; */
241
242 ;/* Save all general purpose host registers. */
243 MYPUSHAD
244
245 ;/* Save the Guest CPU context pointer. */
246%ifdef RT_ARCH_AMD64
247 %ifdef ASM_CALL64_GCC
248 ; fResume already in rdi
249 ; pCtx already in rsi
250 %else
251 mov rdi, rcx ; fResume
252 mov rsi, rdx ; pCtx
253 %endif
254%else
255 mov edi, [ebp + 8] ; fResume
256 mov esi, [ebp + 12] ; pCtx
257%endif
258
259 ;/* Save segment registers */
260 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
261 MYPUSHSEGS xAX, ax
262
263 ; Save the pCtx pointer
264 push xSI
265
266 ; Save LDTR
267 xor eax, eax
268 sldt ax
269 push xAX
270
271 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
272 sub xSP, xS*2
273 sgdt [xSP]
274
275 sub xSP, xS*2
276 sidt [xSP]
277
278%ifdef VBOX_WITH_DR6_EXPERIMENT
279 ; Restore DR6 - experiment, not safe!
280 mov xBX, [xSI + CPUMCTX.dr6]
281 mov dr6, xBX
282%endif
283
284 ; Restore CR2
285 mov ebx, [xSI + CPUMCTX.cr2]
286 mov cr2, xBX
287
288 mov eax, VMX_VMCS_HOST_RSP
289 vmwrite xAX, xSP
290 ;/* Note: assumes success... */
291 ;/* Don't mess with ESP anymore!! */
292
293 ;/* Restore Guest's general purpose registers. */
294 mov eax, [xSI + CPUMCTX.eax]
295 mov ebx, [xSI + CPUMCTX.ebx]
296 mov ecx, [xSI + CPUMCTX.ecx]
297 mov edx, [xSI + CPUMCTX.edx]
298 mov ebp, [xSI + CPUMCTX.ebp]
299
300 ; resume or start?
301 cmp xDI, 0 ; fResume
302 je .vmlaunch_launch
303
304 ;/* Restore edi & esi. */
305 mov edi, [xSI + CPUMCTX.edi]
306 mov esi, [xSI + CPUMCTX.esi]
307
308 vmresume
309 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */
310
311.vmlaunch_launch:
312 ;/* Restore edi & esi. */
313 mov edi, [xSI + CPUMCTX.edi]
314 mov esi, [xSI + CPUMCTX.esi]
315
316 vmlaunch
317 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */
318
319ALIGNCODE(16)
320.vmlaunch_done:
321 jc near .vmxstart_invalid_vmxon_ptr
322 jz near .vmxstart_start_failed
323
324 ; Restore base and limit of the IDTR & GDTR
325 lidt [xSP]
326 add xSP, xS*2
327 lgdt [xSP]
328 add xSP, xS*2
329
330 push xDI
331 mov xDI, [xSP + xS * 2] ; pCtx
332
333 mov [ss:xDI + CPUMCTX.eax], eax
334 mov [ss:xDI + CPUMCTX.ebx], ebx
335 mov [ss:xDI + CPUMCTX.ecx], ecx
336 mov [ss:xDI + CPUMCTX.edx], edx
337 mov [ss:xDI + CPUMCTX.esi], esi
338 mov [ss:xDI + CPUMCTX.ebp], ebp
339%ifdef RT_ARCH_AMD64
340 pop xAX ; the guest edi we pushed above
341 mov dword [ss:xDI + CPUMCTX.edi], eax
342%else
343 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
344%endif
345
346%ifdef VBOX_WITH_DR6_EXPERIMENT
347 ; Save DR6 - experiment, not safe!
348 mov xAX, dr6
349 mov [ss:xDI + CPUMCTX.dr6], xAX
350%endif
351
352 pop xAX ; saved LDTR
353 lldt ax
354
355 add xSP, xS ; pCtx
356
357 ; Restore segment registers
358 MYPOPSEGS xAX, ax
359
360 ; Restore general purpose registers
361 MYPOPAD
362
363 mov eax, VINF_SUCCESS
364
365.vmstart_end:
366 popf
367 pop xBP
368 ret
369
370
371.vmxstart_invalid_vmxon_ptr:
372 ; Restore base and limit of the IDTR & GDTR
373 lidt [xSP]
374 add xSP, xS*2
375 lgdt [xSP]
376 add xSP, xS*2
377
378 pop xAX ; saved LDTR
379 lldt ax
380
381 add xSP, xS ; pCtx
382
383 ; Restore segment registers
384 MYPOPSEGS xAX, ax
385
386 ; Restore all general purpose host registers.
387 MYPOPAD
388 mov eax, VERR_VMX_INVALID_VMXON_PTR
389 jmp .vmstart_end
390
391.vmxstart_start_failed:
392 ; Restore base and limit of the IDTR & GDTR
393 lidt [xSP]
394 add xSP, xS*2
395 lgdt [xSP]
396 add xSP, xS*2
397
398 pop xAX ; saved LDTR
399 lldt ax
400
401 add xSP, xS ; pCtx
402
403 ; Restore segment registers
404 MYPOPSEGS xAX, ax
405
406 ; Restore all general purpose host registers.
407 MYPOPAD
408 mov eax, VERR_VMX_UNABLE_TO_START_VM
409 jmp .vmstart_end
410
411ENDPROC VMXR0StartVM32
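The file documents its other C-callable entry points with ;DECLASM comments; a matching prototype for this one, inferred from the parameter list above (the exact integer type of fResume is an assumption), is sketched below. The return value reflects the VM-entry flags: CF set after the failed VMLAUNCH/VMRESUME yields VERR_VMX_INVALID_VMXON_PTR, ZF set yields VERR_VMX_UNABLE_TO_START_VM, and otherwise VINF_SUCCESS is returned.

;DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx); /* assumed prototype, not part of the original file */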
412
413%ifdef RT_ARCH_AMD64
414;/**
415; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
416; *
417; * @returns VBox status code
418; * @param fResume Whether to use vmresume (non-zero) or vmlaunch (zero)
419; * @param pCtx Guest context
420; */
421BEGINPROC VMXR0StartVM64
422 push xBP
423 mov xBP, xSP
424
425 pushf
426 cli
427
428 ;/* First we have to save some final CPU context registers. */
429 mov rax, qword .vmlaunch64_done
430 push rax
431 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
432 vmwrite rax, [xSP]
433 ;/* Note: assumes success... */
434 add xSP, xS
435
436 ;/* Manual save and restore:
437 ; * - General purpose registers except RIP, RSP
438 ; *
439 ; * Trashed:
440 ; * - CR2 (we don't care)
441 ; * - LDTR (reset to 0)
442 ; * - DRx (presumably not changed at all)
443 ; * - DR7 (reset to 0x400)
444 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
445 ; *
446 ; */
447
448 ;/* Save all general purpose host registers. */
449 MYPUSHAD
450
451 ;/* Save the Guest CPU context pointer. */
452%ifdef ASM_CALL64_GCC
453 ; fResume already in rdi
454 ; pCtx already in rsi
455%else
456 mov rdi, rcx ; fResume
457 mov rsi, rdx ; pCtx
458%endif
459
460 ;/* Save segment registers */
461 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
462 MYPUSHSEGS xAX, ax
463
464 ; Save the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs and load the corresponding guest MSRs
465 ; @todo use the automatic load feature for MSRs
466 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
467 LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
468 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
469 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
470 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
471
472 ; Save the pCtx pointer
473 push xSI
474
475 ; Save LDTR
476 xor eax, eax
477 sldt ax
478 push xAX
479
480 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
481 sub xSP, xS*2
482 sgdt [xSP]
483
484 sub xSP, xS*2
485 sidt [xSP]
486
487%ifdef VBOX_WITH_DR6_EXPERIMENT
488 ; Restore DR6 - experiment, not safe!
489 mov xBX, [xSI + CPUMCTX.dr6]
490 mov dr6, xBX
491%endif
492
493 ; Restore CR2
494 mov rbx, qword [xSI + CPUMCTX.cr2]
495 mov cr2, rbx
496
497 mov eax, VMX_VMCS_HOST_RSP
498 vmwrite xAX, xSP
499 ;/* Note: assumes success... */
500 ;/* Don't mess with ESP anymore!! */
501
502 ;/* Restore Guest's general purpose registers. */
503 mov rax, qword [xSI + CPUMCTX.eax]
504 mov rbx, qword [xSI + CPUMCTX.ebx]
505 mov rcx, qword [xSI + CPUMCTX.ecx]
506 mov rdx, qword [xSI + CPUMCTX.edx]
507 mov rbp, qword [xSI + CPUMCTX.ebp]
508 mov r8, qword [xSI + CPUMCTX.r8]
509 mov r9, qword [xSI + CPUMCTX.r9]
510 mov r10, qword [xSI + CPUMCTX.r10]
511 mov r11, qword [xSI + CPUMCTX.r11]
512 mov r12, qword [xSI + CPUMCTX.r12]
513 mov r13, qword [xSI + CPUMCTX.r13]
514 mov r14, qword [xSI + CPUMCTX.r14]
515 mov r15, qword [xSI + CPUMCTX.r15]
516
517 ; resume or start?
518 cmp xDI, 0 ; fResume
519 je .vmlaunch64_launch
520
521 ;/* Restore rdi & rsi. */
522 mov rdi, qword [xSI + CPUMCTX.edi]
523 mov rsi, qword [xSI + CPUMCTX.esi]
524
525 vmresume
526 jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */
527
528.vmlaunch64_launch:
529 ;/* Restore rdi & rsi. */
530 mov rdi, qword [xSI + CPUMCTX.edi]
531 mov rsi, qword [xSI + CPUMCTX.esi]
532
533 vmlaunch
534 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */
535
536ALIGNCODE(16)
537.vmlaunch64_done:
538 jc near .vmxstart64_invalid_vmxon_ptr
539 jz near .vmxstart64_start_failed
540
541 ; Restore base and limit of the IDTR & GDTR
542 lidt [xSP]
543 add xSP, xS*2
544 lgdt [xSP]
545 add xSP, xS*2
546
547 push xDI
548 mov xDI, [xSP + xS * 2] ; pCtx
549
550 mov qword [xDI + CPUMCTX.eax], rax
551 mov qword [xDI + CPUMCTX.ebx], rbx
552 mov qword [xDI + CPUMCTX.ecx], rcx
553 mov qword [xDI + CPUMCTX.edx], rdx
554 mov qword [xDI + CPUMCTX.esi], rsi
555 mov qword [xDI + CPUMCTX.ebp], rbp
556 mov qword [xDI + CPUMCTX.r8], r8
557 mov qword [xDI + CPUMCTX.r9], r9
558 mov qword [xDI + CPUMCTX.r10], r10
559 mov qword [xDI + CPUMCTX.r11], r11
560 mov qword [xDI + CPUMCTX.r12], r12
561 mov qword [xDI + CPUMCTX.r13], r13
562 mov qword [xDI + CPUMCTX.r14], r14
563 mov qword [xDI + CPUMCTX.r15], r15
564
565 pop xAX ; the guest edi we pushed above
566 mov qword [xDI + CPUMCTX.edi], rax
567
568%ifdef VBOX_WITH_DR6_EXPERIMENT
569 ; Save DR6 - experiment, not safe!
570 mov xAX, dr6
571 mov [xDI + CPUMCTX.dr6], xAX
572%endif
573
574 pop xAX ; saved LDTR
575 lldt ax
576
577 pop xSI ; pCtx (needed in rsi by the macros below)
578
579 ; Restore the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
580 ; @todo use the automatic load feature for MSRs
581 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
582 LOADHOSTMSR MSR_K8_SF_MASK
583 LOADHOSTMSR MSR_K6_STAR
584 LOADHOSTMSR MSR_K8_CSTAR
585 LOADHOSTMSR MSR_K8_LSTAR
586
587 ; Restore segment registers
588 MYPOPSEGS xAX, ax
589
590 ; Restore general purpose registers
591 MYPOPAD
592
593 mov eax, VINF_SUCCESS
594
595.vmstart64_end:
596 popf
597 pop xBP
598 ret
599
600
601.vmxstart64_invalid_vmxon_ptr:
602 ; Restore base and limit of the IDTR & GDTR
603 lidt [xSP]
604 add xSP, xS*2
605 lgdt [xSP]
606 add xSP, xS*2
607
608 pop xAX ; saved LDTR
609 lldt ax
610
611 pop xSI ; pCtx (needed in rsi by the macros below)
612
613 ; Restore the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
614 ; @todo use the automatic load feature for MSRs
615 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
616 LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR ; also pop the host STAR pushed by LOADGUESTMSR above, or the stack ends up unbalanced
617 LOADHOSTMSR MSR_K8_CSTAR
618 LOADHOSTMSR MSR_K8_LSTAR
619
620 ; Restore segment registers
621 MYPOPSEGS xAX, ax
622
623 ; Restore all general purpose host registers.
624 MYPOPAD
625 mov eax, VERR_VMX_INVALID_VMXON_PTR
626 jmp .vmstart64_end
627
628.vmxstart64_start_failed:
629 ; Restore base and limit of the IDTR & GDTR
630 lidt [xSP]
631 add xSP, xS*2
632 lgdt [xSP]
633 add xSP, xS*2
634
635 pop xAX ; saved LDTR
636 lldt ax
637
638 pop xSI ; pCtx (needed in rsi by the macros below)
639
640 ; Restore the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
641 ; @todo use the automatic load feature for MSRs
642 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
643 LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR ; also pop the host STAR pushed by LOADGUESTMSR above, or the stack ends up unbalanced
644 LOADHOSTMSR MSR_K8_CSTAR
645 LOADHOSTMSR MSR_K8_LSTAR
646
647 ; Restore segment registers
648 MYPOPSEGS xAX, ax
649
650 ; Restore all general purpose host registers.
651 MYPOPAD
652 mov eax, VERR_VMX_UNABLE_TO_START_VM
653 jmp .vmstart64_end
654ENDPROC VMXR0StartVM64
655
656;/**
657; * Executes VMWRITE
658; *
659; * @returns VBox status code
660; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
661; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
662; */
663BEGINPROC VMXWriteVMCS64
664%ifdef ASM_CALL64_GCC
665 mov eax, 0ffffffffh
666 and rdi, rax
667 xor rax, rax
668 vmwrite rdi, rsi
669%else
670 mov eax, 0ffffffffh
671 and rcx, rax
672 xor rax, rax
673 vmwrite rcx, rdx
674%endif
675 jnc .valid_vmcs
676 mov eax, VERR_VMX_INVALID_VMCS_PTR
677 ret
678.valid_vmcs:
679 jnz .the_end
680 mov eax, VERR_VMX_INVALID_VMCS_FIELD
681.the_end:
682 ret
683ENDPROC VMXWriteVMCS64
684
685;/**
686; * Executes VMREAD
687; *
688; * @returns VBox status code
689; * @param idxField VMCS index
690; * @param pData Ptr to store VM field value
691; */
692;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
693BEGINPROC VMXReadVMCS64
694%ifdef ASM_CALL64_GCC
695 mov eax, 0ffffffffh
696 and rdi, rax
697 xor rax, rax
698 vmread [rsi], rdi
699%else
700 mov eax, 0ffffffffh
701 and rcx, rax
702 xor rax, rax
703 vmread [rdx], rcx
704%endif
705 jnc .valid_vmcs
706 mov eax, VERR_VMX_INVALID_VMCS_PTR
707 ret
708.valid_vmcs:
709 jnz .the_end
710 mov eax, VERR_VMX_INVALID_VMCS_FIELD
711.the_end:
712 ret
713ENDPROC VMXReadVMCS64
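Both VMXWriteVMCS64 and VMXReadVMCS64 above translate the VMX instruction outcome into VBox status codes in the same way; for reference, the mapping implemented by the flag tests is:

 ; CF=1 (VMfailInvalid)  -> VERR_VMX_INVALID_VMCS_PTR    (no current VMCS)
 ; ZF=1 (VMfailValid)    -> VERR_VMX_INVALID_VMCS_FIELD  (bad or unsupported field encoding)
 ; CF=0 and ZF=0         -> VINF_SUCCESS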
714
715
716;/**
717; * Executes VMXON
718; *
719; * @returns VBox status code
720; * @param HCPhysVMXOn Physical address of VMXON structure
721; */
722;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
723BEGINPROC VMXEnable
724%ifdef RT_ARCH_AMD64
725 xor rax, rax
726 %ifdef ASM_CALL64_GCC
727 push rdi
728 %else
729 push rcx
730 %endif
731 vmxon [rsp]
732%else
733 xor eax, eax
734 vmxon [esp + 4]
735%endif
736 jnc .good
737 mov eax, VERR_VMX_INVALID_VMXON_PTR
738 jmp .the_end
739
740.good:
741 jnz .the_end
742 mov eax, VERR_VMX_GENERIC
743
744.the_end:
745%ifdef RT_ARCH_AMD64
746 add rsp, 8
747%endif
748 ret
749ENDPROC VMXEnable
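VMXEnable only issues VMXON itself; a rough sketch of what the caller is expected to have prepared beforehand, following the general VMX requirements (the constant names X86_CR4_VMXE and MSR_IA32_VMX_BASIC_INFO are assumptions here, and xBX is assumed to already hold the ring-0 virtual address of the VMXON page whose physical address is then passed in):

 mov xAX, cr4
 or xAX, X86_CR4_VMXE        ; CR4.VMXE must be set before VMXON (assumed constant name)
 mov cr4, xAX

 mov ecx, MSR_IA32_VMX_BASIC_INFO ; assumed constant name for IA32_VMX_BASIC
 rdmsr                            ; eax[30:0] = VMCS revision identifier
 mov dword [xBX], eax             ; the VMXON region must begin with the revision identifier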
750
751
752;/**
753; * Executes VMXOFF
754; */
755;DECLASM(void) VMXDisable(void);
756BEGINPROC VMXDisable
757 vmxoff
758 ret
759ENDPROC VMXDisable
760
761
762;/**
763; * Executes VMCLEAR
764; *
765; * @returns VBox status code
766; * @param HCPhysVMCS Physical address of VM control structure
767; */
768;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
769BEGINPROC VMXClearVMCS
770%ifdef RT_ARCH_AMD64
771 xor rax, rax
772 %ifdef ASM_CALL64_GCC
773 push rdi
774 %else
775 push rcx
776 %endif
777 vmclear [rsp]
778%else
779 xor eax, eax
780 vmclear [esp + 4]
781%endif
782 jnc .the_end
783 mov eax, VERR_VMX_INVALID_VMCS_PTR
784.the_end:
785%ifdef RT_ARCH_AMD64
786 add rsp, 8
787%endif
788 ret
789ENDPROC VMXClearVMCS
790
791
792;/**
793; * Executes VMPTRLD
794; *
795; * @returns VBox status code
796; * @param HCPhysVMCS Physical address of VMCS structure
797; */
798;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
799BEGINPROC VMXActivateVMCS
800%ifdef RT_ARCH_AMD64
801 xor rax, rax
802 %ifdef ASM_CALL64_GCC
803 push rdi
804 %else
805 push rcx
806 %endif
807 vmptrld [rsp]
808%else
809 xor eax, eax
810 vmptrld [esp + 4]
811%endif
812 jnc .the_end
813 mov eax, VERR_VMX_INVALID_VMCS_PTR
814.the_end:
815%ifdef RT_ARCH_AMD64
816 add rsp, 8
817%endif
818 ret
819ENDPROC VMXActivateVMCS
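For orientation, the usual order in which ring-0 code would call the VMX helpers defined in this file is sketched below; this is the generic VMX flow, not a quotation of the C side:

 ; VMXEnable(HCPhysVMXOn)                    ; vmxon   - enter VMX operation
 ; VMXClearVMCS(HCPhysVMCS)                  ; vmclear - put the VMCS into the clear state
 ; VMXActivateVMCS(HCPhysVMCS)               ; vmptrld - make it the current VMCS
 ; VMXWriteVMCS64(...) / VMXReadVMCS64(...)  ; set up and inspect VMCS fields
 ; VMXR0StartVM32(fResume, pCtx) or VMXR0StartVM64(fResume, pCtx)
 ; VMXDisable()                              ; vmxoff  - leave VMX operation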
820
821%endif ; RT_ARCH_AMD64
822
823
824;/**
825; * Prepares for and executes VMRUN (32-bit guests)
826; *
827; * @returns VBox status code
828; * @param pVMCBHostPhys Physical address of host VMCB
829; * @param pVMCBPhys Physical address of guest VMCB
830; * @param pCtx Guest context
831; */
832BEGINPROC SVMVMRun
833%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
834 %ifdef ASM_CALL64_GCC
835 push rdx
836 push rsi
837 push rdi
838 %else
839 push r8
840 push rdx
841 push rcx
842 %endif
843 push 0
844%endif
845 push xBP
846 mov xBP, xSP
847 pushf
848
849 ;/* Manual save and restore:
850 ; * - General purpose registers except RIP, RSP, RAX
851 ; *
852 ; * Trashed:
853 ; * - CR2 (we don't care)
854 ; * - LDTR (reset to 0)
855 ; * - DRx (presumably not changed at all)
856 ; * - DR7 (reset to 0x400)
857 ; */
858
859 ;/* Save all general purpose host registers. */
860 MYPUSHAD
861
862 ;/* Save the Guest CPU context pointer. */
863 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
864 push xSI ; push for saving the state at the end
865
866 ; Restore CR2
867 mov ebx, [xSI + CPUMCTX.cr2]
868 mov cr2, xBX
869
870 ; save host fs, gs, sysenter msr etc
871 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
872 push xAX ; save for the vmload after vmrun
873 vmsave
874
875 ; setup eax for VMLOAD
876 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
877
878 ;/* Restore Guest's general purpose registers. */
879 ;/* EAX is loaded from the VMCB by VMRUN */
880 mov ebx, [xSI + CPUMCTX.ebx]
881 mov ecx, [xSI + CPUMCTX.ecx]
882 mov edx, [xSI + CPUMCTX.edx]
883 mov edi, [xSI + CPUMCTX.edi]
884 mov ebp, [xSI + CPUMCTX.ebp]
885 mov esi, [xSI + CPUMCTX.esi]
886
887 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
888 clgi
889 sti
890
891 ; load guest fs, gs, sysenter msr etc
892 vmload
893 ; run the VM
894 vmrun
895
896 ;/* EAX is in the VMCB already; we can use it here. */
897
898 ; save guest fs, gs, sysenter msr etc
899 vmsave
900
901 ; load host fs, gs, sysenter msr etc
902 pop xAX ; pushed above
903 vmload
904
905 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
906 cli
907 stgi
908
909 pop xAX ; pCtx
910
911 mov [ss:xAX + CPUMCTX.ebx], ebx
912 mov [ss:xAX + CPUMCTX.ecx], ecx
913 mov [ss:xAX + CPUMCTX.edx], edx
914 mov [ss:xAX + CPUMCTX.esi], esi
915 mov [ss:xAX + CPUMCTX.edi], edi
916 mov [ss:xAX + CPUMCTX.ebp], ebp
917
918 ; Restore general purpose registers
919 MYPOPAD
920
921 mov eax, VINF_SUCCESS
922
923 popf
924 pop xBP
925%ifdef RT_ARCH_AMD64
926 add xSP, 4*xS
927%endif
928 ret
929ENDPROC SVMVMRun
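Two notes on the prologue above: the extra push 0 plants a dummy return-address slot so that, together with the three pushed arguments, the AMD64 entry sees the same [xBP + xS*2 + n*RTHCPHYS_CB] argument layout as the x86 cdecl caller. A C-side prototype consistent with the parameters documented above (the parameter names are taken from the in-code comments; the exact typedefs are assumptions) would be:

;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx); /* assumed prototype, not part of the original file */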
930
931%ifdef RT_ARCH_AMD64
932;/**
933; * Prepares for and executes VMRUN (64-bit guests)
934; *
935; * @returns VBox status code
936; * @param pVMCBHostPhys Physical address of host VMCB
937; * @param pVMCBPhys Physical address of guest VMCB
938; * @param pCtx Guest context
939; */
940BEGINPROC SVMVMRun64
941 ; fake a cdecl stack frame
942 %ifdef ASM_CALL64_GCC
943 push rdx
944 push rsi
945 push rdi
946 %else
947 push r8
948 push rdx
949 push rcx
950 %endif
951 push 0
952 push rbp
953 mov rbp, rsp
954 pushf
955
956 ;/* Manual save and restore:
957 ; * - General purpose registers except RIP, RSP, RAX
958 ; *
959 ; * Trashed:
960 ; * - CR2 (we don't care)
961 ; * - LDTR (reset to 0)
962 ; * - DRx (presumably not changed at all)
963 ; * - DR7 (reset to 0x400)
964 ; */
965
966 ;/* Save all general purpose host registers. */
967 MYPUSHAD
968
969 ;/* Save the Guest CPU context pointer. */
970 mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
971 push rsi ; push for saving the state at the end
972
973 ; Restore CR2
974 mov rbx, [rsi + CPUMCTX.cr2]
975 mov cr2, rbx
976
977 ; save host fs, gs, sysenter msr etc
978 mov rax, [rbp + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
979 push rax ; save for the vmload after vmrun
980 vmsave
981
982 ; setup eax for VMLOAD
983 mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
984
985 ;/* Restore Guest's general purpose registers. */
986 ;/* RAX is loaded from the VMCB by VMRUN */
987 mov rbx, qword [xSI + CPUMCTX.ebx]
988 mov rcx, qword [xSI + CPUMCTX.ecx]
989 mov rdx, qword [xSI + CPUMCTX.edx]
990 mov rdi, qword [xSI + CPUMCTX.edi]
991 mov rbp, qword [xSI + CPUMCTX.ebp]
992 mov r8, qword [xSI + CPUMCTX.r8]
993 mov r9, qword [xSI + CPUMCTX.r9]
994 mov r10, qword [xSI + CPUMCTX.r10]
995 mov r11, qword [xSI + CPUMCTX.r11]
996 mov r12, qword [xSI + CPUMCTX.r12]
997 mov r13, qword [xSI + CPUMCTX.r13]
998 mov r14, qword [xSI + CPUMCTX.r14]
999 mov r15, qword [xSI + CPUMCTX.r15]
1000 mov rsi, qword [xSI + CPUMCTX.esi]
1001
1002 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
1003 clgi
1004 sti
1005
1006 ; load guest fs, gs, sysenter msr etc
1007 vmload
1008 ; run the VM
1009 vmrun
1010
1011 ;/* RAX is in the VMCB already; we can use it here. */
1012
1013 ; save guest fs, gs, sysenter msr etc
1014 vmsave
1015
1016 ; load host fs, gs, sysenter msr etc
1017 pop rax ; pushed above
1018 vmload
1019
1020 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1021 cli
1022 stgi
1023
1024 pop rax ; pCtx
1025
1026 mov qword [rax + CPUMCTX.ebx], rbx
1027 mov qword [rax + CPUMCTX.ecx], rcx
1028 mov qword [rax + CPUMCTX.edx], rdx
1029 mov qword [rax + CPUMCTX.esi], rsi
1030 mov qword [rax + CPUMCTX.edi], rdi
1031 mov qword [rax + CPUMCTX.ebp], rbp
1032 mov qword [rax + CPUMCTX.r8], r8
1033 mov qword [rax + CPUMCTX.r9], r9
1034 mov qword [rax + CPUMCTX.r10], r10
1035 mov qword [rax + CPUMCTX.r11], r11
1036 mov qword [rax + CPUMCTX.r12], r12
1037 mov qword [rax + CPUMCTX.r13], r13
1038 mov qword [rax + CPUMCTX.r14], r14
1039 mov qword [rax + CPUMCTX.r15], r15
1040
1041 ; Restore general purpose registers
1042 MYPOPAD
1043
1044 mov eax, VINF_SUCCESS
1045
1046 popf
1047 pop rbp
1048 add rsp, 4*xS
1049 ret
1050ENDPROC SVMVMRun64
1051%endif ; RT_ARCH_AMD64
1052
1053
1054%if GC_ARCH_BITS == 64
1055;;
1056; Executes INVLPGA
1057;
1058; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
1059; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
1060;
1061;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1062BEGINPROC SVMInvlpgA
1063%ifdef RT_ARCH_AMD64
1064 %ifdef ASM_CALL64_GCC
1065 mov rax, rdi
1066 mov rcx, rsi
1067 %else
1068 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1069 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1070 ; values also set the upper 32 bits of the register to zero. Consequently
1071 ; there is no need for an instruction movzlq.''
1072 mov eax, ecx
1073 mov rcx, rdx
1074 %endif
1075%else
1076 mov eax, [esp + 4]
1077 mov ecx, [esp + 0Ch]
1078%endif
1079 invlpga [xAX], ecx
1080 ret
1081ENDPROC SVMInvlpgA
1082
1083%else
1084;;
1085; Executes INVLPGA
1086;
1087; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
1088; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
1089;
1090;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1091BEGINPROC SVMInvlpgA
1092%ifdef RT_ARCH_AMD64
1093 %ifdef ASM_CALL64_GCC
1094 movzx rax, edi
1095 mov ecx, esi
1096 %else
1097 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1098 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1099 ; values also set the upper 32 bits of the register to zero. Consequently
1100 ; there is no need for an instruction movzlq.''
1101 mov eax, ecx
1102 mov ecx, edx
1103 %endif
1104%else
1105 mov eax, [esp + 4]
1106 mov ecx, [esp + 8]
1107%endif
1108 invlpga [xAX], ecx
1109 ret
1110ENDPROC SVMInvlpgA
1111
1112%endif ; GC_ARCH_BITS != 64
1113