VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm @ 12227

Last change on this file since 12227 was 12071, checked in by vboxsync, 16 years ago

Consistency

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 27.5 KB
; $Id: HWACCMR0A.asm 12071 2008-09-03 16:05:46Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

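; Both pairs are invoked further down as MYPUSHSEGS xAX, ax / MYPOPSEGS xAX, ax:
; parameter 1 is the full width register that is pushed/popped and parameter 2
; its 16-bit alias used for the actual segment register moves.
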
%ifdef RT_ARCH_AMD64
 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    rdmsr
    push    rdx
    push    rax
    mov     edx, dword [xSI + %2 + 4]
    mov     eax, dword [xSI + %2]
    wrmsr
 %endmacro

 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [xSI + %2], eax
    mov     dword [xSI + %2 + 4], edx
    pop     rax
    pop     rdx
    wrmsr
 %endmacro

 ; Load the corresponding host MSR (trashes rdx & rcx)
 %macro LOADHOSTMSR 1
    mov     rcx, %1
    pop     rax
    pop     rdx
    wrmsr
 %endmacro
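
 ; LOADGUESTMSR leaves the host value on the stack (rdx, then rax) and the
 ; LOADHOSTMSR* macros pop it back, so every LOADGUESTMSR on VM entry must be
 ; undone by a matching LOADHOSTMSR/LOADHOSTMSREX on exit in reverse order:
 ;     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR    ; entry: pushed first
 ;     ...
 ;     LOADHOSTMSR  MSR_K8_LSTAR                      ; exit: popped last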

 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif

 ; trashes rax, rdx & rcx
 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode. Solaris OTOH doesn't, so we must save it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
    push    fs

    ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
    push    gs
 %endmacro

 ; trashes rax, rdx & rcx
 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

    pop     fs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch (false) / vmresume (true)
; * @param   pCtx       Guest context
; */
BEGINPROC VMXR0StartVM32
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0  ; fResume
    je      .vmlaunch_launch

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done  ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed
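
    ; VMX reports status through the flags: CF=1 (VMfailInvalid) means the
    ; working-VMCS pointer was invalid, ZF=1 (VMfailValid) means the launch
    ; failed and an error number was stored in the VM-instruction error field;
    ; both clear means success. The two branches above map this onto the
    ; VERR_VMX_* codes returned below.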

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX                             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXR0StartVM32
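
; The C-side declaration is presumably along these lines (a sketch inferred
; from the doc comment above; the real prototype lives in the VMM headers):
;     DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
; with fResume == 0 selecting VMLAUNCH and anything else VMRESUME (see the
; cmp xDI, 0 above).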

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch (false) / vmresume (true)
; * @param   pCtx       Guest context
; */
BEGINPROC VMXR0StartVM64
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
    mov     rax, qword .vmlaunch64_done
    push    rax
    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
%else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     rbx, qword [xSI + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [xSI + CPUMCTX.eax]
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8, qword [xSI + CPUMCTX.r8]
    mov     r9, qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp     xDI, 0  ; fResume
    je      .vmlaunch64_launch

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch64_done  ;/* here if vmresume detected a failure. */

.vmlaunch64_launch:
    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmxstart64_invalid_vmxon_ptr
    jz      near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     qword [xDI + CPUMCTX.eax], rax
    mov     qword [xDI + CPUMCTX.ebx], rbx
    mov     qword [xDI + CPUMCTX.ecx], rcx
    mov     qword [xDI + CPUMCTX.edx], rdx
    mov     qword [xDI + CPUMCTX.esi], rsi
    mov     qword [xDI + CPUMCTX.ebp], rbp
    mov     qword [xDI + CPUMCTX.r8], r8
    mov     qword [xDI + CPUMCTX.r9], r9
    mov     qword [xDI + CPUMCTX.r10], r10
    mov     qword [xDI + CPUMCTX.r11], r11
    mov     qword [xDI + CPUMCTX.r12], r12
    mov     qword [xDI + CPUMCTX.r13], r13
    mov     qword [xDI + CPUMCTX.r14], r14
    mov     qword [xDI + CPUMCTX.r15], r15

    pop     xAX     ; the guest edi we pushed above
    mov     qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX     ; saved LDTR
    lldt    ax

    pop     xSI     ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
    ; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_CSTAR
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX     ; saved LDTR
    lldt    ax

    pop     xSI     ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
    ; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR     ; pushed by LOADGUESTMSR on entry; must be popped here too
    LOADHOSTMSR MSR_K8_CSTAR
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX     ; saved LDTR
    lldt    ax

    pop     xSI     ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
    ; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR     ; pushed by LOADGUESTMSR on entry; must be popped here too
    LOADHOSTMSR MSR_K8_CSTAR
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXR0StartVM64

;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64
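
; A sketch of typical caller-side use (assumed, not from this file): idxField
; is a 32-bit VMCS encoding and the return code maps the flag convention
; described above, e.g.
;     rc = VMXWriteVMCS64(VMX_VMCS_HOST_RSP, uHostRsp);  /* uHostRsp: hypothetical */
;     /* rc: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD */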

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64
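
; The masking above (and rdi/rcx with 0ffffffffh) zero-extends the 32-bit
; idxField argument before it is used as a 64-bit vmread/vmwrite operand, and
; the xor pre-loads rax with VINF_SUCCESS (0) for the success path.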


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable
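
; Note: vmxon only takes a memory operand, which is why the physical address is
; pushed and the instruction given [rsp]. It also presupposes the caller has
; already set CR4.VMXE - otherwise vmxon raises #GP rather than setting CF - so
; CF=1 here only covers a bad VMXON region pointer.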

;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64

;/**
; * Executes VMPTRST
; *
; * @returns VBox status code
; * @param   pVMCS   x86: [esp + 04h]  gcc: rdi  msc: rcx   Address that will receive the current VMCS pointer
; */
;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivateVMCS
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
 %else
    vmptrst qword [rcx]
 %endif
%else
    vmptrst qword [esp+04h]
%endif
    xor     eax, eax
    ret
ENDPROC VMXGetActivateVMCS


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]                   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                                 ; save for the vmload after vmrun
    vmsave

    ; setup xAX for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX                                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX                                 ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun
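
; Judging from the fake cdecl frame above, the C-side declaration is presumably
; something like (a sketch, not taken from this file):
;     DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
; On AMD64 the three register arguments are pushed (plus a 0 standing in for
; the return-address slot) so the [xBP + xS*2 + ...] operands address the
; arguments identically on both architectures.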

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
BEGINPROC SVMVMRun64
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    rsi                                 ; push for saving the state at the end

    ; Restore CR2
    mov     rbx, [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + xS*2]                   ; pVMCBHostPhys (64 bits physical address)
    push    rax                                 ; save for the vmload after vmrun
    vmsave

    ; setup rax for VMLOAD
    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8, qword [xSI + CPUMCTX.r8]
    mov     r9, qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                                 ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8], r8
    mov     qword [rax + CPUMCTX.r9], r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    add     rsp, 4*xS
    ret
ENDPROC SVMVMRun64
%endif ; RT_ARCH_AMD64


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:rcx  gcc:rdi  x86:[esp+04]   Virtual page to invalidate
; @param   uASID     msc:rdx  gcc:rsi  x86:[esp+0C]   Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ; values also set the upper 32 bits of the register to zero. Consequently
    ; there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%else
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:ecx  gcc:edi  x86:[esp+04]   Virtual page to invalidate
; @param   uASID     msc:edx  gcc:esi  x86:[esp+08]   Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ; values also set the upper 32 bits of the register to zero. Consequently
    ; there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%endif ; GC_ARCH_BITS != 64