VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 13908

Last change on this file since 13908 was 13908, checked in by vboxsync, 16 years ago

Fixed include order, a bunch of GCC 3.3 warnings, OS/2 build.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 29.8 KB
1; $Id: HWACCMR0A.asm 13908 2008-11-06 11:53:47Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37 %define vmsave int3
38 %define vmload int3
39 %define vmrun int3
40 %define clgi int3
41 %define stgi int3
42 %macro invlpga 2,
43 int3
44 %endmacro
45%endif
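; Net effect of the stubs above: an OS/2 build assembles every VT-x/AMD-V
; mnemonic in this file into int3, e.g.
;       vmwrite xAX, [xSP]      ; expands to: int3
; so reaching any of this code on OS/2 traps with a breakpoint instead of the
; NASM used for the OMF build choking on unknown mnemonics (see the @todo above).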
46
47;; This is too risky wrt. stability, performance and correctness.
48;%define VBOX_WITH_DR6_EXPERIMENT 1
49
50;; @def MYPUSHAD
51; Macro generating an equivalent to pushad
52
53;; @def MYPOPAD
54; Macro generating an equivalent to popad
55
56;; @def MYPUSHSEGS
57; Macro saving all segment registers on the stack.
58; @param 1 full width register name
59; @param 2 16-bit register name for \a 1.
60
61;; @def MYPOPSEGS
62; Macro restoring all segment registers from the stack
63; @param 1 full width register name
64; @param 2 16-bit register name for \a 1.
65
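; Usage sketch (mirroring how the macros are used below): the same register
; pair must be passed to the push and pop variants, and every MYPUSH* must be
; undone by its MYPOP* counterpart in reverse order, e.g.
;       MYPUSHAD
;       MYPUSHSEGS xAX, ax
;       ...
;       MYPOPSEGS  xAX, ax
;       MYPOPAD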
66%ifdef RT_ARCH_AMD64
67 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
68 %macro LOADGUESTMSR 2
69 mov rcx, %1
70 rdmsr
71 push rdx
72 push rax
73 mov edx, dword [xSI + %2 + 4]
74 mov eax, dword [xSI + %2]
75 wrmsr
76 %endmacro
77
78 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
79 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
80 %macro LOADHOSTMSREX 2
81 mov rcx, %1
82 rdmsr
83 mov dword [xSI + %2], eax
84 mov dword [xSI + %2 + 4], edx
85 pop rax
86 pop rdx
87 wrmsr
88 %endmacro
89
90 ; Load the corresponding host MSR (trashes rdx & rcx)
91 %macro LOADHOSTMSR 1
92 mov rcx, %1
93 pop rax
94 pop rdx
95 wrmsr
96 %endmacro
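; Pairing note: LOADGUESTMSR pushes the host value as rdx then rax, while
; LOADHOSTMSR/LOADHOSTMSREX pop rax then rdx, so the restore sequence must
; name the MSRs in exactly the reverse order of the save sequence (as the
; VMXR0StartVM64 exit paths below do).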
97
98 %ifdef ASM_CALL64_GCC
99 %macro MYPUSHAD 0
100 push r15
101 push r14
102 push r13
103 push r12
104 push rbx
105 %endmacro
106 %macro MYPOPAD 0
107 pop rbx
108 pop r12
109 pop r13
110 pop r14
111 pop r15
112 %endmacro
113
114 %else ; ASM_CALL64_MSC
115 %macro MYPUSHAD 0
116 push r15
117 push r14
118 push r13
119 push r12
120 push rbx
121 push rsi
122 push rdi
123 %endmacro
124 %macro MYPOPAD 0
125 pop rdi
126 pop rsi
127 pop rbx
128 pop r12
129 pop r13
130 pop r14
131 pop r15
132 %endmacro
133 %endif
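; The MSC variant additionally saves rsi/rdi because they are non-volatile
; (callee saved) registers in the Microsoft x64 calling convention, whereas
; the SysV ABI used with GCC treats them as volatile argument registers.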
134
135; trashes rax, rdx & rcx
136 %macro MYPUSHSEGS 2
137 mov %2, es
138 push %1
139 mov %2, ds
140 push %1
141
142 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save and restore it ourselves.
143 mov ecx, MSR_K8_FS_BASE
144 rdmsr
145 push rdx
146 push rax
147 push fs
148
149 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
150 mov ecx, MSR_K8_GS_BASE
151 rdmsr
152 push rdx
153 push rax
154 push gs
155 %endmacro
156
157; trashes rax, rdx & rcx
158 %macro MYPOPSEGS 2
159 ; Note: do not step through this code with a debugger!
160 pop gs
161 pop rax
162 pop rdx
163 mov ecx, MSR_K8_GS_BASE
164 wrmsr
165
166 pop fs
167 pop rax
168 pop rdx
169 mov ecx, MSR_K8_FS_BASE
170 wrmsr
171 ; Now it's safe to step again
172
173 pop %1
174 mov ds, %2
175 pop %1
176 mov es, %2
177 %endmacro
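; About the "do not step" warning above: 'pop gs' reloads the hidden GS base
; from the descriptor table, and only the following wrmsr to MSR_K8_GS_BASE
; puts the host kernel's real base back; a debug trap taken in between would
; run host code with a bogus GS base (likewise for FS on hosts that use it).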
178
179%else ; RT_ARCH_X86
180 %macro MYPUSHAD 0
181 pushad
182 %endmacro
183 %macro MYPOPAD 0
184 popad
185 %endmacro
186
187 %macro MYPUSHSEGS 2
188 push ds
189 push es
190 push fs
191 push gs
192 %endmacro
193 %macro MYPOPSEGS 2
194 pop gs
195 pop fs
196 pop es
197 pop ds
198 %endmacro
199%endif
200
201
202BEGINCODE
203
204;/**
205; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
206; *
207; * @returns VBox status code
208; * @param fResume vmlaunch/vmresume
209; * @param pCtx Guest context
210; */
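; Assumed C prototype (not present in this file; inferred from the parameter
; list above and the DECLASM comments used for the helpers further down):
;DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);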
211BEGINPROC VMXR0StartVM32
212 push xBP
213 mov xBP, xSP
214
215 pushf
216 cli
217
218 ;/* First we have to save some final CPU context registers. */
219%ifdef RT_ARCH_AMD64
220 mov rax, qword .vmlaunch_done
221 push rax
222%else
223 push .vmlaunch_done
224%endif
225 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
226 vmwrite xAX, [xSP]
227 ;/* Note: assumes success... */
228 add xSP, xS
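; VMX_VMCS_HOST_RIP is the instruction pointer the CPU loads on every
; VM-exit, so pointing it at .vmlaunch_done means execution continues there
; both after a regular VM-exit and after a failed VMLAUNCH/VMRESUME (which
; simply falls through to the jmp below).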
229
230 ;/* Manual save and restore:
231 ; * - General purpose registers except RIP, RSP
232 ; *
233 ; * Trashed:
234 ; * - CR2 (we don't care)
235 ; * - LDTR (reset to 0)
236 ; * - DRx (presumably not changed at all)
237 ; * - DR7 (reset to 0x400)
238 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
239 ; *
240 ; */
241
242 ;/* Save all general purpose host registers. */
243 MYPUSHAD
244
245 ;/* Save the Guest CPU context pointer. */
246%ifdef RT_ARCH_AMD64
247 %ifdef ASM_CALL64_GCC
248 ; fResume already in rdi
249 ; pCtx already in rsi
250 %else
251 mov rdi, rcx ; fResume
252 mov rsi, rdx ; pCtx
253 %endif
254%else
255 mov edi, [ebp + 8] ; fResume
256 mov esi, [ebp + 12] ; pCtx
257%endif
258
259 ;/* Save segment registers */
260 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
261 MYPUSHSEGS xAX, ax
262
263 ; Save the pCtx pointer
264 push xSI
265
266 ; Save LDTR
267 xor eax, eax
268 sldt ax
269 push xAX
270
271 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
272 sub xSP, xS*2
273 sgdt [xSP]
274
275 sub xSP, xS*2
276 sidt [xSP]
277
278%ifdef VBOX_WITH_DR6_EXPERIMENT
279 ; Restore DR6 - experiment, not safe!
280 mov xBX, [xSI + CPUMCTX.dr6]
281 mov dr6, xBX
282%endif
283
284 ; Restore CR2
285 mov ebx, [xSI + CPUMCTX.cr2]
286 mov cr2, xBX
287
288 mov eax, VMX_VMCS_HOST_RSP
289 vmwrite xAX, xSP
290 ;/* Note: assumes success... */
291 ;/* Don't mess with ESP anymore!! */
292
293 ;/* Restore Guest's general purpose registers. */
294 mov eax, [xSI + CPUMCTX.eax]
295 mov ebx, [xSI + CPUMCTX.ebx]
296 mov ecx, [xSI + CPUMCTX.ecx]
297 mov edx, [xSI + CPUMCTX.edx]
298 mov ebp, [xSI + CPUMCTX.ebp]
299
300 ; resume or start?
301 cmp xDI, 0 ; fResume
302 je .vmlaunch_launch
303
304 ;/* Restore edi & esi. */
305 mov edi, [xSI + CPUMCTX.edi]
306 mov esi, [xSI + CPUMCTX.esi]
307
308 vmresume
309 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */
310
311.vmlaunch_launch:
312 ;/* Restore edi & esi. */
313 mov edi, [xSI + CPUMCTX.edi]
314 mov esi, [xSI + CPUMCTX.esi]
315
316 vmlaunch
317 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */
318
319ALIGNCODE(16)
320.vmlaunch_done:
321 jc near .vmxstart_invalid_vmxon_ptr
322 jz near .vmxstart_start_failed
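; Failure convention after VMLAUNCH/VMRESUME (Intel SDM): CF=1 means
; VMfailInvalid (no current VMCS / bad VMXON pointer), ZF=1 means VMfailValid
; (the VM-instruction error field has the details); if neither is set we got
; here through a normal VM-exit via HOST_RIP.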
323
324 ; Restore base and limit of the IDTR & GDTR
325 lidt [xSP]
326 add xSP, xS*2
327 lgdt [xSP]
328 add xSP, xS*2
329
330 push xDI
331 mov xDI, [xSP + xS * 2] ; pCtx
332
333 mov [ss:xDI + CPUMCTX.eax], eax
334 mov [ss:xDI + CPUMCTX.ebx], ebx
335 mov [ss:xDI + CPUMCTX.ecx], ecx
336 mov [ss:xDI + CPUMCTX.edx], edx
337 mov [ss:xDI + CPUMCTX.esi], esi
338 mov [ss:xDI + CPUMCTX.ebp], ebp
339%ifdef RT_ARCH_AMD64
340 pop xAX ; the guest edi we pushed above
341 mov dword [ss:xDI + CPUMCTX.edi], eax
342%else
343 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
344%endif
345
346%ifdef VBOX_WITH_DR6_EXPERIMENT
347 ; Save DR6 - experiment, not safe!
348 mov xAX, dr6
349 mov [ss:xDI + CPUMCTX.dr6], xAX
350%endif
351
352 pop xAX ; saved LDTR
353 lldt ax
354
355 add xSP, xS ; pCtx
356
357 ; Restore segment registers
358 MYPOPSEGS xAX, ax
359
360 ; Restore general purpose registers
361 MYPOPAD
362
363 mov eax, VINF_SUCCESS
364
365.vmstart_end:
366 popf
367 pop xBP
368 ret
369
370
371.vmxstart_invalid_vmxon_ptr:
372 ; Restore base and limit of the IDTR & GDTR
373 lidt [xSP]
374 add xSP, xS*2
375 lgdt [xSP]
376 add xSP, xS*2
377
378 pop xAX ; saved LDTR
379 lldt ax
380
381 add xSP, xS ; pCtx
382
383 ; Restore segment registers
384 MYPOPSEGS xAX, ax
385
386 ; Restore all general purpose host registers.
387 MYPOPAD
388 mov eax, VERR_VMX_INVALID_VMXON_PTR
389 jmp .vmstart_end
390
391.vmxstart_start_failed:
392 ; Restore base and limit of the IDTR & GDTR
393 lidt [xSP]
394 add xSP, xS*2
395 lgdt [xSP]
396 add xSP, xS*2
397
398 pop xAX ; saved LDTR
399 lldt ax
400
401 add xSP, xS ; pCtx
402
403 ; Restore segment registers
404 MYPOPSEGS xAX, ax
405
406 ; Restore all general purpose host registers.
407 MYPOPAD
408 mov eax, VERR_VMX_UNABLE_TO_START_VM
409 jmp .vmstart_end
410
411ENDPROC VMXR0StartVM32
412
413%ifdef RT_ARCH_AMD64
414;/**
415; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
416; *
417; * @returns VBox status code
418; * @param fResume vmlaunch/vmresume
419; * @param pCtx Guest context
420; */
421BEGINPROC VMXR0StartVM64
422 push xBP
423 mov xBP, xSP
424
425 pushf
426 cli
427
428 ;/* First we have to save some final CPU context registers. */
429 mov rax, qword .vmlaunch64_done
430 push rax
431 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
432 vmwrite rax, [xSP]
433 ;/* Note: assumes success... */
434 add xSP, xS
435
436 ;/* Manual save and restore:
437 ; * - General purpose registers except RIP, RSP
438 ; *
439 ; * Trashed:
440 ; * - CR2 (we don't care)
441 ; * - LDTR (reset to 0)
442 ; * - DRx (presumably not changed at all)
443 ; * - DR7 (reset to 0x400)
444 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
445 ; *
446 ; */
447
448 ;/* Save all general purpose host registers. */
449 MYPUSHAD
450
451 ;/* Save the Guest CPU context pointer. */
452%ifdef ASM_CALL64_GCC
453 ; fResume already in rdi
454 ; pCtx already in rsi
455%else
456 mov rdi, rcx ; fResume
457 mov rsi, rdx ; pCtx
458%endif
459
460 ;/* Save segment registers */
461 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
462 MYPUSHSEGS xAX, ax
463
464 ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
465 ;; @todo use the automatic load feature for MSRs
466 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
467%if 0 ; not supported on Intel CPUs
468 LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
469%endif
470 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
471 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
472 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
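; These MSRs hold the host's 64-bit syscall setup (LSTAR/STAR/SF_MASK) and
; the swapgs kernel GS base, none of which VT-x switches for us here, hence
; the manual save/load above and the mirrored LOADHOSTMSR* sequence on every
; exit path below (see the @todo about the MSR auto load/store feature).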
473
474 ; Save the pCtx pointer
475 push xSI
476
477 ; Save LDTR
478 xor eax, eax
479 sldt ax
480 push xAX
481
482 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
483 sub xSP, xS*2
484 sgdt [xSP]
485
486 sub xSP, xS*2
487 sidt [xSP]
488
489%ifdef VBOX_WITH_DR6_EXPERIMENT
490 ; Restore DR6 - experiment, not safe!
491 mov xBX, [xSI + CPUMCTX.dr6]
492 mov dr6, xBX
493%endif
494
495 ; Restore CR2
496 mov rbx, qword [xSI + CPUMCTX.cr2]
497 mov cr2, rbx
498
499 mov eax, VMX_VMCS_HOST_RSP
500 vmwrite xAX, xSP
501 ;/* Note: assumes success... */
502 ;/* Don't mess with ESP anymore!! */
503
504 ;/* Restore Guest's general purpose registers. */
505 mov rax, qword [xSI + CPUMCTX.eax]
506 mov rbx, qword [xSI + CPUMCTX.ebx]
507 mov rcx, qword [xSI + CPUMCTX.ecx]
508 mov rdx, qword [xSI + CPUMCTX.edx]
509 mov rbp, qword [xSI + CPUMCTX.ebp]
510 mov r8, qword [xSI + CPUMCTX.r8]
511 mov r9, qword [xSI + CPUMCTX.r9]
512 mov r10, qword [xSI + CPUMCTX.r10]
513 mov r11, qword [xSI + CPUMCTX.r11]
514 mov r12, qword [xSI + CPUMCTX.r12]
515 mov r13, qword [xSI + CPUMCTX.r13]
516 mov r14, qword [xSI + CPUMCTX.r14]
517 mov r15, qword [xSI + CPUMCTX.r15]
518
519 ; resume or start?
520 cmp xDI, 0 ; fResume
521 je .vmlaunch64_launch
522
523 ;/* Restore rdi & rsi. */
524 mov rdi, qword [xSI + CPUMCTX.edi]
525 mov rsi, qword [xSI + CPUMCTX.esi]
526
527 vmresume
528 jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */
529
530.vmlaunch64_launch:
531 ;/* Restore rdi & rsi. */
532 mov rdi, qword [xSI + CPUMCTX.edi]
533 mov rsi, qword [xSI + CPUMCTX.esi]
534
535 vmlaunch
536 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */
537
538ALIGNCODE(16)
539.vmlaunch64_done:
540 jc near .vmxstart64_invalid_vmxon_ptr
541 jz near .vmxstart64_start_failed
542
543 ; Restore base and limit of the IDTR & GDTR
544 lidt [xSP]
545 add xSP, xS*2
546 lgdt [xSP]
547 add xSP, xS*2
548
549 push xDI
550 mov xDI, [xSP + xS * 2] ; pCtx
551
552 mov qword [xDI + CPUMCTX.eax], rax
553 mov qword [xDI + CPUMCTX.ebx], rbx
554 mov qword [xDI + CPUMCTX.ecx], rcx
555 mov qword [xDI + CPUMCTX.edx], rdx
556 mov qword [xDI + CPUMCTX.esi], rsi
557 mov qword [xDI + CPUMCTX.ebp], rbp
558 mov qword [xDI + CPUMCTX.r8], r8
559 mov qword [xDI + CPUMCTX.r9], r9
560 mov qword [xDI + CPUMCTX.r10], r10
561 mov qword [xDI + CPUMCTX.r11], r11
562 mov qword [xDI + CPUMCTX.r12], r12
563 mov qword [xDI + CPUMCTX.r13], r13
564 mov qword [xDI + CPUMCTX.r14], r14
565 mov qword [xDI + CPUMCTX.r15], r15
566
567 pop xAX ; the guest edi we pushed above
568 mov qword [xDI + CPUMCTX.edi], rax
569
570%ifdef VBOX_WITH_DR6_EXPERIMENT
571 ; Save DR6 - experiment, not safe!
572 mov xAX, dr6
573 mov [xDI + CPUMCTX.dr6], xAX
574%endif
575
576 pop xAX ; saved LDTR
577 lldt ax
578
579 pop xSI ; pCtx (needed in rsi by the macros below)
580
581 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
582 ;; @todo use the automatic load feature for MSRs
583 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
584 LOADHOSTMSR MSR_K8_SF_MASK
585 LOADHOSTMSR MSR_K6_STAR
586%if 0 ; not supported on Intel CPUs
587 LOADHOSTMSR MSR_K8_CSTAR
588%endif
589 LOADHOSTMSR MSR_K8_LSTAR
590
591 ; Restore segment registers
592 MYPOPSEGS xAX, ax
593
594 ; Restore general purpose registers
595 MYPOPAD
596
597 mov eax, VINF_SUCCESS
598
599.vmstart64_end:
600 popf
601 pop xBP
602 ret
603
604
605.vmxstart64_invalid_vmxon_ptr:
606 ; Restore base and limit of the IDTR & GDTR
607 lidt [xSP]
608 add xSP, xS*2
609 lgdt [xSP]
610 add xSP, xS*2
611
612 pop xAX ; saved LDTR
613 lldt ax
614
615 pop xSI ; pCtx (needed in rsi by the macros below)
616
617 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
618 ;; @todo use the automatic load feature for MSRs
619 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
620 LOADHOSTMSR MSR_K8_SF_MASK
621 LOADHOSTMSR MSR_K6_STAR
622%if 0 ; not supported on Intel CPUs
623 LOADHOSTMSR MSR_K8_CSTAR
624%endif
625 LOADHOSTMSR MSR_K8_LSTAR
626
627 ; Restore segment registers
628 MYPOPSEGS xAX, ax
629
630 ; Restore all general purpose host registers.
631 MYPOPAD
632 mov eax, VERR_VMX_INVALID_VMXON_PTR
633 jmp .vmstart64_end
634
635.vmxstart64_start_failed:
636 ; Restore base and limit of the IDTR & GDTR
637 lidt [xSP]
638 add xSP, xS*2
639 lgdt [xSP]
640 add xSP, xS*2
641
642 pop xAX ; saved LDTR
643 lldt ax
644
645 pop xSI ; pCtx (needed in rsi by the macros below)
646
647 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
648 ;; @todo use the automatic load feature for MSRs
649 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
650 LOADHOSTMSR MSR_K8_SF_MASK
651 LOADHOSTMSR MSR_K6_STAR
652%if 0 ; not supported on Intel CPUs
653 LOADHOSTMSR MSR_K8_CSTAR
654%endif
655 LOADHOSTMSR MSR_K8_LSTAR
656
657 ; Restore segment registers
658 MYPOPSEGS xAX, ax
659
660 ; Restore all general purpose host registers.
661 MYPOPAD
662 mov eax, VERR_VMX_UNABLE_TO_START_VM
663 jmp .vmstart64_end
664ENDPROC VMXR0StartVM64
665
666;/**
667; * Executes VMWRITE
668; *
669; * @returns VBox status code
670; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
671; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
672; */
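; Assumed C prototype (inferred from the parameters above, in the style of
; the VMXReadVMCS64 comment below):
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data);
; The 'mov eax, 0ffffffffh / and ... / xor rax, rax' sequence zero extends
; the 32-bit field index (only the low dword of the argument register is
; guaranteed) and leaves eax = 0 = VINF_SUCCESS for the success path.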
673BEGINPROC VMXWriteVMCS64
674%ifdef ASM_CALL64_GCC
675 mov eax, 0ffffffffh
676 and rdi, rax
677 xor rax, rax
678 vmwrite rdi, rsi
679%else
680 mov eax, 0ffffffffh
681 and rcx, rax
682 xor rax, rax
683 vmwrite rcx, rdx
684%endif
685 jnc .valid_vmcs
686 mov eax, VERR_VMX_INVALID_VMCS_PTR
687 ret
688.valid_vmcs:
689 jnz .the_end
690 mov eax, VERR_VMX_INVALID_VMCS_FIELD
691.the_end:
692 ret
693ENDPROC VMXWriteVMCS64
694
695;/**
696; * Executes VMREAD
697; *
698; * @returns VBox status code
699; * @param idxField VMCS index
700; * @param pData Ptr to store VM field value
701; */
702;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
703BEGINPROC VMXReadVMCS64
704%ifdef ASM_CALL64_GCC
705 mov eax, 0ffffffffh
706 and rdi, rax
707 xor rax, rax
708 vmread [rsi], rdi
709%else
710 mov eax, 0ffffffffh
711 and rcx, rax
712 xor rax, rax
713 vmread [rdx], rcx
714%endif
715 jnc .valid_vmcs
716 mov eax, VERR_VMX_INVALID_VMCS_PTR
717 ret
718.valid_vmcs:
719 jnz .the_end
720 mov eax, VERR_VMX_INVALID_VMCS_FIELD
721.the_end:
722 ret
723ENDPROC VMXReadVMCS64
724
725
726;/**
727; * Executes VMXON
728; *
729; * @returns VBox status code
730; * @param HCPhysVMXOn Physical address of VMXON structure
731; */
732;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
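; vmxon (like vmclear/vmptrld below) only takes a 64-bit memory operand, so
; on AMD64 the physical address argument is pushed and the instruction reads
; it from [rsp]; the 8 bytes are released again at .the_end.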
733BEGINPROC VMXEnable
734%ifdef RT_ARCH_AMD64
735 xor rax, rax
736 %ifdef ASM_CALL64_GCC
737 push rdi
738 %else
739 push rcx
740 %endif
741 vmxon [rsp]
742%else
743 xor eax, eax
744 vmxon [esp + 4]
745%endif
746 jnc .good
747 mov eax, VERR_VMX_INVALID_VMXON_PTR
748 jmp .the_end
749
750.good:
751 jnz .the_end
752 mov eax, VERR_VMX_GENERIC
753
754.the_end:
755%ifdef RT_ARCH_AMD64
756 add rsp, 8
757%endif
758 ret
759ENDPROC VMXEnable
760
761;/**
762; * Executes VMXOFF
763; */
764;DECLASM(void) VMXDisable(void);
765BEGINPROC VMXDisable
766 vmxoff
767 ret
768ENDPROC VMXDisable
769
770
771;/**
772; * Executes VMCLEAR
773; *
774; * @returns VBox status code
775; * @param HCPhysVMCS Physical address of VM control structure
776; */
777;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
778BEGINPROC VMXClearVMCS
779%ifdef RT_ARCH_AMD64
780 xor rax, rax
781 %ifdef ASM_CALL64_GCC
782 push rdi
783 %else
784 push rcx
785 %endif
786 vmclear [rsp]
787%else
788 xor eax, eax
789 vmclear [esp + 4]
790%endif
791 jnc .the_end
792 mov eax, VERR_VMX_INVALID_VMCS_PTR
793.the_end:
794%ifdef RT_ARCH_AMD64
795 add rsp, 8
796%endif
797 ret
798ENDPROC VMXClearVMCS
799
800
801;/**
802; * Executes VMPTRLD
803; *
804; * @returns VBox status code
805; * @param HCPhysVMCS Physical address of VMCS structure
806; */
807;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
808BEGINPROC VMXActivateVMCS
809%ifdef RT_ARCH_AMD64
810 xor rax, rax
811 %ifdef ASM_CALL64_GCC
812 push rdi
813 %else
814 push rcx
815 %endif
816 vmptrld [rsp]
817%else
818 xor eax, eax
819 vmptrld [esp + 4]
820%endif
821 jnc .the_end
822 mov eax, VERR_VMX_INVALID_VMCS_PTR
823.the_end:
824%ifdef RT_ARCH_AMD64
825 add rsp, 8
826%endif
827 ret
828ENDPROC VMXActivateVMCS
829
830%endif ; RT_ARCH_AMD64
831
832;/**
833; * Executes VMPTRST
834; *
835; * @returns VBox status code
836; * @param [esp + 04h] gcc:rdi msc:rcx Param 1 - First parameter - Address that will receive the current pointer
837; */
838;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
839BEGINPROC VMXGetActivateVMCS
840%ifdef RT_OS_OS2
841 mov eax, VERR_NOT_SUPPORTED
842 ret
843%else
844 %ifdef RT_ARCH_AMD64
845 %ifdef ASM_CALL64_GCC
846 vmptrst qword [rdi]
847 %else
848 vmptrst qword [rcx]
849 %endif
850 %else
851 vmptrst qword [esp+04h]
852 %endif
853 xor eax, eax
854 ret
855%endif
856ENDPROC VMXGetActivateVMCS
857
858;/**
859; * Invalidate a page using invept
860; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
861; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
862; */
863;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
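; invept is emitted as raw bytes (66 0F 38 80 /r) because the assemblers in
; use predate the instruction; the final DB byte is the ModRM byte selecting
; the register/memory pair shown in the commented-out mnemonic. The same
; scheme is used for invvpid (opcode 66 0F 38 81 /r) further down.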
864BEGINPROC VMXR0InvEPT
865%ifdef RT_ARCH_AMD64
866 %ifdef ASM_CALL64_GCC
867 mov eax, 0ffffffffh
868 and rdi, rax
869 xor rax, rax
870; invept rdi, qword [rsi]
871 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
872 %else
873 mov eax, 0ffffffffh
874 and rcx, rax
875 xor rax, rax
876; invept rcx, qword [rdx]
877 DB 0x66, 0x0F, 0x38, 0x80, 0xA
878 %endif
879%else
880 mov eax, [esp + 4]
881 mov ecx, [esp + 8]
882; invept eax, qword [ecx]
883 DB 0x66, 0x0F, 0x38, 0x80, 0x1
884%endif
885 jnc .valid_vmcs
886 mov eax, VERR_VMX_INVALID_VMCS_PTR
887 ret
888.valid_vmcs:
889 jnz .the_end
890 mov eax, VERR_INVALID_PARAMETER
891.the_end:
892 ret
893ENDPROC VMXR0InvEPT
894
895;/**
896; * Invalidate a page using invvpid
897; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
898; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
899; */
900;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
901BEGINPROC VMXR0InvVPID
902%ifdef RT_ARCH_AMD64
903 %ifdef ASM_CALL64_GCC
904 mov eax, 0ffffffffh
905 and rdi, rax
906 xor rax, rax
907 ;invvpid rdi, qword [rsi]
908 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
909 %else
910 mov eax, 0ffffffffh
911 and rcx, rax
912 xor rax, rax
913; invvpid rcx, qword [rdx]
914 DB 0x66, 0x0F, 0x38, 0x81, 0xA
915 %endif
916%else
917 mov eax, [esp + 4]
918 mov ecx, [esp + 8]
919; invvpid eax, qword [ecx]
920 DB 0x66, 0x0F, 0x38, 0x81, 0x1
921%endif
922 jnc .valid_vmcs
923 mov eax, VERR_VMX_INVALID_VMCS_PTR
924 ret
925.valid_vmcs:
926 jnz .the_end
927 mov eax, VERR_INVALID_PARAMETER
928.the_end:
929 ret
930ENDPROC VMXR0InvVPID
931
932
933;/**
934; * Prepares for and executes VMRUN (32 bits guests)
935; *
936; * @returns VBox status code
937; * @param pVMCBHostPhys Physical address of host VMCB
938; * @param pVMCBPhys Physical address of guest VMCB
939; * @param pCtx Guest context
940; */
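; Assumed C prototype (inferred from the parameter comments in the body
; below, which use pVMCBHostPhys/pVMCBPhys/pCtx):
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);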
941BEGINPROC SVMVMRun
942%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
943 %ifdef ASM_CALL64_GCC
944 push rdx
945 push rsi
946 push rdi
947 %else
948 push r8
949 push rdx
950 push rcx
951 %endif
952 push 0
953%endif
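; The pushes above plus the dummy 'push 0' (fake return address) reproduce
; the x86 cdecl stack layout on AMD64, so the [xBP + xS*2 + ...] offsets
; below work unchanged for both architectures; the matching 'add xSP, 4*xS'
; just before the ret releases these four slots again.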
954 push xBP
955 mov xBP, xSP
956 pushf
957
958 ;/* Manual save and restore:
959 ; * - General purpose registers except RIP, RSP, RAX
960 ; *
961 ; * Trashed:
962 ; * - CR2 (we don't care)
963 ; * - LDTR (reset to 0)
964 ; * - DRx (presumably not changed at all)
965 ; * - DR7 (reset to 0x400)
966 ; */
967
968 ;/* Save all general purpose host registers. */
969 MYPUSHAD
970
971 ;/* Save the Guest CPU context pointer. */
972 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
973 push xSI ; push for saving the state at the end
974
975 ; Restore CR2
976 mov ebx, [xSI + CPUMCTX.cr2]
977 mov cr2, xBX
978
979 ; save host fs, gs, sysenter msr etc
980 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
981 push xAX ; save for the vmload after vmrun
982 vmsave
983
984 ; setup eax for VMLOAD
985 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
986
987 ;/* Restore Guest's general purpose registers. */
988 ;/* EAX is loaded from the VMCB by VMRUN */
989 mov ebx, [xSI + CPUMCTX.ebx]
990 mov ecx, [xSI + CPUMCTX.ecx]
991 mov edx, [xSI + CPUMCTX.edx]
992 mov edi, [xSI + CPUMCTX.edi]
993 mov ebp, [xSI + CPUMCTX.ebp]
994 mov esi, [xSI + CPUMCTX.esi]
995
996 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
997 clgi
998 sti
999
1000 ; load guest fs, gs, sysenter msr etc
1001 vmload
1002 ; run the VM
1003 vmrun
1004
1005 ;/* EAX is in the VMCB already; we can use it here. */
1006
1007 ; save guest fs, gs, sysenter msr etc
1008 vmsave
1009
1010 ; load host fs, gs, sysenter msr etc
1011 pop xAX ; pushed above
1012 vmload
1013
1014 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1015 cli
1016 stgi
1017
1018 pop xAX ; pCtx
1019
1020 mov [ss:xAX + CPUMCTX.ebx], ebx
1021 mov [ss:xAX + CPUMCTX.ecx], ecx
1022 mov [ss:xAX + CPUMCTX.edx], edx
1023 mov [ss:xAX + CPUMCTX.esi], esi
1024 mov [ss:xAX + CPUMCTX.edi], edi
1025 mov [ss:xAX + CPUMCTX.ebp], ebp
1026
1027 ; Restore general purpose registers
1028 MYPOPAD
1029
1030 mov eax, VINF_SUCCESS
1031
1032 popf
1033 pop xBP
1034%ifdef RT_ARCH_AMD64
1035 add xSP, 4*xS
1036%endif
1037 ret
1038ENDPROC SVMVMRun
1039
1040%ifdef RT_ARCH_AMD64
1041;/**
1042; * Prepares for and executes VMRUN (64 bits guests)
1043; *
1044; * @returns VBox status code
1045; * @param pVMCBHostPhys Physical address of host VMCB
1046; * @param pVMCBPhys Physical address of guest VMCB
1047; * @param pCtx Guest context
1048; */
1049BEGINPROC SVMVMRun64
1050 ; fake a cdecl stack frame
1051 %ifdef ASM_CALL64_GCC
1052 push rdx
1053 push rsi
1054 push rdi
1055 %else
1056 push r8
1057 push rdx
1058 push rcx
1059 %endif
1060 push 0
1061 push rbp
1062 mov rbp, rsp
1063 pushf
1064
1065 ;/* Manual save and restore:
1066 ; * - General purpose registers except RIP, RSP, RAX
1067 ; *
1068 ; * Trashed:
1069 ; * - CR2 (we don't care)
1070 ; * - LDTR (reset to 0)
1071 ; * - DRx (presumably not changed at all)
1072 ; * - DR7 (reset to 0x400)
1073 ; */
1074
1075 ;/* Save all general purpose host registers. */
1076 MYPUSHAD
1077
1078 ;/* Save the Guest CPU context pointer. */
1079 mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
1080 push rsi ; push for saving the state at the end
1081
1082 ; Restore CR2
1083 mov rbx, [rsi + CPUMCTX.cr2]
1084 mov cr2, rbx
1085
1086 ; save host fs, gs, sysenter msr etc
1087 mov rax, [rbp + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
1088 push rax ; save for the vmload after vmrun
1089 vmsave
1090
1091 ; setup eax for VMLOAD
1092 mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
1093
1094 ;/* Restore Guest's general purpose registers. */
1095 ;/* RAX is loaded from the VMCB by VMRUN */
1096 mov rbx, qword [xSI + CPUMCTX.ebx]
1097 mov rcx, qword [xSI + CPUMCTX.ecx]
1098 mov rdx, qword [xSI + CPUMCTX.edx]
1099 mov rdi, qword [xSI + CPUMCTX.edi]
1100 mov rbp, qword [xSI + CPUMCTX.ebp]
1101 mov r8, qword [xSI + CPUMCTX.r8]
1102 mov r9, qword [xSI + CPUMCTX.r9]
1103 mov r10, qword [xSI + CPUMCTX.r10]
1104 mov r11, qword [xSI + CPUMCTX.r11]
1105 mov r12, qword [xSI + CPUMCTX.r12]
1106 mov r13, qword [xSI + CPUMCTX.r13]
1107 mov r14, qword [xSI + CPUMCTX.r14]
1108 mov r15, qword [xSI + CPUMCTX.r15]
1109 mov rsi, qword [xSI + CPUMCTX.esi]
1110
1111 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
1112 clgi
1113 sti
1114
1115 ; load guest fs, gs, sysenter msr etc
1116 vmload
1117 ; run the VM
1118 vmrun
1119
1120 ;/* RAX is in the VMCB already; we can use it here. */
1121
1122 ; save guest fs, gs, sysenter msr etc
1123 vmsave
1124
1125 ; load host fs, gs, sysenter msr etc
1126 pop rax ; pushed above
1127 vmload
1128
1129 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1130 cli
1131 stgi
1132
1133 pop rax ; pCtx
1134
1135 mov qword [rax + CPUMCTX.ebx], rbx
1136 mov qword [rax + CPUMCTX.ecx], rcx
1137 mov qword [rax + CPUMCTX.edx], rdx
1138 mov qword [rax + CPUMCTX.esi], rsi
1139 mov qword [rax + CPUMCTX.edi], rdi
1140 mov qword [rax + CPUMCTX.ebp], rbp
1141 mov qword [rax + CPUMCTX.r8], r8
1142 mov qword [rax + CPUMCTX.r9], r9
1143 mov qword [rax + CPUMCTX.r10], r10
1144 mov qword [rax + CPUMCTX.r11], r11
1145 mov qword [rax + CPUMCTX.r12], r12
1146 mov qword [rax + CPUMCTX.r13], r13
1147 mov qword [rax + CPUMCTX.r14], r14
1148 mov qword [rax + CPUMCTX.r15], r15
1149
1150 ; Restore general purpose registers
1151 MYPOPAD
1152
1153 mov eax, VINF_SUCCESS
1154
1155 popf
1156 pop rbp
1157 add rsp, 4*xS
1158 ret
1159ENDPROC SVMVMRun64
1160%endif ; RT_ARCH_AMD64
1161
1162
1163%if GC_ARCH_BITS == 64
1164;;
1165; Executes INVLPGA
1166;
1167; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
1168; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
1169;
1170;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1171BEGINPROC SVMInvlpgA
1172%ifdef RT_ARCH_AMD64
1173 %ifdef ASM_CALL64_GCC
1174 mov rax, rdi
1175 mov rcx, rsi
1176 %else
1177 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1178 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1179 ; values also set the upper 32 bits of the register to zero. Consequently
1180 ; there is no need for an instruction movzlq.''
1181 mov eax, ecx
1182 mov rcx, rdx
1183 %endif
1184%else
1185 mov eax, [esp + 4]
1186 mov ecx, [esp + 0Ch]
1187%endif
1188 invlpga [xAX], ecx
1189 ret
1190ENDPROC SVMInvlpgA
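; invlpga flushes the TLB entry for the virtual address in rAX/eAX within
; the ASID passed in ecx, which is why both variants above and below only
; need to normalise the two arguments into those registers.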
1191
1192%else
1193;;
1194; Executes INVLPGA
1195;
1196; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
1197; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
1198;
1199;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1200BEGINPROC SVMInvlpgA
1201%ifdef RT_ARCH_AMD64
1202 %ifdef ASM_CALL64_GCC
1203 movzx rax, edi
1204 mov ecx, esi
1205 %else
1206 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1207 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1208 ; values also set the upper 32 bits of the register to zero. Consequently
1209 ; there is no need for an instruction movzlq.''
1210 mov eax, ecx
1211 mov ecx, edx
1212 %endif
1213%else
1214 mov eax, [esp + 4]
1215 mov ecx, [esp + 8]
1216%endif
1217 invlpga [xAX], ecx
1218 ret
1219ENDPROC SVMInvlpgA
1220
1221%endif ; GC_ARCH_BITS != 64
1222