VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@13796

Last change on this file since 13796 was 13279, checked in by vboxsync, 16 years ago

More failure path problems with VT-x (led to crashes on testboxwin3).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 29.7 KB
; $Id: HWACCMR0A.asm 13279 2008-10-15 11:15:56Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.
%ifdef RT_ARCH_AMD64
 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    rdmsr
    push    rdx
    push    rax
    mov     edx, dword [xSI + %2 + 4]
    mov     eax, dword [xSI + %2]
    wrmsr
 %endmacro

 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [xSI + %2], eax
    mov     dword [xSI + %2 + 4], edx
    pop     rax
    pop     rdx
    wrmsr
 %endmacro

 ; Load the corresponding host MSR (trashes rdx & rcx)
 %macro LOADHOSTMSR 1
    mov     rcx, %1
    pop     rax
    pop     rdx
    wrmsr
 %endmacro
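
 ; Note on pairing: LOADGUESTMSR leaves the host MSR value (rdx:rax) on the stack
 ; and LOADHOSTMSR/LOADHOSTMSREX pop it back, so the restores must run in exactly
 ; the reverse order of the saves; see the LSTAR ... KERNEL_GS_BASE sequences in
 ; VMXR0StartVM64 below.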

 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif

 ; trashes rax, rdx & rcx
 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode,
    ; but Solaris doesn't, so we must save and restore it ourselves.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
    push    fs

    ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
    push    gs
 %endmacro

 ; trashes rax, rdx & rcx
 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

    pop     fs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch (0) / vmresume (non-zero)
; * @param   pCtx       Guest context
; */
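; Assumed prototype (not present in this revision; inferred from the parameters
; above, in the style of the DECLASM comments used elsewhere in this file --
; the fResume type is a guess):
;DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);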
BEGINPROC VMXR0StartVM32
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlaunch_launch

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done  ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
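    ; VMX instructions report failure through the flags: CF=1 means VMfailInvalid
    ; (no valid current VMCS pointer), ZF=1 means VMfailValid (error code in the
    ; VM-instruction error field), both clear means success. The same jc/jz check
    ; pattern recurs after every VMX instruction in this file.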
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2] ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXR0StartVM32

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch (0) / vmresume (non-zero)
; * @param   pCtx       Guest context
; */
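; Assumed prototype (not present in this revision; inferred from the parameters
; above -- the fResume type is a guess):
;DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);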
BEGINPROC VMXR0StartVM64
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
    mov     rax, qword .vmlaunch64_done
    push    rax
    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
%else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     rbx, qword [xSI + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [xSI + CPUMCTX.eax]
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlaunch64_launch

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch64_done  ;/* here if vmresume detected a failure. */

.vmlaunch64_launch:
    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmxstart64_invalid_vmxon_ptr
    jz      near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2] ; pCtx

    mov     qword [xDI + CPUMCTX.eax], rax
    mov     qword [xDI + CPUMCTX.ebx], rbx
    mov     qword [xDI + CPUMCTX.ecx], rcx
    mov     qword [xDI + CPUMCTX.edx], rdx
    mov     qword [xDI + CPUMCTX.esi], rsi
    mov     qword [xDI + CPUMCTX.ebp], rbp
    mov     qword [xDI + CPUMCTX.r8],  r8
    mov     qword [xDI + CPUMCTX.r9],  r9
    mov     qword [xDI + CPUMCTX.r10], r10
    mov     qword [xDI + CPUMCTX.r11], r11
    mov     qword [xDI + CPUMCTX.r12], r12
    mov     qword [xDI + CPUMCTX.r13], r13
    mov     qword [xDI + CPUMCTX.r14], r14
    mov     qword [xDI + CPUMCTX.r15], r15

    pop     xAX             ; the guest edi we pushed above
    mov     qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXR0StartVM64

;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
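; Assumed prototype, mirroring the VMXReadVMCS64 comment below (the u64Data
; parameter name is hypothetical):
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data);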
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable

;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64

;/**
; * Executes VMPTRST
; *
; * @returns VBox status code
; * @param   pVMCS   x86:[esp + 04h]  gcc:rdi  msc:rcx   Address that will receive the current pointer
; */
;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivateVMCS
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
 %else
    vmptrst qword [rcx]
 %endif
%else
    vmptrst qword [esp+04h]
%endif
    xor     eax, eax
    ret
ENDPROC VMXGetActivateVMCS

;/**
; * Invalidate a page using invept
; *
; * @param   enmFlush      msc:ecx  gcc:edi  x86:[esp+04]   Type of flush
; * @param   pDescriptor   msc:edx  gcc:esi  x86:[esp+08]   Descriptor pointer
; */
;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
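; Note: invept is emitted as raw DB bytes below, presumably because the assemblers
; in use did not know the opcode yet; the commented-out mnemonic above each DB line
; documents the intended instruction (invept = 66 0F 38 80 /r, the final byte being
; the ModRM for the chosen operands).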
BEGINPROC VMXR0InvEPT
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
;    invept  rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x80, 0x3E
 %else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
;    invept  rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
;    invept  eax, qword [ecx]
    DB      0x66, 0x0F, 0x38, 0x80, 0x1
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret
ENDPROC VMXR0InvEPT

;/**
; * Invalidate a page using invvpid
; *
; * @param   enmFlush      msc:ecx  gcc:edi  x86:[esp+04]   Type of flush
; * @param   pDescriptor   msc:edx  gcc:esi  x86:[esp+08]   Descriptor pointer
; */
;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
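; invvpid is likewise hand-assembled; it shares the 66 0F 38 escape with invept
; but uses opcode 81 instead of 80, as the DB lines below show.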
BEGINPROC VMXR0InvVPID
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
;    invvpid rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x81, 0x3E
 %else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
;    invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
;    invvpid eax, qword [ecx]
    DB      0x66, 0x0F, 0x38, 0x81, 0x1
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret
ENDPROC VMXR0InvVPID


;/**
; * Prepares for and executes VMRUN (32 bits guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
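; Assumed prototype (not present in this revision; inferred from the parameters
; above and the stack-frame layout below):
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);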
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]  ; pCtx
    push    xSI                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                 ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]  ; pVMCBPhys (64 bits physical address; x86: take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX                 ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
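; Assumed prototype (not present in this revision; inferred from the parameters
; above and the stack-frame layout below):
;DECLASM(int) SVMVMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);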
BEGINPROC SVMVMRun64
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]  ; pCtx
    push    rsi                 ; push for saving the state at the end

    ; Restore CR2
    mov     rbx, [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + xS*2]   ; pVMCBHostPhys (64 bits physical address)
    push    rax                 ; save for the vmload after vmrun
    vmsave

    ; setup rax for VMLOAD
    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]  ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                 ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    add     rsp, 4*xS
    ret
ENDPROC SVMVMRun64
%endif ; RT_ARCH_AMD64


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:rcx  gcc:rdi  x86:[esp+04]   Virtual page to invalidate
; @param   uASID     msc:rdx  gcc:rsi  x86:[esp+0C]   Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%else
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:ecx  gcc:edi  x86:[esp+04]   Virtual page to invalidate
; @param   uASID     msc:edx  gcc:esi  x86:[esp+08]   Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, edi            ; 32-bit move zero-extends into rax (movzx has no r32 source form)
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%endif ; GC_ARCH_BITS != 64