VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 13077

Last change on this file since 13077 was 13036, checked in by vboxsync, 16 years ago

CSTAR was still restored in failure paths. (not supported on Intel CPUs)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 27.7 KB
; $Id: HWACCMR0A.asm 13036 2008-10-07 11:05:23Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

%ifdef RT_ARCH_AMD64
 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    rdmsr
    push    rdx
    push    rax
    mov     edx, dword [xSI + %2 + 4]
    mov     eax, dword [xSI + %2]
    wrmsr
 %endmacro

 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [xSI + %2], eax
    mov     dword [xSI + %2 + 4], edx
    pop     rax
    pop     rdx
    wrmsr
 %endmacro

 ; Load the corresponding host MSR (trashes rdx & rcx)
 %macro LOADHOSTMSR 1
    mov     rcx, %1
    pop     rax
    pop     rdx
    wrmsr
 %endmacro
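
 ; A minimal usage sketch (illustrative only, mirroring how VMXR0StartVM64
 ; below pairs these macros): each LOADGUESTMSR pushes the host edx:eax pair
 ; onto the stack, so the matching LOADHOSTMSR/LOADHOSTMSREX invocations must
 ; come in the exact reverse MSR order to pop the right pair:
 ;   LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR   ; push host value, load guest's
 ;   ...                                           ; guest code runs
 ;   LOADHOSTMSR  MSR_K8_LSTAR                     ; pop & restore host value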

 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif
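
 ; Note: the MSC variants additionally save/restore rsi & rdi because the
 ; Windows x64 calling convention treats them as callee-saved, whereas the
 ; SysV (GCC) AMD64 ABI treats them as argument/scratch registers.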

 ; trashes rax, rdx & rcx
 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save and restore it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
    push    fs

    ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
    push    gs
 %endmacro

 ; trashes rax, rdx & rcx
 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

    pop     fs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro
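
 ; Resulting stack layout after MYPUSHSEGS xAX, ax on AMD64 (8 bytes per
 ; push, top of the stack first):
 ;   [rsp+00h]  gs
 ;   [rsp+08h]  GS base, low dword  (eax from rdmsr)
 ;   [rsp+10h]  GS base, high dword (edx from rdmsr)
 ;   [rsp+18h]  fs
 ;   [rsp+20h]  FS base, low dword
 ;   [rsp+28h]  FS base, high dword
 ;   [rsp+30h]  ds
 ;   [rsp+38h]  es
 ; MYPOPSEGS unwinds this in exactly the reverse order.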

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume vmlaunch/vmresume
; * @param pCtx Guest context
; */
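; Probable C-side prototype (an assumption, mirroring the ;DECLASM() comment
; convention used for the other entry points further down in this file):
;DECLASM(int) VMXR0StartVM32(uint32_t fResume, PCPUMCTX pCtx);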
BEGINPROC VMXR0StartVM32
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlaunch_launch

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done  ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2] ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXR0StartVM32

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume vmlaunch/vmresume
; * @param pCtx Guest context
; */
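; Probable C-side prototype (an assumption, as above for VMXR0StartVM32):
;DECLASM(int) VMXR0StartVM64(uint32_t fResume, PCPUMCTX pCtx);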
BEGINPROC VMXR0StartVM64
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
    mov     rax, qword .vmlaunch64_done
    push    rax
    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
%else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     rbx, qword [xSI + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [xSI + CPUMCTX.eax]
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlaunch64_launch

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch64_done ;/* here if vmresume detected a failure. */

.vmlaunch64_launch:
    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmxstart64_invalid_vmxon_ptr
    jz      near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2] ; pCtx

    mov     qword [xDI + CPUMCTX.eax], rax
    mov     qword [xDI + CPUMCTX.ebx], rbx
    mov     qword [xDI + CPUMCTX.ecx], rcx
    mov     qword [xDI + CPUMCTX.edx], rdx
    mov     qword [xDI + CPUMCTX.esi], rsi
    mov     qword [xDI + CPUMCTX.ebp], rbp
    mov     qword [xDI + CPUMCTX.r8],  r8
    mov     qword [xDI + CPUMCTX.r9],  r9
    mov     qword [xDI + CPUMCTX.r10], r10
    mov     qword [xDI + CPUMCTX.r11], r11
    mov     qword [xDI + CPUMCTX.r12], r12
    mov     qword [xDI + CPUMCTX.r13], r13
    mov     qword [xDI + CPUMCTX.r14], r14
    mov     qword [xDI + CPUMCTX.r15], r15

    pop     xAX             ; the guest edi we pushed above
    mov     qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR ; STAR was pushed by LOADGUESTMSR above; pop it here too to keep the stack balanced
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    pop     xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR ; STAR was pushed by LOADGUESTMSR above; pop it here too to keep the stack balanced
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXR0StartVM64

;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
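    ; VMX instructions report failure through the flags: CF=1 means
    ; VMfailInvalid (no current VMCS), ZF=1 means VMfailValid (the
    ; VM-instruction error field of the current VMCS holds the reason).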
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param idxField VMCS index
; * @param pData Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
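    ; Same CF/ZF failure reporting as in VMXWriteVMCS64 above.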
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param HCPhysVMXOn Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
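    ; CF=1 here signals VMfailInvalid (bad VMXON region pointer),
    ; ZF=1 signals VMfailValid.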
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable

;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64

;/**
; * Executes VMPTRST
; *
; * @returns VBox status code
; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
; */
;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivateVMCS
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
 %else
    vmptrst qword [rcx]
 %endif
%else
    vmptrst qword [esp+04h]
%endif
    xor     eax, eax
    ret
ENDPROC VMXGetActivateVMCS


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost Physical address of host VMCB
; * @param HCPhysVMCB Physical address of guest VMCB
; * @param pCtx Guest context
; */
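; Probable C-side prototype (an assumption; the parameter names follow the
; pVMCBHostPhys/pVMCBPhys comments in the body below):
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);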
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]                   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                                 ; save for the vmload after vmrun
    vmsave

    ; setup xAX for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX                                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX                                 ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost Physical address of host VMCB
; * @param HCPhysVMCB Physical address of guest VMCB
; * @param pCtx Guest context
; */
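; Probable C-side prototype (an assumption, as above for SVMVMRun):
;DECLASM(int) SVMVMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);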
BEGINPROC SVMVMRun64
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    rsi                                 ; push for saving the state at the end

    ; Restore CR2
    mov     rbx, [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + xS*2]                   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    rax                                 ; save for the vmload after vmrun
    vmsave

    ; setup rax for VMLOAD
    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                                 ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    add     rsp, 4*xS
    ret
ENDPROC SVMVMRun64
%endif ; RT_ARCH_AMD64


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ; values also set the upper 32 bits of the register to zero. Consequently
    ; there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
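    ; INVLPGA invalidates the TLB mapping for the virtual address in rAX,
    ; tagged with the ASID in ECX (see the AMD APM).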
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%else
;;
; Executes INVLPGA
;
; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, edi        ; implicitly zero-extends into rax (movzx has no r64, r32 form)
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ; values also set the upper 32 bits of the register to zero. Consequently
    ; there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA

%endif ; GC_ARCH_BITS != 64