VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 9457

Last change on this file since 9457 was 9457, checked in by vboxsync, 17 years ago

Reapplied fixed 31707.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.5 KB
; $Id: HWACCMR0A.asm 9457 2008-06-06 09:46:39Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif

 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode;
    ; Solaris OTOH doesn't, so we must save and restore it ourselves.
    push    rcx
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    pop     rcx
    push    rdx
    push    rax
    push    fs

    ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry
    ; into the kernel; the same happens on exit.
    push    rcx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    pop     rcx
    push    rdx
    push    rax
    push    gs
 %endmacro

 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    push    rcx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr
    pop     rcx

    pop     fs
    pop     rax
    pop     rdx
    push    rcx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    pop     rcx
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif
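
; For reference (an added sketch, not part of the original macros): after MYPUSHSEGS %1, %2
; on AMD64 the stack holds, from the top down, roughly:
;
;   [xSP+00h]  gs selector
;   [xSP+08h]  MSR_K8_GS_BASE, low 32 bits  (rax from rdmsr)
;   [xSP+10h]  MSR_K8_GS_BASE, high 32 bits (rdx from rdmsr)
;   [xSP+18h]  fs selector
;   [xSP+20h]  MSR_K8_FS_BASE, low 32 bits
;   [xSP+28h]  MSR_K8_FS_BASE, high 32 bits
;   [xSP+30h]  ds selector
;   [xSP+38h]  es selector
;
; MYPOPSEGS pops in exactly the reverse order and rewrites the two base MSRs with wrmsr,
; which is why single-stepping through it (the debugger reloads segment state) is unsafe.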


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
; *
; * @returns VBox status code
; * @param fResume vmlaunch/vmresume
; * @param pCtx Guest context
; */
BEGINPROC VMXR0StartVM32
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx (among others), so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0 ; fResume
    je      .vmlaunch_launch

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done;     ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done;     ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]             ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX                             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXR0StartVM32
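
; Added caller-side sketch (not part of the original file). The C prototype is assumed to be
; roughly DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx); where fResume == 0
; selects vmlaunch and anything else selects vmresume:
;
;   int rc = VMXR0StartVM32(fResume, pCtx);
;   if (rc == VERR_VMX_INVALID_VMXON_PTR)
;   {   /* VMLAUNCH/VMRESUME failed with CF set (invalid VMCS/VMXON pointer). */ }
;   else if (rc == VERR_VMX_UNABLE_TO_START_VM)
;   {   /* Failed with ZF set; inspect the VM-instruction error field for the reason. */ }
;   else
;   {   /* VINF_SUCCESS: a VM-exit occurred and the guest GPRs are back in *pCtx. */ }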

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param fResume vmlaunch/vmresume
; * @param pCtx Guest context
; */
BEGINPROC VMXR0StartVM64
    ret
ENDPROC VMXR0StartVM64

;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
; * @param pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64
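
; Added usage sketch (not part of the original file); the prototype is presumably
; DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data); - the upper half
; of idxField is masked off above before the vmwrite:
;
;   int rc = VMXWriteVMCS64(idxField, u64Value);
;   if (rc == VERR_VMX_INVALID_VMCS_PTR)    /* vmwrite failed with CF=1 */
;       return rc;
;   if (rc == VERR_VMX_INVALID_VMCS_FIELD)  /* vmwrite failed with ZF=1 */
;       return rc;
;   /* else rc == VINF_SUCCESS */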

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param idxField   VMCS index
; * @param pData      Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64
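
; Added usage sketch (not part of the original file), following the prototype above:
;
;   uint64_t u64Val = 0;
;   int rc = VMXReadVMCS64(idxField, &u64Val);
;   if (RT_SUCCESS(rc))
;   {   /* VINF_SUCCESS: u64Val now holds the VMCS field value. */ }
;   /* Otherwise VERR_VMX_INVALID_VMCS_PTR (CF=1) or VERR_VMX_INVALID_VMCS_FIELD (ZF=1). */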


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable
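
; Added usage sketch (not part of the original file). The caller is assumed to have set
; CR4.VMXE and written the VMCS revision identifier into the VMXON region beforehand;
; the physical address is pushed above so that vmxon gets a memory operand:
;
;   int rc = VMXEnable(HCPhysVMXOnRegion);  /* page-aligned VMXON region */
;   if (rc == VERR_VMX_INVALID_VMXON_PTR)
;   {   /* vmxon failed with CF=1. */ }
;   else if (rc == VERR_VMX_GENERIC)
;   {   /* vmxon failed with ZF=1. */ }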


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param HCPhysVMCS   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param HCPhysVMCS   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS
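
; Added usage sketch (not part of the original file) showing the usual ordering of the two
; helpers above when making a VMCS current:
;
;   int rc = VMXClearVMCS(HCPhysVMCS);      /* vmclear: flush and mark the VMCS as inactive */
;   if (RT_SUCCESS(rc))
;       rc = VMXActivateVMCS(HCPhysVMCS);   /* vmptrld: load it as the current VMCS */
;   /* Either call returns VERR_VMX_INVALID_VMCS_PTR if its instruction fails with CF=1. */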

%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param pVMCBHostPhys   Physical address of host VMCB
; * @param pVMCBPhys       Physical address of guest VMCB
; * @param pCtx            Guest context
; */
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, so sue me.
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]                   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                                 ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX     ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX     ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun
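
; Added caller-side sketch (not part of the original file); the prototype is presumably
; DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
;
;   int rc = SVMVMRun(HCPhysVMCBHost, HCPhysVMCB, pCtx);
;   /* rc is always VINF_SUCCESS here; the #VMEXIT exit code and exit info must be read
;      from the guest VMCB afterwards, and pCtx has been updated with the guest GPRs. */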


;;
; Executes INVLPGA
;
; @param pPageGC   msc:ecx  gcc:edi  x86:[esp+04]   Virtual page to invalidate
; @param uASID     msc:edx  gcc:esi  x86:[esp+08]   Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, edi            ;; @todo 64-bit guest.
    mov     ecx, esi
 %else
    mov     eax, ecx            ;; @todo 64-bit guest.
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA
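
; Added usage sketch (not part of the original file), following the prototype above:
;
;   /* Invalidate one guest-linear page in the TLB entries tagged with uASID. */
;   SVMInvlpgA(GCPtrPage, uASID);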