VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm @ 2756

Last change on this file since 2756 was 2756, checked in by vboxsync, 18 years ago

Manually save and restore GDTR & IDTR.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.6 KB
; $Id: HWACCMR0A.asm 2756 2007-05-22 08:25:23Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef __OS2__ ;; @todo build cvs nasm like on OS X.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
%endif


;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

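; (In this file these macros are invoked as 'MYPUSHSEGS xAX, ax' / 'MYPOPSEGS xAX, ax',
;  so the same source assembles to the natural register width on both 32-bit and
;  64-bit hosts.)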
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif
 ;; @todo check ds,es saving/restoring on AMD64
 %macro MYPUSHSEGS 2
    push    gs
    push    fs
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
 %endmacro
 %macro MYPOPSEGS 2
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
    pop     fs
    pop     gs
 %endmacro

%else ; __X86__
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH
; *
; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param pCtx Guest context
; */
BEGINPROC VMXStartVM
    push    xBP
    mov     xBP, xSP

    ;/* First we have to save some final CPU context registers. */
%ifdef __AMD64__
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi        ; pCtx
 %else
    mov     rsi, rcx        ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]  ; pCtx
%endif
    push    xSI

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
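    ; (sgdt/sidt store a pseudo-descriptor: a 16-bit limit followed by a 32-bit
    ;  or 64-bit base, i.e. 6 or 10 bytes; reserving xS*2 bytes covers both cases.)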
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done      ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
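    ; VMX instruction failure convention: CF=1 means VMfailInvalid (no current
    ; VMCS), ZF=1 means VMfailValid (consult the VM-instruction error field);
    ; both clear means the instruction succeeded.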
    jnc     .vmxstart_good

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ;/* Restore all general purpose host registers. */
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_good:
    jnz     .vmxstart_success

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

.vmxstart_success:
    push    xDI
    mov     xDI, [xSP + xS * 2]     ;/* pCtx */

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef __AMD64__
    pop     xAX                             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    pop     xBP
    ret
ENDPROC VMXStartVM
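
;/*
; * Illustrative only: a minimal sketch (not from this file) of how a ring-0
; * caller might drive VMXStartVM/VMXResumeVM. The DECLASM prototypes and the
; * fLaunched bookkeeping below are assumptions for the example, not
; * declarations taken from this source.
; *
; *   DECLASM(int) VMXStartVM(PCPUMCTX pCtx);    // assumed prototype
; *   DECLASM(int) VMXResumeVM(PCPUMCTX pCtx);   // assumed prototype
; *
; *   static int runGuest(PCPUMCTX pCtx, bool *pfLaunched)
; *   {
; *       // First entry after VMCLEAR/VMPTRLD must use VMLAUNCH; every
; *       // following entry on the same VMCS uses VMRESUME.
; *       int rc = *pfLaunched ? VMXResumeVM(pCtx) : VMXStartVM(pCtx);
; *       if (rc == VINF_SUCCESS)
; *           *pfLaunched = true;
; *       return rc;   // VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_UNABLE_TO_*_VM on failure
; *   }
; */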


;/**
; * Prepares for and executes VMRESUME
; *
; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param pCtx Guest context
; */
BEGINPROC VMXResumeVM
    push    xBP
    mov     xBP, xSP

    ;/* First we have to save some final CPU context registers. */
%ifdef __AMD64__
    mov     rax, qword vmresume_done
    push    rax
%else
    push    vmresume_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMRESUME?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi        ; pCtx
 %else
    mov     rsi, rcx        ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]  ; pCtx
%endif
    push    xSI

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; Restore CR2
    mov     xBX, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     vmresume_done       ;/* here if vmresume detected a failure. */

ALIGNCODE(16)
vmresume_done:
    jnc     vmresume_good

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     vmresume_end

vmresume_good:
    jnz     vmresume_success

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
    jmp     vmresume_end

vmresume_success:
    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef __AMD64__
    pop     xAX                             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX             ; saved LDTR
    lldt    ax

    add     xSP, xS         ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

vmresume_end:
    pop     xBP
    ret
ENDPROC VMXResumeVM


%ifdef __AMD64__
;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param idxField x86: [ebp + 08h]  msc: rcx  gcc: edi   VMCS index
; * @param pData    x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param idxField VMCS index
; * @param pData    Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64
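
;/*
; * Illustrative only: writing a VMCS field and reading it back with the
; * helpers above. The VMXReadVMCS64 prototype is the one quoted in this file;
; * the value-passing VMXWriteVMCS64 prototype, the field choice and the
; * variable names are assumptions for the example.
; *
; *   DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Val);  // assumed
; *   DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
; *
; *   uint64_t u64;
; *   int rc = VMXWriteVMCS64(VMX_VMCS_HOST_RSP, uHostRsp);   // placeholder value
; *   if (rc == VINF_SUCCESS)
; *       rc = VMXReadVMCS64(VMX_VMCS_HOST_RSP, &u64);
; *   // rc is VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD on failure.
; */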


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param HCPhysVMXOn Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef __AMD64__
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef __AMD64__
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef __AMD64__
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef __AMD64__
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef __AMD64__
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef __AMD64__
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS
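
;/*
; * Illustrative only: the usual order in which a caller would use the helpers
; * above when bringing up VT-x on a CPU, based on the instructions they wrap
; * (VMXON -> VMCLEAR -> VMPTRLD). Variable names are placeholders.
; *
; *   int rc = VMXEnable(HCPhysVMXOnRegion);      // VMXON
; *   if (rc == VINF_SUCCESS)
; *       rc = VMXClearVMCS(HCPhysVMCS);          // VMCLEAR: initialize/clear the VMCS
; *   if (rc == VINF_SUCCESS)
; *       rc = VMXActivateVMCS(HCPhysVMCS);       // VMPTRLD: make it the current VMCS
; *   // ...program VMCS fields with VMXWriteVMCS64, then enter with VMXStartVM...
; *   // VMXDisable() executes VMXOFF when done.
; */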

%endif ; __AMD64__


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys     Physical address of guest VMCB
; * @param pCtx          Guest context
; */
BEGINPROC SVMVMRun
%ifdef __AMD64__ ; fake a cdecl stack frame - I'm lazy, sosume.
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                 ; save for the vmload after vmrun
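    ; (The SVM instructions below are emitted as raw byte sequences, presumably
    ;  because the assembler in use does not yet know the VMSAVE/VMLOAD/VMRUN/
    ;  CLGI/STGI mnemonics; the encodings are the standard 0F 01 /x forms.)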
    DB      0x0F, 0x01, 0xDB    ; VMSAVE

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    DB      0x0f, 0x01, 0xDD    ; CLGI
    sti

    ; load guest fs, gs, sysenter msr etc
    DB      0x0f, 0x01, 0xDA    ; VMLOAD
    ; run the VM
    DB      0x0F, 0x01, 0xD8    ; VMRUN

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    DB      0x0F, 0x01, 0xDB    ; VMSAVE

    ; load host fs, gs, sysenter msr etc
    pop     xAX                 ; pushed above
    DB      0x0F, 0x01, 0xDA    ; VMLOAD

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    DB      0x0f, 0x01, 0xDC    ; STGI

    pop     xAX                 ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    pop     xBP
%ifdef __AMD64__
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun
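
;/*
; * Illustrative only: calling SVMVMRun from ring-0 C code. The DECLASM
; * prototype is an assumption inferred from the parameter list above; the
; * variable names are placeholders.
; *
; *   DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);  // assumed
; *
; *   int rc = SVMVMRun(HCPhysVMCBHost, HCPhysVMCB, pCtx);
; *   // On VINF_SUCCESS the guest GPRs have been written back into *pCtx.
; */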

%ifdef __AMD64__
%ifdef __WIN__

;;
; Executes INVLPGA
;
; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
; @param uASID   msc:edx gcc:esi x86:[esp+08] Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov     eax, edi            ;; @todo 64-bit guest.
    mov     ecx, esi
 %else
    mov     eax, ecx            ;; @todo 64-bit guest.
    mov     ecx, edx
 %endif
    invlpga rax, ecx
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
    invlpga eax, ecx
%endif
    ret
ENDPROC SVMInvlpgA
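
;/*
; * Illustrative only: invalidating a single guest page in the tagged TLB,
; * using the DECLASM prototype quoted above. GCPtrPage and uASID are
; * placeholder names.
; *
; *   SVMInvlpgA(GCPtrPage, uASID);   // INVLPGA: flush this page for the given ASID
; */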
%endif
%endif