VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@75

Last change on this file since 75 was 75, checked in by vboxsync, 18 years ago

Initial 64-bit port of the code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.6 KB
; $Id: HWACCMR0A.asm 75 2007-01-16 17:27:03Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef __OS2__ ;; @todo build cvs nasm like on OS X.
 %macro vmwrite 2
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
%endif
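; Note: the OS/2 system assembler apparently predates the VMX mnemonics, so
; vmwrite, vmlaunch and vmresume are stubbed out as int3 traps above; they
; are placeholders until a newer NASM is built (see the @todo).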


;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif
 ;; @todo check ds,es saving/restoring on AMD64
 %macro MYPUSHSEGS 2
    push    gs
    push    fs
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
 %endmacro
 %macro MYPOPSEGS 2
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
    pop     fs
    pop     gs
 %endmacro

%else ; __X86__
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif
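
; For illustration only (not assembled): on AMD64 with the GCC convention,
; "MYPUSHSEGS xAX, ax" expands to roughly the following sequence, saving es
; and ds via the GPR pair because they cannot be pushed directly:
;       push    gs
;       push    fs
;       mov     ax, es
;       push    rax
;       mov     ax, ds
;       push    rax
; "MYPOPSEGS xAX, ax" undoes the same in reverse order.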


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH
; *
; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param   pCtx        Guest context
; */
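;DECLASM(int) VMXStartVM(CPUMCTX *pCtx); - prototype assumed from usage, by analogy with the helpers below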
BEGINPROC VMXStartVM
    push    xBP
    mov     xBP, xSP

    ;/* First we have to save some final CPU context registers. */
    push    .vmlaunch_done
    mov     eax, VMX_VMCS_HOST_RIP      ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi            ; pCtx
 %else
    mov     rsi, rcx            ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]      ; pCtx
%endif
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done      ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
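    ; Failure convention for VMX instructions (Intel SDM): CF=1 means
    ; VMfailInvalid (invalid working-VMCS pointer), ZF=1 means VMfailValid
    ; (consult the VM-instruction error field); both clear means success.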
    jnc     .vmxstart_good

    pop     xAX                 ; saved LDTR
    lldt    ax

    add     xSP, xS             ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ;/* Restore all general purpose host registers. */
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_good:
    jnz     .vmxstart_success

    pop     xAX                 ; saved LDTR
    lldt    ax

    add     xSP, xS             ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

.vmxstart_success:
    push    xDI
    mov     xDI, [xSP + xS * 2]     ;/* pCtx */

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef __AMD64__
    pop     xAX                 ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

    pop     xAX                 ; saved LDTR
    lldt    ax
    add     xSP, xS             ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    pop     xBP
    ret
ENDPROC VMXStartVM


;/**
; * Prepares for and executes VMRESUME
; *
; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param   pCtx        Guest context
; */
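;DECLASM(int) VMXResumeVM(CPUMCTX *pCtx); - prototype assumed from usage, matching VMXStartVM above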
BEGINPROC VMXResumeVM
    push    xBP
    mov     xBP, xSP

    ;/* First we have to save some final CPU context registers. */
    push    vmresume_done
    mov     eax, VMX_VMCS_HOST_RIP      ;/* return address (too difficult to continue after VMRESUME?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi            ; pCtx
 %else
    mov     rsi, rcx            ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]      ; pCtx
%endif
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; Restore CR2
    mov     xBX, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     vmresume_done       ;/* here if vmresume detected a failure. */

ALIGNCODE(16)
vmresume_done:
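    ; (Same CF/ZF failure convention as in VMXStartVM above.)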
    jnc     vmresume_good

    pop     xAX                 ; saved LDTR
    lldt    ax

    add     xSP, xS             ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     vmresume_end

vmresume_good:
    jnz     vmresume_success

    pop     xAX                 ; saved LDTR
    lldt    ax

    add     xSP, xS             ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
    jmp     vmresume_end

vmresume_success:
    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef __AMD64__
    pop     xAX                 ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

    pop     xAX                 ; saved LDTR
    lldt    ax

    add     xSP, xS             ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

vmresume_end:
    pop     xBP
    ret
ENDPROC VMXResumeVM


%ifdef __AMD64__
;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: edi     VMCS index
; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi     Pointer to the value to write
; */
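;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t *pData); - prototype assumed by analogy with VMXReadVMCS64 below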
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh     ; serious paranoia
    vmwrite rdi, [rsi]
%else
    and     ecx, 0ffffffffh     ; serious paranoia
    vmwrite rcx, [rdx]
%endif
    mov     eax, VINF_SUCCESS   ; mov preserves the flags set by vmwrite
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64
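
; Hypothetical caller (gcc 64-bit convention), for illustration only:
;       mov     edi, <VMCS field index>     ; idxField
;       lea     rsi, [u64Value]             ; pData
;       call    NAME(VMXWriteVMCS64)
;       test    eax, eax                    ; VINF_SUCCESS is 0
;       jnz     .handle_error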

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh     ; serious paranoia
    vmread  [rsi], rdi
%else
    and     ecx, 0ffffffffh     ; serious paranoia
    vmread  [rdx], rcx
%endif
    mov     eax, VINF_SUCCESS   ; mov preserves the flags set by vmread
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn  Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    vmxon   [esp + 4]
%endif
    mov     eax, VINF_SUCCESS   ; mov preserves the flags set by vmxon
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef __AMD64__
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable
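
; Reminder (Intel SDM; not checked here): the caller is expected to have set
; CR4.VMXE beforehand (vmxon raises #UD otherwise) and to have written the
; VMCS revision identifier to the first dword of the 4KB-aligned VMXON region;
; a bad region pointer is reported as VMfailInvalid (CF=1).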


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS  Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    vmclear [esp + 4]
%endif
    mov     eax, VINF_SUCCESS   ; mov preserves the flags set by vmclear
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef __AMD64__
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS  Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    vmptrld [esp + 4]
%endif
    mov     eax, VINF_SUCCESS   ; mov preserves the flags set by vmptrld
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef __AMD64__
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; __AMD64__


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys  Physical address of host VMCB
; * @param   pVMCBPhys      Physical address of guest VMCB
; * @param   pCtx           Guest context
; */
BEGINPROC SVMVMRun
%ifdef __AMD64__ ; fake a cdecl stack frame - I'm lazy, so sue me.
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
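    ; Resulting frame layout, assumed by the [xBP + ...] accesses below
    ; (RTHCPHYS is 8 bytes on both 32- and 64-bit hosts):
    ;   [xBP]                           saved xBP
    ;   [xBP + xS]                      (possibly faked) return address
    ;   [xBP + xS*2]                    pVMCBHostPhys
    ;   [xBP + xS*2 + RTHCPHYS_CB]      pVMCBPhys
    ;   [xBP + xS*2 + RTHCPHYS_CB*2]    pCtx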

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ; /* Clear fs and gs as a safety precaution. Maybe not necessary. */
    push    fs
    push    gs
    xor     eax, eax
    mov     fs, eax
    mov     gs, eax

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                 ; save for the vmload after vmrun
    DB      0x0F, 0x01, 0xDB    ; VMSAVE
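    ; (The SVM instructions here are emitted as raw opcode bytes, presumably
    ; because the assemblers targeted at the time lacked the VMSAVE/VMLOAD/
    ; VMRUN/CLGI/STGI mnemonics.)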

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    DB      0x0f, 0x01, 0xDD    ; CLGI
    sti

    ; load guest fs, gs, sysenter msr etc
    DB      0x0f, 0x01, 0xDA    ; VMLOAD
    ; run the VM
    DB      0x0F, 0x01, 0xD8    ; VMRUN

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    DB      0x0F, 0x01, 0xDB    ; VMSAVE

    ; load host fs, gs, sysenter msr etc
    pop     xAX                 ; pushed above
    DB      0x0F, 0x01, 0xDA    ; VMLOAD

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    DB      0x0f, 0x01, 0xDC    ; STGI

    pop     xAX                 ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore fs & gs
    pop     gs
    pop     fs

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    pop     xBP
%ifdef __AMD64__
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun