VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm @ 2751

Last change on this file since 2751 was 2751, checked in by vboxsync, 18 years ago:
"Leftover breakpoints"

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.4 KB
; $Id: HWACCMR0A.asm 2751 2007-05-21 16:36:42Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef __OS2__ ;; @todo build cvs nasm like on OS X.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
%endif


;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push r15
    push r14
    push r13
    push r12
    push rbx
  %endmacro
  %macro MYPOPAD 0
    pop rbx
    pop r12
    pop r13
    pop r14
    pop r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push r15
    push r14
    push r13
    push r12
    push rbx
    push rsi
    push rdi
  %endmacro
  %macro MYPOPAD 0
    pop rdi
    pop rsi
    pop rbx
    pop r12
    pop r13
    pop r14
    pop r15
  %endmacro
 %endif
 ;; @todo check ds,es saving/restoring on AMD64
 %macro MYPUSHSEGS 2
    push gs
    push fs
    mov %2, es
    push %1
    mov %2, ds
    push %1
 %endmacro
 %macro MYPOPSEGS 2
    pop %1
    mov ds, %2
    pop %1
    mov es, %2
    pop fs
    pop gs
 %endmacro

%else ; __X86__
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push ds
    push es
    push fs
    push gs
 %endmacro
 %macro MYPOPSEGS 2
    pop gs
    pop fs
    pop es
    pop ds
 %endmacro
%endif
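
; Illustrative note (not part of the original source): in 64-bit mode only FS and
; GS can be pushed/popped directly, which is why the AMD64 MYPUSHSEGS/MYPOPSEGS
; bounce DS and ES through a general purpose register. Assuming the AMD64
; definitions above, "MYPUSHSEGS xAX, ax" expands roughly to:
;
;    push gs
;    push fs
;    mov ax, es
;    push rax
;    mov ax, ds
;    push rax
;
; and "MYPOPSEGS xAX, ax" undoes it in the reverse order.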


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH
; *
; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param pCtx Guest context
; */
BEGINPROC VMXStartVM
    push xBP
    mov xBP, xSP

    ;/* First we have to save some final CPU context registers. */
%ifdef __AMD64__
    mov rax, qword .vmlaunch_done
    push rax
%else
    push .vmlaunch_done
%endif
    mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov rsi, rdi ; pCtx
 %else
    mov rsi, rcx ; pCtx
 %endif
%else
    mov esi, [ebp + 8] ; pCtx
%endif
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; Restore CR2
    mov ebx, [xSI + CPUMCTX.cr2]
    mov cr2, xBX

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
    jnc .vmxstart_good

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ;/* Restore all general purpose host registers. */
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart_end

.vmxstart_good:
    jnz .vmxstart_success

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

.vmxstart_success:
    push xDI
    mov xDI, [xSP + xS * 2] ;/* pCtx */

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef __AMD64__
    pop xAX ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    pop xBP
    ret
ENDPROC VMXStartVM
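
; Illustrative note (not part of the original source): a hypothetical C caller,
; assuming the usual DECLASM prototype, e.g. DECLASM(int) VMXStartVM(CPUMCTX *pCtx):
;
;    /* rc = VMXStartVM(pCtx);    -- first entry into the guest  */
;    /* ...handle the VM-exit, update pCtx...                    */
;    /* rc = VMXResumeVM(pCtx);   -- subsequent entries          */
;
; Return value mapping above: CF set after vmlaunch (VMfailInvalid) yields
; VERR_VMX_INVALID_VMXON_PTR, ZF set (VMfailValid) yields
; VERR_VMX_UNABLE_TO_START_VM, and a normal VM-exit resumes at .vmlaunch_done
; with both flags clear, so the guest registers are written back to pCtx and
; VINF_SUCCESS is returned.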


;/**
; * Prepares for and executes VMRESUME
; *
; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param pCtx Guest context
; */
BEGINPROC VMXResumeVM
    push xBP
    mov xBP, xSP

    ;/* First we have to save some final CPU context registers. */
%ifdef __AMD64__
    mov rax, qword vmresume_done
    push rax
%else
    push vmresume_done
%endif
    mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMRESUME?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov rsi, rdi ; pCtx
 %else
    mov rsi, rcx ; pCtx
 %endif
%else
    mov esi, [ebp + 8] ; pCtx
%endif
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; Restore CR2
    mov xBX, [xSI + CPUMCTX.cr2]
    mov cr2, xBX

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp vmresume_done ;/* here if vmresume detected a failure. */

ALIGNCODE(16)
vmresume_done:
    jnc vmresume_good

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp vmresume_end

vmresume_good:
    jnz vmresume_success

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_RESUME_VM
    jmp vmresume_end

vmresume_success:
    push xDI
    mov xDI, [xSP + xS * 2] ; pCtx

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef __AMD64__
    pop xAX ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

vmresume_end:
    pop xBP
    ret
ENDPROC VMXResumeVM
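
; Illustrative note (not part of the original source): VMLAUNCH is only valid for
; a VMCS whose launch state is "clear" (i.e. after VMCLEAR), while VMRESUME
; requires the "launched" state, so the caller is expected to use VMXStartVM for
; the first entry on a given VMCS and VMXResumeVM for all subsequent entries.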


%ifdef __AMD64__
;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param idxField x86: [ebp + 08h] msc: rcx gcc: edi VMCS index
; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
; */
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov eax, 0ffffffffh
    and rdi, rax
    xor rax, rax
    vmwrite rdi, rsi
%else
    mov eax, 0ffffffffh
    and rcx, rax
    xor rax, rax
    vmwrite rcx, rdx
%endif
    jnc .valid_vmcs
    mov eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz .the_end
    mov eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64
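
; Illustrative usage sketch (not part of the original source; uHostRip is a
; hypothetical value):
;
;    /* rc = VMXWriteVMCS64(VMX_VMCS_HOST_RIP, uHostRip); */
;
; The 0ffffffffh mask clears the upper half of the 64-bit argument register
; because idxField is only a 32-bit VMCS encoding; CF set afterwards maps to
; VERR_VMX_INVALID_VMCS_PTR, ZF set to VERR_VMX_INVALID_VMCS_FIELD.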

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param idxField VMCS index
; * @param pData Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov eax, 0ffffffffh
    and rdi, rax
    xor rax, rax
    vmread [rsi], rdi
%else
    mov eax, 0ffffffffh
    and rcx, rax
    xor rax, rax
    vmread [rdx], rcx
%endif
    jnc .valid_vmcs
    mov eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz .the_end
    mov eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64
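
; Illustrative usage sketch (not part of the original source):
;
;    /* uint64_t u64Val;                                */
;    /* rc = VMXReadVMCS64(VMX_VMCS_HOST_RSP, &u64Val); */
;
; VMREAD stores the field value through the pData pointer; the CF/ZF checks
; mirror VMXWriteVMCS64 above.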


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param HCPhysVMXOn Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef __AMD64__
    xor rax, rax
 %ifdef ASM_CALL64_GCC
    push rdi
 %else
    push rcx
 %endif
    vmxon [rsp]
%else
    xor eax, eax
    vmxon [esp + 4]
%endif
    jnc .good
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .the_end

.good:
    jnz .the_end
    mov eax, VERR_VMX_GENERIC

.the_end:
%ifdef __AMD64__
    add rsp, 8
%endif
    ret
ENDPROC VMXEnable
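
; Illustrative note (not part of the original source): VMXON expects CR4.VMXE
; (X86_CR4_VMXE) to be set beforehand and the VMXON region, whose physical
; address is passed here, to start with the VMCS revision identifier. A
; hypothetical caller:
;
;    /* caller has already set CR4.VMXE and initialised the VMXON region */
;    /* rc = VMXEnable(HCPhysVMXOn);                                     */
;
; CF set maps to VERR_VMX_INVALID_VMXON_PTR, ZF set to VERR_VMX_GENERIC.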


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef __AMD64__
    xor rax, rax
 %ifdef ASM_CALL64_GCC
    push rdi
 %else
    push rcx
 %endif
    vmclear [rsp]
%else
    xor eax, eax
    vmclear [esp + 4]
%endif
    jnc .the_end
    mov eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef __AMD64__
    add rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS
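
; Illustrative note (not part of the original source): VMCLEAR flushes any cached
; data for the referenced VMCS back to memory and puts its launch state into
; "clear", which is the prerequisite for the VMLAUNCH path in VMXStartVM above.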


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param HCPhysVMCS Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef __AMD64__
    xor rax, rax
 %ifdef ASM_CALL64_GCC
    push rdi
 %else
    push rcx
 %endif
    vmptrld [rsp]
%else
    xor eax, eax
    vmptrld [esp + 4]
%endif
    jnc .the_end
    mov eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef __AMD64__
    add rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS
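
; Illustrative note (not part of the original source): a hypothetical setup
; sequence on the VT-x path would be roughly
;
;    /* VMXEnable(HCPhysVMXOn);        -- enter VMX root operation */
;    /* VMXClearVMCS(HCPhysVMCS);      -- initialise the VMCS      */
;    /* VMXActivateVMCS(HCPhysVMCS);   -- make it the current VMCS */
;    /* VMXWriteVMCS64(...); ...       -- program host/guest state */
;    /* VMXStartVM(pCtx);              -- first world switch       */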

%endif ; __AMD64__


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
BEGINPROC SVMVMRun
%ifdef __AMD64__ ; fake a cdecl stack frame - I'm lazy, so sue me.
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
    push xBP
    mov xBP, xSP

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push xSI ; push for saving the state at the end

    ; Restore CR2
    mov ebx, [xSI + CPUMCTX.cr2]
    mov cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    DB 0x0F, 0x01, 0xDB ; VMSAVE

    ; setup eax for VMLOAD
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    DB 0x0f, 0x01, 0xDD ; CLGI
    sti

    ; load guest fs, gs, sysenter msr etc
    DB 0x0f, 0x01, 0xDA ; VMLOAD
    ; run the VM
    DB 0x0F, 0x01, 0xD8 ; VMRUN

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    DB 0x0F, 0x01, 0xDB ; VMSAVE

    ; load host fs, gs, sysenter msr etc
    pop xAX ; pushed above
    DB 0x0F, 0x01, 0xDA ; VMLOAD

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    DB 0x0f, 0x01, 0xDC ; STGI

    pop xAX ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    pop xBP
%ifdef __AMD64__
    add xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun
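
; Illustrative note (not part of the original source): the DB 0x0F, 0x01, ...
; sequences above are hand-encoded SVM instructions (VMRUN/VMLOAD/VMSAVE/CLGI/STGI)
; for assemblers that do not know the opcodes yet. VMRUN, VMLOAD and VMSAVE take
; the physical address of a VMCB in rAX/eAX, while CLGI and STGI take no operands.
; A hypothetical C caller would look like:
;
;    /* rc = SVMVMRun(HCPhysVMCBHost, HCPhysVMCB, pCtx); */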

%ifdef __AMD64__
%ifdef __WIN__

;;
; Executes INVLPGA
;
; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef __AMD64__
 %ifdef ASM_CALL64_GCC
    mov eax, edi ;; @todo 64-bit guest.
    mov ecx, esi
 %else
    mov eax, ecx ;; @todo 64-bit guest.
    mov ecx, edx
 %endif
    invlpga rax, ecx
%else
    mov eax, [esp + 4]
    mov ecx, [esp + 8]
    invlpga eax, ecx
%endif
    ret
ENDPROC SVMInvlpgA
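
; Illustrative note (not part of the original source): INVLPGA invalidates the
; TLB mapping for the virtual address in rAX/eAX using the ASID given in ECX,
; which is why the two arguments are simply moved into those registers above.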
%endif
%endif