VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac@ 15377

Last change on this file since 15377 was 15360, checked in by vboxsync, 16 years ago

; $Id: HWACCMR0Mixed.mac 15360 2008-12-12 12:25:21Z vboxsync $
;; @file
; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    x86:[ebp+8],  msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param pCtx       x86:[ebp+c],  msc:rdx, gcc:rsi     Guest context
; * @param pCache     x86:[ebp+10], msc:r8,  gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    mov     eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea     r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov     ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
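    ; On every VM exit the CPU reloads RIP from the HOST_RIP field written above,
    ; so execution continues at .vmlaunch_done below.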
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov     rbx, rdx        ; pCache
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
    mov     rbx, r8         ; pCache
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
    mov     ebx, [ebp + 16] ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
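    ; Flush any VMWRITEs queued in the VMCS write cache before entering the guest.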
    mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     edx, ecx
    mov     ecx, 0
    jmp     .cached_write

ALIGN(16)
.cached_write:
    mov     eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc     xCX
    cmp     xCX, xDX
    jl      .cached_write

    mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push    xBX
%endif

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */
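    ; The CPU reloads RSP from the HOST_RSP field on every VM exit, so the stack
    ; must look exactly like this when we land at .vmlaunch_done.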

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlauch_lauch

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done  ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
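    ; We arrive here either via the HOST_RIP entry point after a VM exit (the exit clears
    ; RFLAGS except bit 1, so CF=ZF=0 and we fall through), or because VMLAUNCH/VMRESUME
    ; itself failed: CF=1 means VMfailInvalid, ZF=1 means VMfailValid (check the
    ; VM-instruction error field).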
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx (*2 to skip the saved LDTR)

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX                             ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     xDX         ; saved pCache
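    ; After the VM exit, pull the cached VMCS fields back into the read cache with VMREAD.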

    mov     ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0      ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

ALIGN(16)
.cached_read:
    dec     xCX
    mov     eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread  [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp     xCX, 0
    jnz     .cached_read
.no_cached_reads:
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS*2   ; pCtx + pCache
%else
    add     xSP, xS     ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS*2   ; pCtx + pCache
%else
    add     xSP, xS     ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param pCtx       msc:rdx, gcc:rsi     Guest context
; * @param pCache     msc:r8,  gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    lea     r10, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, r10
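    ; As in the 32-bit variant: on VM exit the CPU continues at .vmlaunch64_done.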
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov     rbx, rdx        ; pCache
%else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
    mov     rbx, r8         ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     edx, ecx
    mov     ecx, 0
    jmp     .cached_write

ALIGN(16)
.cached_write:
    mov     eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc     xCX
    cmp     xCX, xDX
    jl      .cached_write

    mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push    xBX
%endif

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
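    ; These syscall/swapgs related MSRs are not part of the VMCS guest/host state areas,
    ; so they are swapped by hand here and restored with LOADHOSTMSR(EX) after the VM exit.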

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     rbx, qword [xSI + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [xSI + CPUMCTX.eax]
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp     xDI, 0          ; fResume
    je      .vmlauch64_lauch

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch64_done    ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done    ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
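    ; Same convention as .vmlaunch_done above: CF=1/ZF=1 only when VMLAUNCH/VMRESUME itself failed.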
    jc      near .vmxstart64_invalid_vmxon_ptr
    jz      near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx (*2 to skip the saved LDTR)

    mov     qword [xDI + CPUMCTX.eax], rax
    mov     qword [xDI + CPUMCTX.ebx], rbx
    mov     qword [xDI + CPUMCTX.ecx], rcx
    mov     qword [xDI + CPUMCTX.edx], rdx
    mov     qword [xDI + CPUMCTX.esi], rsi
    mov     qword [xDI + CPUMCTX.ebp], rbp
    mov     qword [xDI + CPUMCTX.r8],  r8
    mov     qword [xDI + CPUMCTX.r9],  r9
    mov     qword [xDI + CPUMCTX.r10], r10
    mov     qword [xDI + CPUMCTX.r11], r11
    mov     qword [xDI + CPUMCTX.r12], r12
    mov     qword [xDI + CPUMCTX.r13], r13
    mov     qword [xDI + CPUMCTX.r14], r14
    mov     qword [xDI + CPUMCTX.r15], r15

    pop     xAX         ; the guest edi we pushed above
    mov     qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    pop     xSI         ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     xDX         ; saved pCache

    mov     ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0      ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

ALIGN(16)
.cached_read:
    dec     xCX
    mov     eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread  [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp     xCX, 0
    jnz     .cached_read
.no_cached_reads:
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    pop     xSI         ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS     ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    pop     xSI         ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS     ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys   Physical address of host VMCB
; * @param pVMCBPhys       Physical address of guest VMCB
; * @param pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
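    ; On AMD64 the register arguments (plus a dummy return-address slot) were pushed above
    ; so that the [xBP + xS*2 ...] parameter offsets below match the x86 cdecl layout.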
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                                 ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]                   ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push    xAX                                 ; save for the vmload after vmrun
    vmsave
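    ; vmsave stores the hidden segment state (FS, GS, TR, LDTR), KernelGSBase, the
    ; STAR/LSTAR/CSTAR/SFMASK and SYSENTER MSRs into the VMCB addressed by rAX.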

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64-bit physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti
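    ; While GIF is clear, interrupts and NMIs are held pending, so nothing can preempt
    ; us between the sti above and the vmrun below.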

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX     ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX     ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys   Physical address of host VMCB
; * @param pVMCBPhys       Physical address of guest VMCB
; * @param pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
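    ; Dummy return-address slot; keeps the [rbp + xS*2 ...] parameter offsets below
    ; identical to the 32-bit cdecl layout.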
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    rsi                                 ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + xS*2]                   ; pVMCBHostPhys (64-bit physical address)
    push    rax                                 ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64-bit physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax     ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax     ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    add     rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64