source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac@ 15749

Last change on this file since 15749 was 15440, checked in by vboxsync, 16 years ago

Sync CR2 properly for VT-x EPT (unable to access the 64 bits cr2 from 32 bits mode).

; $Id: HWACCMR0Mixed.mac 15440 2008-12-13 13:09:30Z vboxsync $
;; @file
; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Guest context
; * @param pCache x86:[ebp+10],msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
 push xBP
 mov xBP, xSP

 pushf
 cli

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* First we have to save some final CPU context registers. */
 mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
 lea r10, [.vmlaunch_done wrt rip]
 vmwrite rax, r10
%else
 mov ecx, .vmlaunch_done
 vmwrite eax, ecx
%endif
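 ; Note: the HOST_RIP field is loaded into RIP on every VM-exit, so after a
 ; successful VMLAUNCH/VMRESUME the CPU resumes host execution at .vmlaunch_done.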
 ;/* Note: assumes success... */

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
 ; *
 ; */

 ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
 ; fResume already in rdi
 ; pCtx already in rsi
 mov rbx, rdx ; pCache
 %else
 mov rdi, rcx ; fResume
 mov rsi, rdx ; pCtx
 mov rbx, r8 ; pCache
 %endif
%else
 mov edi, [ebp + 8] ; fResume
 mov esi, [ebp + 12] ; pCtx
 mov ebx, [ebp + 16] ; pCache
%endif

 ;/* Save segment registers */
 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
 MYPUSHSEGS xAX, ax

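 ; Note: when the cached VMCS accesses are enabled, the write cache holds
 ; (field, value) pairs queued earlier; they are flushed into the VMCS with
 ; VMWRITE here and the cache is marked empty again.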
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
 cmp ecx, 0
 je .no_cached_writes
 mov edx, ecx
 mov ecx, 0
 jmp .cached_write

ALIGN(16)
.cached_write:
 mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
 inc xCX
 cmp xCX, xDX
 jl .cached_write

 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

 ; Save the pCache pointer
 push xBX
%endif

 ; Save the pCtx pointer
 push xSI

 ; Save LDTR
 xor eax, eax
 sldt ax
 push xAX

 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
 sub xSP, xS*2
 sgdt [xSP]

 sub xSP, xS*2
 sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Restore DR6 - experiment, not safe!
 mov xBX, [xSI + CPUMCTX.dr6]
 mov dr6, xBX
%endif

 ; Restore CR2
 mov ebx, [xSI + CPUMCTX.cr2]
 mov cr2, xBX

 mov eax, VMX_VMCS_HOST_RSP
 vmwrite xAX, xSP
 ;/* Note: assumes success... */
 ;/* Don't mess with ESP anymore!! */
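 ; Note: on every VM-exit RSP is reloaded from the HOST_RSP field written above,
 ; so the stack must stay exactly as laid out here (IDTR, GDTR, LDTR, pCtx and,
 ; when enabled, pCache) for the pops after .vmlaunch_done to match up.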

 ;/* Restore Guest's general purpose registers. */
 mov eax, [xSI + CPUMCTX.eax]
 mov ebx, [xSI + CPUMCTX.ebx]
 mov ecx, [xSI + CPUMCTX.ecx]
 mov edx, [xSI + CPUMCTX.edx]
 mov ebp, [xSI + CPUMCTX.ebp]

 ; resume or start?
 cmp xDI, 0 ; fResume
 je .vmlauch_lauch

 ;/* Restore edi & esi. */
 mov edi, [xSI + CPUMCTX.edi]
 mov esi, [xSI + CPUMCTX.esi]

 vmresume
 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
 ;/* Restore edi & esi. */
 mov edi, [xSI + CPUMCTX.edi]
 mov esi, [xSI + CPUMCTX.esi]

 vmlaunch
 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */

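 ; Note: a successful VMLAUNCH/VMRESUME does not fall through; control only
 ; returns to .vmlaunch_done via a VM-exit. If the instruction itself fails,
 ; execution continues here with CF set (VMfailInvalid) or ZF set (VMfailValid),
 ; which the jc/jz below turn into error returns.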
ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
 jc near .vmxstart_invalid_vmxon_ptr
 jz near .vmxstart_start_failed

 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 push xDI
 mov xDI, [xSP + xS * 2] ; pCtx (*2 to skip the saved LDTR)

 mov [ss:xDI + CPUMCTX.eax], eax
 mov [ss:xDI + CPUMCTX.ebx], ebx
 mov [ss:xDI + CPUMCTX.ecx], ecx
 mov [ss:xDI + CPUMCTX.edx], edx
 mov [ss:xDI + CPUMCTX.esi], esi
 mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
 pop xAX ; the guest edi we pushed above
 mov dword [ss:xDI + CPUMCTX.edi], eax
%else
 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Save DR6 - experiment, not safe!
 mov xAX, dr6
 mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

 pop xAX ; saved LDTR
 lldt ax

 add xSP, xS ; pCtx

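 ; Note: the read cache lists VMCS fields to be fetched after the exit; each is
 ; pulled out with VMREAD into aFieldVal below. CR2 is captured as well, since
 ; it is not part of the VMCS guest state and still holds the guest's value here.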
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 pop xDX ; saved pCache

 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
 cmp ecx, 0 ; can't happen
 je .no_cached_reads
 jmp .cached_read

ALIGN(16)
.cached_read:
 dec xCX
 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
 cmp xCX, 0
 jnz .cached_read
.no_cached_reads:

 ; Save CR2 for EPT
 mov xAX, cr2
 mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

.vmstart_end:
 popf
 pop xBP
 ret


.vmxstart_invalid_vmxon_ptr:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 add xSP, xS*2 ; pCtx + pCache
%else
 add xSP, xS ; pCtx
%endif

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_INVALID_VMXON_PTR
 jmp .vmstart_end

.vmxstart_start_failed:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 add xSP, xS*2 ; pCtx + pCache
%else
 add xSP, xS ; pCtx
%endif

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_UNABLE_TO_START_VM
 jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
 push xBP
 mov xBP, xSP

 pushf
 cli

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* First we have to save some final CPU context registers. */
 lea r10, [.vmlaunch64_done wrt rip]
 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
 vmwrite rax, r10
 ;/* Note: assumes success... */

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
 ; *
 ; */

 ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
 ; fResume already in rdi
 ; pCtx already in rsi
 mov rbx, rdx ; pCache
%else
 mov rdi, rcx ; fResume
 mov rsi, rdx ; pCtx
 mov rbx, r8 ; pCache
%endif

 ;/* Save segment registers */
 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
 MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
 cmp ecx, 0
 je .no_cached_writes
 mov edx, ecx
 mov ecx, 0
 jmp .cached_write

ALIGN(16)
.cached_write:
 mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
 inc xCX
 cmp xCX, xDX
 jl .cached_write

 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

 ; Save the pCache pointer
 push xBX
%endif

 ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
 ;; @todo use the automatic load feature for MSRs
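 ; Note: these syscall MSRs and KERNEL_GS_BASE are not covered by the VMCS
 ; host/guest state areas, so they are swapped by hand here and swapped back on
 ; every exit path below (the @todo refers to the VMX MSR auto load/store lists).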
 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
 LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

 ; Save the pCtx pointer
 push xSI

 ; Save LDTR
 xor eax, eax
 sldt ax
 push xAX

 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
 sub xSP, xS*2
 sgdt [xSP]

 sub xSP, xS*2
 sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Restore DR6 - experiment, not safe!
 mov xBX, [xSI + CPUMCTX.dr6]
 mov dr6, xBX
%endif

 ; Restore CR2
 mov rbx, qword [xSI + CPUMCTX.cr2]
 mov cr2, rbx

 mov eax, VMX_VMCS_HOST_RSP
 vmwrite xAX, xSP
 ;/* Note: assumes success... */
 ;/* Don't mess with ESP anymore!! */

 ;/* Restore Guest's general purpose registers. */
 mov rax, qword [xSI + CPUMCTX.eax]
 mov rbx, qword [xSI + CPUMCTX.ebx]
 mov rcx, qword [xSI + CPUMCTX.ecx]
 mov rdx, qword [xSI + CPUMCTX.edx]
 mov rbp, qword [xSI + CPUMCTX.ebp]
 mov r8, qword [xSI + CPUMCTX.r8]
 mov r9, qword [xSI + CPUMCTX.r9]
 mov r10, qword [xSI + CPUMCTX.r10]
 mov r11, qword [xSI + CPUMCTX.r11]
 mov r12, qword [xSI + CPUMCTX.r12]
 mov r13, qword [xSI + CPUMCTX.r13]
 mov r14, qword [xSI + CPUMCTX.r14]
 mov r15, qword [xSI + CPUMCTX.r15]

 ; resume or start?
 cmp xDI, 0 ; fResume
 je .vmlauch64_lauch

 ;/* Restore rdi & rsi. */
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rsi, qword [xSI + CPUMCTX.esi]

 vmresume
 jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
 ;/* Restore rdi & rsi. */
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rsi, qword [xSI + CPUMCTX.esi]

 vmlaunch
 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */

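 ; Note: as in the 32-bit variant, .vmlaunch64_done is reached either by falling
 ; through on a VMLAUNCH/VMRESUME failure (CF/ZF set) or via a VM-exit through
 ; the HOST_RIP field written above.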
ALIGNCODE(16)
.vmlaunch64_done:
 jc near .vmxstart64_invalid_vmxon_ptr
 jz near .vmxstart64_start_failed

 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 push xDI
 mov xDI, [xSP + xS * 2] ; pCtx (*2 to skip the saved LDTR)

 mov qword [xDI + CPUMCTX.eax], rax
 mov qword [xDI + CPUMCTX.ebx], rbx
 mov qword [xDI + CPUMCTX.ecx], rcx
 mov qword [xDI + CPUMCTX.edx], rdx
 mov qword [xDI + CPUMCTX.esi], rsi
 mov qword [xDI + CPUMCTX.ebp], rbp
 mov qword [xDI + CPUMCTX.r8], r8
 mov qword [xDI + CPUMCTX.r9], r9
 mov qword [xDI + CPUMCTX.r10], r10
 mov qword [xDI + CPUMCTX.r11], r11
 mov qword [xDI + CPUMCTX.r12], r12
 mov qword [xDI + CPUMCTX.r13], r13
 mov qword [xDI + CPUMCTX.r14], r14
 mov qword [xDI + CPUMCTX.r15], r15

 pop xAX ; the guest edi we pushed above
 mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
 ; Save DR6 - experiment, not safe!
 mov xAX, dr6
 mov [xDI + CPUMCTX.dr6], xAX
%endif

 pop xAX ; saved LDTR
 lldt ax

 pop xSI ; pCtx (needed in rsi by the macros below)

 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
 ;; @todo use the automatic load feature for MSRs
 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 LOADHOSTMSR MSR_K8_SF_MASK
 LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
 LOADHOSTMSR MSR_K8_CSTAR
%endif
 LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 pop xDX ; saved pCache

 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
 cmp ecx, 0 ; can't happen
 je .no_cached_reads
 jmp .cached_read

ALIGN(16)
.cached_read:
 dec xCX
 mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
 cmp xCX, 0
 jnz .cached_read
.no_cached_reads:

 ; Save CR2 for EPT
 mov xAX, cr2
 mov [xDX + VMCSCACHE.cr2], xAX
%endif

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

.vmstart64_end:
 popf
 pop xBP
 ret


.vmxstart64_invalid_vmxon_ptr:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

 pop xSI ; pCtx (needed in rsi by the macros below)

 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
 ;; @todo use the automatic load feature for MSRs
 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 LOADHOSTMSR MSR_K8_SF_MASK
 LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
 LOADHOSTMSR MSR_K8_CSTAR
%endif
 LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 add xSP, xS ; pCache
%endif

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_INVALID_VMXON_PTR
 jmp .vmstart64_end

.vmxstart64_start_failed:
 ; Restore base and limit of the IDTR & GDTR
 lidt [xSP]
 add xSP, xS*2
 lgdt [xSP]
 add xSP, xS*2

 pop xAX ; saved LDTR
 lldt ax

 pop xSI ; pCtx (needed in rsi by the macros below)

 ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
 ;; @todo use the automatic load feature for MSRs
 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 LOADHOSTMSR MSR_K8_SF_MASK
 LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
 LOADHOSTMSR MSR_K8_CSTAR
%endif
 LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 add xSP, xS ; pCache
%endif

 ; Restore segment registers
 MYPOPSEGS xAX, ax

 ; Restore all general purpose host registers.
 MYPOPAD
 mov eax, VERR_VMX_UNABLE_TO_START_VM
 jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
 push rdx
 push rsi
 push rdi
 %else
 push r8
 push rdx
 push rcx
 %endif
 push 0
%endif
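 ; Note: pushing the three register arguments (plus a dummy return-address slot)
 ; lets the 64-bit build address them via [xBP + xS*...] exactly like the 32-bit
 ; cdecl build; the extra slots are dropped again with "add xSP, 4*xS" before the ret.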
 push xBP
 mov xBP, xSP
 pushf

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP, RAX
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; */

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* Save the Guest CPU context pointer. */
 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
 push xSI ; push for saving the state at the end

 ; save host fs, gs, sysenter msr etc
 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
 push xAX ; save for the vmload after vmrun
 vmsave
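 ; Note: VMSAVE/VMLOAD operate on the VMCB whose physical address is in rax/eax,
 ; which is why the host VMCB address is loaded first (and kept on the stack for
 ; the VMLOAD after VMRUN) before eax is switched to the guest VMCB below.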

 ; setup eax for VMLOAD
 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

 ;/* Restore Guest's general purpose registers. */
 ;/* EAX is loaded from the VMCB by VMRUN */
 mov ebx, [xSI + CPUMCTX.ebx]
 mov ecx, [xSI + CPUMCTX.ecx]
 mov edx, [xSI + CPUMCTX.edx]
 mov edi, [xSI + CPUMCTX.edi]
 mov ebp, [xSI + CPUMCTX.ebp]
 mov esi, [xSI + CPUMCTX.esi]

 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
 clgi
 sti
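 ; Note: with GIF clear the host takes no interrupts even though IF=1; once the
 ; guest is entered, an arriving external interrupt then triggers a #VMEXIT
 ; (assuming the INTR intercept is set in the VMCB) instead of being delivered.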

 ; load guest fs, gs, sysenter msr etc
 vmload
 ; run the VM
 vmrun

 ;/* EAX is in the VMCB already; we can use it here. */

 ; save guest fs, gs, sysenter msr etc
 vmsave

 ; load host fs, gs, sysenter msr etc
 pop xAX ; pushed above
 vmload

 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
 cli
 stgi

 pop xAX ; pCtx

 mov [ss:xAX + CPUMCTX.ebx], ebx
 mov [ss:xAX + CPUMCTX.ecx], ecx
 mov [ss:xAX + CPUMCTX.edx], edx
 mov [ss:xAX + CPUMCTX.esi], esi
 mov [ss:xAX + CPUMCTX.edi], edi
 mov [ss:xAX + CPUMCTX.ebp], ebp

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

 popf
 pop xBP
%ifdef RT_ARCH_AMD64
 add xSP, 4*xS
%endif
 ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
 push rdx
 push rsi
 push rdi
 %else
 push r8
 push rdx
 push rcx
 %endif
 push 0
 push rbp
 mov rbp, rsp
 pushf

 ;/* Manual save and restore:
 ; * - General purpose registers except RIP, RSP, RAX
 ; *
 ; * Trashed:
 ; * - CR2 (we don't care)
 ; * - LDTR (reset to 0)
 ; * - DRx (presumably not changed at all)
 ; * - DR7 (reset to 0x400)
 ; */

 ;/* Save all general purpose host registers. */
 MYPUSHAD

 ;/* Save the Guest CPU context pointer. */
 mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
 push rsi ; push for saving the state at the end

 ; save host fs, gs, sysenter msr etc
 mov rax, [rbp + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
 push rax ; save for the vmload after vmrun
 vmsave

 ; setup eax for VMLOAD
 mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

 ;/* Restore Guest's general purpose registers. */
 ;/* RAX is loaded from the VMCB by VMRUN */
 mov rbx, qword [xSI + CPUMCTX.ebx]
 mov rcx, qword [xSI + CPUMCTX.ecx]
 mov rdx, qword [xSI + CPUMCTX.edx]
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rbp, qword [xSI + CPUMCTX.ebp]
 mov r8, qword [xSI + CPUMCTX.r8]
 mov r9, qword [xSI + CPUMCTX.r9]
 mov r10, qword [xSI + CPUMCTX.r10]
 mov r11, qword [xSI + CPUMCTX.r11]
 mov r12, qword [xSI + CPUMCTX.r12]
 mov r13, qword [xSI + CPUMCTX.r13]
 mov r14, qword [xSI + CPUMCTX.r14]
 mov r15, qword [xSI + CPUMCTX.r15]
 mov rsi, qword [xSI + CPUMCTX.esi]
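 ; Note: rsi is loaded last on purpose, since xSI holds the pCtx pointer that all
 ; of the loads above still need.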

 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
 clgi
 sti

 ; load guest fs, gs, sysenter msr etc
 vmload
 ; run the VM
 vmrun

 ;/* RAX is in the VMCB already; we can use it here. */

 ; save guest fs, gs, sysenter msr etc
 vmsave

 ; load host fs, gs, sysenter msr etc
 pop rax ; pushed above
 vmload

 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
 cli
 stgi

 pop rax ; pCtx

 mov qword [rax + CPUMCTX.ebx], rbx
 mov qword [rax + CPUMCTX.ecx], rcx
 mov qword [rax + CPUMCTX.edx], rdx
 mov qword [rax + CPUMCTX.esi], rsi
 mov qword [rax + CPUMCTX.edi], rdi
 mov qword [rax + CPUMCTX.ebp], rbp
 mov qword [rax + CPUMCTX.r8], r8
 mov qword [rax + CPUMCTX.r9], r9
 mov qword [rax + CPUMCTX.r10], r10
 mov qword [rax + CPUMCTX.r11], r11
 mov qword [rax + CPUMCTX.r12], r12
 mov qword [rax + CPUMCTX.r13], r13
 mov qword [rax + CPUMCTX.r14], r14
 mov qword [rax + CPUMCTX.r15], r15

 ; Restore general purpose registers
 MYPOPAD

 mov eax, VINF_SUCCESS

 popf
 pop rbp
 add rsp, 4*xS
 ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
