VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac@ 15144

Last change on this file since 15144 was 15046, checked in by vboxsync, 16 years ago

No need to restore CR2 for AMD-V.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.7 KB
; $Id: HWACCMR0Mixed.mac 15046 2008-12-05 13:58:00Z vboxsync $
;; @file
; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;
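; Note: MY_NAME, BEGINPROC/ENDPROC and the MYPUSHAD/MYPOPAD/MYPUSHSEGS/MYPOPSEGS helpers
; used below are macros supplied by the including file (HWACCMR0A.asm); MY_NAME presumably
; prefixes each symbol so that the 32-bit and 64-bit instantiations of this file can
; coexist in the same module.
;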

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch/vmresume
; * @param   pCtx       Guest context
; */
BEGINPROC MY_NAME(VMXR0StartVM32)
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    lea     rax, [.vmlaunch_done wrt rip]
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS
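    ; Note: VMX_VMCS_HOST_RIP is the VMCS host-state RIP field, so every VM-exit resumes
    ; execution at .vmlaunch_done below.  xS is assumed (defined in HWACCMR0A.asm) to be
    ; the native word size in bytes (4 on x86, 8 on amd64), so this add discards the
    ; address that was just pushed and vmwritten.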

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
 %else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
 %endif
%else
    mov     edi, [ebp + 8]  ; fResume
    mov     esi, [ebp + 12] ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax
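    ; Note: MYPUSHSEGS/MYPOPSEGS are assumed (see HWACCMR0A.asm) to save and restore the
    ; segment registers VMX does not handle for the host - in particular FS and GS (and on
    ; amd64 presumably their base MSRs as well) - using xAX/ax as scratch.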

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]
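    ; Note: xS*2 (8 or 16 bytes) is enough room for the 6/10 byte descriptor-table image
    ; that SGDT/SIDT store (2 byte limit + 4/8 byte base); the same stack space is reused
    ; by LGDT/LIDT on the way out.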

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX
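    ; Note: CR2 is not part of the VMCS guest state, so the guest's page-fault address has
    ; to be put back by hand before entering the guest.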

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp     xDI, 0              ; fResume
    je      .vmlaunch_launch
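    ; Note: fResume selects VMRESUME (VMCS already launched on this CPU) vs. VMLAUNCH
    ; (first entry with this VMCS); using the wrong one makes the instruction fail with
    ; ZF set, which is caught at .vmlaunch_done below.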

    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch_done      ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov     edi, [xSI + CPUMCTX.edi]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done      ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed
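    ; Note: this is the standard VMX error convention - CF=1 (VMfailInvalid) means the
    ; current VMCS pointer was not valid, ZF=1 (VMfailValid) means the instruction failed
    ; and the reason is in the VM-instruction error field of the VMCS; both flags clear
    ; means we got here through a real VM-exit.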

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx
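    ; Note: after the VM-exit every general purpose register still holds the guest value,
    ; so guest edi is parked on the stack while xDI is repurposed as the pCtx pointer;
    ; pCtx was pushed earlier and now sits two slots up, past the pushed edi and the
    ; saved LDTR selector.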

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX                                 ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]        ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    vmlaunch/vmresume
; * @param   pCtx       Guest context
; */
BEGINPROC MY_NAME(VMXR0StartVM64)
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
    lea     rax, [.vmlaunch64_done wrt rip]
    push    rax
    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, [xSP]
    ;/* Note: assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
%else
    mov     rdi, rcx        ; fResume
    mov     rsi, rdx        ; pCtx
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
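    ; Note: these are the 64-bit syscall/swapgs MSRs, which are not part of the VMCS
    ; host/guest state and therefore have to be switched by hand.  LOADGUESTMSR is assumed
    ; (see HWACCMR0A.asm) to push the current host value and then load the guest value
    ; from the given CPUMCTX field; CSTAR is skipped because, as the %if 0 comment notes,
    ; it is not supported on Intel CPUs.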

    ; Save the pCtx pointer
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     rbx, qword [xSI + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [xSI + CPUMCTX.eax]
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp     xDI, 0              ; fResume
    je      .vmlaunch64_launch

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmlaunch64_done    ;/* here if vmresume detected a failure. */

.vmlaunch64_launch:
    ;/* Restore rdi & rsi. */
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done    ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmxstart64_invalid_vmxon_ptr
    jz      near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     qword [xDI + CPUMCTX.eax], rax
    mov     qword [xDI + CPUMCTX.ebx], rbx
    mov     qword [xDI + CPUMCTX.ecx], rcx
    mov     qword [xDI + CPUMCTX.edx], rdx
    mov     qword [xDI + CPUMCTX.esi], rsi
    mov     qword [xDI + CPUMCTX.ebp], rbp
    mov     qword [xDI + CPUMCTX.r8],  r8
    mov     qword [xDI + CPUMCTX.r9],  r9
    mov     qword [xDI + CPUMCTX.r10], r10
    mov     qword [xDI + CPUMCTX.r11], r11
    mov     qword [xDI + CPUMCTX.r12], r12
    mov     qword [xDI + CPUMCTX.r13], r13
    mov     qword [xDI + CPUMCTX.r14], r14
    mov     qword [xDI + CPUMCTX.r15], r15

    pop     xAX                             ; the guest edi we pushed above
    mov     qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    pop     xSI         ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR
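    ; Note: the host MSRs are restored in the reverse order of the LOADGUESTMSR pushes,
    ; which suggests LOADHOSTMSR simply pops the saved host value back into the MSR;
    ; LOADHOSTMSREX presumably also writes the current (guest) value back into the given
    ; CPUMCTX field first, since KERNEL_GS_BASE can be changed by the guest.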

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    pop     xSI         ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    pop     xSI         ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
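    ; Note: on amd64 the three register arguments are spilled to the stack in cdecl order
    ; (third argument pushed first) and a dummy 0 is pushed where the return address would
    ; sit in a 32-bit cdecl frame, so the [xBP + xS*2 + ...] argument offsets used below
    ; work for both instantiations.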
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                 ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                 ; save for the vmload after vmrun
    vmsave
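    ; Note: VMSAVE/VMLOAD use rAX as the physical address of a VMCB and transfer the state
    ; that VMRUN/#VMEXIT do not switch automatically - FS, GS, TR and LDTR (including the
    ; hidden parts), KernelGsBase, the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the
    ; SYSENTER MSRs - so the host copy is saved here and reloaded after the vmrun below.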

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]
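    ; Note: esi is restored last because xSI is still needed as the pCtx pointer for the
    ; loads above.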

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX         ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX         ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
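    ; Note: 4*xS drops the fake cdecl frame built on entry - the three spilled register
    ; arguments plus the dummy return-address slot.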
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    rsi                 ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + xS*2]   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    rax                 ; save for the vmload after vmrun
    vmsave

    ; setup rax for VMLOAD
    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [xSI + CPUMCTX.ebx]
    mov     rcx, qword [xSI + CPUMCTX.ecx]
    mov     rdx, qword [xSI + CPUMCTX.edx]
    mov     rdi, qword [xSI + CPUMCTX.edi]
    mov     rbp, qword [xSI + CPUMCTX.ebp]
    mov     r8,  qword [xSI + CPUMCTX.r8]
    mov     r9,  qword [xSI + CPUMCTX.r9]
    mov     r10, qword [xSI + CPUMCTX.r10]
    mov     r11, qword [xSI + CPUMCTX.r11]
    mov     r12, qword [xSI + CPUMCTX.r12]
    mov     r13, qword [xSI + CPUMCTX.r13]
    mov     r14, qword [xSI + CPUMCTX.r14]
    mov     r15, qword [xSI + CPUMCTX.r15]
    mov     rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax         ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax         ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    add     rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
