VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@9475

Last change on this file since 9475 was 9475, checked in by vboxsync, 17 years ago

Added VMXR0StartVM64.
Sync the FS_BASE & GS_BASE MSRs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.6 KB
1; $Id: HWACCMR0A.asm 9475 2008-06-06 13:10:55Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37 %define vmsave int3
38 %define vmload int3
39 %define vmrun int3
40 %define clgi int3
41 %define stgi int3
42 %macro invlpga 2,
43 int3
44 %endmacro
45%endif
46
47;; This is too risky wrt. stability, performance and correctness.
48;%define VBOX_WITH_DR6_EXPERIMENT 1
49
50;; @def MYPUSHAD
51; Macro generating an equivalent to pushad
52
53;; @def MYPOPAD
54; Macro generating an equivalent to popad
55
56;; @def MYPUSHSEGS
57; Macro saving all segment registers on the stack.
58; @param 1 full width register name
59; @param 2 16-bit register name for \a 1.
60
61;; @def MYPOPSEGS
62; Macro restoring all segment registers from the stack
63; @param 1 full width register name
64; @param 2 16-bit register name for \a 1.
65
66%ifdef RT_ARCH_AMD64
67 %ifdef ASM_CALL64_GCC
68 %macro MYPUSHAD 0
69 push r15
70 push r14
71 push r13
72 push r12
73 push rbx
74 %endmacro
75 %macro MYPOPAD 0
76 pop rbx
77 pop r12
78 pop r13
79 pop r14
80 pop r15
81 %endmacro
82
83 %else ; ASM_CALL64_MSC
84 %macro MYPUSHAD 0
85 push r15
86 push r14
87 push r13
88 push r12
89 push rbx
90 push rsi
91 push rdi
92 %endmacro
93 %macro MYPOPAD 0
94 pop rdi
95 pop rsi
96 pop rbx
97 pop r12
98 pop r13
99 pop r14
100 pop r15
101 %endmacro
102 %endif
103
104 %macro MYPUSHSEGS 2
105 mov %2, es
106 push %1
107 mov %2, ds
108 push %1
109
110 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it.
111 push rcx
112 mov ecx, MSR_K8_FS_BASE
113 rdmsr
114 pop rcx
115 push rdx
116 push rax
117 push fs
118
119 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
120 push rcx
121 mov ecx, MSR_K8_GS_BASE
122 rdmsr
123 pop rcx
124 push rdx
125 push rax
126 push gs
127 %endmacro
128
129 %macro MYPOPSEGS 2
130 ; Note: do not step through this code with a debugger!
131 pop gs
132 pop rax
133 pop rdx
134 push rcx
135 mov ecx, MSR_K8_GS_BASE
136 wrmsr
137 pop rcx
138
139 pop fs
140 pop rax
141 pop rdx
142 push rcx
143 mov ecx, MSR_K8_FS_BASE
144 wrmsr
145 pop rcx
146 ; Now it's safe to step again
147
148 pop %1
149 mov ds, %2
150 pop %1
151 mov es, %2
152 %endmacro
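; A sketch (not from the original source) of the stack layout MYPUSHSEGS leaves behind in
; this AMD64 case, as implied by the push order above; MYPOPSEGS unwinds it in exactly the
; reverse order and feeds each base back to wrmsr in edx:eax. Offsets assume 8-byte pushes.
;   [xSP+00h] gs selector         [xSP+20h] rax (FS_BASE low)
;   [xSP+08h] rax (GS_BASE low)   [xSP+28h] rdx (FS_BASE high)
;   [xSP+10h] rdx (GS_BASE high)  [xSP+30h] ds (pushed via %1)
;   [xSP+18h] fs selector         [xSP+38h] es (pushed via %1)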
153
154%else ; RT_ARCH_X86
155 %macro MYPUSHAD 0
156 pushad
157 %endmacro
158 %macro MYPOPAD 0
159 popad
160 %endmacro
161
162 %macro MYPUSHSEGS 2
163 push ds
164 push es
165 push fs
166 push gs
167 %endmacro
168 %macro MYPOPSEGS 2
169 pop gs
170 pop fs
171 pop es
172 pop ds
173 %endmacro
174%endif
175
176
177BEGINCODE
178
179;/**
180; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
181; *
182; * @returns VBox status code
183; * @param fResume vmlaunch/vmresume
184; * @param pCtx Guest context
185; */
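; Likely C-level prototype for this entry point, mirroring the DECLASM comments used
; elsewhere in this file; the exact parameter types are an assumption, not taken from
; the original source:
;DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);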
186BEGINPROC VMXR0StartVM32
187 push xBP
188 mov xBP, xSP
189
190 pushf
191 cli
192
193 ;/* First we have to save some final CPU context registers. */
194%ifdef RT_ARCH_AMD64
195 mov rax, qword .vmlaunch_done
196 push rax
197%else
198 push .vmlaunch_done
199%endif
200 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
201 vmwrite xAX, [xSP]
202 ;/* Note: assumes success... */
203 add xSP, xS
204
205 ;/* Manual save and restore:
206 ; * - General purpose registers except RIP, RSP
207 ; *
208 ; * Trashed:
209 ; * - CR2 (we don't care)
210 ; * - LDTR (reset to 0)
211 ; * - DRx (presumably not changed at all)
212 ; * - DR7 (reset to 0x400)
213 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
214 ; *
215 ; */
216
217 ;/* Save all general purpose host registers. */
218 MYPUSHAD
219
220 ;/* Save the Guest CPU context pointer. */
221%ifdef RT_ARCH_AMD64
222 %ifdef ASM_CALL64_GCC
223 ; fResume already in rdi
224 ; pCtx already in rsi
225 %else
226 mov rdi, rcx ; fResume
227 mov rsi, rdx ; pCtx
228 %endif
229%else
230 mov edi, [ebp + 8] ; fResume
231 mov esi, [ebp + 12] ; pCtx
232%endif
233
234 ;/* Save segment registers */
235 ; Note: MYPUSHSEGS trashes rdx (among others), so we moved it here (msvc amd64 case)
236 MYPUSHSEGS xAX, ax
237
238 ; Save the pCtx pointer
239 push xSI
240
241 ; Save LDTR
242 xor eax, eax
243 sldt ax
244 push xAX
245
246 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
247 sub xSP, xS*2
248 sgdt [xSP]
249
250 sub xSP, xS*2
251 sidt [xSP]
252
253%ifdef VBOX_WITH_DR6_EXPERIMENT
254 ; Restore DR6 - experiment, not safe!
255 mov xBX, [xSI + CPUMCTX.dr6]
256 mov dr6, xBX
257%endif
258
259 ; Restore CR2
260 mov ebx, [xSI + CPUMCTX.cr2]
261 mov cr2, xBX
262
263 mov eax, VMX_VMCS_HOST_RSP
264 vmwrite xAX, xSP
265 ;/* Note: assumes success... */
266 ;/* Don't mess with ESP anymore!! */
267
268 ;/* Restore Guest's general purpose registers. */
269 mov eax, [xSI + CPUMCTX.eax]
270 mov ebx, [xSI + CPUMCTX.ebx]
271 mov ecx, [xSI + CPUMCTX.ecx]
272 mov edx, [xSI + CPUMCTX.edx]
273 mov ebp, [xSI + CPUMCTX.ebp]
274
275 ; resume or start?
276 cmp xDI, 0 ; fResume
277 je .vmlauch_lauch
278
279 ;/* Restore edi & esi. */
280 mov edi, [xSI + CPUMCTX.edi]
281 mov esi, [xSI + CPUMCTX.esi]
282
283 vmresume
284 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */
285
286.vmlauch_lauch:
287 ;/* Restore edi & esi. */
288 mov edi, [xSI + CPUMCTX.edi]
289 mov esi, [xSI + CPUMCTX.esi]
290
291 vmlaunch
292 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */
293
294ALIGNCODE(16)
295.vmlaunch_done:
296 jc near .vmxstart_invalid_vmxon_ptr
297 jz near .vmxstart_start_failed
298
299 ; Restore base and limit of the IDTR & GDTR
300 lidt [xSP]
301 add xSP, xS*2
302 lgdt [xSP]
303 add xSP, xS*2
304
305 push xDI
306 mov xDI, [xSP + xS * 2] ; pCtx
307
308 mov [ss:xDI + CPUMCTX.eax], eax
309 mov [ss:xDI + CPUMCTX.ebx], ebx
310 mov [ss:xDI + CPUMCTX.ecx], ecx
311 mov [ss:xDI + CPUMCTX.edx], edx
312 mov [ss:xDI + CPUMCTX.esi], esi
313 mov [ss:xDI + CPUMCTX.ebp], ebp
314%ifdef RT_ARCH_AMD64
315 pop xAX ; the guest edi we pushed above
316 mov dword [ss:xDI + CPUMCTX.edi], eax
317%else
318 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
319%endif
320
321%ifdef VBOX_WITH_DR6_EXPERIMENT
322 ; Save DR6 - experiment, not safe!
323 mov xAX, dr6
324 mov [ss:xDI + CPUMCTX.dr6], xAX
325%endif
326
327 pop xAX ; saved LDTR
328 lldt ax
329
330 add xSP, xS ; pCtx
331
332 ; Restore segment registers
333 MYPOPSEGS xAX, ax
334
335 ; Restore general purpose registers
336 MYPOPAD
337
338 mov eax, VINF_SUCCESS
339
340.vmstart_end:
341 popf
342 pop xBP
343 ret
344
345
346.vmxstart_invalid_vmxon_ptr:
347 ; Restore base and limit of the IDTR & GDTR
348 lidt [xSP]
349 add xSP, xS*2
350 lgdt [xSP]
351 add xSP, xS*2
352
353 pop xAX ; saved LDTR
354 lldt ax
355
356 add xSP, xS ; pCtx
357
358 ; Restore segment registers
359 MYPOPSEGS xAX, ax
360
361 ; Restore all general purpose host registers.
362 MYPOPAD
363 mov eax, VERR_VMX_INVALID_VMXON_PTR
364 jmp .vmstart_end
365
366.vmxstart_start_failed:
367 ; Restore base and limit of the IDTR & GDTR
368 lidt [xSP]
369 add xSP, xS*2
370 lgdt [xSP]
371 add xSP, xS*2
372
373 pop xAX ; saved LDTR
374 lldt ax
375
376 add xSP, xS ; pCtx
377
378 ; Restore segment registers
379 MYPOPSEGS xAX, ax
380
381 ; Restore all general purpose host registers.
382 MYPOPAD
383 mov eax, VERR_VMX_UNABLE_TO_START_VM
384 jmp .vmstart_end
385
386ENDPROC VMXR0StartVM32
387
388%ifdef RT_ARCH_AMD64
389;/**
390; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
391; *
392; * @returns VBox status code
393; * @param fResume vmlaunch/vmresume
394; * @param pCtx Guest context
395; */
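; Likely C-level prototype, mirroring the DECLASM comments used elsewhere in this file;
; the exact parameter types are an assumption:
;DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);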
396BEGINPROC VMXR0StartVM64
397 push xBP
398 mov xBP, xSP
399
400 pushf
401 cli
402
403 ;/* First we have to save some final CPU context registers. */
404 mov rax, qword .vmlaunch64_done
405 push rax
406 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
407 vmwrite rax, [xSP]
408 ;/* Note: assumes success... */
409 add xSP, xS
410
411 ;/* Manual save and restore:
412 ; * - General purpose registers except RIP, RSP
413 ; *
414 ; * Trashed:
415 ; * - CR2 (we don't care)
416 ; * - LDTR (reset to 0)
417 ; * - DRx (presumably not changed at all)
418 ; * - DR7 (reset to 0x400)
419 ; * - EFLAGS (reset to RT_BIT(1); not relevant)
420 ; *
421 ; */
422
423 ;/* Save all general purpose host registers. */
424 MYPUSHAD
425
426 ;/* Save the Guest CPU context pointer. */
427%ifdef ASM_CALL64_GCC
428 ; fResume already in rdi
429 ; pCtx already in rsi
430%else
431 mov rdi, rcx ; fResume
432 mov rsi, rdx ; pCtx
433%endif
434
435 ;/* Save segment registers */
436 ; Note: MYPUSHSEGS trashes rdx (among others), so we moved it here (msvc amd64 case)
437 MYPUSHSEGS xAX, ax
438
439 ; Save the pCtx pointer
440 push xSI
441
442 ; Save LDTR
443 xor eax, eax
444 sldt ax
445 push xAX
446
447 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
448 sub xSP, xS*2
449 sgdt [xSP]
450
451 sub xSP, xS*2
452 sidt [xSP]
453
454%ifdef VBOX_WITH_DR6_EXPERIMENT
455 ; Restore DR6 - experiment, not safe!
456 mov xBX, [xSI + CPUMCTX.dr6]
457 mov dr6, xBX
458%endif
459
460 ; Restore CR2
461 mov rbx, qword [xSI + CPUMCTX.cr2]
462 mov cr2, rbx
463
464 mov eax, VMX_VMCS_HOST_RSP
465 vmwrite xAX, xSP
466 ;/* Note: assumes success... */
467 ;/* Don't mess with ESP anymore!! */
468
469 ;/* Restore Guest's general purpose registers. */
470 mov rax, qword [xSI + CPUMCTX.eax]
471 mov rbx, qword [xSI + CPUMCTX.ebx]
472 mov rcx, qword [xSI + CPUMCTX.ecx]
473 mov rdx, qword [xSI + CPUMCTX.edx]
474 mov rbp, qword [xSI + CPUMCTX.ebp]
475 mov r8, qword [xSI + CPUMCTX.r8]
476 mov r9, qword [xSI + CPUMCTX.r9]
477 mov r10, qword [xSI + CPUMCTX.r10]
478 mov r11, qword [xSI + CPUMCTX.r11]
479 mov r12, qword [xSI + CPUMCTX.r12]
480 mov r13, qword [xSI + CPUMCTX.r13]
481 mov r14, qword [xSI + CPUMCTX.r14]
482 mov r15, qword [xSI + CPUMCTX.r15]
483
484 ; resume or start?
485 cmp xDI, 0 ; fResume
486 je .vmlauch64_lauch
487
488 ;/* Restore rdi & rsi. */
489 mov rdi, qword [xSI + CPUMCTX.edi]
490 mov rsi, qword [xSI + CPUMCTX.esi]
491
492 vmresume
493 jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */
494
495.vmlauch64_lauch:
496 ;/* Restore rdi & rsi. */
497 mov rdi, qword [xSI + CPUMCTX.edi]
498 mov rsi, qword [xSI + CPUMCTX.esi]
499
500 vmlaunch
501 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */
502
503ALIGNCODE(16)
504.vmlaunch64_done:
505 jc near .vmxstart64_invalid_vmxon_ptr
506 jz near .vmxstart64_start_failed
507
508 ; Restore base and limit of the IDTR & GDTR
509 lidt [xSP]
510 add xSP, xS*2
511 lgdt [xSP]
512 add xSP, xS*2
513
514 push xDI
515 mov xDI, [xSP + xS * 2] ; pCtx
516
517 mov qword [xDI + CPUMCTX.eax], rax
518 mov qword [xDI + CPUMCTX.ebx], rbx
519 mov qword [xDI + CPUMCTX.ecx], rcx
520 mov qword [xDI + CPUMCTX.edx], rdx
521 mov qword [xDI + CPUMCTX.esi], rsi
522 mov qword [xDI + CPUMCTX.ebp], rbp
523 mov qword [xDI + CPUMCTX.r8], r8
524 mov qword [xDI + CPUMCTX.r9], r9
525 mov qword [xDI + CPUMCTX.r10], r10
526 mov qword [xDI + CPUMCTX.r11], r11
527 mov qword [xDI + CPUMCTX.r12], r12
528 mov qword [xDI + CPUMCTX.r13], r13
529 mov qword [xDI + CPUMCTX.r14], r14
530 mov qword [xDI + CPUMCTX.r15], r15
531
532 pop xAX ; the guest edi we pushed above
533 mov qword [xDI + CPUMCTX.edi], rax
534
535%ifdef VBOX_WITH_DR6_EXPERIMENT
536 ; Save DR6 - experiment, not safe!
537 mov xAX, dr6
538 mov [xDI + CPUMCTX.dr6], xAX
539%endif
540
541 pop xAX ; saved LDTR
542 lldt ax
543
544 add xSP, xS ; pCtx
545
546 ; Restore segment registers
547 MYPOPSEGS xAX, ax
548
549 ; Restore general purpose registers
550 MYPOPAD
551
552 mov eax, VINF_SUCCESS
553
554.vmstart64_end:
555 popf
556 pop xBP
557 ret
558
559
560.vmxstart64_invalid_vmxon_ptr:
561 ; Restore base and limit of the IDTR & GDTR
562 lidt [xSP]
563 add xSP, xS*2
564 lgdt [xSP]
565 add xSP, xS*2
566
567 pop xAX ; saved LDTR
568 lldt ax
569
570 add xSP, xS ; pCtx
571
572 ; Restore segment registers
573 MYPOPSEGS xAX, ax
574
575 ; Restore all general purpose host registers.
576 MYPOPAD
577 mov eax, VERR_VMX_INVALID_VMXON_PTR
578 jmp .vmstart64_end
579
580.vmxstart64_start_failed:
581 ; Restore base and limit of the IDTR & GDTR
582 lidt [xSP]
583 add xSP, xS*2
584 lgdt [xSP]
585 add xSP, xS*2
586
587 pop xAX ; saved LDTR
588 lldt ax
589
590 add xSP, xS ; pCtx
591
592 ; Restore segment registers
593 MYPOPSEGS xAX, ax
594
595 ; Restore all general purpose host registers.
596 MYPOPAD
597 mov eax, VERR_VMX_UNABLE_TO_START_VM
598 jmp .vmstart64_end
599ENDPROC VMXR0StartVM64
600
601;/**
602; * Executes VMWRITE
603; *
604; * @returns VBox status code
605; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
606; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
607; */
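; Likely C-level prototype and usage, in the style of the DECLASM comment given for
; VMXReadVMCS64 below; note the second argument is the 64-bit value to write (passed by
; value despite the pData name above). The types and the uHostRIP example are assumptions:
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data);
;   e.g. rc = VMXWriteVMCS64(VMX_VMCS_HOST_RIP, uHostRIP);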
608BEGINPROC VMXWriteVMCS64
609%ifdef ASM_CALL64_GCC
610 mov eax, 0ffffffffh
611 and rdi, rax
612 xor rax, rax
613 vmwrite rdi, rsi
614%else
615 mov eax, 0ffffffffh
616 and rcx, rax
617 xor rax, rax
618 vmwrite rcx, rdx
619%endif
620 jnc .valid_vmcs
621 mov eax, VERR_VMX_INVALID_VMCS_PTR
622 ret
623.valid_vmcs:
624 jnz .the_end
625 mov eax, VERR_VMX_INVALID_VMCS_FIELD
626.the_end:
627 ret
628ENDPROC VMXWriteVMCS64
629
630;/**
631; * Executes VMREAD
632; *
633; * @returns VBox status code
634; * @param idxField VMCS index
635; * @param pData Ptr to store VM field value
636; */
637;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
638BEGINPROC VMXReadVMCS64
639%ifdef ASM_CALL64_GCC
640 mov eax, 0ffffffffh
641 and rdi, rax
642 xor rax, rax
643 vmread [rsi], rdi
644%else
645 mov eax, 0ffffffffh
646 and rcx, rax
647 xor rax, rax
648 vmread [rdx], rcx
649%endif
650 jnc .valid_vmcs
651 mov eax, VERR_VMX_INVALID_VMCS_PTR
652 ret
653.valid_vmcs:
654 jnz .the_end
655 mov eax, VERR_VMX_INVALID_VMCS_FIELD
656.the_end:
657 ret
658ENDPROC VMXReadVMCS64
659
660
661;/**
662; * Executes VMXON
663; *
664; * @returns VBox status code
665; * @param HCPhysVMXOn Physical address of VMXON structure
666; */
667;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
668BEGINPROC VMXEnable
669%ifdef RT_ARCH_AMD64
670 xor rax, rax
671 %ifdef ASM_CALL64_GCC
672 push rdi
673 %else
674 push rcx
675 %endif
676 vmxon [rsp]
677%else
678 xor eax, eax
679 vmxon [esp + 4]
680%endif
681 jnc .good
682 mov eax, VERR_VMX_INVALID_VMXON_PTR
683 jmp .the_end
684
685.good:
686 jnz .the_end
687 mov eax, VERR_VMX_GENERIC
688
689.the_end:
690%ifdef RT_ARCH_AMD64
691 add rsp, 8
692%endif
693 ret
694ENDPROC VMXEnable
695
696
697;/**
698; * Executes VMXOFF
699; */
700;DECLASM(void) VMXDisable(void);
701BEGINPROC VMXDisable
702 vmxoff
703 ret
704ENDPROC VMXDisable
705
706
707;/**
708; * Executes VMCLEAR
709; *
710; * @returns VBox status code
711; * @param HCPhysVMCS Physical address of VM control structure
712; */
713;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
714BEGINPROC VMXClearVMCS
715%ifdef RT_ARCH_AMD64
716 xor rax, rax
717 %ifdef ASM_CALL64_GCC
718 push rdi
719 %else
720 push rcx
721 %endif
722 vmclear [rsp]
723%else
724 xor eax, eax
725 vmclear [esp + 4]
726%endif
727 jnc .the_end
728 mov eax, VERR_VMX_INVALID_VMCS_PTR
729.the_end:
730%ifdef RT_ARCH_AMD64
731 add rsp, 8
732%endif
733 ret
734ENDPROC VMXClearVMCS
735
736
737;/**
738; * Executes VMPTRLD
739; *
740; * @returns VBox status code
741; * @param HCPhysVMCS Physical address of VMCS structure
742; */
743;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
744BEGINPROC VMXActivateVMCS
745%ifdef RT_ARCH_AMD64
746 xor rax, rax
747 %ifdef ASM_CALL64_GCC
748 push rdi
749 %else
750 push rcx
751 %endif
752 vmptrld [rsp]
753%else
754 xor eax, eax
755 vmptrld [esp + 4]
756%endif
757 jnc .the_end
758 mov eax, VERR_VMX_INVALID_VMCS_PTR
759.the_end:
760%ifdef RT_ARCH_AMD64
761 add rsp, 8
762%endif
763 ret
764ENDPROC VMXActivateVMCS
765
766%endif ; RT_ARCH_AMD64
767
768
769;/**
770; * Prepares for and executes VMRUN
771; *
772; * @returns VBox status code
773; * @param pVMCBHostPhys Physical address of host VMCB
774; * @param pVMCBPhys Physical address of guest VMCB
775; * @param pCtx Guest context
776; */
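; Likely C-level prototype, in the DECLASM comment style used elsewhere in this file;
; parameter names follow the comments in the body below, and the types are an assumption:
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);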
777BEGINPROC SVMVMRun
778%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, so sue me.
779 %ifdef ASM_CALL64_GCC
780 push rdx
781 push rsi
782 push rdi
783 %else
784 push r8
785 push rdx
786 push rcx
787 %endif
788 push 0
789%endif
790 push xBP
791 mov xBP, xSP
792 pushf
793
794 ;/* Manual save and restore:
795 ; * - General purpose registers except RIP, RSP, RAX
796 ; *
797 ; * Trashed:
798 ; * - CR2 (we don't care)
799 ; * - LDTR (reset to 0)
800 ; * - DRx (presumably not changed at all)
801 ; * - DR7 (reset to 0x400)
802 ; */
803
804 ;/* Save all general purpose host registers. */
805 MYPUSHAD
806
807 ;/* Save the Guest CPU context pointer. */
808 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
809 push xSI ; push for saving the state at the end
810
811 ; Restore CR2
812 mov ebx, [xSI + CPUMCTX.cr2]
813 mov cr2, xBX
814
815 ; save host fs, gs, sysenter msr etc
816 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
817 push xAX ; save for the vmload after vmrun
818 vmsave
819
820 ; setup eax for VMLOAD
821 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)
822
823 ;/* Restore Guest's general purpose registers. */
824 ;/* EAX is loaded from the VMCB by VMRUN */
825 mov ebx, [xSI + CPUMCTX.ebx]
826 mov ecx, [xSI + CPUMCTX.ecx]
827 mov edx, [xSI + CPUMCTX.edx]
828 mov edi, [xSI + CPUMCTX.edi]
829 mov ebp, [xSI + CPUMCTX.ebp]
830 mov esi, [xSI + CPUMCTX.esi]
831
832 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
833 clgi
834 sti
835
836 ; load guest fs, gs, sysenter msr etc
837 vmload
838 ; run the VM
839 vmrun
840
841 ;/* EAX is in the VMCB already; we can use it here. */
842
843 ; save guest fs, gs, sysenter msr etc
844 vmsave
845
846 ; load host fs, gs, sysenter msr etc
847 pop xAX ; pushed above
848 vmload
849
850 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
851 cli
852 stgi
853
854 pop xAX ; pCtx
855
856 mov [ss:xAX + CPUMCTX.ebx], ebx
857 mov [ss:xAX + CPUMCTX.ecx], ecx
858 mov [ss:xAX + CPUMCTX.edx], edx
859 mov [ss:xAX + CPUMCTX.esi], esi
860 mov [ss:xAX + CPUMCTX.edi], edi
861 mov [ss:xAX + CPUMCTX.ebp], ebp
862
863 ; Restore general purpose registers
864 MYPOPAD
865
866 mov eax, VINF_SUCCESS
867
868 popf
869 pop xBP
870%ifdef RT_ARCH_AMD64
871 add xSP, 4*xS
872%endif
873 ret
874ENDPROC SVMVMRun
875
876
877;;
878; Executes INVLPGA
879;
880; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
881; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
882;
883;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
884BEGINPROC SVMInvlpgA
885%ifdef RT_ARCH_AMD64
886 %ifdef ASM_CALL64_GCC
887 mov eax, edi ;; @todo 64-bit guest.
888 mov ecx, esi
889 %else
890 mov eax, ecx ;; @todo 64-bit guest.
891 mov ecx, edx
892 %endif
893%else
894 mov eax, [esp + 4]
895 mov ecx, [esp + 8]
896%endif
897 invlpga [xAX], ecx
898 ret
899ENDPROC SVMInvlpgA
900