VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 9046

Last change on this file since 9046 was 9046, checked in by vboxsync, 17 years ago

Experimental workaround for non-working debuggers and panicking guests; disabled by default. Uncomment the VBOX_WITH_DR6_EXPERIMENT define to enable it.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.7 KB
; $Id: HWACCMR0A.asm 9046 2008-05-21 23:47:23Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif
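; The stubs above replace the VT-x/AMD-V mnemonics that the OMF toolchain
; cannot assemble; each expands to int3, so any path that reaches them on
; OS/2 traps immediately instead of executing garbage.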

;; This is too risky wrt. stability, performance and correctness.
;%define VBOX_WITH_DR6_EXPERIMENT 1

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers on the stack
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif

 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
    push    fs
    ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
    push    rcx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    pop     rcx
    push    rdx
    push    rax
    push    gs
 %endmacro
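 ; Note: rdmsr with ecx = MSR_K8_GS_BASE returns the 64-bit GS base split
 ; across edx:eax (high:low); pushing rdx then rax preserves exactly what
 ; the wrmsr in MYPOPSEGS needs to restore it.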

 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    push    rcx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr
    pop     rcx
    ; Now it's safe to step again

    pop     fs
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro
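 ; Why single-stepping is dangerous here: 'pop gs' reloads the GS selector and
 ; clobbers its hidden base; until the wrmsr above has restored MSR_K8_GS_BASE,
 ; a debug trap would enter the host kernel with a stale GS base.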

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH
; *
; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param   pCtx        Guest context
; */
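;DECLASM(int) VMXStartVM(PCPUMCTX pCtx);   ; assumed prototype, added to match the DECLASM comments used for the helpers below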
BEGINPROC VMXStartVM
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi        ; pCtx
 %else
    mov     rsi, rcx        ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]  ; pCtx
%endif
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]
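    ; Note: sgdt/sidt store a 2-byte limit followed by the linear base
    ; (6 bytes on x86, 10 on AMD64), which is why two stack slots (xS*2)
    ; are reserved for each descriptor-table register.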

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
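    ; Per the VMX conventions: CF set means VMfailInvalid (no/invalid current
    ; VMCS pointer), ZF set means VMfailValid (the VM-instruction error field
    ; in the VMCS holds the exact reason).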
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX                     ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXStartVM


;/**
; * Prepares for and executes VMRESUME
; *
; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param   pCtx        Guest context
; */
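;DECLASM(int) VMXResumeVM(PCPUMCTX pCtx);   ; assumed prototype, matching VMXStartVM above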
BEGINPROC VMXResumeVM
    push    xBP
    mov     xBP, xSP

    pushf
    cli

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmresume_done
    push    rax
%else
    push    .vmresume_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMRESUME?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi        ; pCtx
 %else
    mov     rsi, rcx        ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]  ; pCtx
%endif
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov     xBX, [xSI + CPUMCTX.dr6]
    mov     dr6, xBX
%endif

    ; Restore CR2
    mov     xBX, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmresume_done  ;/* here if vmresume detected a failure. */

ALIGNCODE(16)
.vmresume_done:
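    ; Same VMfail conventions as in VMXStartVM: CF = VMfailInvalid, ZF = VMfailValid.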
    jc      near .vmxresume_invalid_vmxon_ptr
    jz      near .vmxresume_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX                     ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov     xAX, dr6
    mov     [ss:xDI + CPUMCTX.dr6], xAX
%endif

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmresume_end:
    popf
    pop     xBP
    ret

.vmxresume_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmresume_end

.vmxresume_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX         ; saved LDTR
    lldt    ax

    add     xSP, xS     ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
    jmp     .vmresume_end

ENDPROC VMXResumeVM


%ifdef RT_ARCH_AMD64
;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param   idxField    x86: [ebp + 08h]  msc: rcx  gcc: edi   VMCS index
; * @param   pData       x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
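;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Data);   ; assumed prototype, in the style of VMXReadVMCS64 below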
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax        ; clear the upper half of the field index
    xor     rax, rax        ; rax = 0 = VINF_SUCCESS if neither CF nor ZF ends up set
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax        ; clear the upper half of the field index
    xor     rax, rax        ; rax = 0 = VINF_SUCCESS if neither CF nor ZF ends up set
    vmwrite rcx, rdx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param   idxField    VMCS index
; * @param   pData       Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn     Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
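    ; Note: vmxon takes a 64-bit physical address as a memory operand, so the
    ; AMD64 path pushes the argument to get an addressable stack slot; the
    ; push is undone by the 'add rsp, 8' at .the_end.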
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS     Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS     Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
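;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);   ; assumed prototype, in the style of the DECLASM comments above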
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, so sue me.
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI                                 ; push for saving the state at the end

    ; Restore CR2
    mov     ebx, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]                   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX                                 ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti
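    ; Note: while GIF is clear, physical interrupts are merely held pending;
    ; setting IF now means that once vmrun raises GIF for the guest, any
    ; pending interrupt immediately forces a #VMEXIT back to this code.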

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     xAX                     ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     xAX                     ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun


;;
; Executes INVLPGA
;
; @param   pPageGC  msc:ecx  gcc:edi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID    msc:edx  gcc:esi  x86:[esp+08]  Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
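; Note: invlpga invalidates the TLB mapping for the virtual page in rAX,
; restricted to the address space identified by the ASID in ECX.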
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, edi                ;; @todo 64-bit guest.
    mov     ecx, esi
 %else
    mov     eax, ecx                ;; @todo 64-bit guest.
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMInvlpgA