VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 2789

Last change on this file since 2789 was 2789, checked in by vboxsync, 18 years ago

Fixes for gs save & restore in AMD64 mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.5 KB
Line 
1; $Id: HWACCMR0A.asm 2789 2007-05-23 08:30:17Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006 InnoTek Systemberatung GmbH
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License as published by the Free Software Foundation,
13; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14; distribution. VirtualBox OSE is distributed in the hope that it will
15; be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; If you received this file as part of a commercial VirtualBox
18; distribution, then only the terms of your commercial VirtualBox
19; license agreement apply instead of the previous paragraph.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef __OS2__ ;; @todo build cvs nasm like on OS X.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37%endif
38
39
40;; @def MYPUSHAD
41; Macro generating an equivalent to pushad
42
43;; @def MYPOPAD
44; Macro generating an equivalent to popad
45
46;; @def MYPUSHSEGS
47; Macro saving all segment registers on the stack.
48; @param 1 full width register name
49; @param 2 16-bit register name for \a 1.
50
51;; @def MYPOPSEGS
52; Macro restoring all segment registers from the stack.
53; @param 1 full width register name
54; @param 2 16-bit register name for \a 1.
55
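;; Usage (as in VMXStartVM / VMXResumeVM below): the macros are used pairwise,
;; e.g. MYPUSHSEGS xAX, ax on entry and MYPOPSEGS xAX, ax on the way out, where
;; xAX/ax is the scratch register pair passed as parameters 1 and 2.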
56%ifdef __AMD64__
57 %ifdef ASM_CALL64_GCC
58 %macro MYPUSHAD 0
59 push r15
60 push r14
61 push r13
62 push r12
63 push rbx
64 %endmacro
65 %macro MYPOPAD 0
66 pop rbx
67 pop r12
68 pop r13
69 pop r14
70 pop r15
71 %endmacro
72
73 %else ; ASM_CALL64_MSC
74 %macro MYPUSHAD 0
75 push r15
76 push r14
77 push r13
78 push r12
79 push rbx
80 push rsi
81 push rdi
82 %endmacro
83 %macro MYPOPAD 0
84 pop rdi
85 pop rsi
86 pop rbx
87 pop r12
88 pop r13
89 pop r14
90 pop r15
91 %endmacro
92 %endif
93
94 %macro MYPUSHSEGS 2
95 mov %2, es
96 push %1
97 mov %2, ds
98 push %1
99 push fs
100 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit.
101 ; Note: do not step through this code with a debugger!
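 ; (swapgs exchanges the hidden GS base with MSR_K8_KERNEL_GS_BASE; a debug trap taken
 ; while the bases are swapped would run the handler with the wrong GS base, hence the
 ; warning about single-stepping through this sequence.)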
102 push rcx
103 mov ecx, MSR_K8_KERNEL_GS_BASE
104 rdmsr
105 pop rcx
106 push rdx
107 push rax
108 ; copy hidden base register into the MSR
109 swapgs
110 push rcx
111 mov ecx, MSR_K8_KERNEL_GS_BASE
112 rdmsr
113 pop rcx
114 push rdx
115 push rax
116 swapgs ; redundant unless in debugging mode
117 ; Now it's safe to step again
118 push gs
119 %endmacro
120
121 %macro MYPOPSEGS 2
122 ; Note: do not step through this code with a debugger!
123 pop gs
124 pop rax
125 pop rdx
126 push rcx
127 mov ecx, MSR_K8_KERNEL_GS_BASE
128 wrmsr
129 pop rcx
130 ; copy MSR into hidden base register
131 swapgs
132 pop rax
133 pop rdx
134 push rcx
135 mov ecx, MSR_K8_KERNEL_GS_BASE
136 wrmsr
137 pop rcx
138 ; Now it's safe to step again
139
140 pop fs
141 pop %1
142 mov ds, %2
143 pop %1
144 mov es, %2
145 %endmacro
146
147%else ; __X86__
148 %macro MYPUSHAD 0
149 pushad
150 %endmacro
151 %macro MYPOPAD 0
152 popad
153 %endmacro
154
155 %macro MYPUSHSEGS 2
156 push ds
157 push es
158 push fs
159 push gs
160 %endmacro
161 %macro MYPOPSEGS 2
162 pop gs
163 pop fs
164 pop es
165 pop ds
166 %endmacro
167%endif
168
169
170BEGINCODE
171
172;/**
173; * Prepares for and executes VMLAUNCH
174; *
175; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
176; *
177; * @returns VBox status code
178; * @param pCtx Guest context
179; */
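;/* Assumed C-side usage (the actual prototype lives in the HWACCM headers, not in this file):
; *   DECLASM(int) VMXStartVM(CPUMCTX *pCtx);
; *   rc = VMXStartVM(pCtx);  // VINF_SUCCESS, VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_UNABLE_TO_START_VM
; */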
180BEGINPROC VMXStartVM
181 push xBP
182 mov xBP, xSP
183
184 ;/* First we have to save some final CPU context registers. */
185%ifdef __AMD64__
186 mov rax, qword .vmlaunch_done
187 push rax
188%else
189 push .vmlaunch_done
190%endif
191 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
192 vmwrite xAX, [xSP]
193 ;/* @todo assumes success... */
194 add xSP, xS
195
196 ;/* Manual save and restore:
197 ; * - General purpose registers except RIP, RSP
198 ; *
199 ; * Trashed:
200 ; * - CR2 (we don't care)
201 ; * - LDTR (reset to 0)
202 ; * - DRx (presumably not changed at all)
203 ; * - DR7 (reset to 0x400)
204 ; * - EFLAGS (reset to BIT(1); not relevant)
205 ; *
206 ; */
207
208 ;/* Save all general purpose host registers. */
209 MYPUSHAD
210
211 ;/* Save segment registers */
212 MYPUSHSEGS xAX, ax
213
214 ;/* Save the Guest CPU context pointer. */
215%ifdef __AMD64__
216 %ifdef ASM_CALL64_GCC
217 mov rsi, rdi ; pCtx
218 %else
219 mov rsi, rcx ; pCtx
220 %endif
221%else
222 mov esi, [ebp + 8] ; pCtx
223%endif
224 push xSI
225
226 ; Save LDTR
227 xor eax, eax
228 sldt ax
229 push xAX
230
231 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
232 sub xSP, xS*2
233 sgdt [xSP]
234
235 sub xSP, xS*2
236 sidt [xSP]
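 ; (sgdt/sidt store a 6-byte limit+base image on x86 and a 10-byte one on AMD64;
 ; xS*2 - two native words - is enough in either case and keeps the stack slot-aligned.)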
237
238 ; Restore CR2
239 mov ebx, [xSI + CPUMCTX.cr2]
240 mov cr2, xBX
241
242 mov eax, VMX_VMCS_HOST_RSP
243 vmwrite xAX, xSP
244 ;/* @todo assumes success... */
245 ;/* Don't mess with ESP anymore!! */
246
247 ;/* Restore Guest's general purpose registers. */
248 mov eax, [xSI + CPUMCTX.eax]
249 mov ebx, [xSI + CPUMCTX.ebx]
250 mov ecx, [xSI + CPUMCTX.ecx]
251 mov edx, [xSI + CPUMCTX.edx]
252 mov edi, [xSI + CPUMCTX.edi]
253 mov ebp, [xSI + CPUMCTX.ebp]
254 mov esi, [xSI + CPUMCTX.esi]
255
256 vmlaunch
257 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */
258
259ALIGNCODE(16)
260.vmlaunch_done:
261 jc .vmxstart_invalid_vmxon_ptr
262 jz .vmxstart_start_failed
263
264 ; Restore base and limit of the IDTR & GDTR
265 lidt [xSP]
266 add xSP, xS*2
267 lgdt [xSP]
268 add xSP, xS*2
269
270 push xDI
271 mov xDI, [xSP + xS * 2] ; pCtx
272
273 mov [ss:xDI + CPUMCTX.eax], eax
274 mov [ss:xDI + CPUMCTX.ebx], ebx
275 mov [ss:xDI + CPUMCTX.ecx], ecx
276 mov [ss:xDI + CPUMCTX.edx], edx
277 mov [ss:xDI + CPUMCTX.esi], esi
278 mov [ss:xDI + CPUMCTX.ebp], ebp
279%ifdef __AMD64__
280 pop xAX ; the guest edi we pushed above
281 mov dword [ss:xDI + CPUMCTX.edi], eax
282%else
283 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
284%endif
285
286 pop xAX ; saved LDTR
287 lldt ax
288
289 add xSP, xS ; pCtx
290
291 ; Restore segment registers
292 MYPOPSEGS xAX, ax
293
294 ; Restore general purpose registers
295 MYPOPAD
296
297 mov eax, VINF_SUCCESS
298
299.vmstart_end:
300 pop xBP
301 ret
302
303
304.vmxstart_invalid_vmxon_ptr:
305 ; Restore base and limit of the IDTR & GDTR
306 lidt [xSP]
307 add xSP, xS*2
308 lgdt [xSP]
309 add xSP, xS*2
310
311 pop xAX ; saved LDTR
312 lldt ax
313
314 add xSP, xS ; pCtx
315
316 ; Restore segment registers
317 MYPOPSEGS xAX, ax
318
319 ; Restore all general purpose host registers.
320 MYPOPAD
321 mov eax, VERR_VMX_INVALID_VMXON_PTR
322 jmp .vmstart_end
323
324.vmxstart_start_failed:
325 ; Restore base and limit of the IDTR & GDTR
326 lidt [xSP]
327 add xSP, xS*2
328 lgdt [xSP]
329 add xSP, xS*2
330
331 pop xAX ; saved LDTR
332 lldt ax
333
334 add xSP, xS ; pCtx
335
336 ; Restore segment registers
337 MYPOPSEGS xAX, ax
338
339 ; Restore all general purpose host registers.
340 MYPOPAD
341 mov eax, VERR_VMX_UNABLE_TO_START_VM
342 jmp .vmstart_end
343
344ENDPROC VMXStartVM
345
346
347;/**
348; * Prepares for and executes VMRESUME
349; *
350; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
351; *
352; * @returns VBox status code
353; * @param pCtx Guest context
354; */
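;/* Assumed C-side usage (the actual prototype lives in the HWACCM headers, not in this file):
; *   DECLASM(int) VMXResumeVM(CPUMCTX *pCtx);
; *   rc = VMXResumeVM(pCtx);  // VINF_SUCCESS, VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_UNABLE_TO_RESUME_VM
; */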
355BEGINPROC VMXResumeVM
356 push xBP
357 mov xBP, xSP
358
359 ;/* First we have to save some final CPU context registers. */
360%ifdef __AMD64__
361 mov rax, qword .vmresume_done
362 push rax
363%else
364 push .vmresume_done
365%endif
366 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
367 vmwrite xAX, [xSP]
368 ;/* @todo assumes success... */
369 add xSP, xS
370
371 ;/* Manual save and restore:
372 ; * - General purpose registers except RIP, RSP
373 ; *
374 ; * Trashed:
375 ; * - CR2 (we don't care)
376 ; * - LDTR (reset to 0)
377 ; * - DRx (presumably not changed at all)
378 ; * - DR7 (reset to 0x400)
379 ; * - EFLAGS (reset to BIT(1); not relevant)
380 ; *
381 ; */
382
383 ;/* Save all general purpose host registers. */
384 MYPUSHAD
385
386 ;/* Save segment registers */
387 MYPUSHSEGS xAX, ax
388
389 ;/* Save the Guest CPU context pointer. */
390%ifdef __AMD64__
391 %ifdef ASM_CALL64_GCC
392 mov rsi, rdi ; pCtx
393 %else
394 mov rsi, rcx ; pCtx
395 %endif
396%else
397 mov esi, [ebp + 8] ; pCtx
398%endif
399 push xSI
400
401 ; Save LDTR
402 xor eax, eax
403 sldt ax
404 push xAX
405
406 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
407 sub xSP, xS*2
408 sgdt [xSP]
409
410 sub xSP, xS*2
411 sidt [xSP]
412
413 ; Restore CR2
414 mov xBX, [xSI + CPUMCTX.cr2]
415 mov cr2, xBX
416
417 mov eax, VMX_VMCS_HOST_RSP
418 vmwrite xAX, xSP
419 ;/* @todo assumes success... */
420 ;/* Don't mess with ESP anymore!! */
421
422 ;/* Restore Guest's general purpose registers. */
423 mov eax, [xSI + CPUMCTX.eax]
424 mov ebx, [xSI + CPUMCTX.ebx]
425 mov ecx, [xSI + CPUMCTX.ecx]
426 mov edx, [xSI + CPUMCTX.edx]
427 mov edi, [xSI + CPUMCTX.edi]
428 mov ebp, [xSI + CPUMCTX.ebp]
429 mov esi, [xSI + CPUMCTX.esi]
430
431 vmresume
432 jmp .vmresume_done; ;/* here if vmresume detected a failure. */
433
434ALIGNCODE(16)
435.vmresume_done:
436 jc .vmxresume_invalid_vmxon_ptr
437 jz .vmxresume_start_failed
438
439 ; Restore base and limit of the IDTR & GDTR
440 lidt [xSP]
441 add xSP, xS*2
442 lgdt [xSP]
443 add xSP, xS*2
444
445 push xDI
446 mov xDI, [xSP + xS * 2] ; pCtx
447
448 mov [ss:xDI + CPUMCTX.eax], eax
449 mov [ss:xDI + CPUMCTX.ebx], ebx
450 mov [ss:xDI + CPUMCTX.ecx], ecx
451 mov [ss:xDI + CPUMCTX.edx], edx
452 mov [ss:xDI + CPUMCTX.esi], esi
453 mov [ss:xDI + CPUMCTX.ebp], ebp
454%ifdef __AMD64__
455 pop xAX ; the guest edi we pushed above
456 mov dword [ss:xDI + CPUMCTX.edi], eax
457%else
458 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
459%endif
460
461 pop xAX ; saved LDTR
462 lldt ax
463
464 add xSP, xS ; pCtx
465
466 ; Restore segment registers
467 MYPOPSEGS xAX, ax
468
469 ; Restore general purpose registers
470 MYPOPAD
471
472 mov eax, VINF_SUCCESS
473
474.vmresume_end:
475 pop xBP
476 ret
477
478.vmxresume_invalid_vmxon_ptr:
479 ; Restore base and limit of the IDTR & GDTR
480 lidt [xSP]
481 add xSP, xS*2
482 lgdt [xSP]
483 add xSP, xS*2
484
485 pop xAX ; saved LDTR
486 lldt ax
487
488 add xSP, xS ; pCtx
489
490 ; Restore segment registers
491 MYPOPSEGS xAX, ax
492
493 ; Restore all general purpose host registers.
494 MYPOPAD
495 mov eax, VERR_VMX_INVALID_VMXON_PTR
496 jmp .vmresume_end
497
498.vmxresume_start_failed:
499 ; Restore base and limit of the IDTR & GDTR
500 lidt [xSP]
501 add xSP, xS*2
502 lgdt [xSP]
503 add xSP, xS*2
504
505 pop xAX ; saved LDTR
506 lldt ax
507
508 add xSP, xS ; pCtx
509
510 ; Restore segment registers
511 MYPOPSEGS xAX, ax
512
513 ; Restore all general purpose host registers.
514 MYPOPAD
515 mov eax, VERR_VMX_UNABLE_TO_RESUME_VM
516 jmp .vmresume_end
517
518ENDPROC VMXResumeVM
519
520
521%ifdef __AMD64__
522;/**
523; * Executes VMWRITE
524; *
525; * @returns VBox status code
526; * @param idxField x86: [ebp + 08h] msc: rcx gcc: edi VMCS index
527; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
528; */
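;/* Usage sketch (C prototype not quoted here; the idxField constants come from the hwacc_vmx headers):
; *   rc = VMXWriteVMCS64(VMX_VMCS_HOST_RIP, u64Value);
; *   if (rc != VINF_SUCCESS) ...  // VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD
; */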
529BEGINPROC VMXWriteVMCS64
530%ifdef ASM_CALL64_GCC
531 mov eax, 0ffffffffh
532 and rdi, rax
533 xor rax, rax
534 vmwrite rdi, rsi
535%else
536 mov eax, 0ffffffffh
537 and rcx, rax
538 xor rax, rax
539 vmwrite rcx, rdx
540%endif
541 jnc .valid_vmcs
542 mov eax, VERR_VMX_INVALID_VMCS_PTR
543 ret
544.valid_vmcs:
545 jnz .the_end
546 mov eax, VERR_VMX_INVALID_VMCS_FIELD
547.the_end:
548 ret
549ENDPROC VMXWriteVMCS64
550
551;/**
552; * Executes VMREAD
553; *
554; * @returns VBox status code
555; * @param idxField VMCS index
556; * @param pData Ptr to store VM field value
557; */
558;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
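;/* Usage sketch for the prototype above:
; *   uint64_t u64Val;
; *   rc = VMXReadVMCS64(VMX_VMCS_HOST_RIP, &u64Val);
; *   if (rc != VINF_SUCCESS) ...  // VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD
; */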
559BEGINPROC VMXReadVMCS64
560%ifdef ASM_CALL64_GCC
561 mov eax, 0ffffffffh
562 and rdi, rax
563 xor rax, rax
564 vmread [rsi], rdi
565%else
566 mov eax, 0ffffffffh
567 and rcx, rax
568 xor rax, rax
569 vmread [rdx], rcx
570%endif
571 jnc .valid_vmcs
572 mov eax, VERR_VMX_INVALID_VMCS_PTR
573 ret
574.valid_vmcs:
575 jnz .the_end
576 mov eax, VERR_VMX_INVALID_VMCS_FIELD
577.the_end:
578 ret
579ENDPROC VMXReadVMCS64
580
581
582;/**
583; * Executes VMXON
584; *
585; * @returns VBox status code
586; * @param HCPhysVMXOn Physical address of VMXON structure
587; */
588;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
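;/* Usage sketch for the prototype above; the caller supplies the physical address of its VMXON region:
; *   rc = VMXEnable(HCPhysVMXOn);
; *   if (rc != VINF_SUCCESS) ...  // VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_GENERIC
; */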
589BEGINPROC VMXEnable
590%ifdef __AMD64__
591 xor rax, rax
592 %ifdef ASM_CALL64_GCC
593 push rdi
594 %else
595 push rcx
596 %endif
597 vmxon [rsp]
598%else
599 xor eax, eax
600 vmxon [esp + 4]
601%endif
602 jnc .good
603 mov eax, VERR_VMX_INVALID_VMXON_PTR
604 jmp .the_end
605
606.good:
607 jnz .the_end
608 mov eax, VERR_VMX_GENERIC
609
610.the_end:
611%ifdef __AMD64__
612 add rsp, 8
613%endif
614 ret
615ENDPROC VMXEnable
616
617
618;/**
619; * Executes VMXOFF
620; */
621;DECLASM(void) VMXDisable(void);
622BEGINPROC VMXDisable
623 vmxoff
624 ret
625ENDPROC VMXDisable
626
627
628;/**
629; * Executes VMCLEAR
630; *
631; * @returns VBox status code
632; * @param HCPhysVMCS Physical address of VM control structure
633; */
634;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
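;/* Usage sketch for the prototype above:
; *   rc = VMXClearVMCS(HCPhysVMCS);  // VINF_SUCCESS, or VERR_VMX_INVALID_VMCS_PTR on failure
; */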
635BEGINPROC VMXClearVMCS
636%ifdef __AMD64__
637 xor rax, rax
638 %ifdef ASM_CALL64_GCC
639 push rdi
640 %else
641 push rcx
642 %endif
643 vmclear [rsp]
644%else
645 xor eax, eax
646 vmclear [esp + 4]
647%endif
648 jnc .the_end
649 mov eax, VERR_VMX_INVALID_VMCS_PTR
650.the_end:
651%ifdef __AMD64__
652 add rsp, 8
653%endif
654 ret
655ENDPROC VMXClearVMCS
656
657
658;/**
659; * Executes VMPTRLD
660; *
661; * @returns VBox status code
662; * @param HCPhysVMCS Physical address of VMCS structure
663; */
664;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
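;/* Usage sketch for the prototype above (vmptrld makes the given VMCS the current one):
; *   rc = VMXActivateVMCS(HCPhysVMCS);  // VINF_SUCCESS, or VERR_VMX_INVALID_VMCS_PTR on failure
; */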
665BEGINPROC VMXActivateVMCS
666%ifdef __AMD64__
667 xor rax, rax
668 %ifdef ASM_CALL64_GCC
669 push rdi
670 %else
671 push rcx
672 %endif
673 vmptrld [rsp]
674%else
675 xor eax, eax
676 vmptrld [esp + 4]
677%endif
678 jnc .the_end
679 mov eax, VERR_VMX_INVALID_VMCS_PTR
680.the_end:
681%ifdef __AMD64__
682 add rsp, 8
683%endif
684 ret
685ENDPROC VMXActivateVMCS
686
687%endif ; __AMD64__
688
689
690;/**
691; * Prepares for and executes VMRUN
692; *
693; * @returns VBox status code
694; @param HCPhysVMCBHost Physical address of host VMCB
695; * @param HCPhysVMCB Physical address of guest VMCB
696; * @param pCtx Guest context
697; */
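;/* Assumed C-side usage (the actual prototype lives in the HWACCM headers, not in this file):
; *   DECLASM(int) SVMVMRun(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, CPUMCTX *pCtx);
; *   rc = SVMVMRun(HCPhysVMCBHost, HCPhysVMCB, pCtx);  // VINF_SUCCESS
; */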
698BEGINPROC SVMVMRun
699%ifdef __AMD64__ ; fake a cdecl stack frame so the code below can use the same [xBP + xS*2] parameter offsets for both 64-bit calling conventions.
700 %ifdef ASM_CALL64_GCC
701 push rdx
702 push rsi
703 push rdi
704 %else
705 push r8
706 push rdx
707 push rcx
708 %endif
709 push 0
710%endif
711 push xBP
712 mov xBP, xSP
713
714 ;/* Manual save and restore:
715 ; * - General purpose registers except RIP, RSP, RAX
716 ; *
717 ; * Trashed:
718 ; * - CR2 (we don't care)
719 ; * - LDTR (reset to 0)
720 ; * - DRx (presumably not changed at all)
721 ; * - DR7 (reset to 0x400)
722 ; */
723
724 ;/* Save all general purpose host registers. */
725 MYPUSHAD
726
727 ;/* Save the Guest CPU context pointer. */
728 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
729 push xSI ; push for saving the state at the end
730
731 ; Restore CR2
732 mov ebx, [xSI + CPUMCTX.cr2]
733 mov cr2, xBX
734
735 ; save host fs, gs, sysenter msr etc
736 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
737 push xAX ; save for the vmload after vmrun
738 DB 0x0F, 0x01, 0xDB ; VMSAVE
739
740 ; setup eax for VMLOAD
741 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; x86: take low dword only)
742
743 ;/* Restore Guest's general purpose registers. */
744 ;/* EAX is loaded from the VMCB by VMRUN */
745 mov ebx, [xSI + CPUMCTX.ebx]
746 mov ecx, [xSI + CPUMCTX.ecx]
747 mov edx, [xSI + CPUMCTX.edx]
748 mov edi, [xSI + CPUMCTX.edi]
749 mov ebp, [xSI + CPUMCTX.ebp]
750 mov esi, [xSI + CPUMCTX.esi]
751
752 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
753 DB 0x0f, 0x01, 0xDD ; CLGI
754 sti
755
756 ; load guest fs, gs, sysenter msr etc
757 DB 0x0f, 0x01, 0xDA ; VMLOAD
758 ; run the VM
759 DB 0x0F, 0x01, 0xD8 ; VMRUN
760
761 ;/* EAX is in the VMCB already; we can use it here. */
762
763 ; save guest fs, gs, sysenter msr etc
764 DB 0x0F, 0x01, 0xDB ; VMSAVE
765
766 ; load host fs, gs, sysenter msr etc
767 pop xAX ; pushed above
768 DB 0x0F, 0x01, 0xDA ; VMLOAD
769
770 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
771 cli
772 DB 0x0f, 0x01, 0xDC ; STGI
773
774 pop xAX ; pCtx
775
776 mov [ss:xAX + CPUMCTX.ebx], ebx
777 mov [ss:xAX + CPUMCTX.ecx], ecx
778 mov [ss:xAX + CPUMCTX.edx], edx
779 mov [ss:xAX + CPUMCTX.esi], esi
780 mov [ss:xAX + CPUMCTX.edi], edi
781 mov [ss:xAX + CPUMCTX.ebp], ebp
782
783 ; Restore general purpose registers
784 MYPOPAD
785
786 mov eax, VINF_SUCCESS
787
788 pop xBP
789%ifdef __AMD64__
790 add xSP, 4*xS
791%endif
792 ret
793ENDPROC SVMVMRun
794
795%ifdef __AMD64__
796%ifdef __WIN__
797
798;;
799; Executes INVLPGA
800;
801; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
802; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
803;
804;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
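;/* Usage sketch for the prototype above (GCPtrPage and uASID are caller-supplied values):
; *   SVMInvlpgA(GCPtrPage, uASID);  // invalidate one guest page in the TLB entries tagged with uASID
; */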
805BEGINPROC SVMInvlpgA
806%ifdef __AMD64__
807 %ifdef ASM_CALL64_GCC
808 mov eax, edi ;; @todo 64-bit guest.
809 mov ecx, esi
810 %else
811 mov eax, ecx ;; @todo 64-bit guest.
812 mov ecx, edx
813 %endif
814 invlpga rax, ecx
815%else
816 mov eax, [esp + 4]
817 mov ecx, [esp + 8]
818 invlpga eax, ecx
819%endif
820 ret
821ENDPROC SVMInvlpgA
822%endif
823%endif