VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 3294

Last change on this file since 3294 was 3151, checked in by vboxsync, 18 years ago

VMM: Fixed OS/2 build.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.0 KB
1; $Id: HWACCMR0A.asm 3151 2007-06-18 22:53:04Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 innotek GmbH
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License as published by the Free Software Foundation,
13; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14; distribution. VirtualBox OSE is distributed in the hope that it will
15; be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; If you received this file as part of a commercial VirtualBox
18; distribution, then only the terms of your commercial VirtualBox
19; license agreement apply instead of the previous paragraph.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30
31%ifdef __OS2__ ;; @todo build cvs nasm like on OS X.
32 %macro vmwrite 2,
33 int3
34 %endmacro
35 %define vmlaunch int3
36 %define vmresume int3
37%endif
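; (The block above presumably works around an OS/2 nasm build that does not yet
; know the VMX mnemonics, stubbing them out with int3 until a newer assembler
; is available; see the @todo.)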
38
39
40;; @def MYPUSHAD
41; Macro generating an equivalent to pushad
42
43;; @def MYPOPAD
44; Macro generating an equivalent to popad
45
46;; @def MYPUSHSEGS
47; Macro saving all segment registers on the stack.
48; @param 1 full width register name
49; @param 2 16-bit register name for \a 1.
50
51;; @def MYPOPSEGS
52; Macro restoring all segment registers on the stack
53; @param 1 full width register name
54; @param 2 16-bit register name for \a 1.
55
56%ifdef __AMD64__
57 %ifdef ASM_CALL64_GCC
58 %macro MYPUSHAD 0
59 push r15
60 push r14
61 push r13
62 push r12
63 push rbx
64 %endmacro
65 %macro MYPOPAD 0
66 pop rbx
67 pop r12
68 pop r13
69 pop r14
70 pop r15
71 %endmacro
72
73 %else ; ASM_CALL64_MSC
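 ; Note: the Microsoft x64 calling convention treats rsi and rdi as
 ; callee-saved (non-volatile), unlike the SysV ABI used by GCC, so the MSC
 ; variants below must preserve them in addition to rbx and r12-r15.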
74 %macro MYPUSHAD 0
75 push r15
76 push r14
77 push r13
78 push r12
79 push rbx
80 push rsi
81 push rdi
82 %endmacro
83 %macro MYPOPAD 0
84 pop rdi
85 pop rsi
86 pop rbx
87 pop r12
88 pop r13
89 pop r14
90 pop r15
91 %endmacro
92 %endif
93
94 %macro MYPUSHSEGS 2
95 mov %2, es
96 push %1
97 mov %2, ds
98 push %1
99 push fs
100 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
101 push rcx
102 mov ecx, MSR_K8_GS_BASE
103 rdmsr
104 pop rcx
105 push rdx
106 push rax
107 push gs
108 %endmacro
109
110 %macro MYPOPSEGS 2
111 ; Note: do not step through this code with a debugger!
112 pop gs
113 pop rax
114 pop rdx
115 push rcx
116 mov ecx, MSR_K8_GS_BASE
117 wrmsr
118 pop rcx
119 ; Now it's safe to step again
120
121 pop fs
122 pop %1
123 mov ds, %2
124 pop %1
125 mov es, %2
126 %endmacro
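 ; Note on the GS handling above: loading the GS selector also reloads its
 ; hidden base register, which would lose the kernel GS base that hosts manage
 ; via swapgs. MYPUSHSEGS therefore saves MSR_K8_GS_BASE explicitly, and
 ; MYPOPSEGS writes it back after reloading the selector; between the 'pop gs'
 ; and the wrmsr the base is temporarily wrong, which is why single-stepping
 ; through that window is unsafe.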
127
128%else ; __X86__
129 %macro MYPUSHAD 0
130 pushad
131 %endmacro
132 %macro MYPOPAD 0
133 popad
134 %endmacro
135
136 %macro MYPUSHSEGS 2
137 push ds
138 push es
139 push fs
140 push gs
141 %endmacro
142 %macro MYPOPSEGS 2
143 pop gs
144 pop fs
145 pop es
146 pop ds
147 %endmacro
148%endif
149
150
151BEGINCODE
152
153;/**
154; * Prepares for and executes VMLAUNCH
155; *
156; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
157; *
158; * @returns VBox status code
159; * @param pCtx Guest context
160; */
161BEGINPROC VMXStartVM
162 push xBP
163 mov xBP, xSP
164
165 ;/* First we have to save some final CPU context registers. */
166%ifdef __AMD64__
167 mov rax, qword .vmlaunch_done
168 push rax
169%else
170 push .vmlaunch_done
171%endif
172 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
173 vmwrite xAX, [xSP]
174 ;/* @todo assumes success... */
175 add xSP, xS
176
177 ;/* Manual save and restore:
178 ; * - General purpose registers except RIP, RSP
179 ; *
180 ; * Trashed:
181 ; * - CR2 (we don't care)
182 ; * - LDTR (reset to 0)
183 ; * - DRx (presumably not changed at all)
184 ; * - DR7 (reset to 0x400)
185 ; * - EFLAGS (reset to BIT(1); not relevant)
186 ; *
187 ; */
188
189 ;/* Save all general purpose host registers. */
190 MYPUSHAD
191
192 ;/* Save segment registers */
193 MYPUSHSEGS xAX, ax
194
195 ;/* Save the Guest CPU context pointer. */
196%ifdef __AMD64__
197 %ifdef ASM_CALL64_GCC
198 mov rsi, rdi ; pCtx
199 %else
200 mov rsi, rcx ; pCtx
201 %endif
202%else
203 mov esi, [ebp + 8] ; pCtx
204%endif
205 push xSI
206
207 ; Save LDTR
208 xor eax, eax
209 sldt ax
210 push xAX
211
212 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
213 sub xSP, xS*2
214 sgdt [xSP]
215
216 sub xSP, xS*2
217 sidt [xSP]
218
219 ; Restore CR2
220 mov ebx, [xSI + CPUMCTX.cr2]
221 mov cr2, xBX
222
223 mov eax, VMX_VMCS_HOST_RSP
224 vmwrite xAX, xSP
225 ;/* @todo assumes success... */
226 ;/* Don't mess with ESP anymore!! */
227
228 ;/* Restore Guest's general purpose registers. */
229 mov eax, [xSI + CPUMCTX.eax]
230 mov ebx, [xSI + CPUMCTX.ebx]
231 mov ecx, [xSI + CPUMCTX.ecx]
232 mov edx, [xSI + CPUMCTX.edx]
233 mov edi, [xSI + CPUMCTX.edi]
234 mov ebp, [xSI + CPUMCTX.ebp]
235 mov esi, [xSI + CPUMCTX.esi]
236
237 vmlaunch
238 jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */
239
240ALIGNCODE(16)
241.vmlaunch_done:
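 ; VMX failure convention: a failed VMLAUNCH sets CF when there is no valid
 ; current VMCS (VMfailInvalid) and sets ZF when the VMCS is valid but the
 ; entry failed (VMfailValid, error number in the VM-instruction error field).
 ; After a successful entry we only get back here via VMX_VMCS_HOST_RIP on
 ; VM exit, with both flags clear.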
242 jc near .vmxstart_invalid_vmxon_ptr
243 jz near .vmxstart_start_failed
244
245 ; Restore base and limit of the IDTR & GDTR
246 lidt [xSP]
247 add xSP, xS*2
248 lgdt [xSP]
249 add xSP, xS*2
250
251 push xDI
252 mov xDI, [xSP + xS * 2] ; pCtx
253
254 mov [ss:xDI + CPUMCTX.eax], eax
255 mov [ss:xDI + CPUMCTX.ebx], ebx
256 mov [ss:xDI + CPUMCTX.ecx], ecx
257 mov [ss:xDI + CPUMCTX.edx], edx
258 mov [ss:xDI + CPUMCTX.esi], esi
259 mov [ss:xDI + CPUMCTX.ebp], ebp
260%ifdef __AMD64__
261 pop xAX ; the guest edi we pushed above
262 mov dword [ss:xDI + CPUMCTX.edi], eax
263%else
264 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
265%endif
266
267 pop xAX ; saved LDTR
268 lldt ax
269
270 add xSP, xS ; pCtx
271
272 ; Restore segment registers
273 MYPOPSEGS xAX, ax
274
275 ; Restore general purpose registers
276 MYPOPAD
277
278 mov eax, VINF_SUCCESS
279
280.vmstart_end:
281 pop xBP
282 ret
283
284
285.vmxstart_invalid_vmxon_ptr:
286 ; Restore base and limit of the IDTR & GDTR
287 lidt [xSP]
288 add xSP, xS*2
289 lgdt [xSP]
290 add xSP, xS*2
291
292 pop xAX ; saved LDTR
293 lldt ax
294
295 add xSP, xS ; pCtx
296
297 ; Restore segment registers
298 MYPOPSEGS xAX, ax
299
300 ; Restore all general purpose host registers.
301 MYPOPAD
302 mov eax, VERR_VMX_INVALID_VMXON_PTR
303 jmp .vmstart_end
304
305.vmxstart_start_failed:
306 ; Restore base and limit of the IDTR & GDTR
307 lidt [xSP]
308 add xSP, xS*2
309 lgdt [xSP]
310 add xSP, xS*2
311
312 pop xAX ; saved LDTR
313 lldt ax
314
315 add xSP, xS ; pCtx
316
317 ; Restore segment registers
318 MYPOPSEGS xAX, ax
319
320 ; Restore all general purpose host registers.
321 MYPOPAD
322 mov eax, VERR_VMX_UNABLE_TO_START_VM
323 jmp .vmstart_end
324
325ENDPROC VMXStartVM
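; Illustrative only (not part of this file): a C-side sketch of how a ring-0
; caller might invoke this helper and map its status codes. The prototype is
; assumed from the doc comment above; the handler names are hypothetical.
;
;   DECLASM(int) VMXStartVM(CPUMCTX *pCtx);
;
;   int rc = VMXStartVM(pCtx);
;   if (rc == VERR_VMX_INVALID_VMXON_PTR)    /* VMLAUNCH failed with CF set */
;       return hmR0HandleInvalidVmcsPtr();   /* hypothetical */
;   if (rc == VERR_VMX_UNABLE_TO_START_VM)   /* VMLAUNCH failed with ZF set */
;       return hmR0HandleEntryFailure();     /* hypothetical */
;   /* VINF_SUCCESS: the guest ran until a VM exit; its registers are in *pCtx. */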
326
327
328;/**
329; * Prepares for and executes VMRESUME
330; *
331; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
332; *
333; * @returns VBox status code
334; * @param pCtx Guest context
335; */
336BEGINPROC VMXResumeVM
337 push xBP
338 mov xBP, xSP
339
340 ;/* First we have to save some final CPU context registers. */
341%ifdef __AMD64__
342 mov rax, qword .vmresume_done
343 push rax
344%else
345 push .vmresume_done
346%endif
347 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMRESUME?) */
348 vmwrite xAX, [xSP]
349 ;/* @todo assumes success... */
350 add xSP, xS
351
352 ;/* Manual save and restore:
353 ; * - General purpose registers except RIP, RSP
354 ; *
355 ; * Trashed:
356 ; * - CR2 (we don't care)
357 ; * - LDTR (reset to 0)
358 ; * - DRx (presumably not changed at all)
359 ; * - DR7 (reset to 0x400)
360 ; * - EFLAGS (reset to BIT(1); not relevant)
361 ; *
362 ; */
363
364 ;/* Save all general purpose host registers. */
365 MYPUSHAD
366
367 ;/* Save segment registers */
368 MYPUSHSEGS xAX, ax
369
370 ;/* Save the Guest CPU context pointer. */
371%ifdef __AMD64__
372 %ifdef ASM_CALL64_GCC
373 mov rsi, rdi ; pCtx
374 %else
375 mov rsi, rcx ; pCtx
376 %endif
377%else
378 mov esi, [ebp + 8] ; pCtx
379%endif
380 push xSI
381
382 ; Save LDTR
383 xor eax, eax
384 sldt ax
385 push xAX
386
387 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
388 sub xSP, xS*2
389 sgdt [xSP]
390
391 sub xSP, xS*2
392 sidt [xSP]
393
394 ; Restore CR2
395 mov xBX, [xSI + CPUMCTX.cr2]
396 mov cr2, xBX
397
398 mov eax, VMX_VMCS_HOST_RSP
399 vmwrite xAX, xSP
400 ;/* @todo assumes success... */
401 ;/* Don't mess with ESP anymore!! */
402
403 ;/* Restore Guest's general purpose registers. */
404 mov eax, [xSI + CPUMCTX.eax]
405 mov ebx, [xSI + CPUMCTX.ebx]
406 mov ecx, [xSI + CPUMCTX.ecx]
407 mov edx, [xSI + CPUMCTX.edx]
408 mov edi, [xSI + CPUMCTX.edi]
409 mov ebp, [xSI + CPUMCTX.ebp]
410 mov esi, [xSI + CPUMCTX.esi]
411
412 vmresume
413 jmp .vmresume_done; ;/* here if vmresume detected a failure. */
414
415ALIGNCODE(16)
416.vmresume_done:
417 jc near .vmxresume_invalid_vmxon_ptr
418 jz near .vmxresume_start_failed
419
420 ; Restore base and limit of the IDTR & GDTR
421 lidt [xSP]
422 add xSP, xS*2
423 lgdt [xSP]
424 add xSP, xS*2
425
426 push xDI
427 mov xDI, [xSP + xS * 2] ; pCtx
428
429 mov [ss:xDI + CPUMCTX.eax], eax
430 mov [ss:xDI + CPUMCTX.ebx], ebx
431 mov [ss:xDI + CPUMCTX.ecx], ecx
432 mov [ss:xDI + CPUMCTX.edx], edx
433 mov [ss:xDI + CPUMCTX.esi], esi
434 mov [ss:xDI + CPUMCTX.ebp], ebp
435%ifdef __AMD64__
436 pop xAX ; the guest edi we pushed above
437 mov dword [ss:xDI + CPUMCTX.edi], eax
438%else
439 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
440%endif
441
442 pop xAX ; saved LDTR
443 lldt ax
444
445 add xSP, xS ; pCtx
446
447 ; Restore segment registers
448 MYPOPSEGS xAX, ax
449
450 ; Restore general purpose registers
451 MYPOPAD
452
453 mov eax, VINF_SUCCESS
454
455.vmresume_end:
456 pop xBP
457 ret
458
459.vmxresume_invalid_vmxon_ptr:
460 ; Restore base and limit of the IDTR & GDTR
461 lidt [xSP]
462 add xSP, xS*2
463 lgdt [xSP]
464 add xSP, xS*2
465
466 pop xAX ; saved LDTR
467 lldt ax
468
469 add xSP, xS ; pCtx
470
471 ; Restore segment registers
472 MYPOPSEGS xAX, ax
473
474 ; Restore all general purpose host registers.
475 MYPOPAD
476 mov eax, VERR_VMX_INVALID_VMXON_PTR
477 jmp .vmresume_end
478
479.vmxresume_start_failed:
480 ; Restore base and limit of the IDTR & GDTR
481 lidt [xSP]
482 add xSP, xS*2
483 lgdt [xSP]
484 add xSP, xS*2
485
486 pop xAX ; saved LDTR
487 lldt ax
488
489 add xSP, xS ; pCtx
490
491 ; Restore segment registers
492 MYPOPSEGS xAX, ax
493
494 ; Restore all general purpose host registers.
495 MYPOPAD
496 mov eax, VERR_VMX_UNABLE_TO_RESUME_VM
497 jmp .vmresume_end
498
499ENDPROC VMXResumeVM
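; Note: VMLAUNCH is architecturally valid only for a VMCS in the "clear" state
; and VMRESUME only for one that has already been launched, so callers are
; expected to use VMXStartVM for the first entry after VMXClearVMCS /
; VMXActivateVMCS and VMXResumeVM for subsequent entries.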
500
501
502%ifdef __AMD64__
503;/**
504; * Executes VMWRITE
505; *
506; * @returns VBox status code
507; * @param idxField x86: [ebp + 08h] msc: rcx gcc: edi VMCS index
508; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
509; */
510BEGINPROC VMXWriteVMCS64
511%ifdef ASM_CALL64_GCC
512 mov eax, 0ffffffffh
513 and rdi, rax
514 xor rax, rax
515 vmwrite rdi, rsi
516%else
517 mov eax, 0ffffffffh
518 and rcx, rax
519 xor rax, rax
520 vmwrite rcx, rdx
521%endif
522 jnc .valid_vmcs
523 mov eax, VERR_VMX_INVALID_VMCS_PTR
524 ret
525.valid_vmcs:
526 jnz .the_end
527 mov eax, VERR_VMX_INVALID_VMCS_FIELD
528.the_end:
529 ret
530ENDPROC VMXWriteVMCS64
531
532;/**
533; * Executes VMREAD
534; *
535; * @returns VBox status code
536; * @param idxField VMCS index
537; * @param pData Ptr to store VM field value
538; */
539;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
540BEGINPROC VMXReadVMCS64
541%ifdef ASM_CALL64_GCC
542 mov eax, 0ffffffffh
543 and rdi, rax
544 xor rax, rax
545 vmread [rsi], rdi
546%else
547 mov eax, 0ffffffffh
548 and rcx, rax
549 xor rax, rax
550 vmread [rdx], rcx
551%endif
552 jnc .valid_vmcs
553 mov eax, VERR_VMX_INVALID_VMCS_PTR
554 ret
555.valid_vmcs:
556 jnz .the_end
557 mov eax, VERR_VMX_INVALID_VMCS_FIELD
558.the_end:
559 ret
560ENDPROC VMXReadVMCS64
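; Illustrative only: a C-side sketch pairing the two accessors above. The
; VMXReadVMCS64 prototype is the one quoted above; the VMXWriteVMCS64 prototype
; is an assumption based on its parameter descriptions.
;
;   DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t u64Val);
;   DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
;
;   uint64_t u64HostRsp = 0;
;   int rc = VMXReadVMCS64(VMX_VMCS_HOST_RSP, &u64HostRsp);
;   if (rc == VINF_SUCCESS)
;       rc = VMXWriteVMCS64(VMX_VMCS_HOST_RSP, u64HostRsp);
;   /* rc is VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD on failure. */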
561
562
563;/**
564; * Executes VMXON
565; *
566; * @returns VBox status code
567; * @param HCPhysVMXOn Physical address of VMXON structure
568; */
569;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
570BEGINPROC VMXEnable
571%ifdef __AMD64__
572 xor rax, rax
573 %ifdef ASM_CALL64_GCC
574 push rdi
575 %else
576 push rcx
577 %endif
578 vmxon [rsp]
579%else
580 xor eax, eax
581 vmxon [esp + 4]
582%endif
583 jnc .good
584 mov eax, VERR_VMX_INVALID_VMXON_PTR
585 jmp .the_end
586
587.good:
588 jnz .the_end
589 mov eax, VERR_VMX_GENERIC
590
591.the_end:
592%ifdef __AMD64__
593 add rsp, 8
594%endif
595 ret
596ENDPROC VMXEnable
597
598
599;/**
600; * Executes VMXOFF
601; */
602;DECLASM(void) VMXDisable(void);
603BEGINPROC VMXDisable
604 vmxoff
605 ret
606ENDPROC VMXDisable
607
608
609;/**
610; * Executes VMCLEAR
611; *
612; * @returns VBox status code
613; * @param HCPhysVMCS Physical address of VM control structure
614; */
615;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
616BEGINPROC VMXClearVMCS
617%ifdef __AMD64__
618 xor rax, rax
619 %ifdef ASM_CALL64_GCC
620 push rdi
621 %else
622 push rcx
623 %endif
624 vmclear [rsp]
625%else
626 xor eax, eax
627 vmclear [esp + 4]
628%endif
629 jnc .the_end
630 mov eax, VERR_VMX_INVALID_VMCS_PTR
631.the_end:
632%ifdef __AMD64__
633 add rsp, 8
634%endif
635 ret
636ENDPROC VMXClearVMCS
637
638
639;/**
640; * Executes VMPTRLD
641; *
642; * @returns VBox status code
643; * @param HCPhysVMCS Physical address of VMCS structure
644; */
645;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
646BEGINPROC VMXActivateVMCS
647%ifdef __AMD64__
648 xor rax, rax
649 %ifdef ASM_CALL64_GCC
650 push rdi
651 %else
652 push rcx
653 %endif
654 vmptrld [rsp]
655%else
656 xor eax, eax
657 vmptrld [esp + 4]
658%endif
659 jnc .the_end
660 mov eax, VERR_VMX_INVALID_VMCS_PTR
661.the_end:
662%ifdef __AMD64__
663 add rsp, 8
664%endif
665 ret
666ENDPROC VMXActivateVMCS
667
668%endif ; __AMD64__
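; Illustrative only: a sketch of the order in which a caller might combine the
; VMX helpers above when bringing VT-x up on a CPU (not taken from this file):
;
;   int rc = VMXEnable(HCPhysVMXOn);       /* VMXON                        */
;   if (rc == VINF_SUCCESS)
;       rc = VMXClearVMCS(HCPhysVMCS);     /* put the VMCS in clear state  */
;   if (rc == VINF_SUCCESS)
;       rc = VMXActivateVMCS(HCPhysVMCS);  /* make it current (VMPTRLD)    */
;   /* ...VMXWriteVMCS64() the host/guest fields, then VMXStartVM(pCtx)... */
;   VMXDisable();                          /* VMXOFF on teardown           */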
669
670
671;/**
672; * Prepares for and executes VMRUN
673; *
674; * @returns VBox status code
675; @param pVMCBHostPhys Physical address of host VMCB
676; @param pVMCBPhys Physical address of guest VMCB
677; * @param pCtx Guest context
678; */
679BEGINPROC SVMVMRun
680%ifdef __AMD64__ ; fake a cdecl stack frame - I'm lazy, so sue me.
681 %ifdef ASM_CALL64_GCC
682 push rdx
683 push rsi
684 push rdi
685 %else
686 push r8
687 push rdx
688 push rcx
689 %endif
690 push 0
691%endif
692 push xBP
693 mov xBP, xSP
694
695 ;/* Manual save and restore:
696 ; * - General purpose registers except RIP, RSP, RAX
697 ; *
698 ; * Trashed:
699 ; * - CR2 (we don't care)
700 ; * - LDTR (reset to 0)
701 ; * - DRx (presumably not changed at all)
702 ; * - DR7 (reset to 0x400)
703 ; */
704
705 ;/* Save all general purpose host registers. */
706 MYPUSHAD
707
708 ;/* Save the Guest CPU context pointer. */
709 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
710 push xSI ; push for saving the state at the end
711
712 ; Restore CR2
713 mov ebx, [xSI + CPUMCTX.cr2]
714 mov cr2, xBX
715
716 ; save host fs, gs, sysenter msr etc
717 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
718 push xAX ; save for the vmload after vmrun
719 DB 0x0F, 0x01, 0xDB ; VMSAVE
720
721 ; setup eax for VMLOAD
722 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
723
724 ;/* Restore Guest's general purpose registers. */
725 ;/* EAX is loaded from the VMCB by VMRUN */
726 mov ebx, [xSI + CPUMCTX.ebx]
727 mov ecx, [xSI + CPUMCTX.ecx]
728 mov edx, [xSI + CPUMCTX.edx]
729 mov edi, [xSI + CPUMCTX.edi]
730 mov ebp, [xSI + CPUMCTX.ebp]
731 mov esi, [xSI + CPUMCTX.esi]
732
733 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
734 DB 0x0f, 0x01, 0xDD ; CLGI
735 sti
736
737 ; load guest fs, gs, sysenter msr etc
738 DB 0x0f, 0x01, 0xDA ; VMLOAD
739 ; run the VM
740 DB 0x0F, 0x01, 0xD8 ; VMRUN
741
742 ;/* EAX is in the VMCB already; we can use it here. */
743
744 ; save guest fs, gs, sysenter msr etc
745 DB 0x0F, 0x01, 0xDB ; VMSAVE
746
747 ; load host fs, gs, sysenter msr etc
748 pop xAX ; pushed above
749 DB 0x0F, 0x01, 0xDA ; VMLOAD
750
751 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
752 cli
753 DB 0x0f, 0x01, 0xDC ; STGI
754
755 pop xAX ; pCtx
756
757 mov [ss:xAX + CPUMCTX.ebx], ebx
758 mov [ss:xAX + CPUMCTX.ecx], ecx
759 mov [ss:xAX + CPUMCTX.edx], edx
760 mov [ss:xAX + CPUMCTX.esi], esi
761 mov [ss:xAX + CPUMCTX.edi], edi
762 mov [ss:xAX + CPUMCTX.ebp], ebp
763
764 ; Restore general purpose registers
765 MYPOPAD
766
767 mov eax, VINF_SUCCESS
768
769 pop xBP
770%ifdef __AMD64__
771 add xSP, 4*xS
772%endif
773 ret
774ENDPROC SVMVMRun
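; Illustrative only: a C-side sketch of calling the AMD-V helper above. The
; parameter names follow the comments in the body; the exact prototype is an
; assumption.
;
;   DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, CPUMCTX *pCtx);
;
;   int rc = SVMVMRun(HCPhysVMCBHost, HCPhysVMCB, pCtx);
;   /* Always VINF_SUCCESS here; the guest registers have been written back to *pCtx. */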
775
776%ifdef __AMD64__
777%ifdef __WIN__
778
779;;
780; Executes INVLPGA
781;
782; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
783; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
784;
785;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
786BEGINPROC SVMInvlpgA
787%ifdef __AMD64__
788 %ifdef ASM_CALL64_GCC
789 mov eax, edi ;; @todo 64-bit guest.
790 mov ecx, esi
791 %else
792 mov eax, ecx ;; @todo 64-bit guest.
793 mov ecx, edx
794 %endif
795 invlpga rax, ecx
796%else
797 mov eax, [esp + 4]
798 mov ecx, [esp + 8]
799 invlpga eax, ecx
800%endif
801 ret
802ENDPROC SVMInvlpgA
803%endif
804%endif