source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac@ 18726

Last change on this file since 18726 was 16859, checked in by vboxsync, 16 years ago

Load hypervisor CR3 from CPUM (instead of hardcoded fixups in the switchers). Dangerous change. Watch for regressions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.3 KB
1; $Id: AMD64andLegacy.mac 16859 2009-02-17 16:19:51Z vboxsync $
2;; @file
3; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;%define DEBUG_STUFF 1
23;%define STRICT_IF 1
24
25;*******************************************************************************
26;* Header Files *
27;*******************************************************************************
28%include "VBox/asmdefs.mac"
29%include "VBox/x86.mac"
30%include "VBox/cpum.mac"
31%include "VBox/stam.mac"
32%include "VBox/vm.mac"
33%include "CPUMInternal.mac"
34%include "VMMSwitcher/VMMSwitcher.mac"
35
36
37;
38; Start the fixup records
39; We collect the fixups in the .data section as we go along
40; It is therefore VITAL that no-one is using the .data section
41; for anything else between 'Start' and 'End'.
42;
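; (Each FIXUP macro below emits a record into this .data blob - a fixup type,
;  the offset of the placeholder it patches, and optional operands - which the
;  relocation code (see NAME(Relocate) near the end) presumably consumes when
;  the switcher is installed; the placeholders are the 0ffffffffh/0ffffh
;  constants scattered through the code.)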
43BEGINDATA
44GLOBALNAME Fixups
45
46
47
48BEGINCODE
49GLOBALNAME Start
50
51%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
52BITS 64
53
54;;
55; The C interface.
56;
57; @param pVM GCC: rdi MSC:rcx The VM handle.
58;
59BEGINPROC vmmR0HostToGuest
60%ifdef DEBUG_STUFF
61 COM64_S_NEWLINE
62 COM64_S_CHAR '^'
63%endif
64 ;
65 ; The ordinary version of the code.
66 ;
67
68 %ifdef STRICT_IF
69 pushf
70 pop rax
71 test eax, X86_EFL_IF
72 jz .if_clear_in
73 mov eax, 0c0ffee00h
74 ret
75.if_clear_in:
76 %endif
77
78 ;
79 ; make r9 = pVM and rdx = pCpum.
80 ; rax, rcx and r8 are scratch hereafter.
81 %ifdef RT_OS_WINDOWS
82 mov r9, rcx
83 %else
84 mov r9, rdi
85 %endif
86 lea rdx, [r9 + VM.cpum]
87
88 %ifdef VBOX_WITH_STATISTICS
89 ;
90 ; Switcher stats.
91 ;
92 lea r8, [r9 + VM.StatSwitcherToGC]
93 STAM64_PROFILE_ADV_START r8
94 %endif
95
96 ;
97 ; Call worker (far return).
98 ;
99 mov eax, cs
100 push rax
101 call NAME(vmmR0HostToGuestAsm)
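 ; (Pushing CS before the call sets up a far-return frame: the switcher
 ;  eventually comes back via the 64-bit 'retf' at the end of
 ;  VMMGCGuestToHostAsm, which pops both this return address and CS.)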
102
103 %ifdef VBOX_WITH_STATISTICS
104 ;
105 ; Switcher stats.
106 ;
107 lea r8, [r9 + VM.StatSwitcherToGC]
108 STAM64_PROFILE_ADV_STOP r8
109 %endif
110
111 ret
112ENDPROC vmmR0HostToGuest
113
114
115%else ; VBOX_WITH_HYBRID_32BIT_KERNEL
116
117
118BITS 32
119
120;;
121; The C interface.
122;
123BEGINPROC vmmR0HostToGuest
124%ifdef DEBUG_STUFF
125 COM32_S_NEWLINE
126 COM32_S_CHAR '^'
127%endif
128
129 %ifdef VBOX_WITH_STATISTICS
130 ;
131 ; Switcher stats.
132 ;
133 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
134 mov edx, 0ffffffffh
135 STAM_PROFILE_ADV_START edx
136 %endif
137
138 ; Thunk to/from 64 bit when invoking the worker routine.
139 ;
140 FIXUP FIX_HC_VM_OFF, 1, VM.cpum
141 mov edx, 0ffffffffh
142
143 push 0
144 push cs
145 push 0
146 FIXUP FIX_HC_32BIT, 1, .vmmR0HostToGuestReturn - NAME(Start)
147 push 0ffffffffh
148
149 FIXUP FIX_HC_64BIT_CS, 1
150 push 0ffffh
151 FIXUP FIX_HC_32BIT, 1, NAME(vmmR0HostToGuestAsm) - NAME(Start)
152 push 0ffffffffh
153 retf
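 ; (The first four pushes above build the frame that the worker's final 64-bit
 ;  far return uses to come back to .vmmR0HostToGuestReturn in the current
 ;  32-bit CS; the last two are consumed by this 'retf', which enters
 ;  vmmR0HostToGuestAsm through the 64-bit CS patched in by FIX_HC_64BIT_CS.)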
154.vmmR0HostToGuestReturn:
155
156 ;
157 ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
158 ; the CPU has the right idea about the selectors.
159 ;
160 mov edx, ds
161 mov ds, edx
162 mov ecx, es
163 mov es, ecx
164 mov edx, ss
165 mov ss, edx
166
167 %ifdef VBOX_WITH_STATISTICS
168 ;
169 ; Switcher stats.
170 ;
171 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
172 mov edx, 0ffffffffh
173 STAM_PROFILE_ADV_STOP edx
174 %endif
175
176 ret
177ENDPROC vmmR0HostToGuest
178
179BITS 64
180%endif ;!VBOX_WITH_HYBRID_32BIT_KERNEL
181
182
183
184; *****************************************************************************
185; vmmR0HostToGuestAsm
186;
187; Phase one of the switch from host to guest context (host MMU context)
188;
189; INPUT:
190; - edx virtual address of CPUM structure (valid in host context)
191;
192; USES/DESTROYS:
193; - eax, ecx, edx, r8
194;
195; ASSUMPTION:
196; - current CS and DS selectors are wide open
197;
198; *****************************************************************************
199ALIGNCODE(16)
200BEGINPROC vmmR0HostToGuestAsm
201 ;; Store the offset from CPUM to CPUMCPU in r8
202 mov r8, [rdx + CPUM.ulOffCPUMCPU]
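 ; (r8 now holds the byte offset from CPUM to the per-CPU CPUMCPU data, so
 ;  [rdx + r8 + CPUMCPU.xxx] below addresses the current CPU's saved
 ;  host/guest state.)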
203
204 ;;
205 ;; Save CPU host context
206 ;; Skip eax, edx and ecx as these are not preserved over calls.
207 ;;
208 ; general registers.
209 ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
210 mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
211 ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
212 ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
213 mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
214 mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
215 mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
216 mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
217 ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
218 ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
219 mov [rdx + r8 + CPUMCPU.Host.r10], r10
220 mov [rdx + r8 + CPUMCPU.Host.r11], r11
221 mov [rdx + r8 + CPUMCPU.Host.r12], r12
222 mov [rdx + r8 + CPUMCPU.Host.r13], r13
223 mov [rdx + r8 + CPUMCPU.Host.r14], r14
224 mov [rdx + r8 + CPUMCPU.Host.r15], r15
225 ; selectors.
226 mov [rdx + r8 + CPUMCPU.Host.ds], ds
227 mov [rdx + r8 + CPUMCPU.Host.es], es
228 mov [rdx + r8 + CPUMCPU.Host.fs], fs
229 mov [rdx + r8 + CPUMCPU.Host.gs], gs
230 mov [rdx + r8 + CPUMCPU.Host.ss], ss
231 ; MSRs
232 mov rbx, rdx
233 mov ecx, MSR_K8_FS_BASE
234 rdmsr
235 mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
236 mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
237 mov ecx, MSR_K8_GS_BASE
238 rdmsr
239 mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
240 mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
241 mov ecx, MSR_K6_EFER
242 rdmsr
243 mov [rbx + r8 + CPUMCPU.Host.efer], eax
244 mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
245 mov ecx, MSR_K6_EFER
246 mov rdx, rbx
247 ; special registers.
248 sldt [rdx + r8 + CPUMCPU.Host.ldtr]
249 sidt [rdx + r8 + CPUMCPU.Host.idtr]
250 sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
251 str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
252 ; flags
253 pushf
254 pop qword [rdx + r8 + CPUMCPU.Host.rflags]
255
256 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
257 ; save MSR_IA32_SYSENTER_CS register.
258 mov ecx, MSR_IA32_SYSENTER_CS
259 mov rbx, rdx ; save rdx
260 rdmsr ; edx:eax <- MSR[ecx]
261 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], rax
262 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], rdx
263 xor rax, rax ; load 0:0 to cause #GP upon sysenter
264 xor rdx, rdx
265 wrmsr
266 mov rdx, rbx ; restore rdx
267 jmp short htg_no_sysenter
268
269ALIGNCODE(16)
270htg_no_sysenter:
271
272 ;; handle use flags.
273 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
274 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
275 mov [rdx + r8 + CPUMCPU.fUseFlags], esi
276
277 ; debug registers.
278 test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
279 jz htg_debug_regs_no
280 jmp htg_debug_regs_save
281htg_debug_regs_no:
282 DEBUG_CHAR('a') ; trashes esi
283
284 ; control registers.
285 mov rax, cr0
286 mov [rdx + r8 + CPUMCPU.Host.cr0], rax
287 ;mov rax, cr2 ; assume the host OS doesn't stuff things in cr2. (safe)
288 ;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
289 mov rax, cr3
290 mov [rdx + r8 + CPUMCPU.Host.cr3], rax
291 mov rax, cr4
292 mov [rdx + r8 + CPUMCPU.Host.cr4], rax
293
294 ;;
295 ;; Start switching to VMM context.
296 ;;
297
298 ;
299 ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
300 ; Also disable WP. (eax==cr4 now)
301 ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
302 ;
303 and rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
304 mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
305 DEBUG_CHAR('b') ; trashes esi
306 ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
307 ; in CPUM.Hyper.cr4 (which isn't currently being used). That should
308 ; simplify this operation a bit (and improve locality of the data).
309
310 ;
311 ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
312 ; FXSAVE support on the host CPU
313 ;
314 and ecx, [rdx + CPUM.CR4.AndMask]
315 or eax, ecx
316 or eax, [rdx + CPUM.CR4.OrMask]
317 mov cr4, rax
318 DEBUG_CHAR('c') ; trashes esi
319
320 mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
321 and eax, X86_CR0_EM
322 or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
323 mov cr0, rax
324 DEBUG_CHAR('0') ; trashes esi
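 ; (Only the guest's EM bit is carried over from CPUMCPU.Guest.cr0; forcing TS
 ;  and MP makes the first FPU/SSE instruction raise #NM, which presumably lets
 ;  the FPU state be switched lazily - cf. the CPUM_USED_FPU handling on the
 ;  way back.)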
325
326
327 ; Load new gdt so we can do far jump to guest code after cr3 reload.
328 lgdt [rdx + CPUM.Hyper.gdtr]
329 DEBUG_CHAR('1') ; trashes esi
330
331 ; Store the hypervisor cr3 for later loading
332 mov ebp, [rdx + CPUM.Hyper.cr3]
333
334 ;;
335 ;; Load Intermediate memory context.
336 ;;
337 FIXUP FIX_INTER_AMD64_CR3, 1
338 mov eax, 0ffffffffh
339 mov cr3, rax
340 DEBUG_CHAR('2') ; trashes esi
341
342 ;;
343 ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
344 ;;
345 jmp far [NAME(fpIDEnterTarget) wrt rip]
346
347; 16:32 Pointer to IDEnterTarget.
348NAME(fpIDEnterTarget):
349 FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
350dd 0
351 FIXUP FIX_HYPER_CS, 0
352dd 0
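; (The two fixed-up dwords above form the 16:32 far pointer used by the
;  'jmp far' just before them: offset = IDEnterTarget in the identity mapping,
;  selector = the hypervisor CS.)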
353
354
355;;
356; Detour for saving the host DR7 and DR6.
357; esi and rdx must be preserved.
358htg_debug_regs_save:
359DEBUG_S_CHAR('s');
360 mov rax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
361 mov [rdx + r8 + CPUMCPU.Host.dr7], rax
362 xor eax, eax ; clear everything. (bit 12? is read as 1...)
363 mov dr7, rax
364 mov rax, dr6 ; just in case we save the state register too.
365 mov [rdx + r8 + CPUMCPU.Host.dr6], rax
366 ; save host DR0-3?
367 test esi, CPUM_USE_DEBUG_REGS
368 jz near htg_debug_regs_no
369DEBUG_S_CHAR('S');
370 mov rax, dr0
371 mov [rdx + r8 + CPUMCPU.Host.dr0], rax
372 mov rbx, dr1
373 mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
374 mov rcx, dr2
375 mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
376 mov rax, dr3
377 mov [rdx + r8 + CPUMCPU.Host.dr3], rax
378 jmp htg_debug_regs_no
379
380
381 ; We're now on identity mapped pages, in 32-bit compatibility mode.
382BITS 32
383ALIGNCODE(16)
384GLOBALNAME IDEnterTarget
385 DEBUG_CHAR('3')
386
387 ; 2. Deactivate long mode by turning off paging.
388 mov ebx, cr0
389 and ebx, ~X86_CR0_PG
390 mov cr0, ebx
391 DEBUG_CHAR('4')
392
393 ; 3. Load intermediate page table.
394 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
395 mov edx, 0ffffffffh
396 mov cr3, edx
397
398 ; 4. Disable long mode.
399 ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
400 mov ecx, MSR_K6_EFER
401 rdmsr
402 DEBUG_CHAR('5')
403 and eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
404 wrmsr
405 DEBUG_CHAR('6')
406
407%ifndef SWITCHER_TO_PAE
408 ; 4b. Disable PAE.
409 mov eax, cr4
410 and eax, ~X86_CR4_PAE
411 mov cr4, eax
412%else
413%endif
414
415 ; 5. Enable paging.
416 or ebx, X86_CR0_PG
417 mov cr0, ebx
418 jmp short just_a_jump
419just_a_jump:
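 ; (The short jump above is the conventional way to flush the prefetch queue /
 ;  serialize instruction fetch right after toggling CR0.PG.)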
420 DEBUG_CHAR('7')
421
422 ;;
423 ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
424 ;;
425 FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
426 jmp near NAME(JmpGCTarget)
427
428
429 ;;
430 ;; When we arrive at this label we're at the
431 ;; guest code mapping of the switching code.
432 ;;
433ALIGNCODE(16)
434GLOBALNAME JmpGCTarget
435 DEBUG_CHAR('-')
436;mov eax, 0ffff0000h
437;.delay_loop:
438;nop
439;dec eax
440;nop
441;jnz .delay_loop
442 ; load final cr3 and do far jump to load cs.
443 mov cr3, ebp ; ebp set above
444 DEBUG_CHAR('0')
445
446 ;;
447 ;; We're in VMM MMU context and VMM CS is loaded.
448 ;; Setup the rest of the VMM state.
449 ;;
450 ; Load selectors
451 DEBUG_CHAR('1')
452 FIXUP FIX_HYPER_DS, 1
453 mov eax, 0ffffh
454 mov ds, eax
455 mov es, eax
456 xor eax, eax
457 mov gs, eax
458 mov fs, eax
459 ; Load pCpum into EDX
460 FIXUP FIX_GC_CPUM_OFF, 1, 0
461 mov edx, 0ffffffffh
462 ; Activate guest IDT
463 DEBUG_CHAR('2')
464 lidt [edx + CPUM.Hyper.idtr]
465
466 ; Setup stack; use the lss_esp, ss pair for lss
467 DEBUG_CHAR('3')
468 mov eax, [edx + CPUM.Hyper.esp]
469 mov [edx + CPUM.Hyper.lss_esp], eax
470 lss esp, [edx + CPUM.Hyper.lss_esp]
471
472 ; Restore TSS selector; must mark it as not busy before using ltr (!)
473 DEBUG_CHAR('4')
474 FIXUP FIX_GC_TSS_GDTE_DW2, 2
475 and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
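 ; (ltr faults with #GP if the referenced TSS descriptor is already marked
 ;  busy, so the busy bit is cleared in the GDT entry first;
 ;  FIX_GC_TSS_GDTE_DW2 patches in the address of that descriptor's second
 ;  dword.)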
476 DEBUG_CHAR('5')
477 ltr word [edx + CPUM.Hyper.tr]
478 DEBUG_CHAR('6')
479
480 ; Activate the ldt (now we can safely crash).
481 lldt [edx + CPUM.Hyper.ldtr]
482 DEBUG_CHAR('7')
483
484 ;; use flags.
485 mov esi, [edx + CPUM.ulOffCPUMCPU]
486 mov esi, [edx + esi + CPUMCPU.fUseFlags]
487
488 ; debug registers
489 test esi, CPUM_USE_DEBUG_REGS
490 jz htg_debug_regs_guest_no
491 jmp htg_debug_regs_guest
492htg_debug_regs_guest_no:
493 DEBUG_CHAR('9')
494
495 ; General registers.
496 mov ebx, [edx + CPUM.Hyper.ebx]
497 mov ebp, [edx + CPUM.Hyper.ebp]
498 mov esi, [edx + CPUM.Hyper.esi]
499 mov edi, [edx + CPUM.Hyper.edi]
500 push dword [edx + CPUM.Hyper.eflags]
501 popfd
502 DEBUG_CHAR('!')
503
504 ;;
505 ;; Return to the VMM code which either called the switcher or
506 ;; the code set up to run by HC.
507 ;;
508%ifdef DEBUG_STUFF
509 COM32_S_PRINT ';eip='
510 mov eax, [edx + CPUM.Hyper.eip]
511 COM32_S_DWORD_REG eax
512 COM32_S_CHAR ';'
513%endif
514 mov eax, [edx + CPUM.Hyper.eip]
515%ifdef VBOX_WITH_STATISTICS
516 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
517 mov edx, 0ffffffffh
518 STAM32_PROFILE_ADV_STOP edx
519 FIXUP FIX_GC_CPUM_OFF, 1, 0
520 mov edx, 0ffffffffh
521%endif
522 jmp eax
523
524;;
525; Detour for saving host DR0-3 and loading hypervisor debug registers.
526; esi and edx must be preserved.
527htg_debug_regs_guest:
528 DEBUG_S_CHAR('D')
529 DEBUG_S_CHAR('R')
530 DEBUG_S_CHAR('x')
531 ; load hyper DR0-7
532 mov ebx, [edx + CPUM.Hyper.dr]
533 mov dr0, ebx
534 mov ecx, [edx + CPUM.Hyper.dr + 8*1]
535 mov dr1, ecx
536 mov eax, [edx + CPUM.Hyper.dr + 8*2]
537 mov dr2, eax
538 mov ebx, [edx + CPUM.Hyper.dr + 8*3]
539 mov dr3, ebx
540 ;mov eax, [edx + CPUM.Hyper.dr + 8*6]
541 mov ecx, 0ffff0ff0h
542 mov dr6, ecx
543 mov eax, [edx + CPUM.Hyper.dr + 8*7]
544 mov dr7, eax
545 jmp htg_debug_regs_guest_no
546
547ENDPROC vmmR0HostToGuestAsm
548
549
550;;
551; Trampoline for doing a call when starting the hypervisor execution.
552;
553; Push any arguments to the routine.
554; Push the argument frame size (cArg * 4).
555; Push the call target (_cdecl convention).
556; Push the address of this routine.
557;
558;
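; (Concretely: the code below pops the call target, pops the argument frame
;  size into edi, makes a plain cdecl call, and then drops the arguments again
;  with 'add esp, edi' before switching back to the host context.)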
559ALIGNCODE(16)
560BEGINPROC vmmGCCallTrampoline
561%ifdef DEBUG_STUFF
562 COM32_S_CHAR 'c'
563 COM32_S_CHAR 't'
564 COM32_S_CHAR '!'
565%endif
566
567 ; call routine
568 pop eax ; call address
569 mov esi, edx ; save edx
570 pop edi ; argument count.
571%ifdef DEBUG_STUFF
572 COM32_S_PRINT ';eax='
573 COM32_S_DWORD_REG eax
574 COM32_S_CHAR ';'
575%endif
576 call eax ; do call
577 add esp, edi ; cleanup stack
578
579 ; return to the host context.
580 push byte 0 ; eip
581 mov edx, esi ; CPUM pointer
582
583%ifdef DEBUG_STUFF
584 COM32_S_CHAR '`'
585%endif
586 jmp NAME(VMMGCGuestToHostAsm) ; eax = returncode.
587ENDPROC vmmGCCallTrampoline
588
589
590
591;;
592; The C interface.
593;
594ALIGNCODE(16)
595BEGINPROC vmmGCGuestToHost
596%ifdef DEBUG_STUFF
597 push esi
598 COM_NEWLINE
599 DEBUG_CHAR('b')
600 DEBUG_CHAR('a')
601 DEBUG_CHAR('c')
602 DEBUG_CHAR('k')
603 DEBUG_CHAR('!')
604 COM_NEWLINE
605 pop esi
606%endif
607 mov eax, [esp + 4]
608 jmp NAME(VMMGCGuestToHostAsm)
609ENDPROC vmmGCGuestToHost
610
611
612;;
613; VMMGCGuestToHostAsmGuestCtx
614;
615; Switches from Guest Context to Host Context.
616; Of course it's only called from within the GC.
617;
618; @param eax Return code.
619; @param esp + 4 Pointer to CPUMCTXCORE.
620;
621; @remark ASSUMES interrupts disabled.
622;
623ALIGNCODE(16)
624BEGINPROC VMMGCGuestToHostAsmGuestCtx
625 DEBUG_CHAR('~')
626
627%ifdef VBOX_WITH_STATISTICS
628 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
629 mov edx, 0ffffffffh
630 STAM32_PROFILE_ADV_STOP edx
631
632 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
633 mov edx, 0ffffffffh
634 STAM32_PROFILE_ADV_START edx
635
636 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
637 mov edx, 0ffffffffh
638 STAM32_PROFILE_ADV_START edx
639%endif
640
641 ;
642 ; Load the CPUM pointer.
643 ;
644 FIXUP FIX_GC_CPUM_OFF, 1, 0
645 mov edx, 0ffffffffh
646 ; Convert to CPUMCPU pointer
647 add edx, [edx + CPUM.ulOffCPUMCPU]
648
649 ; Skip return address (assumes called!)
650 lea esp, [esp + 4]
651
652 ;
653 ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
654 ;
655 ; general purpose registers
656 push eax ; save return code.
657 mov eax, [esp + 4 + CPUMCTXCORE.edi]
658 mov [edx + CPUMCPU.Guest.edi], eax
659 mov eax, [esp + 4 + CPUMCTXCORE.esi]
660 mov [edx + CPUMCPU.Guest.esi], eax
661 mov eax, [esp + 4 + CPUMCTXCORE.ebp]
662 mov [edx + CPUMCPU.Guest.ebp], eax
663 mov eax, [esp + 4 + CPUMCTXCORE.eax]
664 mov [edx + CPUMCPU.Guest.eax], eax
665 mov eax, [esp + 4 + CPUMCTXCORE.ebx]
666 mov [edx + CPUMCPU.Guest.ebx], eax
667 mov eax, [esp + 4 + CPUMCTXCORE.edx]
668 mov [edx + CPUMCPU.Guest.edx], eax
669 mov eax, [esp + 4 + CPUMCTXCORE.ecx]
670 mov [edx + CPUMCPU.Guest.ecx], eax
671 mov eax, [esp + 4 + CPUMCTXCORE.esp]
672 mov [edx + CPUMCPU.Guest.esp], eax
673 ; selectors
674 mov eax, [esp + 4 + CPUMCTXCORE.ss]
675 mov [edx + CPUMCPU.Guest.ss], eax
676 mov eax, [esp + 4 + CPUMCTXCORE.gs]
677 mov [edx + CPUMCPU.Guest.gs], eax
678 mov eax, [esp + 4 + CPUMCTXCORE.fs]
679 mov [edx + CPUMCPU.Guest.fs], eax
680 mov eax, [esp + 4 + CPUMCTXCORE.es]
681 mov [edx + CPUMCPU.Guest.es], eax
682 mov eax, [esp + 4 + CPUMCTXCORE.ds]
683 mov [edx + CPUMCPU.Guest.ds], eax
684 mov eax, [esp + 4 + CPUMCTXCORE.cs]
685 mov [edx + CPUMCPU.Guest.cs], eax
686 ; flags
687 mov eax, [esp + 4 + CPUMCTXCORE.eflags]
688 mov [edx + CPUMCPU.Guest.eflags], eax
689 ; eip
690 mov eax, [esp + 4 + CPUMCTXCORE.eip]
691 mov [edx + CPUMCPU.Guest.eip], eax
692 ; jump to common worker code.
693 pop eax ; restore return code.
694 ; Load CPUM into edx again
695 sub edx, [edx + CPUMCPU.ulOffCPUM]
696
697 add esp, CPUMCTXCORE_size ; skip CPUMCTXCORE structure
698
699 jmp vmmGCGuestToHostAsm_EIPDone
700ENDPROC VMMGCGuestToHostAsmGuestCtx
701
702
703;;
704; VMMGCGuestToHostAsmHyperCtx
705;
706; This is an alternative entry point which we'll be using
707; when we have the hypervisor context and need to save
708; that before going to the host.
709;
710; This is typically useful when abandoning the hypervisor
711; because of a trap and we want the trap state to be saved.
712;
713; @param eax Return code.
714; @param ecx Points to CPUMCTXCORE.
715; @uses eax,edx,ecx
716ALIGNCODE(16)
717BEGINPROC VMMGCGuestToHostAsmHyperCtx
718 DEBUG_CHAR('#')
719
720%ifdef VBOX_WITH_STATISTICS
721 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
722 mov edx, 0ffffffffh
723 STAM32_PROFILE_ADV_STOP edx
724
725 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
726 mov edx, 0ffffffffh
727 STAM32_PROFILE_ADV_START edx
728
729 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
730 mov edx, 0ffffffffh
731 STAM32_PROFILE_ADV_START edx
732%endif
733
734 ;
735 ; Load the CPUM pointer.
736 ;
737 FIXUP FIX_GC_CPUM_OFF, 1, 0
738 mov edx, 0ffffffffh
739
740 push eax ; save return code.
741 ; general purpose registers
742 mov eax, [ecx + CPUMCTXCORE.edi]
743 mov [edx + CPUM.Hyper.edi], eax
744 mov eax, [ecx + CPUMCTXCORE.esi]
745 mov [edx + CPUM.Hyper.esi], eax
746 mov eax, [ecx + CPUMCTXCORE.ebp]
747 mov [edx + CPUM.Hyper.ebp], eax
748 mov eax, [ecx + CPUMCTXCORE.eax]
749 mov [edx + CPUM.Hyper.eax], eax
750 mov eax, [ecx + CPUMCTXCORE.ebx]
751 mov [edx + CPUM.Hyper.ebx], eax
752 mov eax, [ecx + CPUMCTXCORE.edx]
753 mov [edx + CPUM.Hyper.edx], eax
754 mov eax, [ecx + CPUMCTXCORE.ecx]
755 mov [edx + CPUM.Hyper.ecx], eax
756 mov eax, [ecx + CPUMCTXCORE.esp]
757 mov [edx + CPUM.Hyper.esp], eax
758 ; selectors
759 mov eax, [ecx + CPUMCTXCORE.ss]
760 mov [edx + CPUM.Hyper.ss], eax
761 mov eax, [ecx + CPUMCTXCORE.gs]
762 mov [edx + CPUM.Hyper.gs], eax
763 mov eax, [ecx + CPUMCTXCORE.fs]
764 mov [edx + CPUM.Hyper.fs], eax
765 mov eax, [ecx + CPUMCTXCORE.es]
766 mov [edx + CPUM.Hyper.es], eax
767 mov eax, [ecx + CPUMCTXCORE.ds]
768 mov [edx + CPUM.Hyper.ds], eax
769 mov eax, [ecx + CPUMCTXCORE.cs]
770 mov [edx + CPUM.Hyper.cs], eax
771 ; flags
772 mov eax, [ecx + CPUMCTXCORE.eflags]
773 mov [edx + CPUM.Hyper.eflags], eax
774 ; eip
775 mov eax, [ecx + CPUMCTXCORE.eip]
776 mov [edx + CPUM.Hyper.eip], eax
777 ; jump to common worker code.
778 pop eax ; restore return code.
779 jmp vmmGCGuestToHostAsm_SkipHyperRegs
780
781ENDPROC VMMGCGuestToHostAsmHyperCtx
782
783
784;;
785; VMMGCGuestToHostAsm
786;
787; This is an alternative entry point which we'll be using
788; when we have saved the guest state already or we haven't
789; been messing with the guest at all.
790;
791; @param eax Return code.
792; @uses eax, edx, ecx (or it may use them in the future)
793;
794ALIGNCODE(16)
795BEGINPROC VMMGCGuestToHostAsm
796 DEBUG_CHAR('%')
797
798%ifdef VBOX_WITH_STATISTICS
799 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
800 mov edx, 0ffffffffh
801 STAM32_PROFILE_ADV_STOP edx
802
803 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
804 mov edx, 0ffffffffh
805 STAM32_PROFILE_ADV_START edx
806
807 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
808 mov edx, 0ffffffffh
809 STAM32_PROFILE_ADV_START edx
810%endif
811
812 ;
813 ; Load the CPUM pointer.
814 ;
815 FIXUP FIX_GC_CPUM_OFF, 1, 0
816 mov edx, 0ffffffffh
817
818 pop dword [edx + CPUM.Hyper.eip] ; call return from stack
819 jmp short vmmGCGuestToHostAsm_EIPDone
820
821ALIGNCODE(16)
822vmmGCGuestToHostAsm_EIPDone:
823 ; general registers which we care about.
824 mov dword [edx + CPUM.Hyper.ebx], ebx
825 mov dword [edx + CPUM.Hyper.esi], esi
826 mov dword [edx + CPUM.Hyper.edi], edi
827 mov dword [edx + CPUM.Hyper.ebp], ebp
828 mov dword [edx + CPUM.Hyper.esp], esp
829
830 ; special registers which may change.
831vmmGCGuestToHostAsm_SkipHyperRegs:
832%ifdef STRICT_IF
833 pushf
834 pop ecx
835 test ecx, X86_EFL_IF
836 jz .if_clear_out
837 mov eax, 0c0ffee01h
838 cli
839.if_clear_out:
840%endif
841 ; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
842 sldt [edx + CPUM.Hyper.ldtr]
843
844 ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
845 ; The FPU context is saved before the host state restore, in another branch.
846
847
848 ;;
849 ;; Load Intermediate memory context.
850 ;;
851 mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
852 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
853 mov eax, 0ffffffffh
854 mov cr3, eax
855 DEBUG_CHAR('?')
856
857 ;; We're now in intermediate memory context!
858
859 ;;
860 ;; 0. Jump to identity mapped location
861 ;;
862 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
863 jmp near NAME(IDExitTarget)
864
865 ; We're now on identity mapped pages!
866ALIGNCODE(16)
867GLOBALNAME IDExitTarget
868 DEBUG_CHAR('1')
869
870 ; 1. Disable paging.
871 mov ebx, cr0
872 and ebx, ~X86_CR0_PG
873 mov cr0, ebx
874 DEBUG_CHAR('2')
875
876 ; 2. Enable PAE.
877%ifdef SWITCHER_TO_PAE
878 ; - already enabled
879%else
880 mov ecx, cr4
881 or ecx, X86_CR4_PAE
882 mov cr4, ecx
883%endif
884
885 ; 3. Load long mode intermediate CR3.
886 FIXUP FIX_INTER_AMD64_CR3, 1
887 mov ecx, 0ffffffffh
888 mov cr3, ecx
889 DEBUG_CHAR('3')
890
891 ; 4. Enable long mode.
892 mov ebp, edx
893 mov ecx, MSR_K6_EFER
894 rdmsr
895 or eax, MSR_K6_EFER_LME
896 wrmsr
897 mov edx, ebp
898 DEBUG_CHAR('4')
899
900 ; 5. Enable paging.
901 or ebx, X86_CR0_PG
902 mov cr0, ebx
903 DEBUG_CHAR('5')
904
905 ; Jump from compatibility mode to 64-bit mode.
906 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
907 jmp 0ffffh:0fffffffeh
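 ; (Both the 0ffffh selector and the 0fffffffeh offset are placeholders;
 ;  FIX_ID_FAR32_TO_64BIT_MODE presumably patches in a 64-bit code selector and
 ;  the identity-mapped address of IDExit64Mode.)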
908
909 ;
910 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
911 ; Move on to the HC mapping.
912 ;
913BITS 64
914ALIGNCODE(16)
915NAME(IDExit64Mode):
916 DEBUG_CHAR('6')
917 jmp [NAME(pHCExitTarget) wrt rip]
918
919; 64-bit jump target
920NAME(pHCExitTarget):
921FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
922dq 0ffffffffffffffffh
923
924; 64-bit pCpum address.
925NAME(pCpumHC):
926FIXUP FIX_HC_64BIT_CPUM, 0
927dq 0ffffffffffffffffh
928
929 ;
930 ; When we arrive here we're at the host context
931 ; mapping of the switcher code.
932 ;
933ALIGNCODE(16)
934GLOBALNAME HCExitTarget
935 DEBUG_CHAR('9')
936
937 ; Clear high dword of the CPUM pointer
938 and rdx, 0ffffffffh
939
940 ; Load the CPUMCPU offset.
941 mov r8, [rdx + CPUM.ulOffCPUMCPU]
942
943 ; load final cr3
944 mov rsi, [rdx + r8 + CPUMCPU.Host.cr3]
945 mov cr3, rsi
946 DEBUG_CHAR('@')
947
948 ;;
949 ;; Restore Host context.
950 ;;
951 ; Load CPUM pointer into edx
952 mov rdx, [NAME(pCpumHC) wrt rip]
953 ; Load the CPUMCPU offset.
954 mov r8, [rdx + CPUM.ulOffCPUMCPU]
955
956 ; activate host gdt and idt
957 lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
958 DEBUG_CHAR('0')
959 lidt [rdx + r8 + CPUMCPU.Host.idtr]
960 DEBUG_CHAR('1')
961 ; Restore TSS selector; must mark it as not busy before using ltr (!)
962%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
963 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
964 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
965 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
966 and dword [rax + 4], ~0200h ; clear busy flag (2nd type2 bit)
967 ltr word [rdx + r8 + CPUMCPU.Host.tr]
968%else
969 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
970 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
971 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
972 mov ecx, [rax + 4] ; ecx <- 2nd descriptor dword
973 mov ebx, ecx ; save original value
974 and ecx, ~0200h ; clear busy flag (2nd type2 bit)
975 mov [rax + 4], ecx ; not using xchg here is paranoia..
976 ltr word [rdx + r8 + CPUMCPU.Host.tr]
977 xchg [rax + 4], ebx ; using xchg is paranoia too...
978%endif
979 ; activate ldt
980 DEBUG_CHAR('2')
981 lldt [rdx + r8 + CPUMCPU.Host.ldtr]
982 ; Restore segment registers
983 mov eax, [rdx + r8 + CPUMCPU.Host.ds]
984 mov ds, eax
985 mov eax, [rdx + r8 + CPUMCPU.Host.es]
986 mov es, eax
987 mov eax, [rdx + r8 + CPUMCPU.Host.fs]
988 mov fs, eax
989 mov eax, [rdx + r8 + CPUMCPU.Host.gs]
990 mov gs, eax
991 ; restore stack
992 mov eax, [rdx + r8 + CPUMCPU.Host.ss]
993 mov ss, eax
994 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
995
996 FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
997 ; restore MSR_IA32_SYSENTER_CS register.
998 mov ecx, MSR_IA32_SYSENTER_CS
999 mov eax, [rdx + r8 + CPUMCPU.Host.SysEnter.cs]
1000 mov ebx, [rdx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
1001 mov rbx, rdx ; save/load edx
1002 wrmsr ; MSR[ecx] <- edx:eax
1003 mov rdx, rbx ; restore edx
1004 jmp short gth_sysenter_no
1005
1006ALIGNCODE(16)
1007gth_sysenter_no:
1008
1009 ;; @todo AMD syscall
1010
1011 ; Restore FPU if guest has used it.
1012 ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
1013 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
1014 test esi, CPUM_USED_FPU
1015 jz short gth_fpu_no
1016 mov rcx, cr0
1017 and rcx, ~(X86_CR0_TS | X86_CR0_EM)
1018 mov cr0, rcx
1019
1020 fxsave [rdx + r8 + CPUMCPU.Guest.fpu]
1021 fxrstor [rdx + r8 + CPUMCPU.Host.fpu]
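 ; (TS and EM were cleared just above so fxsave/fxrstor cannot themselves raise
 ;  #NM; the guest FPU state is saved and the host state restored only when
 ;  CPUM_USED_FPU is set.)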
1022 jmp short gth_fpu_no
1023
1024ALIGNCODE(16)
1025gth_fpu_no:
1026
1027 ; Control registers.
1028 ; Would've liked to have these higher up in case of crashes, but
1029 ; the fpu stuff must be done before we restore cr0.
1030 mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
1031 mov cr4, rcx
1032 mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
1033 mov cr0, rcx
1034 ;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1035 ;mov cr2, rcx
1036
1037 ; restore debug registers (if modified) (esi must still be fUseFlags!)
1038 ; (must be done after cr4 reload because of the debug extension.)
1039 test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
1040 jz short gth_debug_regs_no
1041 jmp gth_debug_regs_restore
1042gth_debug_regs_no:
1043
1044 ; Restore MSRs
1045 mov rbx, rdx
1046 mov ecx, MSR_K8_FS_BASE
1047 mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
1048 mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
1049 wrmsr
1050 mov ecx, MSR_K8_GS_BASE
1051 mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
1052 mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
1053 wrmsr
1054 mov ecx, MSR_K6_EFER
1055 mov eax, [rbx + r8 + CPUMCPU.Host.efer]
1056 mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
1057 wrmsr
1058 mov rdx, rbx
1059
1060
1061 ; restore general registers.
1062 mov eax, edi ; restore return code. eax = return code !!
1063 ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
1064 mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
1065 ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
1066 ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
1067 mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
1068 mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
1069 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1070 mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
1071 ; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
1072 ; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
1073 mov r10, [rdx + r8 + CPUMCPU.Host.r10]
1074 mov r11, [rdx + r8 + CPUMCPU.Host.r11]
1075 mov r12, [rdx + r8 + CPUMCPU.Host.r12]
1076 mov r13, [rdx + r8 + CPUMCPU.Host.r13]
1077 mov r14, [rdx + r8 + CPUMCPU.Host.r14]
1078 mov r15, [rdx + r8 + CPUMCPU.Host.r15]
1079
1080 ; finally restore flags. (probably not required)
1081 push qword [rdx + r8 + CPUMCPU.Host.rflags]
1082 popf
1083
1084
1085%ifdef DEBUG_STUFF
1086 COM64_S_CHAR '4'
1087%endif
1088 db 048h
1089 retf
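; (The 048h byte is a REX.W prefix, turning 'retf' into a 64-bit far return
;  that pops the 8-byte return RIP and CS pushed by vmmR0HostToGuest before it
;  called the worker.)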
1090
1091;;
1092; Detour for restoring the host debug registers.
1093; edx and edi must be preserved.
1094gth_debug_regs_restore:
1095 DEBUG_S_CHAR('d')
1096 xor eax, eax
1097 mov dr7, rax ; paranoia or not?
1098 test esi, CPUM_USE_DEBUG_REGS
1099 jz short gth_debug_regs_dr7
1100 DEBUG_S_CHAR('r')
1101 mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
1102 mov dr0, rax
1103 mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
1104 mov dr1, rbx
1105 mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
1106 mov dr2, rcx
1107 mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
1108 mov dr3, rax
1109gth_debug_regs_dr7:
1110 mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
1111 mov dr6, rbx
1112 mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
1113 mov dr7, rcx
1114 jmp gth_debug_regs_no
1115
1116ENDPROC VMMGCGuestToHostAsm
1117
1118
1119GLOBALNAME End
1120;
1121; The description string (in the text section).
1122;
1123NAME(Description):
1124 db SWITCHER_DESCRIPTION
1125 db 0
1126
1127extern NAME(Relocate)
1128
1129;
1130; End the fixup records.
1131;
1132BEGINDATA
1133 db FIX_THE_END ; final entry.
1134GLOBALNAME FixupsEnd
1135
1136;;
1137; The switcher definition structure.
1138ALIGNDATA(16)
1139GLOBALNAME Def
1140 istruc VMMSWITCHERDEF
1141 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1142 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1143 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1144 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1145 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1146 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1147 at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
1148 at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
1149 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
1150 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
1151 at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
1152 at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
1153 ; disasm help
1154 at VMMSWITCHERDEF.offHCCode0, dd 0
1155 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1156 at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
1157 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
1158 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1159 at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
1160 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1161 at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
1162 at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
1163 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
1164
1165 iend
1166