VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm@ 14301

Last change on this file since 14301 was 14301, checked in by vboxsync, 16 years ago

Synced some (inactive) new paging code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 18.5 KB
Line 
1; VMM - World Switchers, 32Bit to AMD64.
2;
3
4;
5; Copyright (C) 2006-2007 Sun Microsystems, Inc.
6;
7; This file is part of VirtualBox Open Source Edition (OSE), as
8; available from http://www.virtualbox.org. This file is free software;
9; you can redistribute it and/or modify it under the terms of the GNU
10; General Public License (GPL) as published by the Free Software
11; Foundation, in version 2 as it comes in the "COPYING" file of the
12; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
13; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
14;
15; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
16; Clara, CA 95054 USA or visit http://www.sun.com if you need
17; additional information or have any questions.
18;
19
20;%define DEBUG_STUFF 1
21;%define STRICT_IF 1
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; Prefix all names.
27%define NAME_OVERLOAD(name) vmmR3Switcher32BitToAMD64_ %+ name
28
29
30;*******************************************************************************
31;* Header Files *
32;*******************************************************************************
33%include "VBox/asmdefs.mac"
34%include "VBox/x86.mac"
35%include "VBox/cpum.mac"
36%include "VBox/stam.mac"
37%include "VBox/vm.mac"
38%include "CPUMInternal.mac"
39%include "VMMSwitcher/VMMSwitcher.mac"
40
41
42;
43; Start the fixup records
44; We collect the fixups in the .data section as we go along
45; It is therefore VITAL that no-one is using the .data section
46; for anything else between 'Start' and 'End'.
47;
48BEGINDATA
; 'Fixups' marks the first fixup record; the loader walks from here to
; 'FixupsEnd' when relocating the switcher image.
49GLOBALNAME Fixups
50
51
52
53BEGINCODE
; 'Start' is the base label: every FIXUP offset below is expressed as
; "label - NAME(Start)", i.e. relative to the first code byte.
54GLOBALNAME Start
55
; The switcher is entered from the 32-bit host context.
56BITS 32
57
58;;
59; The C interface.
60;
;;
; The 32-bit host-context C entry point of the switcher.
; The 0ffffffffh immediates below are placeholders; the FIXUP records
; immediately above them cause the real addresses to be patched in when
; the switcher image is relocated.
;
61BEGINPROC vmmR0HostToGuest
62 %ifdef DEBUG_STUFF
63 COM32_S_NEWLINE
64 COM32_S_CHAR '^'
65 %endif
66
67 %ifdef VBOX_WITH_STATISTICS
68 ;
69 ; Switcher stats.
70 ;
71 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
72 mov edx, 0ffffffffh
73 STAM_PROFILE_ADV_START edx
74 %endif
75
76 ;
77 ; Call worker.
78 ;
79 FIXUP FIX_HC_CPUM_OFF, 1, 0
80 mov edx, 0ffffffffh ; edx <- host address of the CPUM structure (patched).
81 push cs ; allow for far return and restore cs correctly.
82 call NAME(vmmR0HostToGuestAsm)
83
84%ifdef VBOX_WITH_STATISTICS
85 ;
86 ; Switcher stats.
87 ;
88 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
89 mov edx, 0ffffffffh
90 STAM_PROFILE_ADV_STOP edx
91%endif
92
93 ret
94
95ENDPROC vmmR0HostToGuest
96
97; *****************************************************************************
98; vmmR0HostToGuestAsm
99;
100; Phase one of the switch from host to guest context (host MMU context)
101;
102; INPUT:
103; - edx virtual address of CPUM structure (valid in host context)
104;
105; USES/DESTROYS:
106; - eax, ecx, edx, esi
107;
108; ASSUMPTION:
109; - current CS and DS selectors are wide open
110;
111; *****************************************************************************
112ALIGNCODE(16)
113BEGINPROC vmmR0HostToGuestAsm
114 ;;
115 ;; Save CPU host context
116 ;; Skip eax, edx and ecx as these are not preserved over calls.
117 ;;
118 CPUMCPU_FROM_CPUM(edx)
119 ; general registers.
120 mov [edx + CPUMCPU.Host.ebx], ebx
121 mov [edx + CPUMCPU.Host.edi], edi
122 mov [edx + CPUMCPU.Host.esi], esi
123 mov [edx + CPUMCPU.Host.esp], esp
124 mov [edx + CPUMCPU.Host.ebp], ebp
125 ; selectors.
126 mov [edx + CPUMCPU.Host.ds], ds
127 mov [edx + CPUMCPU.Host.es], es
128 mov [edx + CPUMCPU.Host.fs], fs
129 mov [edx + CPUMCPU.Host.gs], gs
130 mov [edx + CPUMCPU.Host.ss], ss
131 ; special registers.
132 sldt [edx + CPUMCPU.Host.ldtr]
133 sidt [edx + CPUMCPU.Host.idtr]
134 sgdt [edx + CPUMCPU.Host.gdtr]
135 str [edx + CPUMCPU.Host.tr]
136 ; flags
137 pushfd
138 pop dword [edx + CPUMCPU.Host.eflags]
139
; The fixup below overwrites the following save sequence with a jmp to
; htg_no_sysenter when the host does not use sysenter.
140 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
141 ; save MSR_IA32_SYSENTER_CS register.
142 mov ecx, MSR_IA32_SYSENTER_CS
143 mov ebx, edx ; save edx
144 rdmsr ; edx:eax <- MSR[ecx]
145 mov [ebx + CPUMCPU.Host.SysEnter.cs], eax
146 mov [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
147 xor eax, eax ; load 0:0 to cause #GP upon sysenter
148 xor edx, edx
149 wrmsr
150 xchg ebx, edx ; restore edx
151 jmp short htg_no_sysenter
152
153ALIGNCODE(16)
154htg_no_sysenter:
155
156 ;; handle use flags.
157 mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
158 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
159 mov [edx + CPUMCPU.fUseFlags], esi
160
161 ; debug registers.
162 test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
163 jz htg_debug_regs_no
164 jmp htg_debug_regs_save_dr7and6
165htg_debug_regs_no:
166
167 ; control registers.
168 mov eax, cr0
169 mov [edx + CPUMCPU.Host.cr0], eax
170 ;mov eax, cr2 ; assume host os don't stuff things in cr2. (safe)
171 ;mov [edx + CPUMCPU.Host.cr2], eax
172 mov eax, cr3
173 mov [edx + CPUMCPU.Host.cr3], eax
174 mov eax, cr4
175 mov [edx + CPUMCPU.Host.cr4], eax
176
177 ;;
178 ;; Load Intermediate memory context.
179 ;;
180 FIXUP FIX_INTER_32BIT_CR3, 1
181 mov eax, 0ffffffffh ; placeholder; patched with the 32-bit intermediate CR3.
182 mov cr3, eax
183 DEBUG_CHAR('?')
184
185 ;;
186 ;; Jump to identity mapped location
187 ;;
188 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
189 jmp near NAME(IDEnterTarget)
190
191
192 ; We're now on identity mapped pages!
193ALIGNCODE(16)
; The canonical long-mode entry sequence: paging off -> PAE on ->
; long-mode CR3 -> EFER.LME -> paging on -> far jump to a 64-bit CS.
194GLOBALNAME IDEnterTarget
195 DEBUG_CHAR('2')
196
197 ; 1. Disable paging.
198 mov ebx, cr0
199 and ebx, ~X86_CR0_PG
200 mov cr0, ebx
201 DEBUG_CHAR('2')
202
203 ; 2. Enable PAE.
204 mov ecx, cr4
205 or ecx, X86_CR4_PAE
206 mov cr4, ecx
207
208 ; 3. Load long mode intermediate CR3.
209 FIXUP FIX_INTER_AMD64_CR3, 1
210 mov ecx, 0ffffffffh
211 mov cr3, ecx
212 DEBUG_CHAR('3')
213
214 ; 4. Enable long mode.
; rdmsr/wrmsr clobber edx, so park the CPUM pointer in ebp across them.
215 mov ebp, edx
216 mov ecx, MSR_K6_EFER
217 rdmsr
218 or eax, MSR_K6_EFER_LME
219 wrmsr
220 mov edx, ebp
221 DEBUG_CHAR('4')
222
223 ; 5. Enable paging.
224 or ebx, X86_CR0_PG
225 mov cr0, ebx
226 DEBUG_CHAR('5')
227
228 ; Jump from compatibility mode to 64-bit mode.
; The selector:offset placeholder below is patched by the fixup so we land
; in IDEnter64Mode with a 64-bit code segment loaded.
229 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
230 jmp 0ffffh:0fffffffeh
231
232 ;
233 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
234BITS 64
235ALIGNCODE(16)
236NAME(IDEnter64Mode):
237 DEBUG_CHAR('6')
238 jmp [NAME(pICEnterTarget) wrt rip]
239
; Data quadwords embedded in the code stream; both are patched at
; relocation time by the FIX_HC_64BIT* records.
240; 64-bit jump target
241NAME(pICEnterTarget):
242FIXUP FIX_HC_64BIT, 0, NAME(ICEnterTarget) - NAME(Start)
243dq 0ffffffffffffffffh
244
245; 64-bit pCpum address.
246NAME(pCpumIC):
247FIXUP FIX_HC_64BIT_CPUM, 0
248dq 0ffffffffffffffffh
249
250 ;
251 ; When we arrive here we're at the 64 bit mode of intermediate context
252 ;
253ALIGNCODE(16)
254GLOBALNAME ICEnterTarget
255 ; at this moment we're in 64-bit mode. let's write something to CPUM
256 ; Load CPUM pointer into rdx
257 mov rdx, [NAME(pCpumIC) wrt rip]
258 ; Load the CPUMCPU offset.
259 mov r8, [rdx + CPUM.ulOffCPUMCPU]
260
; NOTE(review): this just writes a test pattern into CPUMCPU.uPadding and
; switches straight back with a dummy return code -- the 64-bit guest-side
; of this switcher is clearly still a stub/experimental.
261 mov rsi, 012345678h
262 mov [rdx + r8 + CPUMCPU.uPadding], rsi
263
264 ; now let's switch back
265 mov rax, 0666h
266 jmp NAME(VMMGCGuestToHostAsm) ; rax = returncode.
267
268BITS 32
269;;
270; Detour for saving the host DR7 and DR6.
271; esi and edx must be preserved.
272htg_debug_regs_save_dr7and6:
273DEBUG_S_CHAR('s');
274 mov eax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
275 mov [edx + CPUMCPU.Host.dr7], eax
276 xor eax, eax ; clear everything. (bit 12? is read as 1...)
277 mov dr7, eax
278 mov eax, dr6 ; just in case we save the state register too.
279 mov [edx + CPUMCPU.Host.dr6], eax
280 jmp htg_debug_regs_no
281
282
283BITS 64
284ENDPROC vmmR0HostToGuestAsm
285
286
287;;
288; Trampoline for doing a call when starting the hypervisor execution.
289;
290; Push any arguments to the routine.
291; Push the argument frame size (cArg * 4).
292; Push the call target (_cdecl convention).
293; Push the address of this routine.
294;
295;
296ALIGNCODE(16)
297BEGINPROC vmmGCCallTrampoline
298%ifdef DEBUG_STUFF
299 COM32_S_CHAR 'c'
300 COM32_S_CHAR 't'
301 COM32_S_CHAR '!'
302%endif
; Not implemented for this switcher: trap into the debugger if ever reached.
303 int3
304ENDPROC vmmGCCallTrampoline
305
306
307BITS 64
308;;
309; The C interface.
310;
311ALIGNCODE(16)
312BEGINPROC vmmGCGuestToHost
313%ifdef DEBUG_STUFF
; NOTE(review): 32-bit push/pop esi and COM32-era debug macros under BITS 64 --
; 'push esi' is not encodable in 64-bit mode; this DEBUG_STUFF block looks
; stale (copied from a 32-bit switcher) and would not assemble if enabled.
314 push esi
315 COM_NEWLINE
316 DEBUG_CHAR('b')
317 DEBUG_CHAR('a')
318 DEBUG_CHAR('c')
319 DEBUG_CHAR('k')
320 DEBUG_CHAR('!')
321 COM_NEWLINE
322 pop esi
323%endif
; Not implemented for this switcher: trap into the debugger if ever reached.
324 int3
325ENDPROC vmmGCGuestToHost
326
327;;
328; VMMGCGuestToHostAsm
329;
330; This is an alternative entry point which we'll be using
331; when we have saved the guest state already or we haven't
332; been messing with the guest at all.
333;
334; @param eax Return code.
335; @uses eax, edx, ecx (or it may use them in the future)
336;
337ALIGNCODE(16)
338BEGINPROC VMMGCGuestToHostAsm
; On entry (from ICEnterTarget) rdx holds the 64-bit CPUM pointer.
339 CPUMCPU_FROM_CPUM(rdx)
340 FIXUP FIX_INTER_AMD64_CR3, 1
341 mov rax, 0ffffffffh ; placeholder; patched with the AMD64 intermediate CR3.
342 mov cr3, rax
343 ;; We're now in intermediate memory context!
344
345 ;;
346 ;; Jump to identity mapped location
347 ;;
348 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
349 jmp near NAME(IDExitTarget)
350
351 ; We're now on identity mapped pages!
352ALIGNCODE(16)
; Reverse of IDEnterTarget: paging off -> 32-bit CR3 -> EFER.LME off ->
; PAE off -> paging on, leaving us in plain 32-bit paged mode.
353GLOBALNAME IDExitTarget
354BITS 32
355 DEBUG_CHAR('1')
356
357 ; 1. Deactivate long mode by turning off paging.
358 mov ebx, cr0
359 and ebx, ~X86_CR0_PG
360 mov cr0, ebx
361 DEBUG_CHAR('2')
362
363 ; 2. Load 32-bit intermediate page table.
364 FIXUP FIX_INTER_32BIT_CR3, 1
365 mov edx, 0ffffffffh
366 mov cr3, edx
367 DEBUG_CHAR('3')
368
369 ; 3. Disable long mode.
370 mov ecx, MSR_K6_EFER
371 rdmsr
372 DEBUG_CHAR('5')
373 and eax, ~(MSR_K6_EFER_LME)
374 wrmsr
375 DEBUG_CHAR('6')
376
377 ; 3b. Disable PAE.
378 mov eax, cr4
379 and eax, ~X86_CR4_PAE
380 mov cr4, eax
381 DEBUG_CHAR('7')
382
383 ; 4. Enable paging.
384 or ebx, X86_CR0_PG
385 mov cr0, ebx
; flush the prefetch queue / serialize after the paging-mode change.
386 jmp short just_a_jump
387just_a_jump:
388 DEBUG_CHAR('8')
389
390 ;;
391 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
392 ;;
393 FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
394 jmp near NAME(ICExitTarget)
395
396 ;;
397 ;; When we arrive at this label we're at the
398 ;; intermediate mapping of the switching code.
399 ;;
400BITS 32
401ALIGNCODE(16)
402GLOBALNAME ICExitTarget
403 DEBUG_CHAR('8')
404 FIXUP FIX_HC_CPUM_OFF, 1, 0
405 mov edx, 0ffffffffh ; edx <- host address of the CPUM structure (patched).
406 CPUMCPU_FROM_CPUM(edx)
407 mov esi, [edx + CPUMCPU.Host.cr3]
408 mov cr3, esi
409
410 ;; now we're in host memory context, let's restore regs
411
412 ; activate host gdt and idt
413 lgdt [edx + CPUMCPU.Host.gdtr]
414 DEBUG_CHAR('0')
415 lidt [edx + CPUMCPU.Host.idtr]
416 DEBUG_CHAR('1')
417
418 ; Restore TSS selector; must mark it as not busy before using ltr (!)
419 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
420 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
421 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
422 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
423 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
424 ltr word [edx + CPUMCPU.Host.tr]
425
426 ; activate ldt
427 DEBUG_CHAR('2')
428 lldt [edx + CPUMCPU.Host.ldtr]
429 ; Restore segment registers
430 mov eax, [edx + CPUMCPU.Host.ds]
431 mov ds, eax
432 mov eax, [edx + CPUMCPU.Host.es]
433 mov es, eax
434 mov eax, [edx + CPUMCPU.Host.fs]
435 mov fs, eax
436 mov eax, [edx + CPUMCPU.Host.gs]
437 mov gs, eax
438 ; restore stack
439 lss esp, [edx + CPUMCPU.Host.esp]
440
441 FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
442
443 ; restore MSR_IA32_SYSENTER_CS register.
444 mov ecx, MSR_IA32_SYSENTER_CS
445 mov eax, [edx + CPUMCPU.Host.SysEnter.cs]
446 mov ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
447 xchg edx, ebx ; save/load edx
448 wrmsr ; MSR[ecx] <- edx:eax
449 xchg edx, ebx ; restore edx
450 jmp short gth_sysenter_no
451
452ALIGNCODE(16)
453gth_sysenter_no:
454
455 ;; @todo AMD syscall
456
457 ; Restore FPU if guest has used it.
458 ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
459 mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
460 test esi, CPUM_USED_FPU
461 jz near gth_fpu_no
; clear TS/EM so the fpu save/restore below cannot raise #NM.
462 mov ecx, cr0
463 and ecx, ~(X86_CR0_TS | X86_CR0_EM)
464 mov cr0, ecx
465
466 FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
467 fxsave [edx + CPUMCPU.Guest.fpu]
468 fxrstor [edx + CPUMCPU.Host.fpu]
469 jmp near gth_fpu_no
470
; Legacy path for CPUs without fxsave: fnsave/frstor, with care taken not to
; re-raise pending exceptions recorded in the saved status word.
471gth_no_fxsave:
472 fnsave [edx + CPUMCPU.Guest.fpu]
473 mov eax, [edx + CPUMCPU.Host.fpu] ; control word
474 not eax ; 1 means exception ignored (6 LS bits)
475 and eax, byte 03Fh ; 6 LS bits only
476 test eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
477 jz gth_no_exceptions_pending
478
479 ; technically incorrect, but we certainly don't want any exceptions now!!
480 and dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh
481
482gth_no_exceptions_pending:
483 frstor [edx + CPUMCPU.Host.fpu]
484 jmp short gth_fpu_no
485
486ALIGNCODE(16)
487gth_fpu_no:
488
489 ; Control registers.
490 ; Would've liked to have these higher up in case of crashes, but
491 ; the fpu stuff must be done before we restore cr0.
492 mov ecx, [edx + CPUMCPU.Host.cr4]
493 mov cr4, ecx
494 mov ecx, [edx + CPUMCPU.Host.cr0]
495 mov cr0, ecx
496 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
497 ;mov cr2, ecx
498
499 ; restore debug registers (if modified) (esi must still be fUseFlags!)
500 ; (must be done after cr4 reload because of the debug extension.)
501 test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
502 jz short gth_debug_regs_no
503 jmp gth_debug_regs_restore
504gth_debug_regs_no:
505
506 ; restore general registers.
; NOTE(review): assumes edi holds the return code here, yet the header says
; eax and the 64-bit entry sets rax -- confirm where edi is loaded.
507 mov eax, edi ; restore return code. eax = return code !!
508 mov edi, [edx + CPUMCPU.Host.edi]
509 mov esi, [edx + CPUMCPU.Host.esi]
510 mov ebx, [edx + CPUMCPU.Host.ebx]
511 mov ebp, [edx + CPUMCPU.Host.ebp]
512 push dword [edx + CPUMCPU.Host.eflags]
513 popfd
514
515%ifdef DEBUG_STUFF
516; COM_S_CHAR '4'
517%endif
; far return, matching the 'push cs' done in vmmR0HostToGuest before the call.
518 retf
519
520;;
521; Detour for restoring the host debug registers.
522; edx and edi must be preserved.
523gth_debug_regs_restore:
524 DEBUG_S_CHAR('d')
525 xor eax, eax
526 mov dr7, eax ; paranoia or not?
527 test esi, CPUM_USE_DEBUG_REGS
528 jz short gth_debug_regs_dr7
529 DEBUG_S_CHAR('r')
530 mov eax, [edx + CPUMCPU.Host.dr0]
531 mov dr0, eax
532 mov ebx, [edx + CPUMCPU.Host.dr1]
533 mov dr1, ebx
534 mov ecx, [edx + CPUMCPU.Host.dr2]
535 mov dr2, ecx
536 mov eax, [edx + CPUMCPU.Host.dr3]
537 mov dr3, eax
538gth_debug_regs_dr7:
539 mov ebx, [edx + CPUMCPU.Host.dr6]
540 mov dr6, ebx
541 mov ecx, [edx + CPUMCPU.Host.dr7]
542 mov dr7, ecx
543 jmp gth_debug_regs_no
544
545ENDPROC VMMGCGuestToHostAsm
546
547;;
548; VMMGCGuestToHostAsmHyperCtx
549;
550; This is an alternative entry point which we'll be using
551; when we have the hypervisor context and need to save
552; that before going to the host.
553;
554; This is typically useful when abandoning the hypervisor
555; because of a trap and want the trap state to be saved.
556;
557; @param eax Return code.
558; @param ecx Points to CPUMCTXCORE.
559; @uses eax,edx,ecx
560ALIGNCODE(16)
561BEGINPROC VMMGCGuestToHostAsmHyperCtx
; Not implemented for this switcher: trap into the debugger if ever reached.
562 int3
563
564;;
565; VMMGCGuestToHostAsmGuestCtx
566;
567; Switches from Guest Context to Host Context.
568; Of course it's only called from within the GC.
569;
570; @param eax Return code.
571; @param esp + 4 Pointer to CPUMCTXCORE.
572;
573; @remark ASSUMES interrupts disabled.
574;
575ALIGNCODE(16)
576BEGINPROC VMMGCGuestToHostAsmGuestCtx
; Not implemented for this switcher: trap into the debugger if ever reached.
577 int3
578
; 'End' marks one past the last switcher code byte; cbCode below is End-Start.
579GLOBALNAME End
580;
581; The description string (in the text section).
582;
583NAME(Description):
584 db "32-bits to/from AMD64", 0
585
586extern NAME(Relocate)
587
588;
589; End the fixup records.
590;
591BEGINDATA
592 db FIX_THE_END ; final entry.
593GLOBALNAME FixupsEnd
594
;;
; The switcher definition structure consumed by the VMM switcher framework:
; entry-point offsets plus the HC/ID/GC code-range table used when
; relocating and disassembling the image.
597ALIGNDATA(16)
598GLOBALNAME Def
599 istruc VMMSWITCHERDEF
600 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
601 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
602 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
603 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
604 at VMMSWITCHERDEF.enmType, dd VMMSWITCHER_32_TO_AMD64
605 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
606 at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
607 at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
608 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
609 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
610 at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
611 at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
612 ; disasm help
613 at VMMSWITCHERDEF.offHCCode0, dd 0
614 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
615 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
616 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
617 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
618 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
619 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
620 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(IDExitTarget)
621 at VMMSWITCHERDEF.offGCCode, dd NAME(ICEnterTarget) - NAME(Start)
622 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(ICEnterTarget)
623
624 iend
625
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette