VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm @ 193

Last change on this file since 193 was 193, checked in by vboxsync, 18 years ago:

Export AMD64ToPAE.asm
; $Id: AMD64ToPAE.asm 193 2007-01-19 19:42:32Z vboxsync $
;; @file
; VMM - World Switchers, AMD64 to PAE.
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*   Defined Constants And Macros                                              *
;*******************************************************************************
;; Prefix all names.
%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64ToPAE_ %+ name


;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
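; (Editor's note, inferred from the FIXUP usage below: each FIXUP invocation
; appears to emit a small record - type, code offset, optional operand - into
; this .data blob, which NAME(Relocate) walks at relocation time to patch the
; 0ffffffffh placeholder immediates in the code. The list is terminated by
; the FIX_THE_END byte emitted just before FixupsEnd.)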
BEGINDATA
GLOBALNAME Fixups




BEGINCODE
GLOBALNAME Start

BITS 64

;;
; The C interface.
;
; @param    pVM         GCC: rdi  MSC: rcx  The VM handle.
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM64_S_NEWLINE
    COM64_S_CHAR '^'
%endif

    ;
    ; Make r9 = pVM and rdx = pCpum.
    ; rax, rcx and r8 are scratch hereafter.
    ;
%ifdef __WIN64__
    mov     r9, rcx
%else
    mov     r9, rdi
%endif
    lea     rdx, [r9 + VM.cpum]

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_START r8
%endif

    ;
    ; Call worker (far return).
    ;
    mov     eax, cs
    push    rax
    call    NAME(vmmR0HostToGuestAsm)
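    ; (The mov eax, cs / push rax above plus the near call leave a far-return
    ; frame - CS, then RIP - on the stack, so the switcher can come back
    ; through the 64-bit retf at the end of HCExitTarget after all the mode
    ; switching in between.)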

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_STOP r8
%endif

    ret
ENDPROC vmmR0HostToGuest



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    ; mov  [rdx + CPUM.Host.rax], rax - scratch
    mov     [rdx + CPUM.Host.rbx], rbx
    ; mov  [rdx + CPUM.Host.rcx], rcx - scratch
    ; mov  [rdx + CPUM.Host.rdx], rdx - scratch
    mov     [rdx + CPUM.Host.rdi], rdi
    mov     [rdx + CPUM.Host.rsi], rsi
    mov     [rdx + CPUM.Host.rsp], rsp
    mov     [rdx + CPUM.Host.rbp], rbp
    ; mov  [rdx + CPUM.Host.r8 ], r8  - scratch
    ; mov  [rdx + CPUM.Host.r9 ], r9  - scratch
    mov     [rdx + CPUM.Host.r10], r10
    mov     [rdx + CPUM.Host.r11], r11
    mov     [rdx + CPUM.Host.r12], r12
    mov     [rdx + CPUM.Host.r13], r13
    mov     [rdx + CPUM.Host.r14], r14
    mov     [rdx + CPUM.Host.r15], r15
    ; selectors.
    mov     [rdx + CPUM.Host.ds], ds
    mov     [rdx + CPUM.Host.es], es
    mov     [rdx + CPUM.Host.fs], fs
    mov     [rdx + CPUM.Host.gs], gs
    mov     [rdx + CPUM.Host.ss], ss
    ; MSRs
    mov     rbx, rdx                    ; save rdx (pCpum); rdmsr trashes edx.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.FSbase], eax
    mov     [rbx + CPUM.Host.FSbase + 4], edx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.GSbase], eax
    mov     [rbx + CPUM.Host.GSbase + 4], edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [rbx + CPUM.Host.efer], eax
    mov     [rbx + CPUM.Host.efer + 4], edx
    mov     rdx, rbx                    ; restore rdx
    ; special registers.
    sldt    [rdx + CPUM.Host.ldtr]
    sidt    [rdx + CPUM.Host.idtr]
    sgdt    [rdx + CPUM.Host.gdtr]
    str     [rdx + CPUM.Host.tr]        ; yasm BUG, generates sldt. YASMCHECK!
    ; flags
    pushf
    pop     qword [rdx + CPUM.Host.rflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if the host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     rbx, rdx                    ; save rdx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [rbx + CPUM.Host.SysEnter.cs], eax
    mov     [rbx + CPUM.Host.SysEnter.cs + 4], edx
    xor     rax, rax                    ; load 0:0 to cause #GP upon sysenter
    xor     rdx, rdx
    wrmsr
    mov     rdx, rbx                    ; restore rdx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [rdx + CPUM.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz      htg_debug_regs_no
    jmp     htg_debug_regs_save_dr7and6
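    ; (The detour sits after the end of the main path and jumps back to
    ; htg_debug_regs_no when done; keeping it out of line keeps the common
    ; no-debug case a straight fall-through.)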
htg_debug_regs_no:
    DEBUG_CHAR('a')                     ; trashes esi

    ; control registers.
    mov     rax, cr0
    mov     [rdx + CPUM.Host.cr0], rax
    ;mov    rax, cr2                    ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov    [rdx + CPUM.Host.cr2], rax
    mov     rax, cr3
    mov     [rdx + CPUM.Host.cr3], rax
    mov     rax, cr4
    mov     [rdx + CPUM.Host.cr4], rax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions.
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and     rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov     ecx, [rdx + CPUM.Guest.cr4]
    DEBUG_CHAR('b')                     ; trashes esi
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher, putting it
    ;  in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;  simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.Mask and CR4.OSFSXR are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU.
    ;
    and     ecx, [rdx + CPUM.CR4.Mask]
    or      eax, ecx
    or      eax, [rdx + CPUM.CR4.OSFSXR]
    mov     cr4, rax
    DEBUG_CHAR('c')                     ; trashes esi

    mov     eax, [rdx + CPUM.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, rax
    DEBUG_CHAR('0')                     ; trashes esi


    ; Load the new gdt so we can do a far jump to guest code after the cr3 reload.
    lgdt    [rdx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, rax
    DEBUG_CHAR('2')
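    ; (0ffffffffh is a placeholder: the FIXUP record above appears to tell
    ; Relocate to patch the immediate with the real intermediate-context CR3
    ; at load time. The same pattern recurs throughout this file.)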

    ;;
    ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
dd  0
    FIXUP FIX_HYPER_CS, 0
dd  0

    ; We're now on identity mapped pages, in 32-bit compatibility mode.
BITS 32
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')

    ; 2. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('4')

    ; 3. Load 32-bit intermediate page table.
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx

    ; 4. Disable long mode.
    ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
    wrmsr
    DEBUG_CHAR('6')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('7')
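    ; (Steps 2-5 above are the architected way of leaving long mode: paging
    ; off, CR3 repointed at PAE page tables, EFER.LME cleared, paging back on.
    ; This only works because the code is running on identity mapped pages,
    ; so the instruction stream stays valid while the translation regime
    ; changes underneath it.)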

    ;;
    ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
    jmp near NAME(JmpGCTarget)


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME JmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    FIXUP FIX_HYPER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    ; Load selectors
    DEBUG_CHAR('1')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    ; Load pCpum into EDX
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('2')
    lidt    [edx + CPUM.Hyper.idtr]

    ; Setup stack
    DEBUG_CHAR('3')
    lss     esp, [edx + CPUM.Hyper.esp]
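    ; (lss loads SS and ESP together in one instruction, so there is no
    ; window where the stack is only half switched.)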

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr     word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov     esi, [edx + CPUM.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS
    jz      htg_debug_regs_guest_no
    jmp     htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

    ; General registers.
    mov     ebx, [edx + CPUM.Hyper.ebx]
    mov     ebp, [edx + CPUM.Hyper.ebp]
    mov     esi, [edx + CPUM.Hyper.esi]
    mov     edi, [edx + CPUM.Hyper.edi]
    push    dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eip='
    mov     eax, [edx + CPUM.Hyper.eip]
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    mov     eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
%endif
    jmp     eax

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
    DEBUG_S_CHAR('s')
    mov     eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [edx + CPUM.Host.dr7], eax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, eax
    mov     eax, dr6                    ; just in case we save the state register too.
    mov     [edx + CPUM.Host.dr6], eax
    jmp     htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov     eax, dr0
    mov     [edx + CPUM.Host.dr0], eax
    mov     ebx, dr1
    mov     [edx + CPUM.Host.dr1], ebx
    mov     ecx, dr2
    mov     [edx + CPUM.Host.dr2], ecx
    mov     eax, dr3
    mov     [edx + CPUM.Host.dr3], eax
    ; load hyper DR0-7
    mov     ebx, [edx + CPUM.Hyper.dr0]
    mov     dr0, ebx
    mov     ecx, [edx + CPUM.Hyper.dr1]
    mov     dr1, ecx
    mov     eax, [edx + CPUM.Hyper.dr2]
    mov     dr2, eax
    mov     ebx, [edx + CPUM.Hyper.dr3]
    mov     dr3, ebx
    ;mov    eax, [edx + CPUM.Hyper.dr6]
    mov     ecx, 0ffff0ff0h             ; architectural DR6 init/reset value
    mov     dr6, ecx
    mov     eax, [edx + CPUM.Hyper.dr7]
    mov     dr7, eax
    jmp     htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
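; As derived from the pops below, the stack is expected to look like this on
; entry (sketch):
;   [esp]       call target (_cdecl routine)
;   [esp + 4]   argument frame size in bytes (cArg * 4)
;   [esp + 8]   the arguments themselves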
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM32_S_CHAR 'c'
    COM32_S_CHAR 't'
    COM32_S_CHAR '!'
%endif
    ; Clear fs and gs.
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax

    ; call routine
    pop     eax                         ; call address
    mov     esi, edx                    ; save edx
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eax='
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context.
    push    byte 0                      ; eip
    mov     edx, esi                    ; CPUM pointer

%ifdef DEBUG_STUFF
    COM32_S_CHAR '`'
%endif
    jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
ENDPROC vmmGCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]              ; fetch the return code argument
    jmp     NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax     Return code.
; @param    esp + 4 Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea     esp, [esp + 4]

    ;
    ; Guest Context (assumes CPUMCTXCORE layout).
    ;
    ; general purpose registers (layout is pushad)
    pop     dword [edx + CPUM.Guest.edi]
    pop     dword [edx + CPUM.Guest.esi]
    pop     dword [edx + CPUM.Guest.ebp]
    pop     dword [edx + CPUM.Guest.eax]
    pop     dword [edx + CPUM.Guest.ebx]
    pop     dword [edx + CPUM.Guest.edx]
    pop     dword [edx + CPUM.Guest.ecx]
    pop     dword [edx + CPUM.Guest.esp]
    pop     dword [edx + CPUM.Guest.ss]
    pop     dword [edx + CPUM.Guest.gs]
    pop     dword [edx + CPUM.Guest.fs]
    pop     dword [edx + CPUM.Guest.es]
    pop     dword [edx + CPUM.Guest.ds]
    pop     dword [edx + CPUM.Guest.cs]
    ; flags
    pop     dword [edx + CPUM.Guest.eflags]
    ; eip
    pop     dword [edx + CPUM.Guest.eip]
    jmp     vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, and we want the trap state to be saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    push    eax                         ; save return code.
    ; general purpose registers
    mov     eax, [ecx + CPUMCTXCORE.edi]
    mov     [edx + CPUM.Hyper.edi], eax
    mov     eax, [ecx + CPUMCTXCORE.esi]
    mov     [edx + CPUM.Hyper.esi], eax
    mov     eax, [ecx + CPUMCTXCORE.ebp]
    mov     [edx + CPUM.Hyper.ebp], eax
    mov     eax, [ecx + CPUMCTXCORE.eax]
    mov     [edx + CPUM.Hyper.eax], eax
    mov     eax, [ecx + CPUMCTXCORE.ebx]
    mov     [edx + CPUM.Hyper.ebx], eax
    mov     eax, [ecx + CPUMCTXCORE.edx]
    mov     [edx + CPUM.Hyper.edx], eax
    mov     eax, [ecx + CPUMCTXCORE.ecx]
    mov     [edx + CPUM.Hyper.ecx], eax
    mov     eax, [ecx + CPUMCTXCORE.esp]
    mov     [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov     eax, [ecx + CPUMCTXCORE.ss]
    mov     [edx + CPUM.Hyper.ss], eax
    mov     eax, [ecx + CPUMCTXCORE.gs]
    mov     [edx + CPUM.Hyper.gs], eax
    mov     eax, [ecx + CPUMCTXCORE.fs]
    mov     [edx + CPUM.Hyper.fs], eax
    mov     eax, [ecx + CPUMCTXCORE.es]
    mov     [edx + CPUM.Hyper.es], eax
    mov     eax, [ecx + CPUMCTXCORE.ds]
    mov     [edx + CPUM.Hyper.ds], eax
    mov     eax, [ecx + CPUMCTXCORE.cs]
    mov     [edx + CPUM.Hyper.cs], eax
    ; flags
    mov     eax, [ecx + CPUMCTXCORE.eflags]
    mov     [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov     eax, [ecx + CPUMCTXCORE.eip]
    mov     [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop     eax                         ; restore return code.
    jmp     vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state or haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    pop     dword [edx + CPUM.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone
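    ; (The popped return address becomes the hypervisor EIP, so the next
    ; switch into guest context - which ends with a jmp to CPUM.Hyper.eip -
    ; resumes right after the call that brought us here.)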

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov     dword [edx + CPUM.Hyper.ebx], ebx
    mov     dword [edx + CPUM.Hyper.esi], esi
    mov     dword [edx + CPUM.Hyper.edi], edi
    mov     dword [edx + CPUM.Hyper.ebp], ebp
    mov     dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved in a separate branch further down, before the host CR0 is restored (see gth_fpu_no).


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!

    ;;
    ;; 0. Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Enable PAE - already enabled.

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

    ; 4. Enable long mode.
    mov     ebp, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_LME
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh
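    ; (Both the selector and the offset of this far jump are placeholders;
    ; the FIX_ID_FAR32_TO_64BIT_MODE record patches them to reach
    ; IDExit64Mode through a 64-bit code selector.)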

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ; Move on to the HC mapping.
    ;
BITS 64
ALIGNCODE(16)
NAME(IDExit64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pHCExitTarget) wrt rip]

; 64-bit jump target
NAME(pHCExitTarget):
FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
dq  0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumHC):
FIXUP FIX_HC_64BIT_CPUM, 0
dq  0ffffffffffffffffh

    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')

    ; load final cr3
    mov     rsi, [rdx + CPUM.Host.cr3]
    mov     cr3, rsi
    DEBUG_CHAR('@')

    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumHC) wrt rip]
    ; activate host gdt and idt
    lgdt    [rdx + CPUM.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [rdx + CPUM.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [rdx + CPUM.Host.tr]      ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2]     ; rax <- GDTR.address + descriptor offset.
    and     dword [rax + 4], ~0200h             ; clear busy flag (2nd type2 bit)
    ltr     word [rdx + CPUM.Host.tr]
%else
    movzx   eax, word [rdx + CPUM.Host.tr]      ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2]     ; rax <- GDTR.address + descriptor offset.
    mov     ecx, [rax + 4]                      ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                            ; save original value
    and     ecx, ~0200h                         ; clear busy flag (2nd type2 bit)
    mov     [rax + 4], ecx                      ; not using xchg here is paranoia..
    ltr     word [rdx + CPUM.Host.tr]
    xchg    [rax + 4], ebx                      ; using xchg is paranoia too...
%endif
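    ; (ltr raises #GP if the referenced TSS descriptor is already marked
    ; busy, and it sets the busy bit itself when it loads - hence the
    ; clearing dance above.)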
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [rdx + CPUM.Host.ldtr]
    ; Restore segment registers
    mov     eax, [rdx + CPUM.Host.ds]
    mov     ds, eax
    mov     eax, [rdx + CPUM.Host.es]
    mov     es, eax
    mov     eax, [rdx + CPUM.Host.fs]
    mov     fs, eax
    mov     eax, [rdx + CPUM.Host.gs]
    mov     gs, eax
    ; restore stack
    mov     eax, [rdx + CPUM.Host.ss]
    mov     ss, eax
    mov     rsp, [rdx + CPUM.Host.rsp]

    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if the host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov     rbx, rdx                    ; save rdx (pCpum); wrmsr needs edx
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [rbx + CPUM.Host.SysEnter.cs]
    mov     edx, [rbx + CPUM.Host.SysEnter.cs + 4]
    wrmsr                               ; MSR[ecx] <- edx:eax
    mov     rdx, rbx                    ; restore rdx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if the guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz short gth_fpu_no
    mov     rcx, cr0
    and     rcx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rcx

    fxsave  [rdx + CPUM.Guest.fpu]
    fxrstor [rdx + CPUM.Host.fpu]
    jmp short gth_fpu_no
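    ; (CR0.TS and CR0.EM were cleared first because either would make the
    ; fxsave/fxrstor pair fault; the saved host CR0 is restored just below,
    ; after the FPU work is done.)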

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     rcx, [rdx + CPUM.Host.cr4]
    mov     cr4, rcx
    mov     rcx, [rdx + CPUM.Host.cr0]
    mov     cr0, rcx
    ;mov    rcx, [rdx + CPUM.Host.cr2]  ; assumed to be a waste of time.
    ;mov    cr2, rcx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp     gth_debug_regs_restore
gth_debug_regs_no:

    ; Restore MSRs
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    mov     eax, [rbx + CPUM.Host.FSbase]
    mov     edx, [rbx + CPUM.Host.FSbase + 4]
    wrmsr
    mov     ecx, MSR_K8_GS_BASE
    mov     eax, [rbx + CPUM.Host.GSbase]
    mov     edx, [rbx + CPUM.Host.GSbase + 4]
    wrmsr
    mov     ecx, MSR_K6_EFER
    mov     eax, [rbx + CPUM.Host.efer]
    mov     edx, [rbx + CPUM.Host.efer + 4]
    wrmsr
    mov     rdx, rbx


    ; restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    ; mov  rax, [rdx + CPUM.Host.rax] - scratch + return code
    mov     rbx, [rdx + CPUM.Host.rbx]
    ; mov  rcx, [rdx + CPUM.Host.rcx] - scratch
    ; mov  rdx, [rdx + CPUM.Host.rdx] - scratch
    mov     rdi, [rdx + CPUM.Host.rdi]
    mov     rsi, [rdx + CPUM.Host.rsi]
    mov     rsp, [rdx + CPUM.Host.rsp]
    mov     rbp, [rdx + CPUM.Host.rbp]
    ; mov  r8,  [rdx + CPUM.Host.r8 ] - scratch
    ; mov  r9,  [rdx + CPUM.Host.r9 ] - scratch
    mov     r10, [rdx + CPUM.Host.r10]
    mov     r11, [rdx + CPUM.Host.r11]
    mov     r12, [rdx + CPUM.Host.r12]
    mov     r13, [rdx + CPUM.Host.r13]
    mov     r14, [rdx + CPUM.Host.r14]
    mov     r15, [rdx + CPUM.Host.r15]

    ; finally restore flags. (probably not required)
    push    qword [rdx + CPUM.Host.rflags]
    popf


%ifdef DEBUG_STUFF
    COM64_S_CHAR '4'
%endif
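    ; 64-bit far return: the 048h byte below is a REX.W prefix, making retf
    ; pop RIP and CS as 64-bit quantities and return to the frame that
    ; vmmR0HostToGuest pushed before calling the worker.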
    db      048h
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor     eax, eax
    mov     dr7, rax                    ; paranoia or not?
    test    esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov     rax, [rdx + CPUM.Host.dr0]
    mov     dr0, rax
    mov     rbx, [rdx + CPUM.Host.dr1]
    mov     dr1, rbx
    mov     rcx, [rdx + CPUM.Host.dr2]
    mov     dr2, rcx
    mov     rax, [rdx + CPUM.Host.dr3]
    mov     dr3, rax
gth_debug_regs_dr7:
    mov     rbx, [rdx + CPUM.Host.dr6]
    mov     dr6, rbx
    mov     rcx, [rdx + CPUM.Host.dr7]
    mov     dr7, rcx
    jmp     gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db "AMD64 to/from PAE", 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd VMMSWITCHER_AMD64_TO_PAE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(HCExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(JmpGCTarget)                 - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)                - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(JmpGCTarget)                 - NAME(Start)
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)                - NAME(JmpGCTarget)

    iend
