VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm@490

Last change on this file since 490 was 464, checked in by vboxsync, 18 years ago:
Renamed CR4 masks.
; $Id: AMD64ToPAE.asm 464 2007-01-31 14:58:15Z vboxsync $
;; @file
; VMM - World Switchers, AMD64 to PAE.
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************
;; Prefix all names.
%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64ToPAE_ %+ name


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records.
; We collect the fixups in the .data section as we go along, so it is
; VITAL that nothing else uses the .data section for anything between
; 'Start' and 'End'.
;
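; Illustrative sketch only (not assembled; the real FIXUP macro lives in
; VMMSwitcher/VMMSwitcher.mac, and the record layout shown here, a
; <type byte, patch-site offset dword>, is hypothetical): a macro of
; roughly this shape appends a record to .data and resumes the previous
; section, which is why nothing else may emit .data between the Fixups
; and FixupsEnd labels.
%if 0
%macro SKETCH_FIXUP 2                   ; %1 = fixup type, %2 = patch site
    [section .data]
    db      %1                          ; fixup type byte
    dd      %2 - NAME(Start)            ; offset of the patch site in the image
    __SECT__                            ; back to the section we came from
%endmacro
%endif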
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 64

;;
; The C interface.
;
; @param    pVM     GCC: rdi  MSC: rcx  The VM handle.
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM64_S_NEWLINE
    COM64_S_CHAR '^'
%endif

    ;
    ; Make r9 = pVM and rdx = pCpum.
    ; rax, rcx and r8 are scratch hereafter.
    ;
%ifdef __WIN64__
    mov     r9, rcx
%else
    mov     r9, rdi
%endif
    lea     rdx, [r9 + VM.cpum]

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_START r8
%endif

    ;
    ; Call the worker. Pushing CS first makes the stack look like a far
    ; call frame, so the exit path can return with a far return.
    ;
    mov     eax, cs
    push    rax
    call    NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_STOP r8
%endif

    ret
ENDPROC vmmR0HostToGuest
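; A minimal standalone sketch (assumptions: NASM syntax, 64-bit flat
; code; 'caller' and 'worker' are hypothetical labels) of the
; push-CS-then-near-call pattern used above: the frame it builds lets
; the worker's exit path return with a 64-bit far return, popping both
; RIP and CS - see the 'db 048h' + 'retf' at the end of
; VMMGCGuestToHostAsm below.
%if 0
BITS 64
caller:
    mov     eax, cs                     ; current code selector
    push    rax                         ; fake the CS slot of a far call frame
    call    worker                      ; pushes the return RIP on top of it
    ret

worker:
    ; ... switch worlds, eventually coming back here ...
    db      048h                        ; REX.W prefix: make retf pop 8-byte RIP + CS
    retf
%endif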



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context).
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context.
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    ; mov     [rdx + CPUM.Host.rax], rax - scratch
    mov     [rdx + CPUM.Host.rbx], rbx
    ; mov     [rdx + CPUM.Host.rcx], rcx - scratch
    ; mov     [rdx + CPUM.Host.rdx], rdx - scratch
    mov     [rdx + CPUM.Host.rdi], rdi
    mov     [rdx + CPUM.Host.rsi], rsi
    mov     [rdx + CPUM.Host.rsp], rsp
    mov     [rdx + CPUM.Host.rbp], rbp
    ; mov     [rdx + CPUM.Host.r8 ], r8 - scratch
    ; mov     [rdx + CPUM.Host.r9 ], r9 - scratch
    mov     [rdx + CPUM.Host.r10], r10
    mov     [rdx + CPUM.Host.r11], r11
    mov     [rdx + CPUM.Host.r12], r12
    mov     [rdx + CPUM.Host.r13], r13
    mov     [rdx + CPUM.Host.r14], r14
    mov     [rdx + CPUM.Host.r15], r15
    ; selectors.
    mov     [rdx + CPUM.Host.ds], ds
    mov     [rdx + CPUM.Host.es], es
    mov     [rdx + CPUM.Host.fs], fs
    mov     [rdx + CPUM.Host.gs], gs
    mov     [rdx + CPUM.Host.ss], ss
    ; MSRs (rdmsr returns the value in edx:eax and clobbers both).
    mov     rbx, rdx                    ; save the CPUM pointer
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.FSbase], eax
    mov     [rbx + CPUM.Host.FSbase + 4], edx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.GSbase], eax
    mov     [rbx + CPUM.Host.GSbase + 4], edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [rbx + CPUM.Host.efer], eax
    mov     [rbx + CPUM.Host.efer + 4], edx
    mov     rdx, rbx                    ; restore the CPUM pointer
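; A minimal sketch (assumptions: NASM syntax, 64-bit code; the MSR index
; constant is written out here instead of taken from x86.mac) of how
; rdmsr's edx:eax halves combine into one 64-bit value, the same split
; this file stores as two dwords:
%if 0
BITS 64
%define SKETCH_MSR_K8_FS_BASE 0c0000100h
read_fs_base:                           ; returns the FS base in rax
    mov     ecx, SKETCH_MSR_K8_FS_BASE
    rdmsr                               ; edx:eax <- MSR[ecx], high halves zeroed
    shl     rdx, 32
    or      rax, rdx                    ; rax = full 64-bit FS base
    ret
%endif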
    ; special registers.
    sldt    [rdx + CPUM.Host.ldtr]
    sidt    [rdx + CPUM.Host.idtr]
    sgdt    [rdx + CPUM.Host.gdtr]
    str     [rdx + CPUM.Host.tr]        ; yasm BUG, generates sldt. YASMCHECK!
    ; flags
    pushf
    pop     qword [rdx + CPUM.Host.rflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if the host doesn't use sysenter.
    ; save the MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     rbx, rdx                    ; save rdx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [rbx + CPUM.Host.SysEnter.cs], eax
    mov     [rbx + CPUM.Host.SysEnter.cs + 4], edx ; 32-bit stores; 64-bit rax/rdx stores would spill past the field.
    xor     rax, rax                    ; load 0:0 to cause #GP upon sysenter
    xor     rdx, rdx
    wrmsr
    mov     rdx, rbx                    ; restore rdx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;; @todo the FPU check can be optimized to use cr0 flags!
    mov     [rdx + CPUM.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz      htg_debug_regs_no
    jmp     htg_debug_regs_save
htg_debug_regs_no:
    DEBUG_CHAR('a')                     ; trashes esi

    ; control registers.
    mov     rax, cr0
    mov     [rdx + CPUM.Host.cr0], rax
    ;mov     rax, cr2                   ; assume the host OS doesn't stuff things into cr2. (safe)
    ;mov     [rdx + CPUM.Host.cr2], rax
    mov     rax, cr3
    mov     [rdx + CPUM.Host.cr3], rax
    mov     rax, cr4
    mov     [rdx + CPUM.Host.cr4], rax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions.
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and     rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov     ecx, [rdx + CPUM.Guest.cr4]
    DEBUG_CHAR('b')                     ; trashes esi
    ;; @todo Switcher cleanup: Determine the base CR4 during CPUMR0Init / VMMR3SelectSwitcher, putting it
    ;        in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;        simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU.
    ;
    and     ecx, [rdx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [rdx + CPUM.CR4.OrMask]
    mov     cr4, rax
    DEBUG_CHAR('c')                     ; trashes esi

    mov     eax, [rdx + CPUM.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, rax
    DEBUG_CHAR('0')                     ; trashes esi


    ; Load the new gdt so we can do a far jump to guest code after the cr3 reload.
    lgdt    [rdx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ;;
    ;; Load intermediate memory context.
    ;;
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, rax
    DEBUG_CHAR('2')                     ; trashes esi

    ;;
    ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
dd  0
    FIXUP FIX_HYPER_CS, 0
dd  0

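; A minimal sketch (assumptions: NASM syntax; 'fp_target' and 'SKETCH_CS32'
; are hypothetical stand-ins for the fixed-up offset and selector above)
; of an indirect far jump through a 16:32 pointer in memory, the
; construct fpIDEnterTarget implements. Note the real code reserves a
; full dd for the selector slot; the jump only consumes the low word.
%if 0
BITS 64
    jmp far [rel fp_target]             ; loads CS:EIP from the 6-byte pointer
fp_target:
    dd      0deadbeefh                  ; 32-bit offset (patched by a fixup in the real code)
    dw      SKETCH_CS32                 ; 16-bit code selector
%endif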

;;
; Detour for saving the host DR7 and DR6.
; esi and rdx must be preserved.
htg_debug_regs_save:
DEBUG_S_CHAR('s');
    mov     rax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [rdx + CPUM.Host.dr7], rax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, rax
    mov     rax, dr6                    ; just in case we save the state register too.
    mov     [rdx + CPUM.Host.dr6], rax
    ; save host DR0-3?
    test    esi, CPUM_USE_DEBUG_REGS
    jz near htg_debug_regs_no
DEBUG_S_CHAR('S');
    mov     rax, dr0
    mov     [rdx + CPUM.Host.dr0], rax
    mov     rbx, dr1
    mov     [rdx + CPUM.Host.dr1], rbx
    mov     rcx, dr2
    mov     [rdx + CPUM.Host.dr2], rcx
    mov     rax, dr3
    mov     [rdx + CPUM.Host.dr3], rax
    jmp     htg_debug_regs_no


    ; We're now on identity mapped pages, in 32-bit compatibility mode.
BITS 32
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')

    ; 2. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('4')

    ; 3. Load the 32-bit intermediate page table.
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx

    ; 4. Disable long mode.
    ;    We also take the chance to disable syscall/sysret and fast fxsave/fxrstor.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
    wrmsr
    DEBUG_CHAR('6')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump               ; flush the instruction prefetch after changing paging mode.
just_a_jump:
    DEBUG_CHAR('7')

    ;;
    ;; 6. Jump to the guest code mapping of the code and load the hypervisor CS.
    ;;
    FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
    jmp near NAME(JmpGCTarget)


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME JmpGCTarget
    DEBUG_CHAR('-')
    ; load the final cr3 and do a far jump to load cs.
    FIXUP FIX_HYPER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Set up the rest of the VMM state.
    ;;
    ; Load selectors
    DEBUG_CHAR('1')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    ; Load pCpum into EDX
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate the guest IDT
    DEBUG_CHAR('2')
    lidt    [edx + CPUM.Hyper.idtr]

    ; Set up the stack
    DEBUG_CHAR('3')
    lss     esp, [edx + CPUM.Hyper.esp]

    ; Restore the TSS selector; we must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear the busy flag (2nd type bit)
    DEBUG_CHAR('5')
    ltr     word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov     esi, [edx + CPUM.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS
    jz      htg_debug_regs_guest_no
    jmp     htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

    ; General registers.
    mov     ebx, [edx + CPUM.Hyper.ebx]
    mov     ebp, [edx + CPUM.Hyper.ebp]
    mov     esi, [edx + CPUM.Hyper.esi]
    mov     edi, [edx + CPUM.Hyper.edi]
    push    dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eip='
    mov     eax, [edx + CPUM.Hyper.eip]
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    mov     eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
%endif
    jmp     eax

;;
; Detour for saving host DR0-3 and loading the hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; load hyper DR0-7
    mov     ebx, [edx + CPUM.Hyper.dr0]
    mov     dr0, ebx
    mov     ecx, [edx + CPUM.Hyper.dr1]
    mov     dr1, ecx
    mov     eax, [edx + CPUM.Hyper.dr2]
    mov     dr2, eax
    mov     ebx, [edx + CPUM.Hyper.dr3]
    mov     dr3, ebx
    ;mov     eax, [edx + CPUM.Hyper.dr6]
    mov     ecx, 0ffff0ff0h             ; the DR6 power-on value: reserved bits set, no status bits.
    mov     dr6, ecx
    mov     eax, [edx + CPUM.Hyper.dr7]
    mov     dr7, eax
    jmp     htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
; (A sketch of this frame follows the routine below.)
;
;
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM32_S_CHAR 'c'
    COM32_S_CHAR 't'
    COM32_S_CHAR '!'
%endif
    ; Clear fs and gs.
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax

    ; call routine
    pop     eax                         ; call address
    mov     esi, edx                    ; save edx
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eax='
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context.
    push    byte 0                      ; eip
    mov     edx, esi                    ; CPUM pointer

%ifdef DEBUG_STUFF
    COM32_S_CHAR '`'
%endif
    jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
ENDPROC vmmGCCallTrampoline
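; A sketch of how a caller might build the frame this trampoline
; consumes, following the push order in the header above. 'MyFunc',
; 'arg0' and 'arg1' are hypothetical; in reality the VMM ring-3 code
; builds this frame on the hypervisor stack before entering GC.
%if 0
BITS 32
    push    dword arg1                  ; arguments, rightmost first (_cdecl)
    push    dword arg0
    push    dword 2 * 4                 ; argument frame size (cArg * 4)
    push    dword MyFunc                ; call target
    push    dword NAME(vmmGCCallTrampoline) ; address of the trampoline itself
%endif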


;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]
    jmp     NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax         Return code.
; @param    esp + 4     Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    ; Skip the return address (assumes we were called!)
    lea     esp, [esp + 4]

    ;
    ; Guest Context (assumes CPUMCTXCORE layout).
    ;
    ; general purpose registers (pushad-like layout).
    pop     dword [edx + CPUM.Guest.edi]
    pop     dword [edx + CPUM.Guest.esi]
    pop     dword [edx + CPUM.Guest.ebp]
    pop     dword [edx + CPUM.Guest.eax]
    pop     dword [edx + CPUM.Guest.ebx]
    pop     dword [edx + CPUM.Guest.edx]
    pop     dword [edx + CPUM.Guest.ecx]
    pop     dword [edx + CPUM.Guest.esp]
    pop     dword [edx + CPUM.Guest.ss]
    pop     dword [edx + CPUM.Guest.gs]
    pop     dword [edx + CPUM.Guest.fs]
    pop     dword [edx + CPUM.Guest.es]
    pop     dword [edx + CPUM.Guest.ds]
    pop     dword [edx + CPUM.Guest.cs]
    ; flags
    pop     dword [edx + CPUM.Guest.eflags]
    ; eip
    pop     dword [edx + CPUM.Guest.eip]
    jmp     vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx
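; A sketch of the field order the pop sequence above implies for the
; start of CPUMCTXCORE. The real structure comes from cpum.mac; the
; dword-sized selector fields here reflect the 'pop dword' stores, not
; necessarily the exact declaration.
%if 0
struc CPUMCTXCORE_SKETCH
    .edi    resd 1
    .esi    resd 1
    .ebp    resd 1
    .eax    resd 1
    .ebx    resd 1
    .edx    resd 1
    .ecx    resd 1
    .esp    resd 1
    .ss     resd 1
    .gs     resd 1
    .fs     resd 1
    .es     resd 1
    .ds     resd 1
    .cs     resd 1
    .eflags resd 1
    .eip    resd 1
endstruc
%endif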


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; it before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    push    eax                         ; save return code.
    ; general purpose registers
    mov     eax, [ecx + CPUMCTXCORE.edi]
    mov     [edx + CPUM.Hyper.edi], eax
    mov     eax, [ecx + CPUMCTXCORE.esi]
    mov     [edx + CPUM.Hyper.esi], eax
    mov     eax, [ecx + CPUMCTXCORE.ebp]
    mov     [edx + CPUM.Hyper.ebp], eax
    mov     eax, [ecx + CPUMCTXCORE.eax]
    mov     [edx + CPUM.Hyper.eax], eax
    mov     eax, [ecx + CPUMCTXCORE.ebx]
    mov     [edx + CPUM.Hyper.ebx], eax
    mov     eax, [ecx + CPUMCTXCORE.edx]
    mov     [edx + CPUM.Hyper.edx], eax
    mov     eax, [ecx + CPUMCTXCORE.ecx]
    mov     [edx + CPUM.Hyper.ecx], eax
    mov     eax, [ecx + CPUMCTXCORE.esp]
    mov     [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov     eax, [ecx + CPUMCTXCORE.ss]
    mov     [edx + CPUM.Hyper.ss], eax
    mov     eax, [ecx + CPUMCTXCORE.gs]
    mov     [edx + CPUM.Hyper.gs], eax
    mov     eax, [ecx + CPUMCTXCORE.fs]
    mov     [edx + CPUM.Hyper.fs], eax
    mov     eax, [ecx + CPUMCTXCORE.es]
    mov     [edx + CPUM.Hyper.es], eax
    mov     eax, [ecx + CPUMCTXCORE.ds]
    mov     [edx + CPUM.Hyper.ds], eax
    mov     eax, [ecx + CPUMCTXCORE.cs]
    mov     [edx + CPUM.Hyper.cs], eax
    ; flags
    mov     eax, [ecx + CPUMCTXCORE.eflags]
    mov     [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov     eax, [ecx + CPUMCTXCORE.eip]
    mov     [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop     eax                         ; restore return code.
    jmp     vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    pop     dword [edx + CPUM.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov     dword [edx + CPUM.Hyper.ebx], ebx
    mov     dword [edx + CPUM.Hyper.esi], esi
    mov     dword [edx + CPUM.Hyper.edi], edi
    mov     dword [edx + CPUM.Hyper.ebp], ebp
    mov     dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str     [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved in a separate branch further down, before the host cr0 is restored.


    ;;
    ;; Load intermediate memory context.
    ;;
    mov     edi, eax                    ; save the return code in EDI (careful with COM_DWORD_REG from here on!)
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!

    ;;
    ;; 0. Jump to the identity mapped location.
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Enable PAE - already enabled.

    ; 3. Load the long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

    ; 4. Enable long mode.
    mov     ebp, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_LME
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh
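; A minimal sketch (assumptions: NASM syntax; 'SKETCH_CS64' and
; 'lm_entry' are hypothetical stand-ins for the selector:offset the
; fixup patches into the instruction above) of a direct far jump that
; switches from compatibility mode into a 64-bit code segment:
%if 0
BITS 32
    jmp     SKETCH_CS64:lm_entry        ; load a CS with the L bit set
BITS 64
lm_entry:                               ; now executing 64-bit code
%endif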

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ; Move on to the HC mapping.
    ;
BITS 64
ALIGNCODE(16)
NAME(IDExit64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pHCExitTarget) wrt rip]

; 64-bit jump target.
NAME(pHCExitTarget):
FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
dq  0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumHC):
FIXUP FIX_HC_64BIT_CPUM, 0
dq  0ffffffffffffffffh

    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')

    ; load the final cr3.
    mov     rsi, [rdx + CPUM.Host.cr3]
    mov     cr3, rsi
    DEBUG_CHAR('@')

    ;;
    ;; Restore host context.
    ;;
    ; Load the CPUM pointer into rdx.
    mov     rdx, [NAME(pCpumHC) wrt rip]
    ; activate the host gdt and idt.
    lgdt    [rdx + CPUM.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [rdx + CPUM.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore the TSS selector; we must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [rdx + CPUM.Host.tr]  ; eax <- TR
    and     al, 0F8h                        ; mask away TI and RPL bits, get the descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2] ; rax <- GDTR.address + descriptor offset.
    and     dword [rax + 4], ~0200h         ; clear the busy flag (2nd type bit)
    ltr     word [rdx + CPUM.Host.tr]
%else
    movzx   eax, word [rdx + CPUM.Host.tr]  ; eax <- TR
    and     al, 0F8h                        ; mask away TI and RPL bits, get the descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2] ; rax <- GDTR.address + descriptor offset.
    mov     ecx, [rax + 4]                  ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                        ; save the original value
    and     ecx, ~0200h                     ; clear the busy flag (2nd type bit)
    mov     [rax + 4], ecx                  ; not using xchg here is paranoia..
    ltr     word [rdx + CPUM.Host.tr]
    xchg    [rax + 4], ebx                  ; using xchg is paranoia too...
%endif
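; Why "and dword [rax + 4], ~0200h" clears the busy flag: the second
; dword of a system-segment descriptor carries the type field in bits
; 8-11, and a TSS descriptor's type is 1001b (available) vs 1011b
; (busy), so the busy bit is bit 9 of that dword, i.e. the 0200h mask.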
    ; activate the ldt.
    DEBUG_CHAR('2')
    lldt    [rdx + CPUM.Host.ldtr]
    ; Restore the segment registers.
    mov     eax, [rdx + CPUM.Host.ds]
    mov     ds, eax
    mov     eax, [rdx + CPUM.Host.es]
    mov     es, eax
    mov     eax, [rdx + CPUM.Host.fs]
    mov     fs, eax
    mov     eax, [rdx + CPUM.Host.gs]
    mov     gs, eax
    ; restore the stack.
    mov     eax, [rdx + CPUM.Host.ss]
    mov     ss, eax
    mov     rsp, [rdx + CPUM.Host.rsp]

    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if the host doesn't use sysenter.
    ; restore the MSR_IA32_SYSENTER_CS register.
    mov     rbx, rdx                    ; save the CPUM pointer
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [rbx + CPUM.Host.SysEnter.cs]
    mov     edx, [rbx + CPUM.Host.SysEnter.cs + 4]
    wrmsr                               ; MSR[ecx] <- edx:eax
    mov     rdx, rbx                    ; restore the CPUM pointer
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore the FPU state if the guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz short gth_fpu_no
    mov     rcx, cr0
    and     rcx, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so fxsave/fxrstor don't raise #NM.
    mov     cr0, rcx

    fxsave  [rdx + CPUM.Guest.fpu]
    fxrstor [rdx + CPUM.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; We would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     rcx, [rdx + CPUM.Host.cr4]
    mov     cr4, rcx
    mov     rcx, [rdx + CPUM.Host.cr0]
    mov     cr0, rcx
    ;mov     rcx, [rdx + CPUM.Host.cr2] ; assumes this is a waste of time.
    ;mov     cr2, rcx

    ; restore debug registers (if modified). (esi must still be fUseFlags!)
    ; (must be done after the cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp     gth_debug_regs_restore
gth_debug_regs_no:

    ; Restore MSRs (wrmsr takes the value from edx:eax).
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    mov     eax, [rbx + CPUM.Host.FSbase]
    mov     edx, [rbx + CPUM.Host.FSbase + 4]
    wrmsr
    mov     ecx, MSR_K8_GS_BASE
    mov     eax, [rbx + CPUM.Host.GSbase]
    mov     edx, [rbx + CPUM.Host.GSbase + 4]
    wrmsr
    mov     ecx, MSR_K6_EFER
    mov     eax, [rbx + CPUM.Host.efer]
    mov     edx, [rbx + CPUM.Host.efer + 4]
    wrmsr
    mov     rdx, rbx
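; The matching sketch for the write side (assumptions as in the rdmsr
; sketch earlier; the MSR index constant is written out here instead of
; taken from x86.mac): splitting a 64-bit value into the edx:eax halves
; wrmsr expects.
%if 0
BITS 64
%define SKETCH_MSR_K8_GS_BASE 0c0000101h
write_gs_base:                          ; rdi = new 64-bit GS base
    mov     ecx, SKETCH_MSR_K8_GS_BASE
    mov     rdx, rdi
    shr     rdx, 32                     ; high half -> edx
    mov     eax, edi                    ; low half -> eax
    wrmsr                               ; MSR[ecx] <- edx:eax
    ret
%endif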


    ; restore general registers.
    mov     eax, edi                    ; restore the return code. eax = return code !!
    ; mov     rax, [rdx + CPUM.Host.rax] - scratch + return code
    mov     rbx, [rdx + CPUM.Host.rbx]
    ; mov     rcx, [rdx + CPUM.Host.rcx] - scratch
    ; mov     rdx, [rdx + CPUM.Host.rdx] - scratch
    mov     rdi, [rdx + CPUM.Host.rdi]
    mov     rsi, [rdx + CPUM.Host.rsi]
    mov     rsp, [rdx + CPUM.Host.rsp]
    mov     rbp, [rdx + CPUM.Host.rbp]
    ; mov     r8,  [rdx + CPUM.Host.r8 ] - scratch
    ; mov     r9,  [rdx + CPUM.Host.r9 ] - scratch
    mov     r10, [rdx + CPUM.Host.r10]
    mov     r11, [rdx + CPUM.Host.r11]
    mov     r12, [rdx + CPUM.Host.r12]
    mov     r13, [rdx + CPUM.Host.r13]
    mov     r14, [rdx + CPUM.Host.r14]
    mov     r15, [rdx + CPUM.Host.r15]

    ; finally restore flags. (probably not required)
    push    qword [rdx + CPUM.Host.rflags]
    popf


%ifdef DEBUG_STUFF
    COM64_S_CHAR '4'
%endif
    db      048h                        ; REX.W prefix: make the far return pop an 8-byte RIP and CS.
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor     eax, eax
    mov     dr7, rax                    ; paranoia or not?
    test    esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov     rax, [rdx + CPUM.Host.dr0]
    mov     dr0, rax
    mov     rbx, [rdx + CPUM.Host.dr1]
    mov     dr1, rbx
    mov     rcx, [rdx + CPUM.Host.dr2]
    mov     dr2, rcx
    mov     rax, [rdx + CPUM.Host.dr3]
    mov     dr3, rax
gth_debug_regs_dr7:
    mov     rbx, [rdx + CPUM.Host.dr6]
    mov     dr6, rbx
    mov     rcx, [rdx + CPUM.Host.dr7]
    mov     dr7, rcx
    jmp     gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db "AMD64 to/from PAE", 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                      RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                    RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                     RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                 RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                     dd VMMSWITCHER_AMD64_TO_PAE
        at VMMSWITCHERDEF.cbCode,                      dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,            dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,            dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,         dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,         dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                  dd 0
        at VMMSWITCHERDEF.cbHCCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                  dd NAME(HCExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                   dd NAME(End)                         - NAME(HCExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                  dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                   dd NAME(JmpGCTarget)                 - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                  dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                   dd NAME(HCExitTarget)                - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                   dd NAME(JmpGCTarget)                 - NAME(Start)
        at VMMSWITCHERDEF.cbGCCode,                    dd NAME(IDExitTarget)                - NAME(JmpGCTarget)

    iend
