VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm@ 880

Last change on this file since 880 was 848, checked in by vboxsync, 18 years ago

AMD64 debugging: Added checks for NMIs and that IF is cleared when doing the context switching.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.4 KB
Line 
1; $Id: AMD64ToPAE.asm 848 2007-02-12 16:01:52Z vboxsync $
2;; @file
3; VMM - World Switchers, AMD64 to PAE.
4;
5
6;
7; Copyright (C) 2006 InnoTek Systemberatung GmbH
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License as published by the Free Software Foundation,
13; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14; distribution. VirtualBox OSE is distributed in the hope that it will
15; be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; If you received this file as part of a commercial VirtualBox
18; distribution, then only the terms of your commercial VirtualBox
19; license agreement apply instead of the previous paragraph.
20;
21
22;%define DEBUG_STUFF 1
23%define STRICT_IF 1
; STRICT_IF enables sanity checks that EFLAGS.IF is clear when entering and
; leaving the switcher; violations return the magic codes 0c0ffee00h (entry)
; and 0c0ffee01h (exit) — see vmmR0HostToGuest and the STRICT_IF block in
; VMMGCGuestToHostAsm.
24
25;*******************************************************************************
26;* Defined Constants And Macros *
27;*******************************************************************************
28;; Prefix all names.
; NAME_OVERLOAD prefixes every NAME()/GLOBALNAME symbol in this file with
; 'vmmR3SwitcherAMD64ToPAE_' so this switcher variant's symbols are unique.
29%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64ToPAE_ %+ name
30
31
32;*******************************************************************************
33;* Header Files *
34;*******************************************************************************
35%include "VBox/asmdefs.mac"
36%include "VBox/x86.mac"
37%include "VBox/cpum.mac"
38%include "VBox/stam.mac"
39%include "VBox/vm.mac"
40%include "CPUMInternal.mac"
41%include "VMMSwitcher/VMMSwitcher.mac"
42
43
44;
45; Start the fixup records
46; We collect the fixups in the .data section as we go along
47; It is therefore VITAL that no-one is using the .data section
48; for anything else between 'Start' and 'End'.
49;
50BEGINDATA
51GLOBALNAME Fixups
; Fixup entries are emitted into .data by the FIXUP macro invocations below;
; the table is terminated by the FIX_THE_END byte just before 'FixupsEnd'.
52
53
54
55BEGINCODE
56GLOBALNAME Start
; 'Start' marks the first byte of switcher code; all fixup offsets in this
; file are expressed relative to it.
57
58BITS 64
; The entry code below executes in the host's 64-bit long mode.
59
59
60;;
61; The C interface.
62;
63; @param pVM GCC: rdi MSC:rcx The VM handle.
64;
; Returns in eax: whatever the guest-to-host path hands back, or the magic
; value 0c0ffee00h when STRICT_IF finds interrupts enabled on entry.
65BEGINPROC vmmR0HostToGuest
66%ifdef DEBUG_STUFF
67 COM64_S_NEWLINE
68 COM64_S_CHAR '^'
69%endif
70
71%ifdef STRICT_IF
 ; Sanity check: the world switch must be entered with interrupts disabled.
72 pushf
73 pop rax
74 test eax, X86_EFL_IF
75 jz .if_clear_in
76 mov eax, 0c0ffee00h
77 ret
78.if_clear_in:
79%endif
80
81 ;
82 ; make r9 = pVM and rdx = pCpum.
83 ; rax, rcx and r8 are scratch here after.
84%ifdef __WIN64__
85 mov r9, rcx
86%else
87 mov r9, rdi
88%endif
89 lea rdx, [r9 + VM.cpum]
90
91%ifdef VBOX_WITH_STATISTICS
92 ;
93 ; Switcher stats.
94 ;
95 lea r8, [r9 + VM.StatSwitcherToGC]
96 STAM64_PROFILE_ADV_START r8
97%endif
98
99 ;
100 ; Call worker (far return).
101 ;
 ; Push the current cs so the guest-to-host path can come back here with a
 ; 64-bit far return (the 'db 048h / retf' at the end of VMMGCGuestToHostAsm).
 ; NOTE(review): r9 is relied upon after the call even though the host context
 ; restore does not reload r8/r9; this works because the VMM side runs in
 ; 32-bit code (BITS 32 sections below), which cannot touch r8-r15 — confirm.
102 mov eax, cs
103 push rax
104 call NAME(vmmR0HostToGuestAsm)
105
106%ifdef VBOX_WITH_STATISTICS
107 ;
108 ; Switcher stats.
109 ;
110 lea r8, [r9 + VM.StatSwitcherToGC]
111 STAM64_PROFILE_ADV_STOP r8
112%endif
113
114 ret
115ENDPROC vmmR0HostToGuest
116
117
118
; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context).
; Saves the host CPU state into CPUM.Host, switches through the intermediate
; memory context into the hypervisor (PAE) context, drops from long mode to
; 32-bit mode, loads the hypervisor state and finally jumps to CPUM.Hyper.eip.
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    ; mov   [rdx + CPUM.Host.rax], rax - scratch
    mov     [rdx + CPUM.Host.rbx], rbx
    ; mov   [rdx + CPUM.Host.rcx], rcx - scratch
    ; mov   [rdx + CPUM.Host.rdx], rdx - scratch
    mov     [rdx + CPUM.Host.rdi], rdi
    mov     [rdx + CPUM.Host.rsi], rsi
    mov     [rdx + CPUM.Host.rsp], rsp
    mov     [rdx + CPUM.Host.rbp], rbp
    ; mov   [rdx + CPUM.Host.r8 ], r8 - scratch
    ; mov   [rdx + CPUM.Host.r9 ], r9 - scratch
    mov     [rdx + CPUM.Host.r10], r10
    mov     [rdx + CPUM.Host.r11], r11
    mov     [rdx + CPUM.Host.r12], r12
    mov     [rdx + CPUM.Host.r13], r13
    mov     [rdx + CPUM.Host.r14], r14
    mov     [rdx + CPUM.Host.r15], r15
    ; selectors.
    mov     [rdx + CPUM.Host.ds], ds
    mov     [rdx + CPUM.Host.es], es
    mov     [rdx + CPUM.Host.fs], fs
    mov     [rdx + CPUM.Host.gs], gs
    mov     [rdx + CPUM.Host.ss], ss
    ; MSRs - rdmsr clobbers eax/edx, so park pCpum in rbx meanwhile.
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.FSbase], eax
    mov     [rbx + CPUM.Host.FSbase + 4], edx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.GSbase], eax
    mov     [rbx + CPUM.Host.GSbase + 4], edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [rbx + CPUM.Host.efer], eax
    mov     [rbx + CPUM.Host.efer + 4], edx
    mov     rdx, rbx                    ; restore pCpum. (A stray, dead reload of ecx with MSR_K6_EFER was removed here.)
    ; special registers.
    sldt    [rdx + CPUM.Host.ldtr]
    sidt    [rdx + CPUM.Host.idtr]
    sgdt    [rdx + CPUM.Host.gdtr]
    str     [rdx + CPUM.Host.tr]        ; yasm BUG, generates sldt. YASMCHECK!
    ; flags
    pushf
    pop     qword [rdx + CPUM.Host.rflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     rbx, rdx                    ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    ; Store the two MSR halves as dwords. (Previously these were qword stores
    ; of rax and rdx; the rdx store at +4 spilled four bytes past the
    ; SysEnter.cs field into the following field.)
    mov     [rbx + CPUM.Host.SysEnter.cs], eax
    mov     [rbx + CPUM.Host.SysEnter.cs + 4], edx
    xor     rax, rax                    ; load 0:0 to cause #GP upon sysenter
    xor     rdx, rdx
    wrmsr
    mov     rdx, rbx                    ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [rdx + CPUM.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz      htg_debug_regs_no
    jmp     htg_debug_regs_save
htg_debug_regs_no:
    DEBUG_CHAR('a')                     ; trashes esi

    ; control registers.
    mov     rax, cr0
    mov     [rdx + CPUM.Host.cr0], rax
    ;mov    rax, cr2                    ; assume host os don't suff things in cr2. (safe)
    ;mov    [rdx + CPUM.Host.cr2], rax
    mov     rax, cr3
    mov     [rdx + CPUM.Host.cr3], rax
    mov     rax, cr4
    mov     [rdx + CPUM.Host.cr4], rax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and     rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov     ecx, [rdx + CPUM.Guest.cr4]
    DEBUG_CHAR('b')                     ; trashes esi
    ;; @todo Switcher cleanup: Determin base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;  in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;  simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    and     ecx, [rdx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [rdx + CPUM.CR4.OrMask]
    mov     cr4, rax
    DEBUG_CHAR('c')                     ; trashes esi

    mov     eax, [rdx + CPUM.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, rax
    DEBUG_CHAR('0')                     ; trashes esi


    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt    [rdx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     eax, 0ffffffffh             ; placeholder patched by the fixup above.
    mov     cr3, rax
    DEBUG_CHAR('2')                     ; trashes esi

    ;;
    ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
dd  0                                   ; offset  - patched to the identity mapped address.
    FIXUP FIX_HYPER_CS, 0
dd  0                                   ; segment - patched to the hypervisor CS.


;;
; Detour for saving the host DR7 and DR6.
; esi and rdx must be preserved.
htg_debug_regs_save:
DEBUG_S_CHAR('s');
    mov     rax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [rdx + CPUM.Host.dr7], rax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, rax
    mov     rax, dr6                    ; just in case we save the state register too.
    mov     [rdx + CPUM.Host.dr6], rax
    ; save host DR0-3?
    test    esi, CPUM_USE_DEBUG_REGS
    jz near htg_debug_regs_no
DEBUG_S_CHAR('S');
    mov     rax, dr0
    mov     [rdx + CPUM.Host.dr0], rax
    mov     rbx, dr1
    mov     [rdx + CPUM.Host.dr1], rbx
    mov     rcx, dr2
    mov     [rdx + CPUM.Host.dr2], rcx
    mov     rax, dr3
    mov     [rdx + CPUM.Host.dr3], rax
    jmp     htg_debug_regs_no


    ; We're now on an identity mapped pages! in 32-bit compatability mode.
BITS 32
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')

    ; 2. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('4')

    ; 3. Load 32-bit intermediate page table.
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx

    ; 4. Disable long mode.
    ;    We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
    wrmsr
    DEBUG_CHAR('6')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump               ; flush the instruction pipe after the mode change.
just_a_jump:
    DEBUG_CHAR('7')

    ;;
    ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
    jmp near NAME(JmpGCTarget)


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME JmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    FIXUP FIX_HYPER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    ; Load selectors
    DEBUG_CHAR('1')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    ; Load pCpum into EDX
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('2')
    lidt    [edx + CPUM.Hyper.idtr]

    ; Setup stack
    DEBUG_CHAR('3')
    lss     esp, [edx + CPUM.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr     word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov     esi, [edx + CPUM.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS
    jz      htg_debug_regs_guest_no
    jmp     htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

    ; General registers.
    mov     ebx, [edx + CPUM.Hyper.ebx]
    mov     ebp, [edx + CPUM.Hyper.ebp]
    mov     esi, [edx + CPUM.Hyper.esi]
    mov     edi, [edx + CPUM.Hyper.edi]
    push    dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eip='
    mov     eax, [edx + CPUM.Hyper.eip]
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    mov     eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
%endif
    jmp     eax

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; load hyper DR0-7
    mov     ebx, [edx + CPUM.Hyper.dr0]
    mov     dr0, ebx
    mov     ecx, [edx + CPUM.Hyper.dr1]
    mov     dr1, ecx
    mov     eax, [edx + CPUM.Hyper.dr2]
    mov     dr2, eax
    mov     ebx, [edx + CPUM.Hyper.dr3]
    mov     dr3, ebx
    ;mov    eax, [edx + CPUM.Hyper.dr6]
    mov     ecx, 0ffff0ff0h             ; DR6 power-on value: no debug status bits set.
    mov     dr6, ecx
    mov     eax, [edx + CPUM.Hyper.dr7]
    mov     dr7, eax
    jmp     htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm
459
460
461;;
462; Trampoline for doing a call when starting the hyper visor execution.
463;
464; Push any arguments to the routine.
465; Push the argument frame size (cArg * 4).
466; Push the call target (_cdecl convention).
467; Push the address of this routine.
468;
469;
470ALIGNCODE(16)
471BEGINPROC vmmGCCallTrampoline
472%ifdef DEBUG_STUFF
473 COM32_S_CHAR 'c'
474 COM32_S_CHAR 't'
475 COM32_S_CHAR '!'
476%endif
477 ; Clear fs and gs.
478 xor eax, eax
479 mov gs, eax
480 mov fs, eax
481
482 ; call routine
483 pop eax ; call address
484 mov esi, edx ; save edx
 ; NOTE(review): edx is assumed to hold the CPUM pointer on entry; it is
 ; parked in esi across the call and reloaded below — confirm with callers.
485 pop edi ; argument count.
 ; edi = argument frame size in bytes (cArg * 4, per the contract above),
 ; used to pop the cdecl argument frame after the call returns.
486%ifdef DEBUG_STUFF
487 COM32_S_PRINT ';eax='
488 COM32_S_DWORD_REG eax
489 COM32_S_CHAR ';'
490%endif
491 call eax ; do call
492 add esp, edi ; cleanup stack
493
494 ; return to the host context.
 ; Push a zero 'eip' so VMMGCGuestToHostAsm pops a well-defined value into
 ; CPUM.Hyper.eip.
495 push byte 0 ; eip
496 mov edx, esi ; CPUM pointer
497
498%ifdef DEBUG_STUFF
499 COM32_S_CHAR '`'
500%endif
501 jmp NAME(VMMGCGuestToHostAsm) ; eax = returncode.
502ENDPROC vmmGCCallTrampoline
503
504
505
506;;
507; The C interface.
508;
 ; GC-side C entry point: fetches the return code argument into eax and joins
 ; the common guest-to-host path in VMMGCGuestToHostAsm.
509ALIGNCODE(16)
510BEGINPROC vmmGCGuestToHost
511%ifdef DEBUG_STUFF
512 push esi
513 COM_NEWLINE
514 DEBUG_CHAR('b')
515 DEBUG_CHAR('a')
516 DEBUG_CHAR('c')
517 DEBUG_CHAR('k')
518 DEBUG_CHAR('!')
519 COM_NEWLINE
520 pop esi
521%endif
 ; cdecl: first argument (the return code) lives at [esp + 4].
522 mov eax, [esp + 4]
523 jmp NAME(VMMGCGuestToHostAsm)
524ENDPROC vmmGCGuestToHost
525
526
527;;
528; VMMGCGuestToHostAsmGuestCtx
529;
530; Switches from Guest Context to Host Context.
531; Of course it's only called from within the GC.
532;
533; @param eax Return code.
534; @param esp + 4 Pointer to CPUMCTXCORE.
535;
536; @remark ASSUMES interrupts disabled.
537;
 ; Saves the guest register frame from the stack into CPUM.Guest, then joins
 ; the common exit path at vmmGCGuestToHostAsm_EIPDone.
538ALIGNCODE(16)
539BEGINPROC VMMGCGuestToHostAsmGuestCtx
540 DEBUG_CHAR('~')
541
542%ifdef VBOX_WITH_STATISTICS
543 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
544 mov edx, 0ffffffffh
545 STAM32_PROFILE_ADV_STOP edx
546
547 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
548 mov edx, 0ffffffffh
549 STAM32_PROFILE_ADV_START edx
550
551 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
552 mov edx, 0ffffffffh
553 STAM32_PROFILE_ADV_START edx
554%endif
555
556 ;
557 ; Load the CPUM pointer.
558 ;
559 FIXUP FIX_GC_CPUM_OFF, 1, 0
560 mov edx, 0ffffffffh
561
562 ; Skip return address (assumes called!)
563 lea esp, [esp + 4]
564
565 ;
566 ; Guest Context (assumes CPUMCTXCORE layout).
567 ;
 ; The CPUMCTXCORE is assumed to sit directly on the stack (esp points at it
 ; after the return address is skipped); the fields are popped off in order.
568 ; general purpose registers (layout is pushad)
569 pop dword [edx + CPUM.Guest.edi]
570 pop dword [edx + CPUM.Guest.esi]
571 pop dword [edx + CPUM.Guest.ebp]
572 pop dword [edx + CPUM.Guest.eax]
573 pop dword [edx + CPUM.Guest.ebx]
574 pop dword [edx + CPUM.Guest.edx]
575 pop dword [edx + CPUM.Guest.ecx]
576 pop dword [edx + CPUM.Guest.esp]
577 pop dword [edx + CPUM.Guest.ss]
578 pop dword [edx + CPUM.Guest.gs]
579 pop dword [edx + CPUM.Guest.fs]
580 pop dword [edx + CPUM.Guest.es]
581 pop dword [edx + CPUM.Guest.ds]
582 pop dword [edx + CPUM.Guest.cs]
583 ; flags
584 pop dword [edx + CPUM.Guest.eflags]
585 ; eip
586 pop dword [edx + CPUM.Guest.eip]
587 jmp vmmGCGuestToHostAsm_EIPDone
588ENDPROC VMMGCGuestToHostAsmGuestCtx
589
590
591;;
592; VMMGCGuestToHostAsmHyperCtx
593;
594; This is an alternative entry point which we'll be using
595; when the we have the hypervisor context and need to save
596; that before going to the host.
597;
598; This is typically useful when abandoning the hypervisor
599; because of a trap and want the trap state to be saved.
600;
601; @param eax Return code.
602; @param ecx Points to CPUMCTXCORE.
603; @uses eax,edx,ecx
 ; Copies the CPUMCTXCORE pointed to by ecx into CPUM.Hyper, then joins the
 ; common exit path at vmmGCGuestToHostAsm_SkipHyperRegs.
604ALIGNCODE(16)
605BEGINPROC VMMGCGuestToHostAsmHyperCtx
606 DEBUG_CHAR('#')
607
608%ifdef VBOX_WITH_STATISTICS
609 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
610 mov edx, 0ffffffffh
611 STAM32_PROFILE_ADV_STOP edx
612
613 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
614 mov edx, 0ffffffffh
615 STAM32_PROFILE_ADV_START edx
616
617 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
618 mov edx, 0ffffffffh
619 STAM32_PROFILE_ADV_START edx
620%endif
621
622 ;
623 ; Load the CPUM pointer.
624 ;
625 FIXUP FIX_GC_CPUM_OFF, 1, 0
626 mov edx, 0ffffffffh
627
 ; eax is reused as the copy scratch register below, so the return code must
 ; be parked on the stack for the duration of the copy.
628 push eax ; save return code.
629 ; general purpose registers
630 mov eax, [ecx + CPUMCTXCORE.edi]
631 mov [edx + CPUM.Hyper.edi], eax
632 mov eax, [ecx + CPUMCTXCORE.esi]
633 mov [edx + CPUM.Hyper.esi], eax
634 mov eax, [ecx + CPUMCTXCORE.ebp]
635 mov [edx + CPUM.Hyper.ebp], eax
636 mov eax, [ecx + CPUMCTXCORE.eax]
637 mov [edx + CPUM.Hyper.eax], eax
638 mov eax, [ecx + CPUMCTXCORE.ebx]
639 mov [edx + CPUM.Hyper.ebx], eax
640 mov eax, [ecx + CPUMCTXCORE.edx]
641 mov [edx + CPUM.Hyper.edx], eax
642 mov eax, [ecx + CPUMCTXCORE.ecx]
643 mov [edx + CPUM.Hyper.ecx], eax
644 mov eax, [ecx + CPUMCTXCORE.esp]
645 mov [edx + CPUM.Hyper.esp], eax
646 ; selectors
647 mov eax, [ecx + CPUMCTXCORE.ss]
648 mov [edx + CPUM.Hyper.ss], eax
649 mov eax, [ecx + CPUMCTXCORE.gs]
650 mov [edx + CPUM.Hyper.gs], eax
651 mov eax, [ecx + CPUMCTXCORE.fs]
652 mov [edx + CPUM.Hyper.fs], eax
653 mov eax, [ecx + CPUMCTXCORE.es]
654 mov [edx + CPUM.Hyper.es], eax
655 mov eax, [ecx + CPUMCTXCORE.ds]
656 mov [edx + CPUM.Hyper.ds], eax
657 mov eax, [ecx + CPUMCTXCORE.cs]
658 mov [edx + CPUM.Hyper.cs], eax
659 ; flags
660 mov eax, [ecx + CPUMCTXCORE.eflags]
661 mov [edx + CPUM.Hyper.eflags], eax
662 ; eip
663 mov eax, [ecx + CPUMCTXCORE.eip]
664 mov [edx + CPUM.Hyper.eip], eax
665 ; jump to common worker code.
666 pop eax ; restore return code.
667 jmp vmmGCGuestToHostAsm_SkipHyperRegs
668
669ENDPROC VMMGCGuestToHostAsmHyperCtx
670
671
;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when the we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; Pops the caller's return address into CPUM.Hyper.eip, saves the hypervisor
; registers, switches back through the intermediate memory context into the
; host's long mode context, and restores the host CPU state saved by
; vmmR0HostToGuestAsm.  Ends with a 64-bit far return to the cs:rip that
; vmmR0HostToGuest pushed.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    pop     dword [edx + CPUM.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov     dword [edx + CPUM.Hyper.ebx], ebx
    mov     dword [edx + CPUM.Hyper.esi], esi
    mov     dword [edx + CPUM.Hyper.edi], edi
    mov     dword [edx + CPUM.Hyper.ebp], ebp
    mov     dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
%ifdef STRICT_IF
    ; Sanity check: interrupts must still be disabled on the way out.
    pushf
    pop     ecx
    test    ecx, X86_EFL_IF
    jz      .if_clear_out
    mov     eax, 0c0ffee01h             ; magic 'IF was set' return code.
    cli
.if_clear_out:
%endif
    ; str  [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; FPU context is saved before restore of host saving (another) branch.


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!

    ;;
    ;; 0. Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Enable PAE - already enabled.

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

    ; 4. Enable long mode.
    mov     ebp, edx                    ; rdmsr/wrmsr clobber edx; park pCpum in ebp.
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_LME
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatability mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh           ; placeholder target patched by the fixup above.

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ; Move on to the HC mapping.
    ;
BITS 64
ALIGNCODE(16)
NAME(IDExit64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pHCExitTarget) wrt rip]

; 64-bit jump target
NAME(pHCExitTarget):
FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumHC):
FIXUP FIX_HC_64BIT_CPUM, 0
dq 0ffffffffffffffffh

    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')

    ; load final cr3
    mov     rsi, [rdx + CPUM.Host.cr3]
    mov     cr3, rsi
    DEBUG_CHAR('@')

    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    mov     rdx, [NAME(pCpumHC) wrt rip]
    ; activate host gdt and idt
    lgdt    [rdx + CPUM.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [rdx + CPUM.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [rdx + CPUM.Host.tr]      ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2]     ; eax <- GDTR.address + descriptor offset.
    and     dword [rax + 4], ~0200h             ; clear busy flag (2nd type2 bit)
    ltr     word [rdx + CPUM.Host.tr]
%else
    movzx   eax, word [rdx + CPUM.Host.tr]      ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2]     ; eax <- GDTR.address + descriptor offset.
    mov     ecx, [rax + 4]                      ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                            ; save original value
    and     ecx, ~0200h                         ; clear busy flag (2nd type2 bit)
    mov     [rax + 4], ecx                      ; not using xchg here is paranoia.. (was the typo 'ccx')
    ltr     word [rdx + CPUM.Host.tr]
    xchg    [rax + 4], ebx                      ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [rdx + CPUM.Host.ldtr]
    ; Restore segment registers
    mov     eax, [rdx + CPUM.Host.ds]
    mov     ds, eax
    mov     eax, [rdx + CPUM.Host.es]
    mov     es, eax
    mov     eax, [rdx + CPUM.Host.fs]
    mov     fs, eax
    mov     eax, [rdx + CPUM.Host.gs]
    mov     gs, eax
    ; restore stack
    mov     eax, [rdx + CPUM.Host.ss]
    mov     ss, eax
    mov     rsp, [rdx + CPUM.Host.rsp]

    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    ; (The old sequence loaded the high half into ebx and then immediately
    ; clobbered it with 'mov rbx, rdx', so wrmsr ran with edx = pCpum bits.)
    mov     rbx, rdx                    ; save pCpum; edx must carry the MSR high half for wrmsr.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [rbx + CPUM.Host.SysEnter.cs]
    mov     edx, [rbx + CPUM.Host.SysEnter.cs + 4]
    wrmsr                               ; MSR[ecx] <- edx:eax
    mov     rdx, rbx                    ; restore pCpum.
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz short gth_fpu_no
    mov     rcx, cr0
    and     rcx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rcx

    fxsave  [rdx + CPUM.Guest.fpu]
    fxrstor [rdx + CPUM.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these highere up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     rcx, [rdx + CPUM.Host.cr4]
    mov     cr4, rcx
    mov     rcx, [rdx + CPUM.Host.cr0]
    mov     cr0, rcx
    ;mov    rcx, [rdx + CPUM.Host.cr2] ; assumes this is waste of time.
    ;mov    cr2, rcx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp     gth_debug_regs_restore
gth_debug_regs_no:

    ; Restore MSRs - wrmsr clobbers eax/ecx/edx inputs, so park pCpum in rbx.
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    mov     eax, [rbx + CPUM.Host.FSbase]
    mov     edx, [rbx + CPUM.Host.FSbase + 4]
    wrmsr
    mov     ecx, MSR_K8_GS_BASE
    mov     eax, [rbx + CPUM.Host.GSbase]
    mov     edx, [rbx + CPUM.Host.GSbase + 4]
    wrmsr
    mov     ecx, MSR_K6_EFER
    mov     eax, [rbx + CPUM.Host.efer]
    mov     edx, [rbx + CPUM.Host.efer + 4]
    wrmsr
    mov     rdx, rbx


    ; restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    ; mov   rax, [rdx + CPUM.Host.rax] - scratch + return code
    mov     rbx, [rdx + CPUM.Host.rbx]
    ; mov   rcx, [rdx + CPUM.Host.rcx] - scratch
    ; mov   rdx, [rdx + CPUM.Host.rdx] - scratch
    mov     rdi, [rdx + CPUM.Host.rdi]
    mov     rsi, [rdx + CPUM.Host.rsi]
    mov     rsp, [rdx + CPUM.Host.rsp]
    mov     rbp, [rdx + CPUM.Host.rbp]
    ; mov   r8,  [rdx + CPUM.Host.r8 ] - scratch
    ; mov   r9,  [rdx + CPUM.Host.r9 ] - scratch
    mov     r10, [rdx + CPUM.Host.r10]
    mov     r11, [rdx + CPUM.Host.r11]
    mov     r12, [rdx + CPUM.Host.r12]
    mov     r13, [rdx + CPUM.Host.r13]
    mov     r14, [rdx + CPUM.Host.r14]
    mov     r15, [rdx + CPUM.Host.r15]

    ; finally restore flags. (probably not required)
    push    qword [rdx + CPUM.Host.rflags]
    popf


%ifdef DEBUG_STUFF
    COM64_S_CHAR '4'
%endif
    db 048h                             ; REX.W prefix so retf pops a 64-bit cs:rip (the pair pushed by vmmR0HostToGuest).
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor     eax, eax
    mov     dr7, rax                    ; paranoia or not?
    test    esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov     rax, [rdx + CPUM.Host.dr0]
    mov     dr0, rax
    mov     rbx, [rdx + CPUM.Host.dr1]
    mov     dr1, rbx
    mov     rcx, [rdx + CPUM.Host.dr2]
    mov     dr2, rcx
    mov     rax, [rdx + CPUM.Host.dr3]
    mov     dr3, rax
gth_debug_regs_dr7:
    mov     rbx, [rdx + CPUM.Host.dr6]
    mov     dr6, rbx
    mov     rcx, [rdx + CPUM.Host.dr7]
    mov     dr7, rcx
    jmp     gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm
989
990
991GLOBALNAME End
; 'End' marks the last byte of switcher code; cbCode in the definition
; structure below is NAME(End) - NAME(Start).
992;
993; The description string (in the text section).
994;
995NAME(Description):
996 db "AMD64 to/from PAE", 0
997
998extern NAME(Relocate)
999
1000;
1001; End the fixup records.
1002;
1003BEGINDATA
1004 db FIX_THE_END ; final entry.
1005GLOBALNAME FixupsEnd
; Terminator of the fixup table started at GLOBALNAME Fixups near the top of
; the file.
1006
1007;;
1008; The switcher definition structure.
; NOTE(review): presumably consumed by the VMM switcher management code (which
; also supplies the external NAME(Relocate)) to locate the entry points and
; the HC/ID/GC code ranges for relocation and disassembly — confirm.
1009ALIGNDATA(16)
1010GLOBALNAME Def
1011 istruc VMMSWITCHERDEF
1012 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1013 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1014 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1015 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1016 at VMMSWITCHERDEF.enmType, dd VMMSWITCHER_AMD64_TO_PAE
1017 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1018 at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
1019 at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
1020 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
1021 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
1022 at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
1023 at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
1024 ; disasm help
; HC0 = 64-bit host-context entry code, ID0/ID1 = identity mapped mode-switch
; stubs, GC = 32-bit hypervisor-context code, HC1 = 64-bit host-context exit.
1025 at VMMSWITCHERDEF.offHCCode0, dd 0
1026 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1027 at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
1028 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
1029 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1030 at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
1031 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1032 at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
1033 at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
1034 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
1035
1036 iend
1037
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette