VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm @ 1272

Last change on this file since 1272 was 955, checked in by vboxsync, 18 years ago:

Always clear fs and gs!
; $Id: AMD64ToPAE.asm 955 2007-02-16 19:37:24Z vboxsync $
;; @file
; VMM - World Switchers, AMD64 to PAE.
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;%define DEBUG_STUFF 1
%define STRICT_IF 1

;*******************************************************************************
;*   Defined Constants And Macros                                              *
;*******************************************************************************
;; Prefix all names.
%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64ToPAE_ %+ name


;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
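; Each FIXUP below appends a small relocation record to this section;
; the relocation code referenced via VMMSWITCHERDEF.pfnRelocate (see
; NAME(Relocate) at the end of this file) walks these records to patch
; the switcher code with the final addresses and selectors.
;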
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 64

;;
; The C interface.
;
; @param    pVM     GCC: rdi  MSC: rcx  The VM handle.
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM64_S_NEWLINE
    COM64_S_CHAR '^'
%endif

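    ; Sanity check: the switcher must be entered with interrupts disabled;
    ; if EFLAGS.IF is set we bail out, returning the magic value 0c0ffee00h
    ; so the caller can tell which assertion fired.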
%ifdef STRICT_IF
    pushf
    pop     rax
    test    eax, X86_EFL_IF
    jz      .if_clear_in
    mov     eax, 0c0ffee00h
    ret
.if_clear_in:
%endif

    ;
    ; make r9 = pVM and rdx = pCpum.
    ; rax, rcx and r8 are scratch hereafter.
%ifdef __WIN64__
    mov     r9, rcx
%else
    mov     r9, rdi
%endif
    lea     rdx, [r9 + VM.cpum]

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_START r8
%endif

    ;
    ; Call worker (far return).
    ;
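    ; Pushing CS and making a near call leaves a CS:RIP far-return frame on
    ; the stack; the guest-to-host exit path returns through it with a
    ; 64-bit far return (the REX.W prefixed retf near the end of this file).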
    mov     eax, cs
    push    rax
    call    NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_STOP r8
%endif

    ret
ENDPROC vmmR0HostToGuest



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    ; mov     [rdx + CPUM.Host.rax], rax - scratch
    mov     [rdx + CPUM.Host.rbx], rbx
    ; mov     [rdx + CPUM.Host.rcx], rcx - scratch
    ; mov     [rdx + CPUM.Host.rdx], rdx - scratch
    mov     [rdx + CPUM.Host.rdi], rdi
    mov     [rdx + CPUM.Host.rsi], rsi
    mov     [rdx + CPUM.Host.rsp], rsp
    mov     [rdx + CPUM.Host.rbp], rbp
    ; mov     [rdx + CPUM.Host.r8 ], r8 - scratch
    ; mov     [rdx + CPUM.Host.r9 ], r9 - scratch
    mov     [rdx + CPUM.Host.r10], r10
    mov     [rdx + CPUM.Host.r11], r11
    mov     [rdx + CPUM.Host.r12], r12
    mov     [rdx + CPUM.Host.r13], r13
    mov     [rdx + CPUM.Host.r14], r14
    mov     [rdx + CPUM.Host.r15], r15
    ; selectors.
    mov     [rdx + CPUM.Host.ds], ds
    mov     [rdx + CPUM.Host.es], es
    mov     [rdx + CPUM.Host.fs], fs
    mov     [rdx + CPUM.Host.gs], gs
    mov     [rdx + CPUM.Host.ss], ss
    ; MSRs
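    ; (rdmsr returns edx:eax for the MSR indexed by ecx, so the CPUM
    ; pointer is parked in rbx while edx gets clobbered.)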
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.FSbase], eax
    mov     [rbx + CPUM.Host.FSbase + 4], edx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    mov     [rbx + CPUM.Host.GSbase], eax
    mov     [rbx + CPUM.Host.GSbase + 4], edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [rbx + CPUM.Host.efer], eax
    mov     [rbx + CPUM.Host.efer + 4], edx
    mov     ecx, MSR_K6_EFER
    mov     rdx, rbx
    ; special registers.
    sldt    [rdx + CPUM.Host.ldtr]
    sidt    [rdx + CPUM.Host.idtr]
    sgdt    [rdx + CPUM.Host.gdtr]
    str     [rdx + CPUM.Host.tr]        ; yasm BUG, generates sldt. YASMCHECK!
    ; flags
    pushf
    pop     qword [rdx + CPUM.Host.rflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     rbx, rdx                    ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [rbx + CPUM.Host.SysEnter.cs], eax
    mov     [rbx + CPUM.Host.SysEnter.cs + 4], edx
    xor     rax, rax                    ; load 0:0 to cause #GP upon sysenter
    xor     rdx, rdx
    wrmsr
    mov     rdx, rbx                    ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [rdx + CPUM.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz      htg_debug_regs_no
    jmp     htg_debug_regs_save
htg_debug_regs_no:
    DEBUG_CHAR('a')                     ; trashes esi

    ; control registers.
    mov     rax, cr0
    mov     [rdx + CPUM.Host.cr0], rax
    ;mov     rax, cr2                   ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov     [rdx + CPUM.Host.cr2], rax
    mov     rax, cr3
    mov     [rdx + CPUM.Host.cr3], rax
    mov     rax, cr4
    mov     [rdx + CPUM.Host.cr4], rax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions.
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and     rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov     ecx, [rdx + CPUM.Guest.cr4]
    DEBUG_CHAR('b')                     ; trashes esi
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;        in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;        simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU.
    ;
    and     ecx, [rdx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [rdx + CPUM.CR4.OrMask]
    mov     cr4, rax
    DEBUG_CHAR('c')                     ; trashes esi

    mov     eax, [rdx + CPUM.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, rax
    DEBUG_CHAR('0')                     ; trashes esi


    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt    [rdx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, rax
    DEBUG_CHAR('2')                     ; trashes esi

    ;;
    ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
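    ; The indirect far jmp loads CS:EIP from the 16:32 pointer below; the
    ; fixups patch in the identity-mapped address of IDEnterTarget and the
    ; hypervisor CS, a 32-bit code segment, dropping the CPU into
    ; compatibility mode.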
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
dd  0
    FIXUP FIX_HYPER_CS, 0
dd  0


;;
; Detour for saving the host DR7 and DR6.
; esi and rdx must be preserved.
htg_debug_regs_save:
DEBUG_S_CHAR('s');
    mov     rax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [rdx + CPUM.Host.dr7], rax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, rax
    mov     rax, dr6                    ; just in case we save the state register too.
    mov     [rdx + CPUM.Host.dr6], rax
    ; save host DR0-3?
    test    esi, CPUM_USE_DEBUG_REGS
    jz near htg_debug_regs_no
DEBUG_S_CHAR('S');
    mov     rax, dr0
    mov     [rdx + CPUM.Host.dr0], rax
    mov     rbx, dr1
    mov     [rdx + CPUM.Host.dr1], rbx
    mov     rcx, dr2
    mov     [rdx + CPUM.Host.dr2], rcx
    mov     rax, dr3
    mov     [rdx + CPUM.Host.dr3], rax
    jmp     htg_debug_regs_no


    ; We're now on identity mapped pages, in 32-bit compatibility mode.
BITS 32
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')

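    ; Leaving long mode follows the architected sequence: clear CR0.PG while
    ; in compatibility mode, load the PAE page tables, clear EFER.LME, and
    ; only then set CR0.PG again.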
    ; 2. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('4')

    ; 3. Load 32-bit intermediate page table.
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx

    ; 4. Disable long mode.
    ;    We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
    wrmsr
    DEBUG_CHAR('6')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
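    ; (The short jmp right after setting CR0.PG is presumably there to
    ; serialize execution and flush the prefetch queue across the
    ; paging-mode change.)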
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('7')

    ;;
    ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
    jmp near NAME(JmpGCTarget)


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME JmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    FIXUP FIX_HYPER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    ; Load selectors
    DEBUG_CHAR('1')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax
    ; Load pCpum into EDX
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('2')
    lidt    [edx + CPUM.Hyper.idtr]

    ; Setup stack
    DEBUG_CHAR('3')
    lss     esp, [edx + CPUM.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
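    ; (ltr raises #GP if the referenced TSS descriptor is already marked
    ; busy; 0200h is the busy bit in the second dword of the descriptor.)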
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr     word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov     esi, [edx + CPUM.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS
    jz      htg_debug_regs_guest_no
    jmp     htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

    ; General registers.
    mov     ebx, [edx + CPUM.Hyper.ebx]
    mov     ebp, [edx + CPUM.Hyper.ebp]
    mov     esi, [edx + CPUM.Hyper.esi]
    mov     edi, [edx + CPUM.Hyper.edi]
    push    dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; to the code set up by HC to run.
    ;;
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eip='
    mov     eax, [edx + CPUM.Hyper.eip]
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    mov     eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
%endif
    jmp     eax

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; load hyper DR0-7
    mov     ebx, [edx + CPUM.Hyper.dr0]
    mov     dr0, ebx
    mov     ecx, [edx + CPUM.Hyper.dr1]
    mov     dr1, ecx
    mov     eax, [edx + CPUM.Hyper.dr2]
    mov     dr2, eax
    mov     ebx, [edx + CPUM.Hyper.dr3]
    mov     dr3, ebx
    ;mov     eax, [edx + CPUM.Hyper.dr6]
    mov     ecx, 0ffff0ff0h
    mov     dr6, ecx
    mov     eax, [edx + CPUM.Hyper.dr7]
    mov     dr7, eax
    jmp     htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM32_S_CHAR 'c'
    COM32_S_CHAR 't'
    COM32_S_CHAR '!'
%endif

    ; call routine
    pop     eax                         ; call address
    mov     esi, edx                    ; save edx
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eax='
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context.
    push    byte 0                      ; eip
    mov     edx, esi                    ; CPUM pointer

%ifdef DEBUG_STUFF
    COM32_S_CHAR '`'
%endif
    jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
ENDPROC vmmGCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]
    jmp     NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax     Return code.
; @param    esp + 4 Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea     esp, [esp + 4]

    ;
    ; Guest Context (assumes CPUMCTXCORE layout).
    ;
    ; general purpose registers (layout is pushad)
    pop     dword [edx + CPUM.Guest.edi]
    pop     dword [edx + CPUM.Guest.esi]
    pop     dword [edx + CPUM.Guest.ebp]
    pop     dword [edx + CPUM.Guest.eax]
    pop     dword [edx + CPUM.Guest.ebx]
    pop     dword [edx + CPUM.Guest.edx]
    pop     dword [edx + CPUM.Guest.ecx]
    pop     dword [edx + CPUM.Guest.esp]
    pop     dword [edx + CPUM.Guest.ss]
    pop     dword [edx + CPUM.Guest.gs]
    pop     dword [edx + CPUM.Guest.fs]
    pop     dword [edx + CPUM.Guest.es]
    pop     dword [edx + CPUM.Guest.ds]
    pop     dword [edx + CPUM.Guest.cs]
    ; flags
    pop     dword [edx + CPUM.Guest.eflags]
    ; eip
    pop     dword [edx + CPUM.Guest.eip]
    jmp     vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    push    eax                         ; save return code.
    ; general purpose registers
    mov     eax, [ecx + CPUMCTXCORE.edi]
    mov     [edx + CPUM.Hyper.edi], eax
    mov     eax, [ecx + CPUMCTXCORE.esi]
    mov     [edx + CPUM.Hyper.esi], eax
    mov     eax, [ecx + CPUMCTXCORE.ebp]
    mov     [edx + CPUM.Hyper.ebp], eax
    mov     eax, [ecx + CPUMCTXCORE.eax]
    mov     [edx + CPUM.Hyper.eax], eax
    mov     eax, [ecx + CPUMCTXCORE.ebx]
    mov     [edx + CPUM.Hyper.ebx], eax
    mov     eax, [ecx + CPUMCTXCORE.edx]
    mov     [edx + CPUM.Hyper.edx], eax
    mov     eax, [ecx + CPUMCTXCORE.ecx]
    mov     [edx + CPUM.Hyper.ecx], eax
    mov     eax, [ecx + CPUMCTXCORE.esp]
    mov     [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov     eax, [ecx + CPUMCTXCORE.ss]
    mov     [edx + CPUM.Hyper.ss], eax
    mov     eax, [ecx + CPUMCTXCORE.gs]
    mov     [edx + CPUM.Hyper.gs], eax
    mov     eax, [ecx + CPUMCTXCORE.fs]
    mov     [edx + CPUM.Hyper.fs], eax
    mov     eax, [ecx + CPUMCTXCORE.es]
    mov     [edx + CPUM.Hyper.es], eax
    mov     eax, [ecx + CPUMCTXCORE.ds]
    mov     [edx + CPUM.Hyper.ds], eax
    mov     eax, [ecx + CPUMCTXCORE.cs]
    mov     [edx + CPUM.Hyper.cs], eax
    ; flags
    mov     eax, [ecx + CPUMCTXCORE.eflags]
    mov     [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov     eax, [ecx + CPUMCTXCORE.eip]
    mov     [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop     eax                         ; restore return code.
    jmp     vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    pop     dword [edx + CPUM.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov     dword [edx + CPUM.Hyper.ebx], ebx
    mov     dword [edx + CPUM.Hyper.esi], esi
    mov     dword [edx + CPUM.Hyper.edi], edi
    mov     dword [edx + CPUM.Hyper.ebp], ebp
    mov     dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
%ifdef STRICT_IF
    pushf
    pop     ecx
    test    ecx, X86_EFL_IF
    jz      .if_clear_out
    mov     eax, 0c0ffee01h
    cli
.if_clear_out:
%endif
    ; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved, and the host's restored, in a separate branch further down.


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    FIXUP FIX_INTER_PAE_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!

    ;;
    ;; 0. Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Enable PAE - already enabled.

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

    ; 4. Enable long mode.
    mov     ebp, edx
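    ; (rdmsr/wrmsr clobber edx, so the CPUM pointer is parked in ebp.)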
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_LME
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ; Move on to the HC mapping.
    ;
BITS 64
ALIGNCODE(16)
NAME(IDExit64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pHCExitTarget) wrt rip]

; 64-bit jump target
NAME(pHCExitTarget):
FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumHC):
FIXUP FIX_HC_64BIT_CPUM, 0
dq 0ffffffffffffffffh

    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')

    ; load final cr3
    mov     rsi, [rdx + CPUM.Host.cr3]
    mov     cr3, rsi
    DEBUG_CHAR('@')

    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    mov     rdx, [NAME(pCpumHC) wrt rip]
    ; activate host gdt and idt
    lgdt    [rdx + CPUM.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [rdx + CPUM.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [rdx + CPUM.Host.tr]  ; eax <- TR
    and     al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and     dword [rax + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr     word [rdx + CPUM.Host.tr]
%else
    movzx   eax, word [rdx + CPUM.Host.tr]  ; eax <- TR
    and     al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + CPUM.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    mov     ecx, [rax + 4]                  ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                        ; save original value
    and     ecx, ~0200h                     ; clear busy flag (2nd type2 bit)
    mov     [rax + 4], ecx                  ; not using xchg here is paranoia..
    ltr     word [rdx + CPUM.Host.tr]
    xchg    [rax + 4], ebx                  ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [rdx + CPUM.Host.ldtr]
    ; Restore segment registers
    mov     eax, [rdx + CPUM.Host.ds]
    mov     ds, eax
    mov     eax, [rdx + CPUM.Host.es]
    mov     es, eax
    mov     eax, [rdx + CPUM.Host.fs]
    mov     fs, eax
    mov     eax, [rdx + CPUM.Host.gs]
    mov     gs, eax
    ; restore stack
    mov     eax, [rdx + CPUM.Host.ss]
    mov     ss, eax
    mov     rsp, [rdx + CPUM.Host.rsp]

    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [rdx + CPUM.Host.SysEnter.cs]
    mov     ebx, [rdx + CPUM.Host.SysEnter.cs + 4]
    xchg    rbx, rdx                    ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    mov     rdx, rbx                    ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    mov     esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz short gth_fpu_no
    mov     rcx, cr0
    and     rcx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rcx
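    ; (With CR0.TS and CR0.EM clear, the fxsave/fxrstor pair below cannot
    ; raise #NM.)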

    fxsave  [rdx + CPUM.Guest.fpu]
    fxrstor [rdx + CPUM.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     rcx, [rdx + CPUM.Host.cr4]
    mov     cr4, rcx
    mov     rcx, [rdx + CPUM.Host.cr0]
    mov     cr0, rcx
    ;mov     rcx, [rdx + CPUM.Host.cr2] ; assumes this is a waste of time.
    ;mov     cr2, rcx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp     gth_debug_regs_restore
gth_debug_regs_no:

    ; Restore MSRs
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    mov     eax, [rbx + CPUM.Host.FSbase]
    mov     edx, [rbx + CPUM.Host.FSbase + 4]
    wrmsr
    mov     ecx, MSR_K8_GS_BASE
    mov     eax, [rbx + CPUM.Host.GSbase]
    mov     edx, [rbx + CPUM.Host.GSbase + 4]
    wrmsr
    mov     ecx, MSR_K6_EFER
    mov     eax, [rbx + CPUM.Host.efer]
    mov     edx, [rbx + CPUM.Host.efer + 4]
    wrmsr
    mov     rdx, rbx


    ; restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    ; mov     rax, [rdx + CPUM.Host.rax] - scratch + return code
    mov     rbx, [rdx + CPUM.Host.rbx]
    ; mov     rcx, [rdx + CPUM.Host.rcx] - scratch
    ; mov     rdx, [rdx + CPUM.Host.rdx] - scratch
    mov     rdi, [rdx + CPUM.Host.rdi]
    mov     rsi, [rdx + CPUM.Host.rsi]
    mov     rsp, [rdx + CPUM.Host.rsp]
    mov     rbp, [rdx + CPUM.Host.rbp]
    ; mov     r8,  [rdx + CPUM.Host.r8 ] - scratch
    ; mov     r9,  [rdx + CPUM.Host.r9 ] - scratch
    mov     r10, [rdx + CPUM.Host.r10]
    mov     r11, [rdx + CPUM.Host.r11]
    mov     r12, [rdx + CPUM.Host.r12]
    mov     r13, [rdx + CPUM.Host.r13]
    mov     r14, [rdx + CPUM.Host.r14]
    mov     r15, [rdx + CPUM.Host.r15]

    ; finally restore flags. (probably not required)
    push    qword [rdx + CPUM.Host.rflags]
    popf


%ifdef DEBUG_STUFF
    COM64_S_CHAR '4'
%endif
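    ; 048h is a REX.W prefix: the retf below pops an 8-byte RIP and CS,
    ; matching the far-return frame pushed in vmmR0HostToGuest.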
    db 048h
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor     eax, eax
    mov     dr7, rax                    ; paranoia or not?
    test    esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov     rax, [rdx + CPUM.Host.dr0]
    mov     dr0, rax
    mov     rbx, [rdx + CPUM.Host.dr1]
    mov     dr1, rbx
    mov     rcx, [rdx + CPUM.Host.dr2]
    mov     dr2, rcx
    mov     rax, [rdx + CPUM.Host.dr3]
    mov     dr3, rax
gth_debug_regs_dr7:
    mov     rbx, [rdx + CPUM.Host.dr6]
    mov     dr6, rbx
    mov     rcx, [rdx + CPUM.Host.dr7]
    mov     dr7, rcx
    jmp     gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db "AMD64 to/from PAE", 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd VMMSWITCHER_AMD64_TO_PAE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(HCExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(JmpGCTarget)                 - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)                - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(JmpGCTarget)                 - NAME(Start)
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)                - NAME(JmpGCTarget)
    iend
