VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@ 14792

Last change on this file since 14792 was 14785, checked in by vboxsync, 16 years ago

More switcher updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.3 KB
Line 
1; VMM - World Switchers, 32Bit to AMD64.
2;
3
4;
5; Copyright (C) 2006-2007 Sun Microsystems, Inc.
6;
7; This file is part of VirtualBox Open Source Edition (OSE), as
8; available from http://www.virtualbox.org. This file is free software;
9; you can redistribute it and/or modify it under the terms of the GNU
10; General Public License (GPL) as published by the Free Software
11; Foundation, in version 2 as it comes in the "COPYING" file of the
12; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
13; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
14;
15; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
16; Clara, CA 95054 USA or visit http://www.sun.com if you need
17; additional information or have any questions.
18;
19
20;%define DEBUG_STUFF 1
21;%define STRICT_IF 1
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26
27
28;*******************************************************************************
29;* Header Files *
30;*******************************************************************************
31%include "VBox/asmdefs.mac"
32%include "VBox/x86.mac"
33%include "VBox/cpum.mac"
34%include "VBox/stam.mac"
35%include "VBox/vm.mac"
36%include "CPUMInternal.mac"
37%include "VMMSwitcher/VMMSwitcher.mac"
38
39
40;
41; Start the fixup records
42; We collect the fixups in the .data section as we go along
43; It is therefore VITAL that no-one is using the .data section
44; for anything else between 'Start' and 'End'.
45;
46BEGINDATA
47GLOBALNAME Fixups                      ; Start of the fixup-record stream (grows in .data as FIXUP macros are emitted).
48
49
50
51BEGINCODE
52GLOBALNAME Start                       ; Base label; all switcher offsets below are relative to this.
53
54BITS 32                                ; The switcher is entered from a 32-bit host context.
55
56;;
57; The C interface.
58;
59BEGINPROC vmmR0HostToGuest
    ; C-callable entry point for the host-to-guest world switch.
    ; The 0ffffffffh immediates below are placeholders; the preceding FIXUP
    ; records cause them to be patched with real addresses at relocation time.
60 %ifdef DEBUG_STUFF
61 COM32_S_NEWLINE
62 COM32_S_CHAR '^'
63 %endif
64
65 %ifdef VBOX_WITH_STATISTICS
66 ;
67 ; Switcher stats.
68 ;
69 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
70 mov edx, 0ffffffffh                    ; patched: edx = &pVM->StatSwitcherToGC
71 STAM_PROFILE_ADV_START edx
72 %endif
73
74 ;
75 ; Call worker.
76 ;
77 FIXUP FIX_HC_CPUM_OFF, 1, 0
78 mov edx, 0ffffffffh                    ; patched: edx = host-context CPUM address
79 push cs ; allow for far return and restore cs correctly.
80 call NAME(vmmR0HostToGuestAsm)         ; returns via the retf in VMMGCGuestToHostAsm
81
82%ifdef VBOX_WITH_STATISTICS
83 ;
84 ; Switcher stats.
85 ;
86 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
87 mov edx, 0ffffffffh                    ; patched: edx = &pVM->StatSwitcherToHC
88 STAM_PROFILE_ADV_STOP edx
89%endif
90
91 ret
92
93ENDPROC vmmR0HostToGuest
94
95; *****************************************************************************
96; vmmR0HostToGuestAsm
97;
98; Phase one of the switch from host to guest context (host MMU context)
99;
100; INPUT:
101; - edx virtual address of CPUM structure (valid in host context)
102;
103; USES/DESTROYS:
104; - eax, ecx, edx, esi
105;
106; ASSUMPTION:
107; - current CS and DS selectors are wide open
108;
109; *****************************************************************************
110ALIGNCODE(16)
111BEGINPROC vmmR0HostToGuestAsm
112 ;;
113 ;; Save CPU host context
114 ;; Skip eax, edx and ecx as these are not preserved over calls.
115 ;;
116 CPUMCPU_FROM_CPUM(edx)                 ; edx: CPUM pointer -> per-CPU CPUMCPU pointer
117 ; general registers.
118 mov [edx + CPUMCPU.Host.ebx], ebx
119 mov [edx + CPUMCPU.Host.edi], edi
120 mov [edx + CPUMCPU.Host.esi], esi
121 mov [edx + CPUMCPU.Host.esp], esp
122 mov [edx + CPUMCPU.Host.ebp], ebp
123 ; selectors.
124 mov [edx + CPUMCPU.Host.ds], ds
125 mov [edx + CPUMCPU.Host.es], es
126 mov [edx + CPUMCPU.Host.fs], fs
127 mov [edx + CPUMCPU.Host.gs], gs
128 mov [edx + CPUMCPU.Host.ss], ss
129 ; special registers.
130 sldt [edx + CPUMCPU.Host.ldtr]
131 sidt [edx + CPUMCPU.Host.idtr]
132 sgdt [edx + CPUMCPU.Host.gdtr]
133 str [edx + CPUMCPU.Host.tr]
134 ; flags
135 pushfd
136 pop dword [edx + CPUMCPU.Host.eflags]
137
138 ;; handle use flags.
139 mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
140 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
141 mov [edx + CPUMCPU.fUseFlags], esi
142
143 ; control registers.
144 mov eax, cr0
145 mov [edx + CPUMCPU.Host.cr0], eax
146 ;Skip cr2; assume host os don't stuff things in cr2. (safe)
147 mov eax, cr3
148 mov [edx + CPUMCPU.Host.cr3], eax
149 mov eax, cr4
150 mov [edx + CPUMCPU.Host.cr4], eax
151
152 CPUM_FROM_CPUMCPU(edx)                 ; edx back to the CPUM pointer for the Hyper fields
153 ; Load new gdt so we can do far jump after going into 64 bits mode
154 lgdt [edx + CPUM.Hyper.gdtr]
155
156 ;;
157 ;; Load Intermediate memory context.
158 ;;
159 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
160 mov eax, 0ffffffffh                    ; patched: intermediate (host-compatible) CR3
161 mov cr3, eax
162 DEBUG_CHAR('?')
163
164 ;;
165 ;; Jump to identity mapped location
166 ;;
167 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
168 jmp near NAME(IDEnterTarget)
169
170
171 ; We're now on identity mapped pages!
172ALIGNCODE(16)
173GLOBALNAME IDEnterTarget
174 DEBUG_CHAR('2')
175
176 ; 1. Disable paging.
177 mov ebx, cr0
178 and ebx, ~X86_CR0_PG
179 mov cr0, ebx
180 DEBUG_CHAR('2')
181
182 ; 2. Enable PAE.
183 mov ecx, cr4
184 or ecx, X86_CR4_PAE
185 mov cr4, ecx
186
187 ; 3. Load long mode intermediate CR3.
188 FIXUP FIX_INTER_AMD64_CR3, 1
189 mov ecx, 0ffffffffh                    ; patched: AMD64 intermediate CR3
190 mov cr3, ecx
191 DEBUG_CHAR('3')
192
193 ; 4. Enable long mode.
194 mov ebp, edx                           ; preserve edx (CPUM ptr) across rdmsr/wrmsr, which clobber edx
195 mov ecx, MSR_K6_EFER
196 rdmsr
197 or eax, MSR_K6_EFER_LME
198 wrmsr
199 mov edx, ebp
200 DEBUG_CHAR('4')
201
202 ; 5. Enable paging.
203 or ebx, X86_CR0_PG                     ; PG + LME + PAE-CR3 => long mode becomes active
204 mov cr0, ebx
205 DEBUG_CHAR('5')
206
207 ; Jump from compatibility mode to 64-bit mode.
208 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
209 jmp 0ffffh:0fffffffeh                  ; placeholder far target; patched by the fixup above
210
211 ;
212 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
213BITS 64
214ALIGNCODE(16)
215NAME(IDEnter64Mode):
216 DEBUG_CHAR('6')
217 jmp [NAME(pICEnterTarget) wrt rip]
218
219; 64-bit jump target
220NAME(pICEnterTarget):
221FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
222dq 0ffffffffffffffffh                   ; patched: 64-bit address of ICEnterTarget
223
224; 64-bit pCpum address.
225NAME(pCpumIC):
226FIXUP FIX_HC_64BIT_CPUM, 0
227dq 0ffffffffffffffffh                   ; patched: 64-bit CPUM address
228
229 ;
230 ; When we arrive here we're in 64 bits mode in the intermediate context
231 ;
232ALIGNCODE(16)
233GLOBALNAME ICEnterTarget
234 ; Load CPUM pointer into rdx
235 mov rdx, [NAME(pCpumIC) wrt rip]
236
237 mov rax, cs                            ; load a known-good selector (CS) into the data segment regs
238 mov ds, rax
239 mov es, rax
240 mov fs, rax
241 mov gs, rax
242
243 ; Setup stack; use the lss_esp, ss pair for lss
244 DEBUG_CHAR('7')
245 mov rsp, 0                             ; clear all 64 bits before loading only esp:ss below
246 mov eax, [rdx + CPUM.Hyper.esp]
247 mov [rdx + CPUM.Hyper.lss_esp], eax
248 lss esp, [rdx + CPUM.Hyper.lss_esp]
249
250 ; call the hypervisor function with rdx=pCpumCpu
251 mov eax, [rdx + r8 + CPUM.Hyper.eip]   ; NOTE(review): r8 is never initialized on this path - the '+ r8' term looks erroneous; verify against other revisions
252 CPUMCPU_FROM_CPUM(rdx)
253 call rax                               ; rax high bits are zero (32-bit load above zero-extends)
254
255 ; Load CPUM pointer into rdx
256 mov rdx, [NAME(pCpumIC) wrt rip]
257 CPUMCPU_FROM_CPUM(rdx)
258
259 ; Save the return code
260 mov [rdx + CPUMCPU.u32RetCode], eax
261
262 ; now let's switch back
263 jmp NAME(VMMGCGuestToHostAsm) ; rax = returncode.
264
265ENDPROC vmmR0HostToGuestAsm
266
267
268;;
269; Trampoline for doing a call when starting the hypervisor execution.
270;
271; Push any arguments to the routine.
272; Push the argument frame size (cArg * 4).
273; Push the call target (_cdecl convention).
274; Push the address of this routine.
275;
276;
277BITS 64
278ALIGNCODE(16)
279BEGINPROC vmmGCCallTrampoline
    ; Not implemented in this switcher: traps with int3 so any use is caught.
280%ifdef DEBUG_STUFF
281 COM32_S_CHAR 'c'                       ; NOTE(review): COM32_S_* macros used under BITS 64 - confirm they assemble in 64-bit mode
282 COM32_S_CHAR 't'
283 COM32_S_CHAR '!'
284%endif
285 int3
286ENDPROC vmmGCCallTrampoline
287
288
289;;
290; The C interface.
291;
292BITS 64
293ALIGNCODE(16)
294BEGINPROC vmmGCGuestToHost
    ; Not implemented in this switcher: traps with int3 so any use is caught.
295%ifdef DEBUG_STUFF
296 push esi                               ; NOTE(review): 'push esi' is not encodable in 64-bit mode (BITS 64 in effect) - this debug path likely fails to assemble; verify
297 COM_NEWLINE
298 DEBUG_CHAR('b')
299 DEBUG_CHAR('a')
300 DEBUG_CHAR('c')
301 DEBUG_CHAR('k')
302 DEBUG_CHAR('!')
303 COM_NEWLINE
304 pop esi
305%endif
306 int3
307ENDPROC vmmGCGuestToHost
308
309;;
310; VMMGCGuestToHostAsm
311;
312; This is an alternative entry point which we'll be using
313; when we have saved the guest state already or we haven't
314; been messing with the guest at all.
315;
316; @param eax Return code.
317; @uses eax, edx, ecx (or it may use them in the future)
318;
319BITS 64
320ALIGNCODE(16)
321BEGINPROC VMMGCGuestToHostAsm
    ; Entered in 64-bit mode with rdx = CPUM pointer and eax = return code
    ; (see header above). Walks back down: 64-bit -> compatibility/identity
    ; mapped -> 32-bit host context, then restores the saved host state.
322 CPUMCPU_FROM_CPUM(rdx)
323 FIXUP FIX_INTER_AMD64_CR3, 1
324 mov rax, 0ffffffffh                    ; patched: AMD64 intermediate CR3
325 mov cr3, rax
326 ;; We're now in the intermediate memory context!
327
328 ;;
329 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
330 ;;
331 jmp far [NAME(fpIDEnterTarget) wrt rip]
332
333; 16:32 Pointer to IDEnterTarget.
334NAME(fpIDEnterTarget):
335 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
336dd 0                                    ; patched: 32-bit offset of IDExitTarget
337 FIXUP FIX_HYPER_CS, 0
338dd 0                                    ; patched: hypervisor CS selector
339
340 ; We're now on identity mapped pages!
341ALIGNCODE(16)
342GLOBALNAME IDExitTarget
343BITS 32
344 DEBUG_CHAR('1')
345
346 ; 1. Deactivate long mode by turning off paging.
347 mov ebx, cr0
348 and ebx, ~X86_CR0_PG
349 mov cr0, ebx
350 DEBUG_CHAR('2')
351
352 ; 2. Load intermediate page table.
353 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
354 mov edx, 0ffffffffh                    ; patched: host-compatible intermediate CR3
355 mov cr3, edx
356 DEBUG_CHAR('3')
357
358 ; 3. Disable long mode.
359 mov ecx, MSR_K6_EFER
360 rdmsr
361 DEBUG_CHAR('5')
362 and eax, ~(MSR_K6_EFER_LME)
363 wrmsr
364 DEBUG_CHAR('6')
365
366%ifndef NEED_PAE_ON_HOST
367 ; 3b. Disable PAE.
368 mov eax, cr4
369 and eax, ~X86_CR4_PAE
370 mov cr4, eax
371 DEBUG_CHAR('7')
372%endif
373
374 ; 4. Enable paging.
375 or ebx, X86_CR0_PG
376 mov cr0, ebx
377 jmp short just_a_jump                  ; serializing jump after re-enabling paging
378just_a_jump:
379 DEBUG_CHAR('8')
380
381 ;;
382 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
383 ;;
384 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
385 jmp near NAME(ICExitTarget)
386
387 ;;
388 ;; When we arrive at this label we're at the
389 ;; intermediate mapping of the switching code.
390 ;;
391BITS 32
392ALIGNCODE(16)
393GLOBALNAME ICExitTarget
394 DEBUG_CHAR('8')
395 FIXUP FIX_HC_CPUM_OFF, 1, 0
396 mov edx, 0ffffffffh                    ; patched: host-context CPUM address
397 CPUMCPU_FROM_CPUM(edx)
398 mov esi, [edx + CPUMCPU.Host.cr3]
399 mov cr3, esi                           ; back to the host's own page tables
400
401 ;; now we're in host memory context, let's restore regs
402
403 ; activate host gdt and idt
404 lgdt [edx + CPUMCPU.Host.gdtr]
405 DEBUG_CHAR('0')
406 lidt [edx + CPUMCPU.Host.idtr]
407 DEBUG_CHAR('1')
408
409 ; Restore TSS selector; must mark it as not busy before using ltr (!)
410 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
411 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
412 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
413 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
414 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
415 ltr word [edx + CPUMCPU.Host.tr]
416
417 ; activate ldt
418 DEBUG_CHAR('2')
419 lldt [edx + CPUMCPU.Host.ldtr]
420 ; Restore segment registers
421 mov eax, [edx + CPUMCPU.Host.ds]
422 mov ds, eax
423 mov eax, [edx + CPUMCPU.Host.es]
424 mov es, eax
425 mov eax, [edx + CPUMCPU.Host.fs]
426 mov fs, eax
427 mov eax, [edx + CPUMCPU.Host.gs]
428 mov gs, eax
429 ; restore stack
430 lss esp, [edx + CPUMCPU.Host.esp]
431
432 ; Restore FPU if guest has used it.
433 ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
434 mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
435 test esi, CPUM_USED_FPU
436 jz near gth_fpu_no
437 mov ecx, cr0
438 and ecx, ~(X86_CR0_TS | X86_CR0_EM)    ; clear TS/EM so the fx* insns below don't fault
439 mov cr0, ecx
440
441 fxsave [edx + CPUMCPU.Guest.fpu]       ; stash the guest FPU state before pulling in the host's
442 fxrstor [edx + CPUMCPU.Host.fpu]
443 jmp near gth_fpu_no
444
445ALIGNCODE(16)
446gth_fpu_no:
447
448 ; Control registers.
449 ; Would've liked to have these higher up in case of crashes, but
450 ; the fpu stuff must be done before we restore cr0.
451 mov ecx, [edx + CPUMCPU.Host.cr4]
452 mov cr4, ecx
453 mov ecx, [edx + CPUMCPU.Host.cr0]
454 mov cr0, ecx
455 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
456 ;mov cr2, ecx
457
458 ; restore general registers.
459 mov edi, [edx + CPUMCPU.Host.edi]
460 mov esi, [edx + CPUMCPU.Host.esi]
461 mov ebx, [edx + CPUMCPU.Host.ebx]
462 mov ebp, [edx + CPUMCPU.Host.ebp]
463
464 ; store the return code in eax
465 mov eax, [edx + CPUMCPU.u32RetCode]
466
467 push dword [edx + CPUMCPU.Host.eflags]
468 popfd
469
470 retf                                   ; far return - pairs with the 'push cs' in vmmR0HostToGuest
471
472ENDPROC VMMGCGuestToHostAsm
473
474;;
475; VMMGCGuestToHostAsmHyperCtx
476;
477; This is an alternative entry point which we'll be using
478; when we have the hypervisor context and need to save
479; that before going to the host.
480;
481; This is typically useful when abandoning the hypervisor
482; because of a trap and want the trap state to be saved.
483;
484; @param eax Return code.
485; @param ecx Points to CPUMCTXCORE.
486; @uses eax,edx,ecx
487ALIGNCODE(16)
488BEGINPROC VMMGCGuestToHostAsmHyperCtx
489 int3                                   ; not implemented in this switcher - trap so misuse is caught immediately
490
491;;
492; VMMGCGuestToHostAsmGuestCtx
493;
494; Switches from Guest Context to Host Context.
495; Of course it's only called from within the GC.
496;
497; @param eax Return code.
498; @param esp + 4 Pointer to CPUMCTXCORE.
499;
500; @remark ASSUMES interrupts disabled.
501;
502ALIGNCODE(16)
503BEGINPROC VMMGCGuestToHostAsmGuestCtx
504 int3                                   ; not implemented in this switcher - trap so misuse is caught immediately
505
506GLOBALNAME End                          ; end of the switcher code image (cbCode = End - Start)
507;
508; The description string (in the text section).
509;
510NAME(Description):
511 db SWITCHER_DESCRIPTION
512 db 0                                   ; zero terminator
513
514extern NAME(Relocate)
515
516;
517; End the fixup records.
518;
519BEGINDATA
520 db FIX_THE_END ; final entry.
521GLOBALNAME FixupsEnd
522
523;;
524; The switcher definition structure.
525ALIGNDATA(16)
526GLOBALNAME Def
    ; The switcher definition structure consumed by the VMM switcher framework:
    ; entry-point offsets plus code-region descriptors (for the disassembly
    ; helper), all relative to NAME(Start).
527 istruc VMMSWITCHERDEF
528 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
529 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
530 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
531 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
532 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
533 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
534 at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
535 at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
536 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
537 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
538 at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
539 at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
540 ; disasm help
541 at VMMSWITCHERDEF.offHCCode0, dd 0
542 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
543 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
544 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
545 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
546 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
547 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
548 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(IDExitTarget) ; fixed: was '- NAME(Start)', an offset rather than a size, inconsistent with cbIDCode0
549 at VMMSWITCHERDEF.offGCCode, dd 0
550 at VMMSWITCHERDEF.cbGCCode, dd 0
551
552 iend
553
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette