VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@15187

Last change on this file since 15187 was 15187, checked in by vboxsync, 16 years ago

Compile fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.8 KB
; VMM - World Switchers, 32Bit to AMD64.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records.
; We collect the fixups in the .data section as we go along.
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
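; Explanatory note (not part of the original file): each FIXUP invocation
; below leaves a placeholder (typically 0ffffffffh) in the code stream and
; appends a record describing it to this .data section.  Conceptually such
; a record might look like the following sketch; the actual layout is
; whatever the FIXUP macro in VMMSwitcher.mac emits, so treat the field
; names and sizes here as assumptions for illustration only:
;
;       db  FIX_HC_CPUM_OFF         ; fixup type
;       dd  offCode                 ; hypothetical: code offset to patch
;       dd  uUser                   ; hypothetical: extra data, e.g. a struct offset
;
; At load time the Relocate callback walks these records from 'Fixups' to
; 'FixupsEnd' and patches each placeholder with the address that is valid
; in the target context (host, intermediate, or guest).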
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                             ; allow for far return and restore cs correctly.
    call NAME(vmmR0HostToGuestAsm)
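    ; Explanatory note (not in the original): 'push cs' followed by a near
    ; call leaves CS:EIP on the stack in exactly the layout a far return
    ; expects.  The switch-back path (VMMGCGuestToHostAsm below) ends with
    ; 'retf', which pops both values and so restores the host code selector
    ; when control returns here.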

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0HostToGuest

;*****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context).
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
;*****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context.
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str  [edx + CPUMCPU.Host.tr]
    ; flags.
    pushfd
    pop dword [edx + CPUMCPU.Host.eflags]

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    CPUM_FROM_CPUMCPU(edx)
    ; Load the new GDT so we can do a far jump after going into 64-bit mode.
    lgdt [edx + CPUM.Hyper.gdtr]

    ;;
    ;; Load the intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to the identity mapped location.
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)
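    ; Explanatory note (not in the original): the code below must run at an
    ; identity mapped address because paging is toggled while executing it;
    ; the instruction fetched after a 'mov cr0' that flips X86_CR0_PG goes
    ; through the new translation, and identity mapping keeps the virtual
    ; and physical addresses of this code equal across the transition.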


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')

    ; 1. Disable paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Enable PAE.
    mov ecx, cr4
    or  ecx, X86_CR4_PAE
    mov cr4, ecx

    ; 3. Load the long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov ecx, 0ffffffffh
    mov cr3, ecx
    DEBUG_CHAR('3')

    ; 4. Enable long mode.
    mov ebp, edx                        ; save the CPUM pointer; rdmsr/wrmsr use eax, ecx and edx.
    mov ecx, MSR_K6_EFER
    rdmsr
    or  eax, MSR_K6_EFER_LME
    wrmsr
    mov edx, ebp                        ; restore the CPUM pointer.
    DEBUG_CHAR('4')

    ; 5. Enable paging.
    or  ebx, X86_CR0_PG
    ; Disable ring-0 write protection too.
    and ebx, ~X86_CR0_WRITE_PROTECT
    mov cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp 0ffffh:0fffffffeh
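    ; Explanatory note (not in the original): once paging is enabled with
    ; EFER.LME set, the CPU activates long mode (EFER.LMA = 1) but keeps
    ; executing 32-bit code until CS is reloaded.  The far jump above, whose
    ; selector:offset is patched in by the FIX_ID_FAR32_TO_64BIT_MODE fixup,
    ; loads a code descriptor with the L bit set and completes the switch
    ; into 64-bit mode at IDEnter64Mode.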

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target.
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

    ;
    ; When we arrive here we're in 64-bit mode in the intermediate context.
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load the CPUM pointer into rdx.
    mov rdx, [NAME(pCpumIC) wrt rip]

    mov rax, cs
    mov ds, rax
    mov es, rax

    ; Set up the stack; CPUM.Hyper.lss_esp and CPUM.Hyper.ss form an
    ; esp:ss pair in memory so a single lss can load both.
    DEBUG_CHAR('7')
    mov rsp, 0
    mov eax, [rdx + CPUM.Hyper.esp]
    mov [rdx + CPUM.Hyper.lss_esp], eax
    lss esp, [rdx + CPUM.Hyper.lss_esp]
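    ; Explanatory note (not in the original): lss loads SS and ESP in one
    ; instruction, so no interrupt or NMI can observe a new stack pointer
    ; paired with a stale stack segment.  The esp value is staged into
    ; CPUM.Hyper.lss_esp first because lss takes its offset:selector
    ; operand from memory.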

    ; load the hypervisor function address.
    mov r9, [rdx + CPUM.Hyper.eip]

    CPUMCPU_FROM_CPUM(edx)

    ; Check if we need to restore the guest FPU state.
    mov esi, [rdx + CPUMCPU.fUseFlags]  ; esi == use flags.
    test esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

    mov rax, cr0
    mov rcx, rax                        ; save old CR0
    and rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov cr0, rcx                        ; and restore old CR0 again

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
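    ; Explanatory note (not in the original): CR0.TS and CR0.EM are cleared
    ; around the fxrstor because executing fxrstor with either bit set
    ; raises #NM (device not available); restoring the previous CR0 after
    ; the load preserves the host's lazy FPU switching setup.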

gth_fpu_no:
    ; Check if we need to restore the guest debug state.
    test esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

    mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov dr0, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov dr1, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov dr2, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov dr3, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov dr6, rax                        ; not required for AMD-V

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:

    ; parameter for all helper functions (pCtx).
    lea rsi, [rdx + CPUMCPU.Guest.fpu]
    call r9

    ; Load the CPUM pointer into rdx.
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM(edx)

    ; Save the return code.
    mov [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp NAME(VMMGCGuestToHostAsm)       ; rax = return code.

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM32_S_CHAR 'c'
    COM32_S_CHAR 't'
    COM32_S_CHAR '!'
%endif
    int3
ENDPROC vmmGCCallTrampoline
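; Explanatory note (not in the original): the body of this trampoline is a
; single int3, i.e. the call path is not implemented by this particular
; switcher, and reaching it traps straight to the debugger.  The calling
; convention in the header above appears to document the common switcher
; interface rather than anything this stub actually does.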


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    int3
ENDPROC vmmGCGuestToHost

;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 pointer to IDExitTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0
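; Explanatory note (not in the original): an indirect far jump reads its
; target as a 32-bit offset followed by a 16-bit selector, which is why
; the two dd slots above hold the offset (patched to IDExitTarget by the
; FIX_ID_32BIT fixup) and then the hypervisor CS.  Loading a CS descriptor
; without the L bit while long mode is active drops the CPU into 32-bit
; compatibility mode, ready for the BITS 32 code below.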

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov eax, cr4
    and eax, ~X86_CR4_PAE
    mov cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or  ebx, X86_CR0_PG
    mov cr0, ebx
    jmp short just_a_jump
just_a_jump:
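    ; Explanatory note (not in the original): jumping to the very next
    ; instruction is the traditional way of serializing instruction fetch
    ; after a 'mov cr0' that changes the paging configuration, ensuring no
    ; stale prefetched bytes are executed under the old translation.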
    DEBUG_CHAR('8')

    ;;
    ;; 5. Jump to the guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es.
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax

    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    mov esi, [edx + CPUMCPU.Host.cr3]
    mov cr3, esi

    ;; now we're in the host memory context, let's restore regs.
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)

    ; activate host gdt and idt.
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore the TSS selector; we must mark it as not busy before using ltr!
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr]     ; eax <- TR
    and   al, 0F8h                              ; mask away TI and RPL bits, get descriptor offset.
    add   eax, [edx + CPUMCPU.Host.gdtr + 2]    ; eax <- GDTR.address + descriptor offset.
    and   dword [eax + 4], ~0200h               ; clear the busy flag (second bit of the type field).
    ltr   word [edx + CPUMCPU.Host.tr]
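    ; Explanatory note (not in the original): ltr raises #GP if the TSS
    ; descriptor it references is already marked busy.  In a 32-bit TSS
    ; descriptor the type field sits in bits 8-11 of the second dword
    ; (offset +4): type 9 = available TSS, type 11 = busy TSS.  Clearing
    ; bit 9 (mask 0200h) therefore flips busy back to available so TR can
    ; be reloaded with the saved host selector.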

    ; activate ldt.
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers.
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack.
    lss esp, [edx + CPUMCPU.Host.esp]
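    ; Explanatory note (not in the original): since lss reads a 48-bit
    ; offset:selector operand from memory, this single instruction can only
    ; work if CPUMCPU.Host.ss is laid out directly after CPUMCPU.Host.esp
    ; in the structure; this mirrors the CPUM.Hyper.lss_esp trick used on
    ; the way into the guest context.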

    ; Control registers.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumed to be a waste of time.
    ;mov cr2, ecx

    ; restore general registers.
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax.
    mov eax, [edx + CPUMCPU.u32RetCode]

    push dword [edx + CPUMCPU.Host.eflags]
    popfd

    retf
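    ; Explanatory note (not in the original): this far return pops the EIP
    ; and the CS that vmmR0HostToGuest pushed ('push cs' plus a near call),
    ; so execution resumes in the host with the host code selector loaded
    ; and the return code from CPUMCPU.u32RetCode in eax.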

ENDPROC VMMGCGuestToHostAsm

;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    int3

;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax     Return code.
; @param    esp + 4 Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    int3

GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(ICExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(ICEnterTarget)               - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(ICExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.offGCCode,                    dd 0
        at VMMSWITCHERDEF.cbGCCode,                     dd 0

    iend