VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac @ r17639

Last change on this file since r17639 was r16859, checked in by vboxsync on 2009-02-17:

Load hypervisor CR3 from CPUM (instead of hardcoded fixups in the switchers). Dangerous change. Watch for regressions.

; $Id: PAEand32Bit.mac 16859 2009-02-17 16:19:51Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
 %define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
 %define NEED_ID
%endif
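; NEED_ID means the host and hypervisor paging modes differ (PAE vs. plain
; 32-bit), so the switch must clear CR0.PG and toggle CR4.PAE in between.
; That is only safe from code that is identity mapped in both contexts,
; which is what the IDEnterTarget/IDExitTarget blocks below provide.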



;
; Start the fixup records
;    We collect the fixups in the .data section as we go along
;    It is therefore VITAL that no-one is using the .data section
;    for anything else between 'Start' and 'End'.
;
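; Each FIXUP invocation in the code below appends a record here (fixup type,
; patch-site offset, optional operands); the Relocate callback declared near
; the end of this file presumably uses these records to patch the placeholder
; constants (0ffffffffh, 0deadfaceh, ...) once the real addresses are known.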
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

;;
; The C interface.
;
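; Presumed C-side view (hypothetical prototype, for illustration only):
;
;   DECLASM(int) vmmR0HostToGuest(void);
;
; No register arguments are read here; the CPUM/VM addresses are patched in
; via the FIX_HC_* fixups below, and the eventual VBox status code travels
; back in eax through the VMMGCGuestToHostAsm* paths.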
BEGINPROC vmmR0HostToGuest

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0HostToGuest



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;;  Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop     dword [edx + CPUMCPU.Host.eflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     ebx, edx                    ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov     [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor     eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor     edx, edx
    wrmsr
    xchg    ebx, edx                    ; restore edx
    jmp short htg_no_sysenter
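    ; Note: rdmsr/wrmsr take the MSR index in ecx and the value in edx:eax.
    ; Zeroing MSR_IA32_SYSENTER_CS makes any sysenter executed while the
    ; hypervisor is active raise #GP instead of entering the host kernel;
    ; the saved value is written back in the guest-to-host path below.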

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz      htg_debug_regs_no
    jmp     htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ;mov     eax, cr2                   ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov     [edx + CPUMCPU.Host.cr2], eax
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and     eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov     ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher, putting it
    ;        in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;        simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    CPUM_FROM_CPUMCPU(edx)
    and     ecx, [edx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [edx + CPUM.CR4.OrMask]
    mov     cr4, eax
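    ; Net effect: cr4 = (host_cr4 & (MCE|PSE|PAE|VMXE))
    ;                 | (guest_cr4 & CPUM.CR4.AndMask)
    ;                 | CPUM.CR4.OrMask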

    CPUMCPU_FROM_CPUM(edx)
    mov     eax, [edx + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, eax
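    ; Net effect: cr0 = (guest_cr0 & EM) | (PE|PG|TS|ET|NE|MP). Only the
    ; guest's EM bit survives; TS is forced so the first FPU/MMX/SSE use
    ; traps (#NM), presumably letting the lazy CPUM_USED_FPU handling kick
    ; in, and WP is omitted from the OR mask ('disable WP' above).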

    CPUM_FROM_CPUMCPU(edx)
    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt    [edx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov     ebp, [edx + CPUM.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('2')                     ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov     edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or      edx, X86_CR4_PAE
%else
    and     edx, ~X86_CR4_PAE
%endif
    mov     eax, cr0
    and     eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov     cr0, eax
    DEBUG_CHAR('4')
    mov     cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    or      eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov     cr0, eax
    DEBUG_CHAR('6')
%endif

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp     0fff8h:0deadfaceh
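    ; The 0fff8h:0deadfaceh operand is a placeholder: the FIX_GC_FAR32 record
    ; above has the relocation code patch in the hypervisor CS selector and
    ; the guest-context address of FarJmpGCTarget, so this far jmp is what
    ; actually loads the final CS.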


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov     cr3, ebp                    ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt    [edx + CPUM.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax

    ; Setup stack; use the lss_esp, ss pair for lss
    DEBUG_CHAR('3')
    mov     eax, [edx + CPUM.Hyper.esp]
    mov     [edx + CPUM.Hyper.lss_esp], eax
    lss     esp, [edx + CPUM.Hyper.lss_esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
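    ; In the second dword of a 32-bit TSS descriptor the type field occupies
    ; bits 8-11; bit 9 (0200h) is the busy bit. ltr sets that bit and raises
    ; #GP if it is already set, hence the explicit clearing above.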
    DEBUG_CHAR('5')
    ltr     word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    CPUMCPU_FROM_CPUM(edx)
    ;; use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags]
    CPUM_FROM_CPUMCPU(edx)

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS
    jz      htg_debug_regs_guest_no
    jmp     htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov     esi, edx
    ; clear all PerfEvtSeln registers
    xor     eax, eax
    xor     edx, edx
    mov     ecx, MSR_K7_PERFCTR0
    wrmsr
    mov     ecx, MSR_K7_PERFCTR1
    wrmsr
    mov     ecx, MSR_K7_PERFCTR2
    wrmsr
    mov     ecx, MSR_K7_PERFCTR3
    wrmsr

    mov     eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov     eax, 02329B000h
    mov     edx, 0fffffffeh             ; -1.6GHz * 5
    mov     ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov     eax, 0f0f0f0f0h
    add     eax, 0340h                  ; APIC_LVTPC
    mov     dword [eax], 0400h          ; APIC_DM_NMI

    xor     edx, edx
    mov     eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov     edx, esi
%endif

    ; General registers.
    mov     ebx, [edx + CPUM.Hyper.ebx]
    mov     ebp, [edx + CPUM.Hyper.ebp]
    mov     esi, [edx + CPUM.Hyper.esi]
    mov     edi, [edx + CPUM.Hyper.edi]
    push    dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    mov     eax, [edx + CPUM.Hyper.eip]
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    mov     eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
%endif
    jmp     eax

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    mov     eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [edx + CPUMCPU.Host.dr7], eax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, eax
    mov     eax, dr6                    ; just in case we save the state register too.
    mov     [edx + CPUMCPU.Host.dr6], eax
    jmp     htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    CPUMCPU_FROM_CPUM(edx)
    ; save host DR0-3.
    mov     eax, dr0
    mov     [edx + CPUMCPU.Host.dr0], eax
    mov     ebx, dr1
    mov     [edx + CPUMCPU.Host.dr1], ebx
    mov     ecx, dr2
    mov     [edx + CPUMCPU.Host.dr2], ecx
    mov     eax, dr3
    mov     [edx + CPUMCPU.Host.dr3], eax
    CPUM_FROM_CPUMCPU(edx)

    ; load hyper DR0-7
    mov     ebx, [edx + CPUM.Hyper.dr]
    mov     dr0, ebx
    mov     ecx, [edx + CPUM.Hyper.dr + 8*1]
    mov     dr1, ecx
    mov     eax, [edx + CPUM.Hyper.dr + 8*2]
    mov     dr2, eax
    mov     ebx, [edx + CPUM.Hyper.dr + 8*3]
    mov     dr3, ebx
    ;mov     eax, [edx + CPUM.Hyper.dr + 8*6]
    mov     ecx, 0ffff0ff0h
    mov     dr6, ecx
    mov     eax, [edx + CPUM.Hyper.dr + 8*7]
    mov     dr7, eax
    jmp     htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
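; Illustrative stack layout on entry here (top of stack first):
;
;   [esp]       pfnTarget         ; popped into eax and called
;   [esp+4]     cbArgs (cArg*4)   ; popped into edi, used to pop the args
;   [esp+8]...  arguments         ; cdecl order
;
; The 'address of this routine' pushed last is presumably what the switcher
; resumes at (via CPUM.Hyper.eip), so it has already been consumed when we
; arrive. After the call, eax holds the return code and a zero 'eip' is
; pushed so VMMGCGuestToHostAsm stores 0 into CPUM.Hyper.eip.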
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop     eax                         ; call address
    mov     esi, edx                    ; save edx
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context.
    push    byte 0                      ; eip
    mov     edx, esi                    ; CPUM pointer

%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
    jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
ENDPROC vmmGCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]
    jmp     NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param eax        Return code.
; @param esp + 4    Pointer to CPUMCTXCORE.
;
; @remark ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea     esp, [esp + 4]

    ;
    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    ;
    ; general purpose registers.
    push    eax

    CPUMCPU_FROM_CPUM(edx)
    mov     eax, [esp + 4 + CPUMCTXCORE.eax]
    mov     [edx + CPUMCPU.Guest.eax], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.ecx]
    mov     [edx + CPUMCPU.Guest.ecx], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.edx]
    mov     [edx + CPUMCPU.Guest.edx], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.ebx]
    mov     [edx + CPUMCPU.Guest.ebx], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.esp]
    mov     [edx + CPUMCPU.Guest.esp], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.ebp]
    mov     [edx + CPUMCPU.Guest.ebp], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.esi]
    mov     [edx + CPUMCPU.Guest.esi], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.edi]
    mov     [edx + CPUMCPU.Guest.edi], eax
    mov     eax, dword [esp + 4 + CPUMCTXCORE.es]
    mov     dword [edx + CPUMCPU.Guest.es], eax
    mov     eax, dword [esp + 4 + CPUMCTXCORE.cs]
    mov     dword [edx + CPUMCPU.Guest.cs], eax
    mov     eax, dword [esp + 4 + CPUMCTXCORE.ss]
    mov     dword [edx + CPUMCPU.Guest.ss], eax
    mov     eax, dword [esp + 4 + CPUMCTXCORE.ds]
    mov     dword [edx + CPUMCPU.Guest.ds], eax
    mov     eax, dword [esp + 4 + CPUMCTXCORE.fs]
    mov     dword [edx + CPUMCPU.Guest.fs], eax
    mov     eax, dword [esp + 4 + CPUMCTXCORE.gs]
    mov     dword [edx + CPUMCPU.Guest.gs], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.eflags]
    mov     dword [edx + CPUMCPU.Guest.eflags], eax
    mov     eax, [esp + 4 + CPUMCTXCORE.eip]
    mov     dword [edx + CPUMCPU.Guest.eip], eax
    pop     eax
    CPUM_FROM_CPUMCPU(edx)

    add     esp, CPUMCTXCORE_size       ; skip CPUMCTXCORE structure

    jmp     vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically used when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param eax    Return code.
; @param ecx    Points to CPUMCTXCORE.
; @uses eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    push    eax                         ; save return code.
    ; general purpose registers
    mov     eax, [ecx + CPUMCTXCORE.edi]
    mov     [edx + CPUM.Hyper.edi], eax
    mov     eax, [ecx + CPUMCTXCORE.esi]
    mov     [edx + CPUM.Hyper.esi], eax
    mov     eax, [ecx + CPUMCTXCORE.ebp]
    mov     [edx + CPUM.Hyper.ebp], eax
    mov     eax, [ecx + CPUMCTXCORE.eax]
    mov     [edx + CPUM.Hyper.eax], eax
    mov     eax, [ecx + CPUMCTXCORE.ebx]
    mov     [edx + CPUM.Hyper.ebx], eax
    mov     eax, [ecx + CPUMCTXCORE.edx]
    mov     [edx + CPUM.Hyper.edx], eax
    mov     eax, [ecx + CPUMCTXCORE.ecx]
    mov     [edx + CPUM.Hyper.ecx], eax
    mov     eax, [ecx + CPUMCTXCORE.esp]
    mov     [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov     eax, [ecx + CPUMCTXCORE.ss]
    mov     [edx + CPUM.Hyper.ss], eax
    mov     eax, [ecx + CPUMCTXCORE.gs]
    mov     [edx + CPUM.Hyper.gs], eax
    mov     eax, [ecx + CPUMCTXCORE.fs]
    mov     [edx + CPUM.Hyper.fs], eax
    mov     eax, [ecx + CPUMCTXCORE.es]
    mov     [edx + CPUM.Hyper.es], eax
    mov     eax, [ecx + CPUMCTXCORE.ds]
    mov     [edx + CPUM.Hyper.ds], eax
    mov     eax, [ecx + CPUMCTXCORE.cs]
    mov     [edx + CPUM.Hyper.cs], eax
    ; flags
    mov     eax, [ecx + CPUMCTXCORE.eflags]
    mov     [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov     eax, [ecx + CPUMCTXCORE.eip]
    mov     [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop     eax                         ; restore return code.
    jmp     vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state or haven't
; been messing with the guest at all.
;
; @param eax    Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh

    pop     dword [edx + CPUM.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov     dword [edx + CPUM.Hyper.ebx], ebx
    mov     dword [edx + CPUM.Hyper.esi], esi
    mov     dword [edx + CPUM.Hyper.edi], edi
    mov     dword [edx + CPUM.Hyper.ebp], ebp
    mov     dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str     [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved before the host state is restored, in a separate branch below.

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov     esi, edx
    mov     edi, eax

    xor     edx, edx
    xor     eax, eax
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov     eax, edi
    mov     edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    CPUMCPU_FROM_CPUM(edx)
    mov     ecx, [edx + CPUMCPU.Host.cr3]
    CPUM_FROM_CPUMCPU(edx)
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov     edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and     edx, ~X86_CR4_PAE
%else
    or      edx, X86_CR4_PAE
%endif
    mov     eax, cr0
    and     eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov     cr0, eax
    DEBUG_CHAR('2')
    mov     cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    or      eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov     cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov     cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr]   ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h             ; clear busy flag (2nd type2 bit)
    ltr     word [edx + CPUMCPU.Host.tr]
%else
    movzx   eax, word [edx + CPUMCPU.Host.tr]   ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    mov     ecx, [eax + 4]                      ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                            ; save original value
    and     ecx, ~0200h                         ; clear busy flag (2nd type2 bit)
    mov     [eax + 4], ecx                      ; not using xchg here is paranoia..
    ltr     word [edx + CPUMCPU.Host.tr]
    xchg    [eax + 4], ebx                      ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov     ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg    edx, ebx                    ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    xchg    edx, ebx                    ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if the guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov     ecx, cr0
    and     ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave  [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave  [edx + CPUMCPU.Guest.fpu]
    mov     eax, [edx + CPUMCPU.Host.fpu]       ; control word
    not     eax                                 ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh                      ; 6 LS bits only
    test    eax, [edx + CPUMCPU.Host.fpu + 4]   ; status word
    jz      gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor  [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no
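    ; Worked example of the masking logic above: with a typical host control
    ; word of 037Fh (all six exceptions masked), ~CW & 3Fh = 0, so nothing
    ; can be pending unmasked and frstor is safe. If an exception were
    ; unmasked and its status bit set, frstor would re-arm it and the next
    ; host FP instruction would raise #MF; clearing the low six status bits
    ; avoids that, at the cost of losing those flags.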

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov     ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
    ;mov     cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp     gth_debug_regs_restore
gth_debug_regs_no:

    ; restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]
    push    dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor     eax, eax
    mov     dr7, eax                    ; paranoia or not?
    test    esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov     eax, [edx + CPUMCPU.Host.dr0]
    mov     dr0, eax
    mov     ebx, [edx + CPUMCPU.Host.dr1]
    mov     dr1, ebx
    mov     ecx, [edx + CPUMCPU.Host.dr2]
    mov     dr2, ecx
    mov     eax, [edx + CPUMCPU.Host.dr3]
    mov     dr3, eax
gth_debug_regs_dr7:
    mov     ebx, [edx + CPUMCPU.Host.dr6]
    mov     dr6, ebx
    mov     ecx, [edx + CPUMCPU.Host.dr7]
    mov     dr7, ecx
    jmp     gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
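; All entry points are recorded as offsets relative to Start so they stay
; valid wherever the code blob is mapped (host, guest and identity contexts);
; pfnRelocate presumably walks the fixup records collected between Fixups and
; FixupsEnd to patch the placeholder constants whenever the mappings change.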
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(FarJmpGCTarget)              - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(FarJmpGCTarget)              - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)                - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,                   dd 0
        at VMMSWITCHERDEF.cbIDCode0,                    dd 0
        at VMMSWITCHERDEF.offIDCode1,                   dd 0
        at VMMSWITCHERDEF.cbIDCode1,                    dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(FarJmpGCTarget)              - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)                - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(HCExitTarget)                - NAME(FarJmpGCTarget)
%endif

    iend