VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac@4501

Last change on this file since 4501 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

; $Id: PAEand32Bit.mac 4071 2007-08-07 17:07:59Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2007 innotek GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.

;%define DEBUG_STUFF 1

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
%define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
%define NEED_ID
%endif


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups
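
; How the records are used (an informal sketch inferred from the code below;
; the authoritative record layout lives in VMMSwitcher.mac and the Relocate
; code): each FIXUP names a fixup type, the byte offset of the placeholder
; within the following instruction, and an optional target or addend, e.g.:
;
;       FIXUP FIX_HC_CPUM_OFF, 1, 0
;       mov edx, 0ffffffffh             ; offset 1 = the imm32 after the opcode
;
; At load time the relocator walks these records and patches each 0ffffffffh
; placeholder with the real address for the mapping in question, so the
; placeholder values in this file are never executed as-is.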


BEGINCODE
GLOBALNAME Start

;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                             ; allow for far return and restore cs correctly.
    call NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0HostToGuest



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
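; A note on the far-return pairing (a reading of the surrounding code, not a
; separate spec): vmmR0HostToGuest does 'push cs' before calling this routine,
; and esp is saved into CPUM.Host.esp below with that far frame on the stack.
; The 'retf' at the end of VMMGCGuestToHostAsm therefore pops both eip and the
; host CS in one go once CPUM.Host.esp has been reloaded:
;
;       push cs                         ; host side, vmmR0HostToGuest
;       call NAME(vmmR0HostToGuestAsm)
;       ...                             ; world switch and back ...
;       retf                            ; VMMGCGuestToHostAsm, in host context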
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    mov [edx + CPUM.Host.ebx], ebx
    mov [edx + CPUM.Host.edi], edi
    mov [edx + CPUM.Host.esi], esi
    mov [edx + CPUM.Host.esp], esp
    mov [edx + CPUM.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUM.Host.ds], ds
    mov [edx + CPUM.Host.es], es
    mov [edx + CPUM.Host.fs], fs
    mov [edx + CPUM.Host.gs], gs
    mov [edx + CPUM.Host.ss], ss
    ; special registers.
    sldt [edx + CPUM.Host.ldtr]
    sidt [edx + CPUM.Host.idtr]
    sgdt [edx + CPUM.Host.gdtr]
    str  [edx + CPUM.Host.tr]
    ; flags
    pushfd
    pop dword [edx + CPUM.Host.eflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov ebx, edx                        ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov [ebx + CPUM.Host.SysEnter.cs], eax
    mov [ebx + CPUM.Host.SysEnter.cs + 4], edx
    xor eax, eax                        ; load 0:0 to cause #GP upon sysenter
    xor edx, edx
    wrmsr
    xchg ebx, edx                       ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov esi, [edx + CPUM.fUseFlags]     ; esi == use flags.
    and esi, ~CPUM_USED_FPU             ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov [edx + CPUM.fUseFlags], esi

    ; debug registers.
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz htg_debug_regs_no
    jmp htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov eax, cr0
    mov [edx + CPUM.Host.cr0], eax
    ;mov eax, cr2                       ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov [edx + CPUM.Host.cr2], eax
    mov eax, cr3
    mov [edx + CPUM.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUM.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov ecx, [edx + CPUM.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;        in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;        simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    and ecx, [edx + CPUM.CR4.AndMask]
    or eax, ecx
    or eax, [edx + CPUM.CR4.OrMask]
    mov cr4, eax

    mov eax, [edx + CPUM.Guest.cr0]
    and eax, X86_CR0_EM
    or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov cr0, eax

    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt [edx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('2')                     ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or edx, X86_CR4_PAE
%else
    and edx, ~X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, ~X86_CR0_PG
    mov cr0, eax
    DEBUG_CHAR('4')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov cr0, eax
    DEBUG_CHAR('6')
%endif

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp 0fff8h:0deadfaceh
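    ; (The 0fff8h:0deadfaceh operand above is a placeholder pair: the
    ; FIX_GC_FAR32 record patches in the actual hypervisor CS selector and
    ; the guest-context address of FarJmpGCTarget at load time, so neither
    ; value is ever used as written.)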


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    FIXUP SWITCHER_FIX_HYPER_CR3, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt [edx + CPUM.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; Setup stack
    DEBUG_CHAR('3')
    lss esp, [edx + CPUM.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and dword [0ffffffffh], ~0200h      ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov esi, [edx + CPUM.fUseFlags]

    ; debug registers
    test esi, CPUM_USE_DEBUG_REGS
    jz htg_debug_regs_guest_no
    jmp htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov esi, edx
    ; clear all PerfEvtSeln registers
    xor eax, eax
    xor edx, edx
    mov ecx, MSR_K7_PERFCTR0
    wrmsr
    mov ecx, MSR_K7_PERFCTR1
    wrmsr
    mov ecx, MSR_K7_PERFCTR2
    wrmsr
    mov ecx, MSR_K7_PERFCTR3
    wrmsr

    mov eax, BIT(20) | BIT(17) | BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov eax, 02329B000h
    mov edx, 0fffffffeh                 ; -1.6GHz * 5
    mov ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov eax, 0f0f0f0f0h
    add eax, 0340h                      ; APIC_LVTPC
    mov dword [eax], 0400h              ; APIC_DM_NMI

    xor edx, edx
    mov eax, BIT(20) | BIT(17) | BIT(16) | 076h | BIT(22) ;+EN
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif

    ; General registers.
    mov ebx, [edx + CPUM.Hyper.ebx]
    mov ebp, [edx + CPUM.Hyper.ebp]
    mov esi, [edx + CPUM.Hyper.esi]
    mov edi, [edx + CPUM.Hyper.edi]
    push dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    mov eax, [edx + CPUM.Hyper.eip]
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    mov eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
%endif
    jmp eax

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    mov eax, dr7                        ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov [edx + CPUM.Host.dr7], eax
    xor eax, eax                        ; clear everything. (bit 12? is read as 1...)
    mov dr7, eax
    mov eax, dr6                        ; just in case we save the state register too.
    mov [edx + CPUM.Host.dr6], eax
    jmp htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov eax, dr0
    mov [edx + CPUM.Host.dr0], eax
    mov ebx, dr1
    mov [edx + CPUM.Host.dr1], ebx
    mov ecx, dr2
    mov [edx + CPUM.Host.dr2], ecx
    mov eax, dr3
    mov [edx + CPUM.Host.dr3], eax
    ; load hyper DR0-7
    mov ebx, [edx + CPUM.Hyper.dr0]
    mov dr0, ebx
    mov ecx, [edx + CPUM.Hyper.dr1]
    mov dr1, ecx
    mov eax, [edx + CPUM.Hyper.dr2]
    mov dr2, eax
    mov ebx, [edx + CPUM.Hyper.dr3]
    mov dr3, ebx
    ;mov eax, [edx + CPUM.Hyper.dr6]
    mov ecx, 0ffff0ff0h
    mov dr6, ecx
    mov eax, [edx + CPUM.Hyper.dr7]
    mov dr7, eax
    jmp htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm

;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArgs * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
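; On entry the stack is thus expected to look like this (top of stack first;
; the worker name below is hypothetical, for illustration only):
;
;       [esp]       target, e.g. NAME(SomeGCWorker) ; _cdecl call target
;       [esp + 4]   cArgs * 4                       ; argument frame size
;       [esp + 8]   arguments, pushed right to left
;
; The trampoline pops the target and the frame size, makes the call, discards
; the argument frame, and finally jumps to VMMGCGuestToHostAsm with the
; worker's status code in eax.
;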
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop eax                             ; call address
    mov esi, edx                        ; save edx
    pop edi                             ; argument frame size (cArgs * 4).
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call eax                            ; do call
    add esp, edi                        ; cleanup stack

    ; return to the host context.
    push byte 0                         ; eip
    mov edx, esi                        ; CPUM pointer

%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
    jmp NAME(VMMGCGuestToHostAsm)       ; eax = returncode.
ENDPROC vmmGCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    mov eax, [esp + 4]
    jmp NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param eax        Return code.
; @param esp + 4    Pointer to CPUMCTXCORE.
;
; @remark ASSUMES interrupts disabled.
;
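; For reference: "layout is pushad" below means the GPR fields of CPUMCTXCORE
; sit in the order pushad stores them, i.e. ascending addresses hold edi, esi,
; ebp, esp, ebx, edx, ecx, eax (pushad pushes eax first, so it lands highest).
; The selector, eflags and eip fields follow; the exact offsets come from the
; CPUMCTXCORE definition pulled in via cpum.mac.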
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea esp, [esp + 4]

    ;
    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    ;
    ; general purpose registers (layout is pushad)
    push eax

    ; @todo do a rep movsd instead
    mov eax, [esp + 4 + CPUMCTXCORE.eax]
    mov [edx + CPUM.Guest.eax], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
    mov [edx + CPUM.Guest.ecx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edx]
    mov [edx + CPUM.Guest.edx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
    mov [edx + CPUM.Guest.ebx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esp]
    mov [edx + CPUM.Guest.esp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
    mov [edx + CPUM.Guest.ebp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esi]
    mov [edx + CPUM.Guest.esi], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edi]
    mov [edx + CPUM.Guest.edi], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.es]
    mov dword [edx + CPUM.Guest.es], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.cs]
    mov dword [edx + CPUM.Guest.cs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ss]
    mov dword [edx + CPUM.Guest.ss], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ds]
    mov dword [edx + CPUM.Guest.ds], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.fs]
    mov dword [edx + CPUM.Guest.fs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.gs]
    mov dword [edx + CPUM.Guest.gs], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
    mov dword [edx + CPUM.Guest.eflags], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eip]
    mov dword [edx + CPUM.Guest.eip], eax
    pop eax

    add esp, CPUMCTXCORE_size           ; skip CPUMCTXCORE structure

    jmp vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap and we want the trap state to be saved.
;
; @param eax    Return code.
; @param ecx    Points to CPUMCTXCORE.
; @uses eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    push eax                            ; save return code.
    ; general purpose registers
    mov eax, [ecx + CPUMCTXCORE.edi]
    mov [edx + CPUM.Hyper.edi], eax
    mov eax, [ecx + CPUMCTXCORE.esi]
    mov [edx + CPUM.Hyper.esi], eax
    mov eax, [ecx + CPUMCTXCORE.ebp]
    mov [edx + CPUM.Hyper.ebp], eax
    mov eax, [ecx + CPUMCTXCORE.eax]
    mov [edx + CPUM.Hyper.eax], eax
    mov eax, [ecx + CPUMCTXCORE.ebx]
    mov [edx + CPUM.Hyper.ebx], eax
    mov eax, [ecx + CPUMCTXCORE.edx]
    mov [edx + CPUM.Hyper.edx], eax
    mov eax, [ecx + CPUMCTXCORE.ecx]
    mov [edx + CPUM.Hyper.ecx], eax
    mov eax, [ecx + CPUMCTXCORE.esp]
    mov [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov eax, [ecx + CPUMCTXCORE.ss]
    mov [edx + CPUM.Hyper.ss], eax
    mov eax, [ecx + CPUMCTXCORE.gs]
    mov [edx + CPUM.Hyper.gs], eax
    mov eax, [ecx + CPUMCTXCORE.fs]
    mov [edx + CPUM.Hyper.fs], eax
    mov eax, [ecx + CPUMCTXCORE.es]
    mov [edx + CPUM.Hyper.es], eax
    mov eax, [ecx + CPUMCTXCORE.ds]
    mov [edx + CPUM.Hyper.ds], eax
    mov eax, [ecx + CPUMCTXCORE.cs]
    mov [edx + CPUM.Hyper.cs], eax
    ; flags
    mov eax, [ecx + CPUMCTXCORE.eflags]
    mov [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov eax, [ecx + CPUMCTXCORE.eip]
    mov [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop eax                             ; restore return code.
    jmp vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; @param eax    Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    pop dword [edx + CPUM.Hyper.eip]    ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov dword [edx + CPUM.Hyper.ebx], ebx
    mov dword [edx + CPUM.Hyper.esi], esi
    mov dword [edx + CPUM.Hyper.edi], edi
    mov dword [edx + CPUM.Hyper.ebp], ebp
    mov dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; FPU context is saved before restore of host, saving (another) branch.

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov esi, edx
    mov edi, eax

    xor edx, edx
    xor eax, eax
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov eax, edi
    mov edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov edi, eax                        ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    mov ecx, [edx + CPUM.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and edx, ~X86_CR4_PAE
%else
    or edx, X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, ~X86_CR0_PG
    mov cr0, eax
    DEBUG_CHAR('2')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    ; activate host gdt and idt
    lgdt [edx + CPUM.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUM.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUM.Host.tr] ; eax <- TR
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUM.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUM.Host.tr]
%else
    movzx eax, word [edx + CPUM.Host.tr] ; eax <- TR
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUM.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    mov ecx, [eax + 4]                  ; ecx <- 2nd descriptor dword
    mov ebx, ecx                        ; save original value
    and ecx, ~0200h                     ; clear busy flag (2nd type2 bit)
    mov [eax + 4], ecx                  ; not using xchg here is paranoia..
    ltr word [edx + CPUM.Host.tr]
    xchg [eax + 4], ebx                 ; using xchg is paranoia too...
%endif
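    ; (Background on the busy-flag dance above, which is standard IA-32
    ; behaviour rather than anything VirtualBox-specific: ltr requires an
    ; available, i.e. non-busy, TSS descriptor and sets the busy bit (bit 9
    ; of the second descriptor dword, hence the ~0200h mask) as a side
    ; effect. Loading a descriptor already marked busy raises #GP, and the
    ; host TSS was in use before the switch, so the bit is cleared first.)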
    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUM.Host.ldtr]
    ; Restore segment registers
    mov eax, [edx + CPUM.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUM.Host.es]
    mov es, eax
    mov eax, [edx + CPUM.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUM.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUM.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov eax, [edx + CPUM.Host.SysEnter.cs]
    mov ebx, [edx + CPUM.Host.SysEnter.cs + 4]
    xchg edx, ebx                       ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    xchg edx, ebx                       ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov esi, [edx + CPUM.fUseFlags]     ; esi == use flags.
    test esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov ecx, cr0
    and ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave [edx + CPUM.Guest.fpu]
    fxrstor [edx + CPUM.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave [edx + CPUM.Guest.fpu]
    mov eax, [edx + CPUM.Host.fpu]      ; control word
    not eax                             ; 1 means exception ignored (6 LS bits)
    and eax, byte 03Fh                  ; 6 LS bits only
    test eax, [edx + CPUM.Host.fpu + 4] ; status word
    jz gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and dword [edx + CPUM.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor [edx + CPUM.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov ecx, [edx + CPUM.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUM.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUM.Host.cr2]     ; assumes this is a waste of time.
    ;mov cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp gth_debug_regs_restore
gth_debug_regs_no:

    ; restore general registers.
    mov eax, edi                        ; restore return code. eax = return code !!
    mov edi, [edx + CPUM.Host.edi]
    mov esi, [edx + CPUM.Host.esi]
    mov ebx, [edx + CPUM.Host.ebx]
    mov ebp, [edx + CPUM.Host.ebp]
    push dword [edx + CPUM.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;   COM_S_CHAR '4'
%endif
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor eax, eax
    mov dr7, eax                        ; paranoia or not?
    test esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov eax, [edx + CPUM.Host.dr0]
    mov dr0, eax
    mov ebx, [edx + CPUM.Host.dr1]
    mov dr1, ebx
    mov ecx, [edx + CPUM.Host.dr2]
    mov dr2, ecx
    mov eax, [edx + CPUM.Host.dr3]
    mov dr3, eax
gth_debug_regs_dr7:
    mov ebx, [edx + CPUM.Host.dr6]
    mov dr6, ebx
    mov ecx, [edx + CPUM.Host.dr7]
    mov dr7, ecx
    jmp gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0, dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0, dd NAME(FarJmpGCTarget) - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0, dd NAME(FarJmpGCTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0, dd 0
        at VMMSWITCHERDEF.cbIDCode0, dd 0
        at VMMSWITCHERDEF.offIDCode1, dd 0
        at VMMSWITCHERDEF.cbIDCode1, dd 0
%endif
        at VMMSWITCHERDEF.offGCCode, dd NAME(FarJmpGCTarget) - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode, dd NAME(HCExitTarget) - NAME(FarJmpGCTarget)
%endif

    iend

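; (A closing note, inferred from this file rather than stated by it: the VMM
; appears to pick one such VMMSWITCHERDEF per switcher variant - compare the
; VMMR3SelectSwitcher reference in the @todo earlier in this file - using
; pvFixups/pfnRelocate for load-time relocation and the off*/cb* ranges as
; disassembly aids.)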