
source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac @ 13972

Last change on this file since 13972 was 13960, checked in by vboxsync, 16 years ago

Moved guest and host CPU contexts into per-VCPU array.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.7 KB
; $Id: PAEand32Bit.mac 13960 2008-11-07 13:04:45Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
%define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
%define NEED_ID
%endif


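;
; Note: NEED_ID selects the extra identity-mapped transition stages below.
; They appear to be required whenever host and hypervisor paging modes differ
; (PAE vs. plain 32-bit), since CR4.PAE may only be toggled while CR0.PG is
; clear, and with paging off the code must run at an address that equals its
; physical address.
;
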
;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

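;
; Note: every FIXUP invocation below emits a fixup record into the .data
; section and is followed by a placeholder operand (0ffffffffh, 0fff8h, etc.)
; which the relocation pass (see NAME(Relocate) and the VMMSWITCHERDEF at the
; end of this file) patches with the real address or offset at install time.
;
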
;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                         ; allow for far return and restore cs correctly.
    call NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0HostToGuest
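;
; Note: the 'push cs' before the near call above fakes up a far call frame,
; which is what allows the world switch to end with a 'retf' that restores
; both cs and eip once we are back in host context.
;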



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop dword [edx + CPUMCPU.Host.eflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov ebx, edx                    ; save edx
    rdmsr                           ; edx:eax <- MSR[ecx]
    mov [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor edx, edx
    wrmsr
    xchg ebx, edx                   ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

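;
; Note: rdmsr/wrmsr clobber edx (and use eax/ecx), so the CPUMCPU pointer is
; parked in ebx around the MSR access above.  Zeroing SYSENTER_CS ensures a
; stray sysenter executed in guest context raises #GP instead of vectoring
; through the host's sysenter handler.
;
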
    ;; handle use flags.
    mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    and esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz htg_debug_regs_no
    jmp htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ;mov eax, cr2                   ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov [edx + CPUMCPU.Host.cr2], eax
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;       in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;       simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    CPUM_FROM_CPUMCPU(edx)
    and ecx, [edx + CPUM.CR4.AndMask]
    or eax, ecx
    or eax, [edx + CPUM.CR4.OrMask]
    mov cr4, eax

    CPUMCPU_FROM_CPUM(edx)
    mov eax, [edx + CPUMCPU.Guest.cr0]
    and eax, X86_CR0_EM
    or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov cr0, eax

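;
; Note: keeping only the guest's EM bit while forcing TS and MP means the
; first FPU/MMX/SSE instruction executed in guest context traps with #NM;
; this is what enables the lazy FPU switching keyed off CPUM_USED_FPU on the
; way back to the host.
;
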
    CPUM_FROM_CPUMCPU(edx)
    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt [edx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                 ; trashes esi

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('2')                 ; trashes esi

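;
; The intermediate context is a transitional page hierarchy which (as the
; HC/GC fixup pair suggests) maps this switcher code in both the host and
; the identity/guest views, so that execution survives the cr3 reloads
; between the host and hypervisor address spaces.
;
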
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or edx, X86_CR4_PAE
%else
    and edx, ~X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('4')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov cr0, eax
    DEBUG_CHAR('6')
%endif

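;
; The sequence above disables paging, flips CR4.PAE to the hypervisor's
; paging mode, loads the intermediate cr3 for that mode, and re-enables
; paging.  It must run on identity mapped pages: with CR0.PG clear the CPU
; fetches from physical addresses, so the virtual and physical addresses of
; this code have to coincide.
;
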
    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp 0fff8h:0deadfaceh


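;
; Both halves of the far jump above are placeholders: the FIX_GC_FAR32
; record patches in the hypervisor CS selector and the guest context address
; of FarJmpGCTarget, so the jump reloads cs as a side effect.
;
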
    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    FIXUP SWITCHER_FIX_HYPER_CR3, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt [edx + CPUM.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; Setup stack; use the lss_esp, ss pair for lss
    DEBUG_CHAR('3')
    mov eax, [edx + CPUM.Hyper.esp]
    mov [edx + CPUM.Hyper.lss_esp], eax
    lss esp, [edx + CPUM.Hyper.lss_esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

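;
; Note: ltr raises #GP if the referenced TSS descriptor is already marked
; busy, which it will be after an earlier switch; hence the busy bit (mask
; 0200h in the descriptor's second dword, located via the fixup above) is
; cleared first.
;
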
    CPUMCPU_FROM_CPUM(edx)
    ;; use flags.
    mov esi, [edx + CPUMCPU.fUseFlags]
    CPUM_FROM_CPUMCPU(edx)

    ; debug registers
    test esi, CPUM_USE_DEBUG_REGS
    jz htg_debug_regs_guest_no
    jmp htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov esi, edx
    ; clear all four performance counters (PERFCTR0-3)
    xor eax, eax
    xor edx, edx
    mov ecx, MSR_K7_PERFCTR0
    wrmsr
    mov ecx, MSR_K7_PERFCTR1
    wrmsr
    mov ecx, MSR_K7_PERFCTR2
    wrmsr
    mov ecx, MSR_K7_PERFCTR3
    wrmsr

    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov eax, 02329B000h
    mov edx, 0fffffffeh             ; -1.6GHz * 5
    mov ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov eax, 0f0f0f0f0h
    add eax, 0340h                  ; APIC_LVTPC
    mov dword [eax], 0400h          ; APIC_DM_NMI

    xor edx, edx
    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif

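;
; What the block above appears to arm is a performance-counter NMI watchdog:
; EVNTSEL0 counts event 076h (CPU clocks not halted on K7) in user and kernel
; mode (bits 16/17), PERFCTR0 is preloaded with a near-overflow count, and
; the local APIC LVTPC entry is set to NMI delivery mode, so an NMI fires if
; the hypervisor stays in guest context long enough for the counter to wrap.
;
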
    ; General registers.
    mov ebx, [edx + CPUM.Hyper.ebx]
    mov ebp, [edx + CPUM.Hyper.ebp]
    mov esi, [edx + CPUM.Hyper.esi]
    mov edi, [edx + CPUM.Hyper.edi]
    push dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    mov eax, [edx + CPUM.Hyper.eip]
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    mov eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
%endif
    jmp eax

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    CPUMCPU_FROM_CPUM(edx)
    mov eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov [edx + CPUMCPU.Host.dr7], eax
    xor eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov dr7, eax
    mov eax, dr6                    ; just in case we save the state register too.
    mov [edx + CPUMCPU.Host.dr6], eax
    CPUM_FROM_CPUMCPU(edx)
    jmp htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    CPUMCPU_FROM_CPUM(edx)
    ; save host DR0-3.
    mov eax, dr0
    mov [edx + CPUMCPU.Host.dr0], eax
    mov ebx, dr1
    mov [edx + CPUMCPU.Host.dr1], ebx
    mov ecx, dr2
    mov [edx + CPUMCPU.Host.dr2], ecx
    mov eax, dr3
    mov [edx + CPUMCPU.Host.dr3], eax
    CPUM_FROM_CPUMCPU(edx)

    ; load hyper DR0-7
    mov ebx, [edx + CPUM.Hyper.dr]
    mov dr0, ebx
    mov ecx, [edx + CPUM.Hyper.dr + 8*1]
    mov dr1, ecx
    mov eax, [edx + CPUM.Hyper.dr + 8*2]
    mov dr2, eax
    mov ebx, [edx + CPUM.Hyper.dr + 8*3]
    mov dr3, ebx
    ;mov eax, [edx + CPUM.Hyper.dr + 8*6]
    mov ecx, 0ffff0ff0h             ; load the architectural DR6 reset value instead.
    mov dr6, ecx
    mov eax, [edx + CPUM.Hyper.dr + 8*7]
    mov dr7, eax
    jmp htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop eax                         ; call address
    mov esi, edx                    ; save edx
    pop edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call eax                        ; do call
    add esp, edi                    ; cleanup stack

    ; return to the host context.
    push byte 0                     ; eip
    mov edx, esi                    ; CPUM pointer

%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
    jmp NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
ENDPROC vmmGCCallTrampoline
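;
; Note: the 'push byte 0' above plants a dummy eip, since VMMGCGuestToHostAsm
; begins by popping the return address off the stack into CPUM.Hyper.eip and
; a finished trampoline call has no meaningful address to resume at.
;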



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    mov eax, [esp + 4]
    jmp NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param eax        Return code.
; @param esp + 4    Pointer to CPUMCTXCORE.
;
; @remark ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea esp, [esp + 4]

    ;
    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    ;
    ; general purpose registers.
    push eax

    CPUMCPU_FROM_CPUM(edx)
    mov eax, [esp + 4 + CPUMCTXCORE.eax]
    mov [edx + CPUMCPU.Guest.eax], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
    mov [edx + CPUMCPU.Guest.ecx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edx]
    mov [edx + CPUMCPU.Guest.edx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
    mov [edx + CPUMCPU.Guest.ebx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esp]
    mov [edx + CPUMCPU.Guest.esp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
    mov [edx + CPUMCPU.Guest.ebp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esi]
    mov [edx + CPUMCPU.Guest.esi], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edi]
    mov [edx + CPUMCPU.Guest.edi], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.es]
    mov dword [edx + CPUMCPU.Guest.es], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.cs]
    mov dword [edx + CPUMCPU.Guest.cs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ss]
    mov dword [edx + CPUMCPU.Guest.ss], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ds]
    mov dword [edx + CPUMCPU.Guest.ds], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.fs]
    mov dword [edx + CPUMCPU.Guest.fs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.gs]
    mov dword [edx + CPUMCPU.Guest.gs], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
    mov dword [edx + CPUMCPU.Guest.eflags], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eip]
    mov dword [edx + CPUMCPU.Guest.eip], eax
    pop eax
    CPUM_FROM_CPUMCPU(edx)

    add esp, CPUMCTXCORE_size       ; skip CPUMCTXCORE structure

    jmp vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx
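;
; Note: eax carries the return code, so it is parked on the stack while being
; used as scratch for copying the guest registers; that push is also why all
; the CPUMCTXCORE offsets above are biased by 4.
;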


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically used when abandoning the hypervisor
; because of a trap, when the trap state needs to be saved.
;
; @param eax        Return code.
; @param ecx        Points to CPUMCTXCORE.
; @uses eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    push eax                        ; save return code.
    ; general purpose registers
    mov eax, [ecx + CPUMCTXCORE.edi]
    mov [edx + CPUM.Hyper.edi], eax
    mov eax, [ecx + CPUMCTXCORE.esi]
    mov [edx + CPUM.Hyper.esi], eax
    mov eax, [ecx + CPUMCTXCORE.ebp]
    mov [edx + CPUM.Hyper.ebp], eax
    mov eax, [ecx + CPUMCTXCORE.eax]
    mov [edx + CPUM.Hyper.eax], eax
    mov eax, [ecx + CPUMCTXCORE.ebx]
    mov [edx + CPUM.Hyper.ebx], eax
    mov eax, [ecx + CPUMCTXCORE.edx]
    mov [edx + CPUM.Hyper.edx], eax
    mov eax, [ecx + CPUMCTXCORE.ecx]
    mov [edx + CPUM.Hyper.ecx], eax
    mov eax, [ecx + CPUMCTXCORE.esp]
    mov [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov eax, [ecx + CPUMCTXCORE.ss]
    mov [edx + CPUM.Hyper.ss], eax
    mov eax, [ecx + CPUMCTXCORE.gs]
    mov [edx + CPUM.Hyper.gs], eax
    mov eax, [ecx + CPUMCTXCORE.fs]
    mov [edx + CPUM.Hyper.fs], eax
    mov eax, [ecx + CPUMCTXCORE.es]
    mov [edx + CPUM.Hyper.es], eax
    mov eax, [ecx + CPUMCTXCORE.ds]
    mov [edx + CPUM.Hyper.ds], eax
    mov eax, [ecx + CPUMCTXCORE.cs]
    mov [edx + CPUM.Hyper.cs], eax
    ; flags
    mov eax, [ecx + CPUMCTXCORE.eflags]
    mov [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov eax, [ecx + CPUMCTXCORE.eip]
    mov [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop eax                         ; restore return code.
    jmp vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; @param eax        Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    pop dword [edx + CPUM.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov dword [edx + CPUM.Hyper.ebx], ebx
    mov dword [edx + CPUM.Hyper.esi], esi
    mov dword [edx + CPUM.Hyper.edi], edi
    mov dword [edx + CPUM.Hyper.ebp], ebp
    mov dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; FPU context is saved in the host-restore path below, in a separate branch.

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov esi, edx
    mov edi, eax

    xor edx, edx
    xor eax, eax
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov eax, edi
    mov edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    CPUMCPU_FROM_CPUM(edx)
    mov ecx, [edx + CPUMCPU.Host.cr3]
    CPUM_FROM_CPUMCPU(edx)
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and edx, ~X86_CR4_PAE
%else
    or edx, X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('2')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif
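;
; Note: the host cr3 was fetched into ecx back while the CPUM data was still
; addressable; it rides across the intermediate and identity stages in a
; register so that HCExitTarget can load it before any host mapping of the
; CPUM structure is available again.
;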


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h             ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUMCPU.Host.tr]
%else
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    mov ecx, [eax + 4]                      ; ecx <- 2nd descriptor dword
    mov ebx, ecx                            ; save original value
    and ecx, ~0200h                         ; clear busy flag (2nd type2 bit)
    mov [eax + 4], ecx                      ; not using xchg here is paranoia..
    ltr word [edx + CPUMCPU.Host.tr]
    xchg [eax + 4], ebx                     ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg edx, ebx                   ; save/load edx
    wrmsr                           ; MSR[ecx] <- edx:eax
    xchg edx, ebx                   ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:
    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    test esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov ecx, cr0
    and ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave [edx + CPUMCPU.Guest.fpu]
    mov eax, [edx + CPUMCPU.Host.fpu] ; control word
    not eax                         ; 1 means exception ignored (6 LS bits)
    and eax, byte 03Fh              ; 6 LS bits only
    test eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
    jz gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

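;
; Note: the fnsave path needs the masking dance above because frstor reloads
; the saved status word, and any exception left pending there while unmasked
; in the control word would raise #MF at the next FP instruction; those
; pending bits are therefore cleared first (the 'technically incorrect'
; shortcut noted above).
;
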
    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumed to be a waste of time.
    ;mov cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp gth_debug_regs_restore
gth_debug_regs_no:

    ; restore general registers.
    mov eax, edi                    ; restore return code. eax = return code !!
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]
    push dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor eax, eax
    mov dr7, eax                    ; paranoia or not?
    test esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov eax, [edx + CPUMCPU.Host.dr0]
    mov dr0, eax
    mov ebx, [edx + CPUMCPU.Host.dr1]
    mov dr1, ebx
    mov ecx, [edx + CPUMCPU.Host.dr2]
    mov dr2, ecx
    mov eax, [edx + CPUMCPU.Host.dr3]
    mov dr3, eax
gth_debug_regs_dr7:
    mov ebx, [edx + CPUMCPU.Host.dr6]
    mov dr6, ebx
    mov ecx, [edx + CPUMCPU.Host.dr7]
    mov dr7, ecx
    jmp gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                  ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(FarJmpGCTarget)              - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(FarJmpGCTarget)              - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)                - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,                   dd 0
        at VMMSWITCHERDEF.cbIDCode0,                    dd 0
        at VMMSWITCHERDEF.offIDCode1,                   dd 0
        at VMMSWITCHERDEF.cbIDCode1,                    dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(FarJmpGCTarget)              - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)                - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(HCExitTarget)                - NAME(FarJmpGCTarget)
%endif

    iend

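;
; This structure is what the VMM uses to install the switcher: pvCode/cbCode
; delimit the blob to copy, pvFixups feeds the relocation pass, and the
; off*/cb* fields mark which ranges run in the HC, ID and GC stages
; (presumably for the disassembly aids the 'disasm help' comment refers to).
;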