VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac

;; @file
;
; VMM - World Switchers, template for PAE and 32-Bit.

; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.

;%define DEBUG_STUFF 1

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/nasm.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
 %define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
 %define NEED_ID
%endif



;
; Start the fixup records
;    We collect the fixups in the .data section as we go along
;    It is therefore VITAL that no-one is using the .data section
;    for anything else between 'Start' and 'End'.
;
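; The FIXUP macro (from VMMSwitcher.mac) is the mechanism the comment above
; describes: it emits a fixup record into the .data section and leaves an
; obvious placeholder in the code stream for the relocator to patch at load
; time. A sketch of the recurring pattern, for illustration only (the second
; argument appears to be the byte offset of the operand to patch within the
; following instruction, judging from its use in this file):
%if 0
    FIXUP FIX_HC_CPUM_OFF, 1, 0         ; record a fixup for the next insn
    mov edx, 0ffffffffh                 ; placeholder immediate, patched to
                                        ; the host-context CPUM address
%endif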
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                             ; allow for far return and restore cs correctly.
    call NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0HostToGuest
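; A note on the pairing above: vmmR0HostToGuestAsm is entered with a near
; call, but the matching return on the way back from guest context (the retf
; near the end of VMMGCGuestToHostAsm) is a far one. Pushing cs before the
; near call builds a cs:eip far frame, so cs is reloaded on the way back.
; Minimal sketch of the idiom (illustration only, hypothetical label):
%if 0
    push cs                             ; [esp+4] = cs after the call below
    call some_near_target               ; [esp] = eip; together a far frame
                                        ; ...some_near_target eventually does:
    retf                                ; pops eip, then cs
%endif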



; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;   - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;   - eax, ecx, edx
;
; ASSUMPTION:
;   - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    mov [edx + CPUM.Host.ebx], ebx
    mov [edx + CPUM.Host.edi], edi
    mov [edx + CPUM.Host.esi], esi
    mov [edx + CPUM.Host.esp], esp
    mov [edx + CPUM.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUM.Host.ds], ds
    mov [edx + CPUM.Host.es], es
    mov [edx + CPUM.Host.fs], fs
    mov [edx + CPUM.Host.gs], gs
    mov [edx + CPUM.Host.ss], ss
    ; special registers.
    sldt [edx + CPUM.Host.ldtr]
    sidt [edx + CPUM.Host.idtr]
    sgdt [edx + CPUM.Host.gdtr]
    str [edx + CPUM.Host.tr]
    ; flags
    pushfd
    pop dword [edx + CPUM.Host.eflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov ebx, edx                        ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov [ebx + CPUM.Host.SysEnter.cs], eax
    mov [ebx + CPUM.Host.SysEnter.cs + 4], edx
    xor eax, eax                        ; load 0:0 to cause #GP upon sysenter
    xor edx, edx
    wrmsr
    xchg ebx, edx                       ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

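; The MSR access above shows the file's standard pattern: rdmsr/wrmsr
; clobber edx:eax, yet edx holds the CPUM pointer, so the pointer is parked
; in ebx around the access. Generic form of the idiom, with a hypothetical
; SOME_MSR index and save slots (sketch only):
%if 0
    mov ebx, edx                        ; park the pointer held in edx
    mov ecx, SOME_MSR                   ; hypothetical MSR index
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov [ebx], eax                      ; save low dword (hypothetical slot)
    mov [ebx + 4], edx                  ; save high dword
    xor eax, eax
    xor edx, edx
    wrmsr                               ; MSR[ecx] <- 0
    xchg ebx, edx                       ; pointer back into edx
%endif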
    ;; handle use flags.
    mov esi, [edx + CPUM.fUseFlags]     ; esi == use flags.
    and esi, ~CPUM_USED_FPU             ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov [edx + CPUM.fUseFlags], esi

    ; debug registers.
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz htg_debug_regs_no
    jmp htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov eax, cr0
    mov [edx + CPUM.Host.cr0], eax
    ;mov eax, cr2                       ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov [edx + CPUM.Host.cr2], eax
    mov eax, cr3
    mov [edx + CPUM.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUM.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov ecx, [edx + CPUM.Guest.cr4]
    ;; @todo Switcher cleanup: Determine the base CR4 during CPUMR0Init / VMMR3SelectSwitcher, putting it
    ;       in CPUM.Hyper.cr4 (which isn't currently being used). That should
    ;       simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.Mask and CR4.OSFSXR are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU.
    ;
    and ecx, [edx + CPUM.CR4.Mask]
    or eax, ecx
    or eax, [edx + CPUM.CR4.OSFSXR]
    mov cr4, eax

    mov eax, [edx + CPUM.Guest.cr0]
    and eax, X86_CR0_EM
    or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov cr0, eax

    ; Load the new gdt so we can do a far jump to guest code after the cr3 reload.
    lgdt [edx + CPUM.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('2')                     ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or edx, X86_CR4_PAE
%else
    and edx, ~X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, ~X86_CR0_PG
    mov cr0, eax
    DEBUG_CHAR('4')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov cr0, eax
    DEBUG_CHAR('6')
%endif
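; The NEED_ID block above follows the canonical IA-32 recipe for switching
; paging modes: PAE can only be flipped while paging is off, and turning
; paging off is only safe from identity mapped code, because the next
; instruction fetch goes through the new translation. Bare sketch of the
; sequence (illustration only; assumes interrupts are disabled and ebx
; holds the cr3 for the new mode):
%if 0
    mov eax, cr0
    and eax, ~X86_CR0_PG                ; 1. paging off
    mov cr0, eax
    mov edx, cr4
    xor edx, X86_CR4_PAE                ; 2. flip PAE while paging is off
    mov cr4, edx
    mov cr3, ebx                        ; 3. page tables for the new mode
    or eax, X86_CR0_PG                  ; 4. paging back on
    mov cr0, eax
%endif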

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp 0fff8h:0deadfaceh


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    FIXUP SWITCHER_FIX_HYPER_CR3, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt [edx + CPUM.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; Setup stack
    DEBUG_CHAR('3')
    lss esp, [edx + CPUM.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and dword [0ffffffffh], ~0200h      ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr word [edx + CPUM.Hyper.tr]
    DEBUG_CHAR('6')

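; Why ~0200h works above: in the second dword of a TSS descriptor the type
; field occupies bits 8..11, and bit 9 (0200h) is the busy bit; ltr raises
; #GP if the descriptor is already marked busy. Sketch, assuming a
; hypothetical ebx pointing at that second dword:
%if 0
    and dword [ebx], ~0200h             ; busy TSS -> available TSS
    ltr word [edx + CPUM.Hyper.tr]      ; now safe to load the task register
%endif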
    ; Activate the ldt (now we can safely crash).
    lldt [edx + CPUM.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov esi, [edx + CPUM.fUseFlags]

    ; debug registers
    test esi, CPUM_USE_DEBUG_REGS
    jz htg_debug_regs_guest_no
    jmp htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov esi, edx
    ; clear all PerfEvtSeln registers
    xor eax, eax
    xor edx, edx
    mov ecx, MSR_K7_PERFCTR0
    wrmsr
    mov ecx, MSR_K7_PERFCTR1
    wrmsr
    mov ecx, MSR_K7_PERFCTR2
    wrmsr
    mov ecx, MSR_K7_PERFCTR3
    wrmsr

    mov eax, BIT(20) | BIT(17) | BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov eax, 02329B000h
    mov edx, 0fffffffeh                 ; -1.6GHz * 5
    mov ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov eax, 0f0f0f0f0h
    add eax, 0340h                      ; APIC_LVTPC
    mov dword [eax], 0400h              ; APIC_DM_NMI

    xor edx, edx
    mov eax, BIT(20) | BIT(17) | BIT(16) | 076h | BIT(22) ;+EN
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif
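; Decoding the EVNTSEL value above, assuming the AMD K7/K8 PerfEvtSel
; layout: 076h selects the cycles-not-halted event, bit 16 counts user
; mode, bit 17 counts kernel mode, bit 20 enables the APIC interrupt on
; counter overflow (delivered as NMI via the LVTPC entry programmed above),
; and bit 22 is the master enable. Annotation sketch only:
%if 0
    mov eax, BIT(22) | BIT(20) | BIT(17) | BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr                               ; arm the NMI-on-overflow counter
%endif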

    ; General registers.
    mov ebx, [edx + CPUM.Hyper.ebx]
    mov ebp, [edx + CPUM.Hyper.ebp]
    mov esi, [edx + CPUM.Hyper.esi]
    mov edi, [edx + CPUM.Hyper.edi]
    push dword [edx + CPUM.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    mov eax, [edx + CPUM.Hyper.eip]
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    mov eax, [edx + CPUM.Hyper.eip]
%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
%endif
    jmp eax

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    mov eax, dr7                        ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov [edx + CPUM.Host.dr7], eax
    xor eax, eax                        ; clear everything. (bit 12? is read as 1...)
    mov dr7, eax
    mov eax, dr6                        ; just in case we save the state register too.
    mov [edx + CPUM.Host.dr6], eax
    jmp htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov eax, dr0
    mov [edx + CPUM.Host.dr0], eax
    mov ebx, dr1
    mov [edx + CPUM.Host.dr1], ebx
    mov ecx, dr2
    mov [edx + CPUM.Host.dr2], ecx
    mov eax, dr3
    mov [edx + CPUM.Host.dr3], eax
    ; load hyper DR0-7
    mov ebx, [edx + CPUM.Hyper.dr0]
    mov dr0, ebx
    mov ecx, [edx + CPUM.Hyper.dr1]
    mov dr1, ecx
    mov eax, [edx + CPUM.Hyper.dr2]
    mov dr2, eax
    mov ebx, [edx + CPUM.Hyper.dr3]
    mov dr3, ebx
    ;mov eax, [edx + CPUM.Hyper.dr6]
    mov ecx, 0ffff0ff0h
    mov dr6, ecx
    mov eax, [edx + CPUM.Hyper.dr7]
    mov dr7, eax
    jmp htg_debug_regs_guest_no

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
;   Push any arguments to the routine.
;   Push the argument frame size (cArg * 4).
;   Push the call target (_cdecl convention).
;   Push the address of this routine.
;
;
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif
    ; Clear fs and gs.
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; call routine
    pop eax                             ; call address
    mov esi, edx                        ; save edx
    pop edi                             ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call eax                            ; do call
    add esp, edi                        ; cleanup stack

    ; return to the host context.
    push byte 0                         ; eip
    mov edx, esi                        ; CPUM pointer

%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
    jmp NAME(VMMGCGuestToHostAsm)       ; eax = returncode.
ENDPROC vmmGCCallTrampoline
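; One plausible way a caller could arm the hypervisor stack for this
; trampoline, sketched with hypothetical TargetFunc/arg1/arg2 values (the
; real setup lives in C code elsewhere in the VMM): after this, resuming
; the hypervisor at vmmGCCallTrampoline performs TargetFunc(arg1, arg2)
; and then switches back to the host.
%if 0
    mov eax, [edx + CPUM.Hyper.esp]
    sub eax, 16
    mov dword [eax + 12], arg2          ; arguments, pushed right to left
    mov dword [eax + 8], arg1
    mov dword [eax + 4], 2 * 4          ; argument frame size (cArg * 4)
    mov dword [eax], TargetFunc         ; _cdecl call target, popped into eax
    mov [edx + CPUM.Hyper.esp], eax
    ; CPUM.Hyper.eip would then be pointed at vmmGCCallTrampoline.
%endif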


;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    mov eax, [esp + 4]
    jmp NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param eax        Return code.
; @param esp + 4    Pointer to CPUMCTXCORE.
;
; @remark ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea esp, [esp + 4]

    ;
    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    ;
    ; general purpose registers (layout is pushad)
    push eax

    ; @todo do a rep movsd instead (see the sketch after this routine)
    mov eax, [esp + 4 + CPUMCTXCORE.eax]
    mov [edx + CPUM.Guest.eax], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
    mov [edx + CPUM.Guest.ecx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edx]
    mov [edx + CPUM.Guest.edx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
    mov [edx + CPUM.Guest.ebx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esp]
    mov [edx + CPUM.Guest.esp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
    mov [edx + CPUM.Guest.ebp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esi]
    mov [edx + CPUM.Guest.esi], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edi]
    mov [edx + CPUM.Guest.edi], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.es]
    mov dword [edx + CPUM.Guest.es], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.cs]
    mov dword [edx + CPUM.Guest.cs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ss]
    mov dword [edx + CPUM.Guest.ss], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ds]
    mov dword [edx + CPUM.Guest.ds], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.fs]
    mov dword [edx + CPUM.Guest.fs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.gs]
    mov dword [edx + CPUM.Guest.gs], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
    mov dword [edx + CPUM.Guest.eflags], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eip]
    mov dword [edx + CPUM.Guest.eip], eax
    pop eax

    add esp, CPUMCTXCORE_size           ; skip CPUMCTXCORE structure

    jmp vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx
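; A sketch of the rep movsd variant the @todo in the routine above asks for.
; It assumes flat ds/es, a clear direction flag, and that the CPUM guest
; core fields mirror the CPUMCTXCORE layout exactly (pushad order, edi
; first); none of that is verified here, and a real change would also have
; to preserve esi/edi for the code that follows.
%if 0
    cld
    lea esi, [esp + 4]                  ; source: CPUMCTXCORE on the stack
    lea edi, [edx + CPUM.Guest.edi]     ; destination (assumed layout match)
    mov ecx, CPUMCTXCORE_size / 4
    rep movsd                           ; copy the whole core in one go
%endif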


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state saved.
;
; @param eax        Return code.
; @param ecx        Points to CPUMCTXCORE.
; @uses  eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    push eax                            ; save return code.
    ; general purpose registers
    mov eax, [ecx + CPUMCTXCORE.edi]
    mov [edx + CPUM.Hyper.edi], eax
    mov eax, [ecx + CPUMCTXCORE.esi]
    mov [edx + CPUM.Hyper.esi], eax
    mov eax, [ecx + CPUMCTXCORE.ebp]
    mov [edx + CPUM.Hyper.ebp], eax
    mov eax, [ecx + CPUMCTXCORE.eax]
    mov [edx + CPUM.Hyper.eax], eax
    mov eax, [ecx + CPUMCTXCORE.ebx]
    mov [edx + CPUM.Hyper.ebx], eax
    mov eax, [ecx + CPUMCTXCORE.edx]
    mov [edx + CPUM.Hyper.edx], eax
    mov eax, [ecx + CPUMCTXCORE.ecx]
    mov [edx + CPUM.Hyper.ecx], eax
    mov eax, [ecx + CPUMCTXCORE.esp]
    mov [edx + CPUM.Hyper.esp], eax
    ; selectors
    mov eax, [ecx + CPUMCTXCORE.ss]
    mov [edx + CPUM.Hyper.ss], eax
    mov eax, [ecx + CPUMCTXCORE.gs]
    mov [edx + CPUM.Hyper.gs], eax
    mov eax, [ecx + CPUMCTXCORE.fs]
    mov [edx + CPUM.Hyper.fs], eax
    mov eax, [ecx + CPUMCTXCORE.es]
    mov [edx + CPUM.Hyper.es], eax
    mov eax, [ecx + CPUMCTXCORE.ds]
    mov [edx + CPUM.Hyper.ds], eax
    mov eax, [ecx + CPUMCTXCORE.cs]
    mov [edx + CPUM.Hyper.cs], eax
    ; flags
    mov eax, [ecx + CPUMCTXCORE.eflags]
    mov [edx + CPUM.Hyper.eflags], eax
    ; eip
    mov eax, [ecx + CPUMCTXCORE.eip]
    mov [edx + CPUM.Hyper.eip], eax
    ; jump to common worker code.
    pop eax                             ; restore return code.
    jmp vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; @param eax        Return code.
; @uses  eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh

    pop dword [edx + CPUM.Hyper.eip]    ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov dword [edx + CPUM.Hyper.ebx], ebx
    mov dword [edx + CPUM.Hyper.esi], esi
    mov dword [edx + CPUM.Hyper.edi], edi
    mov dword [edx + CPUM.Hyper.ebp], ebp
    mov dword [edx + CPUM.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    sldt [edx + CPUM.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved, and the host FPU state restored, in a separate branch below.

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov esi, edx
    mov edi, eax

    xor edx, edx
    xor eax, eax
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov eax, edi
    mov edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov edi, eax                        ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    mov ecx, [edx + CPUM.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and edx, ~X86_CR4_PAE
%else
    or edx, X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, ~X86_CR0_PG
    mov cr0, eax
    DEBUG_CHAR('2')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    ; activate host gdt and idt
    lgdt [edx + CPUM.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUM.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUM.Host.tr]    ; eax <- TR
    and al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUM.Host.gdtr + 2]     ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h             ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUM.Host.tr]
%else
    movzx eax, word [edx + CPUM.Host.tr]    ; eax <- TR
    and al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUM.Host.gdtr + 2]     ; eax <- GDTR.address + descriptor offset.
    mov ecx, [eax + 4]                      ; ecx <- 2nd descriptor dword
    mov ebx, ecx                            ; save original value
    and ecx, ~0200h                         ; clear busy flag (2nd type2 bit)
    mov [eax + 4], ecx                      ; not using xchg here is paranoia..
    ltr word [edx + CPUM.Host.tr]
    xchg [eax + 4], ebx                     ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUM.Host.ldtr]
    ; Restore segment registers
    mov eax, [edx + CPUM.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUM.Host.es]
    mov es, eax
    mov eax, [edx + CPUM.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUM.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUM.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov eax, [edx + CPUM.Host.SysEnter.cs]
    mov ebx, [edx + CPUM.Host.SysEnter.cs + 4]
    xchg edx, ebx                       ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    xchg edx, ebx                       ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov esi, [edx + CPUM.fUseFlags]     ; esi == use flags.
    test esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov ecx, cr0
    and ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave [edx + CPUM.Guest.fpu]
    fxrstor [edx + CPUM.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave [edx + CPUM.Guest.fpu]
    mov eax, [edx + CPUM.Host.fpu]      ; control word
    not eax                             ; 1 means exception ignored (6 LS bits)
    and eax, byte 03Fh                  ; 6 LS bits only
    test eax, [edx + CPUM.Host.fpu + 4] ; status word
    jz gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and dword [edx + CPUM.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor [edx + CPUM.Host.fpu]
    jmp short gth_fpu_no

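; How the FIX_NO_FXSAVE_JMP decision above could be taken at fixup time;
; a sketch only, since the real test lives with the relocator in C:
; FXSAVE/FXRSTOR support is advertised by CPUID.1:EDX bit 24 (FXSR).
%if 0
    mov eax, 1
    cpuid
    test edx, 1 << 24                   ; FXSR feature bit
    jz .use_fnsave                      ; unsupported -> patch in the jump
.use_fnsave:
%endif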
ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov ecx, [edx + CPUM.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUM.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUM.Host.cr2]     ; assumed to be a waste of time.
    ;mov cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp gth_debug_regs_restore
gth_debug_regs_no:

    ; restore general registers.
    mov eax, edi                        ; restore return code. eax = return code !!
    mov edi, [edx + CPUM.Host.edi]
    mov esi, [edx + CPUM.Host.esi]
    mov ebx, [edx + CPUM.Host.ebx]
    mov ebp, [edx + CPUM.Host.ebp]
    push dword [edx + CPUM.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;   COM_S_CHAR '4'
%endif
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor eax, eax
    mov dr7, eax                        ; paranoia or not?
    test esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov eax, [edx + CPUM.Host.dr0]
    mov dr0, eax
    mov ebx, [edx + CPUM.Host.dr1]
    mov dr1, ebx
    mov ecx, [edx + CPUM.Host.dr2]
    mov dr2, ecx
    mov eax, [edx + CPUM.Host.dr3]
    mov dr3, eax
gth_debug_regs_dr7:
    mov ebx, [edx + CPUM.Host.dr6]
    mov dr6, ebx
    mov ecx, [edx + CPUM.Host.dr7]
    mov dr7, ecx
    jmp gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost) - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget) - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(FarJmpGCTarget) - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End) - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(FarJmpGCTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget) - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,                   dd 0
        at VMMSWITCHERDEF.cbIDCode0,                    dd 0
        at VMMSWITCHERDEF.offIDCode1,                   dd 0
        at VMMSWITCHERDEF.cbIDCode1,                    dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(FarJmpGCTarget) - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget) - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(HCExitTarget) - NAME(FarJmpGCTarget)
%endif

    iend