VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm@ 48684

Last change on this file since 48684 was 48673, checked in by vboxsync, 11 years ago

VMM/CPUMAllA.asm: Revert r89225, the caller already saves/restores CR0. Added a comment explaining this.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 6.9 KB
; $Id: CPUMAllA.asm 48673 2013-09-25 08:24:06Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif

;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1

BEGINCODE

;;
; Handles lazy FPU saving and restoring.
;
; This handler implements lazy FPU (x87/MMX/SSE) saving and restoring.
; Two actions may be taken in this handler since the guest OS may be
; doing lazy FPU switching: we must generate the traps which the guest
; CPU context shall see according to its CR0 flags, and if the guest OS
; takes no trap, we save the host context and restore the guest context.
;
; @returns 0 if the caller should continue execution.
; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param   pCPUMCPU    x86:[esp+4]  GCC:rdi  MSC:rcx     CPUMCPU pointer
;
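;
; A minimal sketch of how a caller might consume the status code
; (hypothetical assembly, for illustration only; the actual ring-0
; caller is CPUMR0LoadGuestFPU() via CPUMHandleLazyFPU(), as noted
; near the end of this routine):
;
;       call    NAME(cpumHandleLazyFPUAsm)
;       test    eax, eax
;       jnz     .reflect_trap_to_guest      ; VINF_EM_RAW_GUEST_TRAP
;       ; eax == 0: FPU state switched, continue guest execution.
;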
align 16
BEGINPROC cpumHandleLazyFPUAsm
    ;
    ; Figure out what to do.
    ;
    ; There are two basic actions:
    ;   1. Save host fpu and restore guest fpu.
    ;   2. Generate guest trap.
    ;
    ; When entering the hypervisor we'll always enable MP (for proper wait
    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
    ; is taken from the guest OS in order to get proper SSE handling.
    ;
    ;
    ; Actions taken depending on the guest CR0 flags:
    ;
    ;    3    2    1
    ;   TS | EM | MP | FPUInstr | WAIT  :: VMM Action
    ; ------------------------------------------------------------------------
    ;    0 |  0 |  0 | Exec     | Exec  :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  0 |  1 | Exec     | Exec  :: Clear TS, Save HC, Load GC.
    ;    0 |  1 |  0 | #NM      | Exec  :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  1 |  1 | #NM      | Exec  :: Clear TS, Save HC, Load GC.
    ;    1 |  0 |  0 | #NM      | Exec  :: Clear MP, Save HC, Load GC. (EM is already cleared.)
    ;    1 |  0 |  1 | #NM      | #NM   :: Go to host taking trap there.
    ;    1 |  1 |  0 | #NM      | Exec  :: Clear MP, Save HC, Load GC. (EM is already set.)
    ;    1 |  1 |  1 | #NM      | #NM   :: Go to host taking trap there.

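    ;
    ; Worked example (illustrative): guest CR0 with TS=0, EM=1, MP=1
    ; masks to X86_CR0_EM | X86_CR0_MP = 6, selecting index 3 of the
    ; tables below: switch the FPU context and clear only TS, per the
    ; "0 | 1 | 1" row above.
    ;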
    ;
    ; Before taking any of these actions we check whether the GC FPU state
    ; has already been loaded; if it has, this is a trap for the guest - raw ring-3.
    ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      hlfpua_not_loaded
    jmp     hlfpua_to_host

    ;
    ; Take action.
    ;
align 16
hlfpua_not_loaded:
    mov     eax, [xDX + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
%ifdef RT_ARCH_AMD64
    lea     r8, [hlfpuajmp1 wrt rip]
    jmp     qword [rax*4 + r8]
%else
    jmp     dword [eax*2 + hlfpuajmp1]
%endif
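    ; Note: eax holds only the CR0 MP/EM/TS bits (bits 1-3) after the
    ; masking above, so it is always even; scaling that even value by 4
    ; (AMD64, 8-byte table entries) or 2 (x86, 4-byte table entries)
    ; yields the byte offset into the 8-entry tables below.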
align 16
;; jump table using fpu-related cr0 flags as index.
hlfpuajmp1:
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
;; and mask for cr0.
hlfpu_afFlags:
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
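; Each mask entry pairs with the jump-table entry at the same index:
; e.g. index 0 (TS=EM=MP=0) clears TS & MP as the first action-table
; row requires, while the 0 entries correspond to the hlfpua_to_host
; cases, where the mask is never applied.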

    ;
    ; Action - switch FPU context and change cr0 flags.
    ;
align 16
hlfpua_switch_fpu_ctx:
%ifndef IN_RING3 ; IN_RC or IN_RING0
    mov     xCX, cr0
 %ifdef RT_ARCH_AMD64
    lea     r8, [hlfpu_afFlags wrt rip]
    and     rcx, [rax*4 + r8]           ; calc the new cr0 flags.
 %else
    and     ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
 %endif
    mov     xAX, cr0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ; clear flags so we don't trap here.
%endif
%ifndef RT_ARCH_AMD64
    mov     eax, edx                    ; Calculate the PCPUM pointer
    sub     eax, [edx + CPUMCPU.offCPUM]
    test    dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
    jz short hlfpua_no_fxsave
%endif

%ifdef RT_ARCH_AMD64
    ; Use explicit REX prefix. See @bugref{6398}.
    o64 fxsave  [xDX + CPUMCPU.Host.fpu]
%else
    fxsave  [xDX + CPUMCPU.Host.fpu]
%endif
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
%ifdef RT_ARCH_AMD64
    o64 fxrstor [xDX + CPUMCPU.Guest.fpu]
%else
    fxrstor [xDX + CPUMCPU.Guest.fpu]
%endif
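    ; (The o64 prefix emits REX.W, i.e. the FXSAVE64/FXRSTOR64 forms,
    ; so the full 64-bit FPU instruction/data pointers are saved and
    ; restored rather than the truncated 32-bit image.)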
hlfpua_finished_switch:

    ; Load new CR0 value.
    ; In IN_RING0 the caller saves/restores CR0 anyway, so we avoid the extra CR0 write.
    ; Currently the only caller in ring-0 is CPUMR0LoadGuestFPU()->CPUMHandleLazyFPU().
    ;; @todo Optimize the many unconditional CR0 writes.
%ifdef IN_RC
    mov     cr0, xCX                    ; load the new cr0 flags.
%endif
    ; Return 0 - continue execution.
    xor     eax, eax
    ret

%ifndef RT_ARCH_AMD64
; legacy support.
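; The low 6 bits of the x87 control word are the exception masks
; (IM/DM/ZM/OM/UM/PM) and the matching status-word bits are the pending
; exception flags; a pending unmasked exception would fire on the next
; waiting FPU instruction, hence the forcible clearing before frstor.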
hlfpua_no_fxsave:
    fnsave  [xDX + CPUMCPU.Host.fpu]
    or      dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
    mov     eax, [xDX + CPUMCPU.Guest.fpu]      ; control word
    not     eax                                 ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh                      ; 6 LS bits only
    test    eax, [xDX + CPUMCPU.Guest.fpu + 4]  ; status word
    jz short hlfpua_no_exceptions_pending
    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [xDX + CPUMCPU.Guest.fpu + 4], ~03Fh
hlfpua_no_exceptions_pending:
    frstor  [xDX + CPUMCPU.Guest.fpu]
    jmp near hlfpua_finished_switch
%endif ; !RT_ARCH_AMD64


    ;
    ; Action - Generate Guest trap.
    ;
hlfpua_action_4:
hlfpua_to_host:
    mov     eax, VINF_EM_RAW_GUEST_TRAP
    ret
ENDPROC cpumHandleLazyFPUAsm
