VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm@11985

Last change on this file since 11985 was 10687, checked in by vboxsync, 16 years ago

Save the FPU control word and MXCSR on entry and restore them afterwards. (VT-x & AMD-V)
Security measure so the guest can't cause fpu/sse exceptions as we no longer restore the entire
host fpu state.

; $Id: CPUMAllA.asm 10687 2008-07-16 09:22:28Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif

;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1

;; @def CPUM_REG
; The register which we load the CPUM pointer into.
%ifdef RT_ARCH_AMD64
 %define CPUM_REG    rdx
%else
 %define CPUM_REG    edx
%endif

BEGINCODE


;;
; Handles lazy FPU saving and restoring.
;
; This handler will implement lazy fpu (sse/mmx/stuff) saving.
; Two actions may be taken in this handler since the Guest OS may
; be doing lazy fpu switching: we either generate the trap which
; the guest CPU context calls for according to its CR0 flags, or,
; if no guest trap is due, we save the host context and restore
; the guest context.
;
; @returns 0 if the caller should continue execution.
; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param pCPUM x86:[esp+4] GCC:rdi MSC:rcx CPUM pointer
;
align 16
BEGINPROC CPUMHandleLazyFPUAsm
    ;
    ; Figure out what to do.
    ;
    ; There are two basic actions:
    ;   1. Save host fpu and restore guest fpu.
    ;   2. Generate guest trap.
    ;
    ; When entering the hypervisor we'll always enable MP (for proper wait
    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
    ; is taken from the guest OS in order to get proper SSE handling.
    ;
    ;
    ; Actions taken depending on the guest CR0 flags:
    ;
    ;     3    2    1
    ;    TS | EM | MP | FPUInstr | WAIT :: VMM Action
    ;    ------------------------------------------------------------------------
    ;     0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
    ;     0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
    ;     0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
    ;     0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
    ;     1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
    ;     1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
    ;     1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
    ;     1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.
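    ;
    ; E.g. TS=1, EM=0, MP=1 (row six): both FPU instructions and WAIT raise
    ; #NM in the guest, so there is nothing to absorb here and we go to the
    ; host to reflect the trap (hlfpua_to_host).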

    ;
    ; Before taking any of these actions we check whether we have already
    ; loaded the GC FPU. Because if we have, this is a trap for the guest - raw ring-3.
    ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    test    dword [xDX + CPUM.fUseFlags], CPUM_USED_FPU
    jz      hlfpua_not_loaded
    jmp     hlfpua_to_host

    ;
    ; Take action.
    ;
align 16
hlfpua_not_loaded:
    mov     eax, [xDX + CPUM.Guest.cr0]
    and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
%ifdef RT_ARCH_AMD64
    lea     r8, [hlfpuajmp1 wrt rip]
    jmp     qword [rax*4 + r8]
%else
    jmp     dword [eax*2 + hlfpuajmp1]
%endif
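    ; MP, EM and TS are CR0 bits 1, 2 and 3, so the masked value in eax is
    ; always even (0, 2, ..., 14), i.e. eight possible table rows. Scaling it
    ; by 4 (AMD64) or 2 (x86) therefore turns it into an offset in units of
    ; one pointer (8 resp. 4 bytes) into the eight-entry tables below.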
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
;; and mask for cr0.
hlfpu_afFlags:
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0

    ;
    ; Action - switch FPU context and change cr0 flags.
    ;
align 16
hlfpua_switch_fpu_ctx:
%ifndef IN_RING3 ; IN_GC or IN_RING0
    mov     xCX, cr0
 %ifdef RT_ARCH_AMD64
    lea     r8, [hlfpu_afFlags wrt rip]
    and     rcx, [rax*4 + r8]           ; calc the new cr0 flags.
 %else
    and     ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
 %endif
    mov     xAX, cr0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ; clear flags so we don't trap here.
%endif
%ifndef RT_ARCH_AMD64
    test    dword [xDX + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
    jz short hlfpua_no_fxsave
%endif

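    ; Save the host state and load the guest state; fxsave/fxrstor carry the
    ; full x87/MMX/SSE register set in one instruction each.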
    fxsave  [xDX + CPUM.Host.fpu]
    or      dword [xDX + CPUM.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
    fxrstor [xDX + CPUM.Guest.fpu]
hlfpua_finished_switch:
%ifdef IN_GC
    mov     cr0, xCX                    ; load the new cr0 flags.
%endif
    ; return continue execution.
    xor     eax, eax
    ret

%ifndef RT_ARCH_AMD64
; legacy support.
hlfpua_no_fxsave:
    fnsave  [xDX + CPUM.Host.fpu]
    or      dword [xDX + CPUM.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
    mov     eax, [xDX + CPUM.Guest.fpu] ; control word
    not     eax                         ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh              ; 6 LS bits only
    test    eax, [xDX + CPUM.Guest.fpu + 4] ; status word
    jz short hlfpua_no_exceptions_pending
    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [xDX + CPUM.Guest.fpu + 4], ~03Fh
hlfpua_no_exceptions_pending:
    frstor  [xDX + CPUM.Guest.fpu]
    jmp near hlfpua_finished_switch
%endif ; !RT_ARCH_AMD64


    ;
    ; Action - Generate Guest trap.
    ;
hlfpua_action_4:
hlfpua_to_host:
    mov     eax, VINF_EM_RAW_GUEST_TRAP
    ret
ENDPROC CPUMHandleLazyFPUAsm
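
;
; A minimal, hypothetical caller sketch (illustration only; the label and the
; x86 cdecl framing below are not from this file). A #NM handler that has the
; CPUM pointer at hand would dispatch on the return value like this:
;
;       push    edx                        ; pCPUM
;       call    NAME(CPUMHandleLazyFPUAsm)
;       add     esp, 4
;       test    eax, eax                   ; 0: FPU state switched, resume guest.
;       jnz     .reflect_trap_to_guest     ; VINF_EM_RAW_GUEST_TRAP: reflect #NM.
;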


;;
; Restores the host's FPU/XMM state
;
; @returns 0
; @param pCPUM x86:[esp+4] GCC:rdi MSC:rcx CPUM pointer
;
align 16
BEGINPROC CPUMRestoreHostFPUStateAsm
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUM.fUseFlags], CPUM_USED_FPU
    jz short gth_fpu_no

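    ; Temporarily clear CR0.TS and CR0.EM so the fxsave/fxrstor below can't
    ; trap; the old CR0 value is kept in xCX and put back afterwards.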
    mov     xAX, cr0
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX

    fxsave  [xDX + CPUM.Guest.fpu]
    fxrstor [xDX + CPUM.Host.fpu]

    mov     cr0, xCX                    ; and restore old CR0 again
    and     dword [xDX + CPUM.fUseFlags], ~CPUM_USED_FPU
gth_fpu_no:
    xor     eax, eax
    ret
ENDPROC CPUMRestoreHostFPUStateAsm


;;
; Restores the guest's FPU/XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadFPUAsm
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    fxrstor [xDX + CPUMCTX.fpu]
    ret
ENDPROC CPUMLoadFPUAsm

;;
; Saves the guest's FPU/XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveFPUAsm
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    fxsave  [xDX + CPUMCTX.fpu]
    ret
ENDPROC CPUMSaveFPUAsm

;;
; Restores the guest's XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadXMMAsm
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

%ifdef RT_ARCH_AMD64
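    ; XMM8-XMM15 only exist in 64-bit mode; restore them just for guests
    ; running in long mode (EFER.LMA set).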
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      CPUMLoadXMMAsm_done

    movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
CPUMLoadXMMAsm_done:
%endif

    ret
ENDPROC CPUMLoadXMMAsm


;;
; Saves the guest's XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveXMMAsm
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

%ifdef RT_ARCH_AMD64
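    ; Likewise, only save XMM8-XMM15 for guests running in long mode.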
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      CPUMSaveXMMAsm_done

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

CPUMSaveXMMAsm_done:
%endif
    ret
ENDPROC CPUMSaveXMMAsm


;;
; Sets the FPU control word, clearing pending exceptions first.
;
; @param u16FCW x86:[esp+4] GCC:rdi MSC:rcx New FPU control word
align 16
BEGINPROC CPUMSetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
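    ; Clear pending x87 exceptions first: loading a control word that unmasks
    ; a pending exception would otherwise raise a deferred #MF at the next
    ; waiting FPU instruction.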
    fnclex
    push    xAX
    fldcw   [xSP]
    pop     xAX
    ret
ENDPROC CPUMSetFCW

;;
; Gets the FPU control word
;
align 16
BEGINPROC CPUMGetFCW
    fnstcw  [xSP - 8]
    mov     ax, word [xSP - 8]
    ret
ENDPROC CPUMGetFCW


;;
; Sets the MXCSR
;
; @param u32MXCSR x86:[esp+4] GCC:rdi MSC:rcx New MXCSR
align 16
BEGINPROC CPUMSetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
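    ; Nothing to clear first here, unlike CPUMSetFCW: SSE has no deferred
    ; exception mechanism (#XM is raised only by the faulting instruction),
    ; so ldmxcsr cannot trap on stale status flags.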
    push    xAX
    ldmxcsr [xSP]
    pop     xAX
    ret
ENDPROC CPUMSetMXCSR

;;
; Gets the MXCSR
;
align 16
BEGINPROC CPUMGetMXCSR
    stmxcsr [xSP - 8]
    mov     eax, dword [xSP - 8]
    ret
ENDPROC CPUMGetMXCSR
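
; A hypothetical usage sketch tying the four helpers above together
; (illustration only; uHostFCW and uHostMXCSR are made-up storage). Per the
; change description at the top, the host FCW and MXCSR are saved on entry
; and restored afterwards around VT-x / AMD-V guest execution:
;
;       call    NAME(CPUMGetFCW)            ; ax  = host FCW
;       mov     [uHostFCW], ax
;       call    NAME(CPUMGetMXCSR)          ; eax = host MXCSR
;       mov     [uHostMXCSR], eax
;       ; ... run the guest ...
;       movzx   eax, word [uHostFCW]
;       push    eax
;       call    NAME(CPUMSetFCW)            ; also clears pending x87 exceptions
;       add     esp, 4
;       push    dword [uHostMXCSR]
;       call    NAME(CPUMSetMXCSR)
;       add     esp, 4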