VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm@ 2988

Last change on this file since 2988 was 2988, checked in by vboxsync, 18 years ago

InnoTek -> innotek part 4: more miscellaneous files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 7.4 KB
Line 
1; $Id: CPUMAllA.asm 2988 2007-06-01 17:36:09Z vboxsync $
2;; @file
3; CPUM - Guest Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2007 innotek GmbH
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License as published by the Free Software Foundation,
13; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14; distribution. VirtualBox OSE is distributed in the hope that it will
15; be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; If you received this file as part of a commercial VirtualBox
18; distribution, then only the terms of your commercial VirtualBox
19; license agreement apply instead of the previous paragraph.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/vm.mac"
27%include "VBox/err.mac"
28%include "VBox/stam.mac"
29%include "CPUMInternal.mac"
30%include "VBox/x86.mac"
31%include "VBox/cpum.mac"
32
33
;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1

;; @def CPUM_REG
; The register which we load the CPUM pointer into.
; (rdx on AMD64, edx on x86 - referenced as xDX in the code below.)
%ifdef __AMD64__
 %define CPUM_REG rdx
%else
 %define CPUM_REG edx
%endif

BEGINCODE
49
50
;;
; Handles lazy FPU saving and restoring.
;
; This handler will implement lazy fpu (sse/mmx/stuff) saving.
; Two actions may be taken in this handler since the Guest OS may
; be doing lazy fpu switching. So, we'll have to generate those
; traps which the Guest CPU CTX shall have according to its
; CR0 flags. If no traps for the Guest OS, we'll save the host
; context and restore the guest context.
;
; @returns 0 if caller should continue execution.
; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param   pCPUM  x86:[esp+4] GCC:rdi MSC:rcx     CPUM pointer
;
align 16
BEGINPROC CPUMHandleLazyFPUAsm
    ;
    ; Figure out what to do.
    ;
    ; There are two basic actions:
    ;   1. Save host fpu and restore guest fpu.
    ;   2. Generate guest trap.
    ;
    ; When entering the hypervisor we'll always enable MP (for proper wait
    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
    ; is taken from the guest OS in order to get proper SSE handling.
    ;
    ;
    ; Actions taken depending on the guest CR0 flags:
    ;
    ;   3    2    1
    ;  TS | EM | MP | FPUInstr | WAIT :: VMM Action
    ; ------------------------------------------------------------------------
    ;   0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
    ;   0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
    ;   0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
    ;   0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
    ;   1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
    ;   1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
    ;   1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
    ;   1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.

    ;
    ; Before taking any of these actions we're checking if we have already
    ; loaded the GC FPU. Because if we have, this is a trap for the guest - raw ring-3.
    ;

    ; Fetch the pCPUM argument into xDX according to the calling convention.
%ifdef __AMD64__
 %ifdef __WIN__
    mov     xDX, rcx                    ; MSC x64: 1st argument in rcx.
 %else
    mov     xDX, rdi                    ; SysV x64 (GCC): 1st argument in rdi.
 %endif
%else
    mov     xDX, dword [esp + 4]        ; x86 cdecl: 1st argument on the stack.
%endif
    test    dword [xDX + CPUM.fUseFlags], CPUM_USED_FPU
    jz      hlfpua_not_loaded
    jmp     hlfpua_to_host              ; guest FPU already loaded -> the trap is for the guest.

    ;
    ; Take action.
    ;
align 16
hlfpua_not_loaded:
    ; Index the jump table by the guest's CR0 MP/EM/TS bits. These are CR0
    ; bits 1-3, so after masking eax = 2 * table-index (values 0,2,...,14).
    mov     eax, [xDX + CPUM.Guest.cr0]
    and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
%ifdef __AMD64__
    lea     r8, [hlfpuajmp1 wrt rip]    ; RIP-relative table base (no 32-bit absolute in 64-bit code).
    jmp     qword [rax*4 + r8]          ; eax = 2*index, 8-byte entries -> scale by 4.
%else
    jmp     dword [eax*2 + hlfpuajmp1]  ; eax = 2*index, 4-byte entries -> scale by 2.
%endif
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
    RTCCPTR_DEF hlfpua_switch_fpu_ctx   ; 0: -- -- --
    RTCCPTR_DEF hlfpua_switch_fpu_ctx   ; 1: -- -- MP
    RTCCPTR_DEF hlfpua_switch_fpu_ctx   ; 2: -- EM --
    RTCCPTR_DEF hlfpua_switch_fpu_ctx   ; 3: -- EM MP
    RTCCPTR_DEF hlfpua_switch_fpu_ctx   ; 4: TS -- --
    RTCCPTR_DEF hlfpua_to_host          ; 5: TS -- MP
    RTCCPTR_DEF hlfpua_switch_fpu_ctx   ; 6: TS EM --
    RTCCPTR_DEF hlfpua_to_host          ; 7: TS EM MP
;; and mask for cr0. Same indexing as hlfpuajmp1; selects which flags to
;; clear in the new cr0 value (see the action table above).
hlfpu_afFlags:
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)  ; 0
    RTCCPTR_DEF ~(X86_CR0_TS)               ; 1
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)  ; 2
    RTCCPTR_DEF ~(X86_CR0_TS)               ; 3
    RTCCPTR_DEF ~(X86_CR0_MP)               ; 4
    RTCCPTR_DEF 0                           ; 5 (unused - goes to host)
    RTCCPTR_DEF ~(X86_CR0_MP)               ; 6
    RTCCPTR_DEF 0                           ; 7 (unused - goes to host)

    ;
    ; Action - switch FPU context and change cr0 flags.
    ;
align 16
hlfpua_switch_fpu_ctx:
%ifndef IN_RING3 ; IN_GC or IN_RING0
    mov     xCX, cr0
 %ifdef __AMD64__
    lea     r8, [hlfpu_afFlags wrt rip]
    and     rcx, [rax*4 + r8]           ; calc the new cr0 flags. (eax still holds 2*index.)
 %else
    and     ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
 %endif
    mov     xAX, cr0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ; clear flags so we don't trap here.
%endif
%ifndef __AMD64__
    ; Older 32-bit CPUs may lack fxsave/fxrstor; check the CPUID feature bit.
    test    dword [xDX + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
    jz      short hlfpua_no_fxsave
%endif

    fxsave  [xDX + CPUM.Host.fpu]       ; save the host FPU/MMX/SSE state...
    or      dword [xDX + CPUM.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
    fxrstor [xDX + CPUM.Guest.fpu]      ; ...and load the guest state.
hlfpua_finished_switch:
%ifdef IN_GC
    mov     cr0, xCX                    ; load the new cr0 flags.
%endif
    ; return continue execution.
    xor     eax, eax
    ret

%ifndef __AMD64__
; legacy support (no FXSR).
hlfpua_no_fxsave:
    fnsave  [xDX + CPUM.Host.fpu]
    or      dword [xDX + CPUM.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
    ; frstor raises any unmasked pending exception right away, so mask off
    ; whatever is both unmasked in the guest control word and pending in the
    ; guest status word before loading the state.
    mov     eax, [xDX + CPUM.Guest.fpu] ; control word
    not     eax                         ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh              ; 6 LS bits only
    test    eax, [xDX + CPUM.Guest.fpu + 4] ; status word
    jz      short hlfpua_no_exceptions_pending
    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [xDX + CPUM.Guest.fpu + 4], ~03Fh
hlfpua_no_exceptions_pending:
    frstor  [xDX + CPUM.Guest.fpu]
    jmp     near hlfpua_finished_switch
%endif ; !__AMD64__


    ;
    ; Action - Generate Guest trap.
    ;
hlfpua_action_4:
hlfpua_to_host:
    mov     eax, VINF_EM_RAW_GUEST_TRAP
    ret
ENDPROC CPUMHandleLazyFPUAsm
204
205
;;
; Restores the host's FPU/XMM state.
;
; @returns 0
; @param   pCPUM  x86:[esp+4] GCC:rdi MSC:rcx     CPUM pointer
;
align 16
BEGINPROC CPUMRestoreHostFPUStateAsm
    ; Fetch the pCPUM argument into xDX according to the calling convention.
%ifdef __AMD64__
 %ifdef __WIN__
    mov     xDX, rcx                    ; MSC x64: 1st argument in rcx.
 %else
    mov     xDX, rdi                    ; SysV x64 (GCC): 1st argument in rdi.
 %endif
%else
    mov     xDX, dword [esp + 4]        ; x86 cdecl: 1st argument on the stack.
%endif

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    test    dword [xDX + CPUM.fUseFlags], CPUM_USED_FPU
    jz      short gth_fpu_no            ; guest never used the FPU - nothing to do.

    mov     xAX, cr0
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ; clear TS/EM so fxsave/fxrstor don't #NM here.

    fxsave  [xDX + CPUM.Guest.fpu]      ; stash the guest FPU/XMM state...
    fxrstor [xDX + CPUM.Host.fpu]       ; ...and bring back the host state.

    mov     cr0, xCX                    ; and restore old CR0 again
    and     dword [xDX + CPUM.fUseFlags], ~CPUM_USED_FPU ; guest FPU is no longer loaded.
gth_fpu_no:
    xor     eax, eax                    ; return 0.
    ret
ENDPROC CPUMRestoreHostFPUStateAsm
243
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette