VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@61144

Last change on this file: r61144, checked in by vboxsync on 2016-05-23

CPUM,HM,GVMM,TRPM,VMM: Next part of the FPU state handling for IEM. This is a slightly risky change, as we now leave CR0.TS+EM cleared after saving the host state; they only get restored when we restore the host state. On Windows, Darwin, and later on Linux (needs testing) we will rely on the host OS's #NM handling rather than our own CR0.TS/EM handiwork. This means we won't be saving the host state but rather the ring-3 state of our own thread. This change also introduces a CPUM force flag that we're using for restoring CR0.TS/EM in raw-mode (it may be extended with other uses later if needed).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 9.8 KB
; $Id: CPUMR0A.asm 61144 2016-05-23 22:16:26Z vboxsync $
;; @file
; CPUM - Ring-0 Assembly Routines (supporting HM and IEM).
;

;
; Copyright (C) 2006-2016 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%define RT_ASM_WITH_SEH64
%include "iprt/asmdefs.mac"
%include "VBox/asmdefs.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"


BEGINCODE

;;
; Makes sure the EMTs have a FPU state associated with them on hosts where we're
; allowed to use it in ring-0 too.
;
; This ensures that we don't have to allocate the state lazily while trying to execute
; guest code with preemption disabled or worse.
;
; @cproto VMMR0_INT_DECL(void) CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
;
BEGINPROC CPUMR0RegisterVCpuThread
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

%ifdef CPUM_CAN_USE_FPU_IN_R0
        movaps  xmm0, xmm0
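        ; Touching an XMM register here should be enough to make the host kernel
        ; allocate and initialize the FPU/SSE state for this thread up front, so
        ; it never has to be allocated lazily with preemption disabled (see the
        ; function description above).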
%endif

.return:
        xor     eax, eax                ; paranoia
        leave
        ret
ENDPROC CPUMR0RegisterVCpuThread


;;
; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
;
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif
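        ; From here on, pCpumCpu is the CPUMCPU pointer on both 64-bit and 32-bit
        ; hosts, and pXState is used for the X86FXSTATE pointers below, leaving
        ; xAX and xDX free for the XSAVE/XRSTOR instructions.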

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ;
        ; Save the host state.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
        jnz     .already_saved_host
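        ; Skip the host save if it was already done earlier: CPUM_USED_FPU_HOST is
        ; set further down once the host state has been saved and is only cleared
        ; again when cpumR0SaveGuestRestoreHostFPUState restores it.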

%ifndef CPUM_CAN_USE_FPU_IN_R0
        ; On systems where the kernel doesn't necessarily allow us to use the FPU
        ; in ring-0 context, we have to disable FPU traps before doing fxsave/xsave
        ; here.  (xCX is 0 if no CR0 change was necessary.)  We leave it like that
        ; so IEM can use the FPU/SSE/AVX host CPU features directly.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX
        mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
        ;; @todo What about XCR0?
%endif

        CPUMR0_SAVE_HOST

%ifdef VBOX_WITH_KERNEL_USING_XMM
        jmp     .load_guest
%endif
.already_saved_host:
%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; If we didn't save the host state, we must save the non-volatile XMM registers.
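        ; (CPUMR0_SAVE_HOST would otherwise have captured xmm6-xmm15 as part of
        ; the full host state; since that was skipped, stash them here so they
        ; can be put back after CPUMR0_LOAD_GUEST has clobbered them.)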
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        movdqa  [pXState + X86FXSTATE.xmm6 ], xmm6
        movdqa  [pXState + X86FXSTATE.xmm7 ], xmm7
        movdqa  [pXState + X86FXSTATE.xmm8 ], xmm8
        movdqa  [pXState + X86FXSTATE.xmm9 ], xmm9
        movdqa  [pXState + X86FXSTATE.xmm10], xmm10
        movdqa  [pXState + X86FXSTATE.xmm11], xmm11
        movdqa  [pXState + X86FXSTATE.xmm12], xmm12
        movdqa  [pXState + X86FXSTATE.xmm13], xmm13
        movdqa  [pXState + X86FXSTATE.xmm14], xmm14
        movdqa  [pXState + X86FXSTATE.xmm15], xmm15

        ;
        ; Load the guest state.
        ;
.load_guest:
%endif
        CPUMR0_LOAD_GUEST

%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
        movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]
        movdqa  xmm8,  [pXState + X86FXSTATE.xmm8]
        movdqa  xmm9,  [pXState + X86FXSTATE.xmm9]
        movdqa  xmm10, [pXState + X86FXSTATE.xmm10]
        movdqa  xmm11, [pXState + X86FXSTATE.xmm11]
        movdqa  xmm12, [pXState + X86FXSTATE.xmm12]
        movdqa  xmm13, [pXState + X86FXSTATE.xmm13]
        movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
        movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
%endif

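        ; Mark the host state as saved and the guest state as loaded so the next
        ; call can take the .already_saved_host shortcut above.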
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM | CPUM_USED_FPU_HOST)
        popf

%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        leave
        ret
ENDPROC cpumR0SaveHostRestoreGuestFPUState


;;
; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
;
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

 %ifdef VBOX_WITH_KERNEL_USING_XMM
        ;
        ; Copy non-volatile XMM registers to the host state so we can use
        ; them while saving the guest state (we've gotta do this anyway).
        ;
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        movdqa  [pXState + X86FXSTATE.xmm6], xmm6
        movdqa  [pXState + X86FXSTATE.xmm7], xmm7
        movdqa  [pXState + X86FXSTATE.xmm8], xmm8
        movdqa  [pXState + X86FXSTATE.xmm9], xmm9
        movdqa  [pXState + X86FXSTATE.xmm10], xmm10
        movdqa  [pXState + X86FXSTATE.xmm11], xmm11
        movdqa  [pXState + X86FXSTATE.xmm12], xmm12
        movdqa  [pXState + X86FXSTATE.xmm13], xmm13
        movdqa  [pXState + X86FXSTATE.xmm14], xmm14
        movdqa  [pXState + X86FXSTATE.xmm15], xmm15
 %endif

        ;
        ; Save the guest state if necessary.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
        jz      .load_only_host
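        ; If CPUM_USED_FPU_GUEST is clear, the guest state was never loaded on
        ; this CPU, so there is nothing to save and only the host state needs
        ; to be put back.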

 %ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
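        ; (CPUMR0_SAVE_GUEST stores whatever is currently in the XMM registers,
        ; so they have to hold the guest values again before we save.)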
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        movdqa  xmm0,  [pXState + X86FXSTATE.xmm0]
        movdqa  xmm1,  [pXState + X86FXSTATE.xmm1]
        movdqa  xmm2,  [pXState + X86FXSTATE.xmm2]
        movdqa  xmm3,  [pXState + X86FXSTATE.xmm3]
        movdqa  xmm4,  [pXState + X86FXSTATE.xmm4]
        movdqa  xmm5,  [pXState + X86FXSTATE.xmm5]
        movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
        movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]
        movdqa  xmm8,  [pXState + X86FXSTATE.xmm8]
        movdqa  xmm9,  [pXState + X86FXSTATE.xmm9]
        movdqa  xmm10, [pXState + X86FXSTATE.xmm10]
        movdqa  xmm11, [pXState + X86FXSTATE.xmm11]
        movdqa  xmm12, [pXState + X86FXSTATE.xmm12]
        movdqa  xmm13, [pXState + X86FXSTATE.xmm13]
        movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
        movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
 %endif
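        ; Save the complete guest FPU/SSE/AVX state into CPUMCPU.Guest.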
        CPUMR0_SAVE_GUEST

        ;
        ; Load the host state.
        ;
.load_only_host:
        CPUMR0_LOAD_HOST

%ifndef CPUM_CAN_USE_FPU_IN_R0
        ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
        ; in cpumRZSaveHostFPUState.
        mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
        RESTORE_CR0 xCX
%endif
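        ; The guest state has been saved and the host state restored, so neither
        ; CPUM_USED_FPU_GUEST nor CPUM_USED_FPU_HOST applies any more.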
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)

        popf
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        leave
        ret
%undef pCpumCpu
%undef pXState
ENDPROC cpumR0SaveGuestRestoreHostFPUState


%if ARCH_BITS == 32
 %ifdef VBOX_WITH_64_BITS_GUESTS
;;
; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
;
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi

        ;
        ; Restore host CPU state.
        ;
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        CPUMR0_LOAD_HOST

%ifndef CPUM_CAN_USE_FPU_IN_R0
        ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
        ; in cpumRZSaveHostFPUState.
        ;; @todo What about XCR0?
        mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
        RESTORE_CR0 xCX
%endif
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
        popf

        pop     esi
        pop     ebx
        leave
        ret
 %undef pCpumCpu
 %undef pXState
ENDPROC cpumR0RestoreHostFPUState
 %endif ; VBOX_WITH_64_BITS_GUESTS
%endif ; ARCH_BITS == 32
