VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@20997

Last change on this file since 20997 was 20997, checked in by vboxsync on 2009-06-26

HWACCM,CPUM: Fix for 64-bit Windows trashing guest XMM registers - VMX part.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.5 KB
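Background for the change noted above: in the Microsoft x64 calling convention, xmm6 through xmm15 are non-volatile (callee-saved) registers, so ring-0 code that loads guest FPU/XMM state with fxrstor must put the host kernel's values of those registers back before returning to its caller; that is what the VBOX_WITH_KERNEL_USING_XMM block in cpumR0SaveHostRestoreGuestFPUState below does. The following is a minimal standalone sketch of the same save/restore pattern; the buffer and label names are illustrative assumptions and are not part of this file.

BITS 64
SECTION .bss
alignb 16
g_abSavedXmm:   resb 10 * 16            ; hypothetical save area for xmm6..xmm15

SECTION .text
SaveKernelXmm:                          ; hypothetical helper: stash the non-volatile XMM registers
    movdqa  [rel g_abSavedXmm + 000h], xmm6
    movdqa  [rel g_abSavedXmm + 010h], xmm7
    movdqa  [rel g_abSavedXmm + 020h], xmm8
    ; ... xmm9 through xmm15 continue at the same 16-byte stride ...
    ret

RestoreKernelXmm:                       ; hypothetical helper: put them back before returning to the kernel
    movdqa  xmm6, [rel g_abSavedXmm + 000h]
    movdqa  xmm7, [rel g_abSavedXmm + 010h]
    movdqa  xmm8, [rel g_abSavedXmm + 020h]
    ; ... xmm9 through xmm15 likewise ...
    ret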
; $Id: CPUMR0A.asm 20997 2009-06-26 22:23:04Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif

;*******************************************************************************
;* Defined Constants And Macros *
;*******************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160
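; (Layout note, per the x86 FXSAVE image format: the 16-byte XMM register
;  slots start at byte 160 of X86FXSTATE, one register per 16 bytes.  For
;  instance, xmm6 sits at XMM_OFF_IN_X86FXSTATE + 6*16, which is the "+ 060h"
;  offset used by the movdqa instructions further down.)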


;*******************************************************************************
;* External Symbols *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;* Global Variables *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE, by the
; way, but that's not relevant at the moment.)
GLOBALNAME g_fCPUMIs64bitHost
    dd NAME(SUPR0AbsIs64bit)
%endif
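; (Usage sketch: the routines below test this value straight from memory, e.g.
;       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
;       jz      .legacy_mode
;  so no general purpose register has to be sacrificed to hold it.)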


BEGINCODE


;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
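; (Assumed C-level prototype, mirroring the DECLASM comments used for the DRx
;  helpers at the end of this file; an assumption, not taken from this file:
;    DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUMCPU); )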
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    pushf                               ; The Darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
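    ; (Encoding note: 0xEA is the opcode of the direct far jump, JMP ptr16:32,
    ;  i.e. a 32-bit offset followed by a 16-bit selector.  The selector is
    ;  stored above as a dword; only its low word is part of the instruction,
    ;  and the two leftover bytes are never executed because the jump is
    ;  unconditional.)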
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Guest.fpu]

%ifdef VBOX_WITH_KERNEL_USING_XMM
    ; Restore the non-volatile xmm registers. ASSUMES 64-bit Windows.
    lea     r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
    movdqa  xmm6,  [r11 + 060h]
    movdqa  xmm7,  [r11 + 070h]
    movdqa  xmm8,  [r11 + 080h]
    movdqa  xmm9,  [r11 + 090h]
    movdqa  xmm10, [r11 + 0a0h]
    movdqa  xmm11, [r11 + 0b0h]
    movdqa  xmm12, [r11 + 0c0h]
    movdqa  xmm13, [r11 + 0d0h]
    movdqa  xmm14, [r11 + 0e0h]
    movdqa  xmm15, [r11 + 0f0h]
%endif
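    ; (xmm6 through xmm15 are the callee-saved XMM registers of the Microsoft
    ;  x64 calling convention; xmm0 through xmm5 are volatile, so the kernel
    ;  caller does not expect those to survive and they need no treatment here.)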

.done:
    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    popf
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Host.fpu]
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState


%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
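; (Assumed C-level prototype, not stated in this file:
;    DECLASM(int) cpumR0SaveHostFPUState(PCPUMCPU pCPUMCPU); )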
align 16
BEGINPROC cpumR0SaveHostFPUState
    mov     xDX, dword [esp + 4]
    pushf                               ; The Darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)

    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    popf
    xor     eax, eax
    ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif


;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
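; (Assumed C-level prototype, not stated in this file:
;    DECLASM(int) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUMCPU); )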
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Only restore the FPU if the guest has used it.
    ; Using fxrstor should ensure that we're not causing an unwanted exception on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The Darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    popf
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Guest.fpu]
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState


;;
; Restores the host's FPU/XMM state (if the guest has used the FPU).
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
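; (Assumed C-level prototype, not stated in this file:
;    DECLASM(int) cpumR0RestoreHostFPUState(PCPUMCPU pCPUMCPU); )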
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Restore the FPU if the guest has used it.
    ; Using fxrstor should ensure that we're not causing an unwanted exception on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The Darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    mov     xAX, cr0
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    popf
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%endif
    pushf                               ; Just to be on the safe side.
    cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    ;
    ; Do the job.
    ;
    mov     xAX, dr0
    mov     xDX, dr1
    mov     [xCX], xAX
    mov     [xCX + 8 * 1], xDX
    mov     xAX, dr2
    mov     xDX, dr3
    mov     [xCX + 8 * 2], xAX
    mov     [xCX + 8 * 3], xDX

.done:
    popf
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, dr0
    mov     rdx, dr1
    mov     r8, dr2
    mov     r9, dr3
    mov     [rcx], rax
    mov     [rcx + 8 * 1], rdx
    mov     [rcx + 8 * 2], r8
    mov     [rcx + 8 * 3], r9
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%endif
    pushf                               ; Just to be on the safe side.
    cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    ;
    ; Do the job.
    ;
    mov     xAX, [xCX]
    mov     xDX, [xCX + 8 * 1]
    mov     dr0, xAX
    mov     dr1, xDX
    mov     xAX, [xCX + 8 * 2]
    mov     xDX, [xCX + 8 * 3]
    mov     dr2, xAX
    mov     dr3, xDX

.done:
    popf
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, [rcx]
    mov     rdx, [rcx + 8 * 1]
    mov     r8, [rcx + 8 * 2]
    mov     r9, [rcx + 8 * 3]
    mov     dr0, rax
    mov     dr1, rdx
    mov     dr2, r8
    mov     dr3, r9
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
