VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 15416

Last change on this file since 15416 was 15416, checked in by vboxsync, 16 years ago

CPUM: hybrid 32-bit kernel FPU mess.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
Line 
1; $Id: CPUMR0A.asm 15416 2008-12-13 05:31:06Z vboxsync $
2;; @file
3; CPUM - Guest Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/vm.mac"
27%include "VBox/err.mac"
28%include "VBox/stam.mac"
29%include "CPUMInternal.mac"
30%include "VBox/x86.mac"
31%include "VBox/cpum.mac"
32
33%ifdef IN_RING3
34 %error "The jump table doesn't link on leopard."
35%endif
36
37
38;*******************************************************************************
39;* External Symbols *
40;*******************************************************************************
41%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
42extern NAME(SUPR0AbsIs64bit)
43extern NAME(SUPR0Abs64bitKernelCS)
44extern NAME(SUPR0Abs64bitKernelSS)
45extern NAME(SUPR0Abs64bitKernelDS)
46extern NAME(SUPR0AbsKernelCS)
47%endif
48
49
;*******************************************************************************
;*   Global Variables                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Holds the SUPR0AbsIs64bit absolute value (non-zero when the host kernel is
; 64-bit) so we can cmp/test it without needing to clobber a register.
; (This trick doesn't quite work for PE btw. but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
        dd      NAME(SUPR0AbsIs64bit)
%endif
62
63
64BEGINCODE
65
66
;;
; Saves the guest's FPU/XMM state and restores the host's.
;
; No-op unless the guest state is marked as loaded (CPUM_USED_FPU); the flag
; is cleared again on the way out.
;
; @returns 0
; @param   pCPUMCPU   x86:[esp+4] GCC:rdi MSC:rcx   CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        mov     xAX, cr0
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so the fpu instructions below don't trap
        mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; Hybrid 32-bit kernel on a 64-bit host: far jump into a 64-bit code
        ; segment so the 64-bit forms of fxsave/fxrstor are executed.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxsave  [xDX + CPUMCPU.Guest.fpu]
        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
.fpu_not_used:
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero the (undefined) upper half of the pointer register
        fxsave  [rdx + CPUMCPU.Guest.fpu]
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret: ; 16:32 pointer to .done (returns to 32-bit code).
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState
126
;;
; Restores the host's FPU/XMM state from CPUMCPU.Host (the guest state
; currently loaded in the hardware is discarded, not saved).
;
; No-op unless CPUM_USED_FPU is set; the flag is cleared on the way out.
;
; @returns 0
; @param   pCPUMCPU   x86:[esp+4] GCC:rdi MSC:rcx   CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        mov     xAX, cr0
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so fxrstor doesn't trap
        mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; Hybrid 32-bit kernel on a 64-bit host: far jump into a 64-bit code
        ; segment so the 64-bit form of fxrstor is executed.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
.fpu_not_used:
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero the (undefined) upper half of the pointer register
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret: ; 16:32 pointer to .done (returns to 32-bit code).
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState
184
185
;;
; Loads (restores) the guest's FPU/XMM state from the CPUMCTX fxsave image.
;
; Unlike cpumR0RestoreHostFPUState this does not touch CR0 or fUseFlags;
; the caller is responsible for CR0.TS/EM being clear.
;
; @param   pCtx   x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 64-bit host kernel: switch to a 64-bit code segment so the 64-bit
        ; form of fxrstor is executed.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxrstor [xDX + CPUMCTX.fpu]
.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero the (undefined) upper half of the pointer register
        fxrstor [rdx + CPUMCTX.fpu]
        jmp far [.fpret wrt rip]
.fpret: ; 16:32 pointer to .done (returns to 32-bit code).
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadFPU
226
227
;;
; Saves the guest's FPU/XMM state into the CPUMCTX fxsave image.
; (The original header said "Restores" — copy/paste error; this is fxsave.)
;
; @param   pCtx   x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 64-bit host kernel: switch to a 64-bit code segment so the 64-bit
        ; form of fxsave is executed.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
        fxsave  [xDX + CPUMCTX.fpu]
.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero the (undefined) upper half of the pointer register
        fxsave  [rdx + CPUMCTX.fpu]
        jmp far [.fpret wrt rip]
.fpret: ; 16:32 pointer to .done (returns to 32-bit code).
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveFPU
267
268
;;
; Loads (restores) the guest's XMM registers from the CPUMCTX fxsave image:
; XMM0-XMM7 always, XMM8-XMM15 only when the guest is in long mode.
;
; @param   pCtx   x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 64-bit host kernel: switch to a 64-bit code segment so XMM8-XMM15
        ; are reachable.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        movdqa  xmm0,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
        movdqa  xmm1,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
        movdqa  xmm2,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
        movdqa  xmm3,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
        movdqa  xmm4,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
        movdqa  xmm5,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
        movdqa  xmm6,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
        movdqa  xmm7,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

%ifdef RT_ARCH_AMD64
        ; XMM8-XMM15 are only part of the guest state in long mode;
        ; skip them unless the guest's EFER.LMA is set.
        test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz      .done

        movdqa  xmm8,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
        movdqa  xmm9,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
        movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
        movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
        movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
        movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
        movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
        movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
%endif
.done:

        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero the (undefined) upper half of the pointer register

        movdqa  xmm0,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
        movdqa  xmm1,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
        movdqa  xmm2,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
        movdqa  xmm3,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
        movdqa  xmm4,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
        movdqa  xmm5,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
        movdqa  xmm6,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
        movdqa  xmm7,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

        ; Same long-mode-guest gate as the legacy path above.
        test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz      .sixtyfourbit_done

        movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
        movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
        movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
        movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
        movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
        movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
        movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
        movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
.sixtyfourbit_done:
        jmp far [.fpret wrt rip]
.fpret: ; 16:32 pointer to .done (returns to 32-bit code).
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadXMM
352
353
;;
; Saves the guest's XMM registers into the CPUMCTX fxsave image:
; XMM0-XMM7 always, XMM8-XMM15 only when the guest is in long mode.
; (The original header said "Restores" — copy/paste error; this stores.)
;
; @param   pCtx   x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 64-bit host kernel: switch to a 64-bit code segment so XMM8-XMM15
        ; are reachable.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0],  xmm0
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1],  xmm1
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2],  xmm2
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3],  xmm3
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4],  xmm4
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5],  xmm5
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6],  xmm6
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7],  xmm7

%ifdef RT_ARCH_AMD64
        ; XMM8-XMM15 are only part of the guest state in long mode;
        ; skip them unless the guest's EFER.LMA is set.
        test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz      .done

        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

%endif
.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero the (undefined) upper half of the pointer register

        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0],  xmm0
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1],  xmm1
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2],  xmm2
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3],  xmm3
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4],  xmm4
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5],  xmm5
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6],  xmm6
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7],  xmm7

        ; Same long-mode-guest gate as the legacy path above.
        test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz      .sixtyfourbit_done

        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

.sixtyfourbit_done:
        jmp far [.fpret wrt rip]
.fpret: ; 16:32 pointer to .done (returns to 32-bit code).
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif

ENDPROC CPUMSaveXMM
439
440
;;
; Sets the FPU control word, clearing any pending exceptions first.
;
; @param   u16FCW   x86:[esp+4] GCC:rdi MSC:rcx   New FPU control word
align 16
BEGINPROC cpumR0SetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xAX, rcx
 %else
        mov     xAX, rdi
 %endif
%else
        mov     xAX, dword [esp + 4]
%endif
        push    xAX                     ; spill the new CW so fldcw gets a memory operand
        fnclex                          ; discard pending exceptions; fldcw would unmask/raise them
        fldcw   word [xSP]
        pop     xAX
        ret
ENDPROC cpumR0SetFCW
462
463
;;
; Gets the current FPU control word.
;
; @returns  The FPU control word in ax (zero-extended into eax).
;
align 16
BEGINPROC cpumR0GetFCW
        ; Don't store at [xSP - 8]: this is ring-0 code and there is no red
        ; zone in kernel context - an interrupt hitting between the store and
        ; the load would push its frame over the memory below xSP.
        sub     xSP, 8
        fnstcw  [xSP]
        movzx   eax, word [xSP]         ; zero-extend; avoids a partial-register merge
        add     xSP, 8
        ret
ENDPROC cpumR0GetFCW
473
474
;;
; Sets the MXCSR register.
;
; @param   u32MXCSR   x86:[esp+4] GCC:rdi MSC:rcx   New MXCSR value
align 16
BEGINPROC cpumR0SetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xAX, rcx
 %else
        mov     xAX, rdi
 %endif
%else
        mov     xAX, dword [esp + 4]
%endif
        sub     xSP, 8                  ; ldmxcsr needs a memory operand; spill the value
        mov     dword [xSP], eax
        ldmxcsr [xSP]
        add     xSP, 8
        ret
ENDPROC cpumR0SetMXCSR
495
496
;;
; Gets the current MXCSR value.
;
; @returns  The MXCSR in eax.
;
align 16
BEGINPROC cpumR0GetMXCSR
        ; Don't store at [xSP - 8]: this is ring-0 code and there is no red
        ; zone in kernel context - an interrupt could clobber anything stored
        ; below xSP before we read it back.
        sub     xSP, 8
        stmxcsr [xSP]
        mov     eax, dword [xSP]
        add     xSP, 8
        ret
ENDPROC cpumR0GetMXCSR
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette