VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 16113

Last change on this file since 16113 was 16113, checked in by vboxsync, 16 years ago

Save the host FPU/XMM state for the 32/64 case.

; $Id: CPUMR0A.asm 16113 2009-01-21 09:08:29Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif


;*******************************************************************************
;* External Symbols *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif
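; Note: the SUPR0Abs* externs above are link-time absolute constants coming
; from the ring-0 support code; on hosts with a hybrid 32-bit kernel (the
; 32-bit darwin kernel on 64-bit capable CPUs) SUPR0AbsIs64bit is non-zero
; and the *KernelCS/SS/DS values are the selectors used by the far jumps
; further down to reach 64-bit code and to return to 32-bit code.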


;*******************************************************************************
;* Global Variables *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
    dd      NAME(SUPR0AbsIs64bit)
%endif


BEGINCODE


;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                ;; @todo optimize this.
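    ; Clearing CR0.TS and CR0.EM keeps the fxsave/fxrstor below from raising
    ; #NM (TS set) or #UD (EM set); the original CR0 is restored afterwards.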

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
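    ; The db/dd pair above hand-assembles a direct far jump (opcode 0EAh,
    ; 32-bit offset, 16-bit selector) into the 64-bit kernel code segment;
    ; the unused high word of the second dd is never executed, since the
    ; legacy path jumps past it and the far jump transfers control away.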

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Guest.fpu]

.done:
    mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Host.fpu]
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
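    ; 'jmp far [.fpret wrt rip]' performs an indirect far jump through the
    ; 16:32 pointer stored at .fpret, reloading the 32-bit kernel CS and
    ; resuming at .done in 32-bit code.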
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState

%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
    mov     xDX, dword [esp + 4]

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                ;; @todo optimize this.

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)

    mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
    xor     eax, eax
    ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif
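; Note: cpumR0SaveHostFPUState above is only assembled for 32-bit hosts
; (no RT_ARCH_AMD64) that support 64-bit guests but do not use the hybrid
; 32-bit kernel path; it saves the host state without touching the guest's.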

;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Only restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Guest.fpu]
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState


;;
; Restores the host's FPU/XMM state
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    mov     xAX, cr0
    mov     xCX, xAX                ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                ; and restore old CR0 again
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState


;;
; Restores the guest's FPU/XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCTX.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadFPU


;;
; Saves the guest's FPU/XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    fxsave  [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCTX.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveFPU


;;
; Restores the guest's XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done
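    ; XMM8..XMM15 only exist in long mode, so they are only touched when the
    ; guest's EFER.LMA indicates long mode is active (and only on 64-bit hosts).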

    movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
%endif
.done:

    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  xmm8, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
.sixtyfourbit_done:
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadXMM


;;
; Saves the guest's XMM state
;
; @param pCtx x86:[esp+4] GCC:rdi MSC:rcx CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

.sixtyfourbit_done:
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveXMM


;;
; Sets the FPU control word, clearing pending FPU exceptions first.
;
; @param u16FCW x86:[esp+4] GCC:rdi MSC:rcx New FPU control word
align 16
BEGINPROC cpumR0SetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    fnclex
    push    xAX
    fldcw   [xSP]
    pop     xAX
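    ; fldcw only takes a memory operand, so the new control word is pushed
    ; onto the stack, loaded from there, and the stack slot is popped again.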
    ret
ENDPROC cpumR0SetFCW


;;
; Get the FPU control word
;
align 16
BEGINPROC cpumR0GetFCW
    fnstcw  [xSP - 8]
    mov     ax, word [xSP - 8]
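    ; fnstcw needs a memory operand; the control word is written to scratch
    ; space just below the stack pointer and read back into ax (return value).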
    ret
ENDPROC cpumR0GetFCW


;;
; Sets the MXCSR.
;
; @param u32MXCSR x86:[esp+4] GCC:rdi MSC:rcx New MXCSR
align 16
BEGINPROC cpumR0SetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    push    xAX
    ldmxcsr [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetMXCSR


;;
; Get the MXCSR
;
align 16
BEGINPROC cpumR0GetMXCSR
    stmxcsr [xSP - 8]
    mov     eax, dword [xSP - 8]
    ret
ENDPROC cpumR0GetMXCSR


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

    ;
    ; Do the job.
    ;
    mov     xAX, dr0
    mov     xDX, dr1
    mov     [xCX], xAX
    mov     [xCX + 8 * 1], xDX
    mov     xAX, dr2
    mov     xDX, dr3
    mov     [xCX + 8 * 2], xAX
    mov     [xCX + 8 * 3], xDX
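    ; The pa4Regs slots are 8 bytes apart (uint64_t); on a plain 32-bit host
    ; only the low dword of each DRx is stored here, while the hybrid 32-bit
    ; kernel case goes through .sixtyfourbit_mode below and stores full qwords.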

.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, dr0
    mov     rdx, dr1
    mov     r8, dr2
    mov     r9, dr3
    mov     [rcx], rax
    mov     [rcx + 8 * 1], rdx
    mov     [rcx + 8 * 2], r8
    mov     [rcx + 8 * 3], r9
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

    ;
    ; Do the job.
    ;
    mov     xAX, [xCX]
    mov     xDX, [xCX + 8 * 1]
    mov     dr0, xAX
    mov     dr1, xDX
    mov     xAX, [xCX + 8 * 2]
    mov     xDX, [xCX + 8 * 3]
    mov     dr2, xAX
    mov     dr3, xDX

.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, [rcx]
    mov     rdx, [rcx + 8 * 1]
    mov     r8, [rcx + 8 * 2]
    mov     r9, [rcx + 8 * 3]
    mov     dr0, rax
    mov     dr1, rdx
    mov     dr2, r8
    mov     dr3, r9
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0