VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 20535

Last change on this file was r20535, checked in by vboxsync on 2009-06-13:

CPUMR0A.asm: must disable interrupt while messing with CR0 on darwin.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.2 KB
; $Id: CPUMR0A.asm 20535 2009-06-13 20:58:29Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif


;*******************************************************************************
;*      External Symbols                                                       *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;*      Global Variables                                                       *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
        dd      NAME(SUPR0AbsIs64bit)
%endif
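;
; A minimal sketch of how the flag above is meant to be used (assuming, as the
; comment says, that SUPR0AbsIs64bit is an absolute symbol whose value is simply
; 0 or 1 and gets baked into the dword at link time):
;
;       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0  ; no register clobbered
;       jz      .legacy_mode                        ; 32-bit host kernel path
;       ; ... far jump to the 64-bit code path ...
;
; which is exactly the pattern the routines below repeat.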


BEGINCODE


;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Switch the state.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                    ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                    ;; @todo optimize this.
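        ; Clearing CR0.TS and CR0.EM first is what makes the fxsave/fxrstor below
        ; safe: with TS set they would raise #NM and with EM set #UD instead of
        ; accessing the FPU/XMM state. The caller's CR0 is put back at .done.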

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
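        ; The db 0xea / dd pair above is a hand-assembled direct far jump (ptr16:32):
        ; the first dword is the 32-bit offset of .sixtyfourbit_mode and the low word
        ; of the second dword is the 64-bit kernel code selector, so on a hybrid
        ; 32-bit darwin kernel (where the CPU presumably already runs in long mode)
        ; execution continues at .sixtyfourbit_mode as 64-bit code.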

        fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Guest.fpu]

.done:
        mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
        popf
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxsave  [rdx + CPUMCPU.Host.fpu]
        fxrstor [rdx + CPUMCPU.Guest.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState
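
; cpumR0SaveHostRestoreGuestFPUState above and cpumR0SaveGuestRestoreHostFPUState
; further down are presumably used as a pair by the ring-0 CPUM code: the first when
; the guest first touches the FPU after a world switch (setting CPUM_USED_FPU), the
; second when leaving guest context, where it only does work if CPUM_USED_FPU is set.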

%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
        mov     xDX, dword [esp + 4]

        ; Switch the state.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                    ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                    ;; @todo optimize this.

        fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)

        mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
        xor     eax, eax
        ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif
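
; The save-only variant above is built just for 32-bit hosts with 64-bit guest support
; and no hybrid kernel; presumably the guest FPU state is then loaded elsewhere (by the
; 64-bit world switcher) rather than by cpumR0SaveHostRestoreGuestFPUState.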

;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Only restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                    ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                    ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
.fpu_not_used:
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxsave  [rdx + CPUMCPU.Guest.fpu]
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState



;;
; Sets the host's FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        mov     xAX, cr0
        mov     xCX, xAX                    ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                    ; and restore old CR0 again
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
.fpu_not_used:
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState


;;
; Restores the guest's FPU/XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxrstor [xDX + CPUMCTX.fpu]
.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxrstor [rdx + CPUMCTX.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadFPU


;;
; Saves the guest's FPU/XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
        fxsave  [xDX + CPUMCTX.fpu]
.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxsave  [rdx + CPUMCTX.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveFPU
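
; Unlike the cpumR0* routines above, CPUMLoadFPU and CPUMSaveFPU only transfer the
; fxsave image: they neither disable interrupts nor clear CR0.TS/CR0.EM, so the caller
; is presumably expected to have made the FPU safe to touch before calling them.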


;;
; Restores the guest's XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
        movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
        movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
        movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
        movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
        movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
        movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
        movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

%ifdef RT_ARCH_AMD64
        test qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz .done

        movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
        movdqa  xmm9, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
        movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
        movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
        movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
        movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
        movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
        movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
%endif
.done:

        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh

        movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
        movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
        movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
        movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
        movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
        movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
        movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
        movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

        test qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz .sixtyfourbit_done

        movdqa  xmm8, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
        movdqa  xmm9, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
        movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
        movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
        movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
        movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
        movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
        movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
.sixtyfourbit_done:
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadXMM


;;
; Saves the guest's XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

%ifdef RT_ARCH_AMD64
        test qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz .done

        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

%endif
.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh

        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

        test qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
        jz .sixtyfourbit_done

        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
        movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

.sixtyfourbit_done:
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveXMM
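
; In both CPUMLoadXMM and CPUMSaveXMM the XMM8-XMM15 half is only transferred when the
; guest's EFER.LMA is set: those registers are only addressable in 64-bit mode, so for
; a guest that isn't in long mode the extra slots of the fxsave area are skipped.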


;;
; Set the FPU control word, clearing exceptions first.
;
; @param    u16FCW  x86:[esp+4] GCC:rdi MSC:rcx     New FPU control word
align 16
BEGINPROC cpumR0SetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xAX, rcx
 %else
        mov     xAX, rdi
 %endif
%else
        mov     xAX, dword [esp + 4]
%endif
        fnclex
        push    xAX
        fldcw   [xSP]
        pop     xAX
        ret
ENDPROC cpumR0SetFCW


;;
; Get the FPU control word
;
align 16
BEGINPROC cpumR0GetFCW
        fnstcw  [xSP - 8]
        mov     ax, word [xSP - 8]
        ret
ENDPROC cpumR0GetFCW


;;
; Set the MXCSR.
;
; @param    u32MXCSR    x86:[esp+4] GCC:rdi MSC:rcx     New MXCSR
align 16
BEGINPROC cpumR0SetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xAX, rcx
 %else
        mov     xAX, rdi
 %endif
%else
        mov     xAX, dword [esp + 4]
%endif
        push    xAX
        ldmxcsr [xSP]
        pop     xAX
        ret
ENDPROC cpumR0SetMXCSR


;;
; Get the MXCSR
;
align 16
BEGINPROC cpumR0GetMXCSR
        stmxcsr [xSP - 8]
        mov     eax, dword [xSP - 8]
        ret
ENDPROC cpumR0GetMXCSR
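
; The four control-word/MXCSR helpers above bounce the value through the stack because
; fldcw, fnstcw, ldmxcsr and stmxcsr only take memory operands; the push/pop (or the
; scratch word just below xSP) is simply temporary storage for that purpose.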


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

        ;
        ; Do the job.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX], xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8, dr2
        mov     r9, dr3
        mov     [rcx], rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

        ;
        ; Do the job.
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8, [rcx + 8 * 2]
        mov     r9, [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
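
; The DRx helpers only exist in the hybrid 32-bit kernel build, presumably because that
; is the configuration where the full 64-bit DR0-DR3 values can only be read and written
; from the 64-bit code path while the rest of ring-0 runs as 32-bit code.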