VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@20536

Last change on this file since 20536 was 20536, checked in by vboxsync, 15 years ago

CPUMR0A.asm: Name fixes and comments.

; $Id: CPUMR0A.asm 20536 2009-06-13 21:06:54Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif


;*******************************************************************************
;*   External Symbols                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;*   Global Variables                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
    dd      NAME(SUPR0AbsIs64bit)
%endif
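; (Since SUPR0AbsIs64bit is an absolute symbol, the dd above is fixed up with
; the flag value itself rather than with an address, so no run-time
; initialization is needed.)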


BEGINCODE


;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns  0
; @param    pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
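    ; (The 0xea byte and the two dwords above hand-assemble a far
    ; jmp ptr16:32: a dword offset to .sixtyfourbit_mode followed by the
    ; 64-bit kernel CS selector in the low word of the second dd. The upper
    ; word of that dd is dead padding; it is never executed, since execution
    ; either takes the far jump or lands at .legacy_mode.)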

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Guest.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    popf
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Host.fpu]
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState

%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
    mov     xDX, dword [esp + 4]

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)

    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    xor     eax, eax
    ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif

;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns  0
; @param    pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Only restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    popf
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Guest.fpu]
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState


;;
; Restores the host's FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    mov     xAX, cr0
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    popf
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState


;;
; Restores the guest's FPU/XMM state
;
; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0LoadFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCTX.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadFPU


;;
; Saves the guest's FPU/XMM state
;
; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0SaveFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    fxsave  [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCTX.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveFPU


;;
; Restores the guest's XMM state
;
; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0LoadXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

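    ; xmm8-xmm15 are only addressable in long mode, so they are only
    ; loaded when the guest actually runs with EFER.LMA set.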
%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  xmm8,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
.sixtyfourbit_done:
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadXMM


;;
; Saves the guest's XMM state
;
; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0SaveXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

.sixtyfourbit_done:
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveXMM


;;
; Set the FPU control word, clearing exceptions first.
;
; @param    u16FCW  x86:[esp+4] GCC:rdi MSC:rcx  New FPU control word
align 16
BEGINPROC cpumR0SetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
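    ; Clear pending x87 exceptions first: loading a control word that
    ; unmasks a still-pending exception would otherwise raise a stale #MF
    ; at the next floating-point instruction.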
    fnclex
    push    xAX
    fldcw   [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetFCW


;;
; Get the FPU control word
;
align 16
BEGINPROC cpumR0GetFCW
    fnstcw  [xSP - 8]
    mov     ax, word [xSP - 8]
    ret
ENDPROC cpumR0GetFCW
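; (Note: fnstcw above stores to [xSP - 8], below the stack pointer; this
; assumes nothing clobbers that scratch word between the store and the
; reload. cpumR0GetMXCSR below uses the same trick.)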


;;
; Set the MXCSR.
;
; @param    u32MXCSR  x86:[esp+4] GCC:rdi MSC:rcx  New MXCSR
align 16
BEGINPROC cpumR0SetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    push    xAX
    ldmxcsr [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetMXCSR


;;
; Get the MXCSR
;
align 16
BEGINPROC cpumR0GetMXCSR
    stmxcsr [xSP - 8]
    mov     eax, dword [xSP - 8]
    ret
ENDPROC cpumR0GetMXCSR


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
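; Copies DR0..DR3 into pa4Regs[0..3] (each slot is 64 bits wide, hence the
; 8-byte strides below).
;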
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

    ;
    ; Do the job.
    ;
    mov     xAX, dr0
    mov     xDX, dr1
    mov     [xCX], xAX
    mov     [xCX + 8 * 1], xDX
    mov     xAX, dr2
    mov     xDX, dr3
    mov     [xCX + 8 * 2], xAX
    mov     [xCX + 8 * 3], xDX

.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, dr0
    mov     rdx, dr1
    mov     r8, dr2
    mov     r9, dr3
    mov     [rcx], rax
    mov     [rcx + 8 * 1], rdx
    mov     [rcx + 8 * 2], r8
    mov     [rcx + 8 * 3], r9
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

    ;
    ; Do the job.
    ;
    mov     xAX, [xCX]
    mov     xDX, [xCX + 8 * 1]
    mov     dr0, xAX
    mov     dr1, xDX
    mov     xAX, [xCX + 8 * 2]
    mov     xDX, [xCX + 8 * 3]
    mov     dr2, xAX
    mov     dr3, xDX

.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, [rcx]
    mov     rdx, [rcx + 8 * 1]
    mov     r8, [rcx + 8 * 2]
    mov     r9, [rcx + 8 * 3]
    mov     dr0, rax
    mov     dr1, rdx
    mov     dr2, r8
    mov     dr3, r9
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
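
For reference, ring-0 C code reaches these routines through DECLASM prototypes like the two quoted above for the DRx helpers (which are only built for hybrid 32-bit kernels, per the surrounding %ifdef). Below is a minimal C sketch of how a caller might declare and drive the FPU and DRx pairs; the FPU prototypes, the DECLASM stand-in, the PCPUMCPU typedef and exampleFpuWorldSwitch are illustrative assumptions inferred from the @param/@returns comments above, not the real CPUMInternal.h declarations.

#include <stdint.h>

/* Hypothetical stand-ins for the real VBox macros/types: DECLASM marks an
   assembly-implemented function, and PCPUMCPU points at the per-CPU CPUM
   state whose CPUMCPU.Host.fpu/Guest.fpu areas the routines fxsave/fxrstor. */
#define DECLASM(type) type
typedef struct CPUMCPU *PCPUMCPU;

DECLASM(int)  cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCpumCpu);
DECLASM(int)  cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCpumCpu);
DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);

/* Sketch of the switching pattern: the first call fxsaves the host state,
   fxrstors the guest state and sets CPUM_USED_FPU; the second call performs
   the reverse switch only when that flag is set and returns immediately
   otherwise (see the jz short .fpu_not_used path above). */
static void exampleFpuWorldSwitch(PCPUMCPU pCpumCpu)
{
    uint64_t a4HostDRx[4];

    cpumR0SaveDRx(&a4HostDRx[0]);                 /* stash host DR0..DR3   */
    cpumR0SaveHostRestoreGuestFPUState(pCpumCpu); /* host -> guest FPU/XMM */
    /* ... run guest code ... */
    cpumR0SaveGuestRestoreHostFPUState(pCpumCpu); /* guest -> host FPU/XMM */
    cpumR0LoadDRx(&a4HostDRx[0]);                 /* restore host DR0..DR3 */
}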