VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 55106

Last change on this file since 55106 was 55106, checked in by vboxsync, 10 years ago

VMM: host+guest xsave/xrstor state handling - not enabled.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.5 KB
Line 
1 ; $Id: CPUMR0A.asm 55106 2015-04-06 19:58:37Z vboxsync $
2;; @file
3; CPUM - Ring-0 Assembly Routines (supporting HM and IEM).
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29%ifdef IN_RING3
30 %error "The jump table doesn't link on leopard."
31%endif
32
33;*******************************************************************************
34;* Defined Constants And Macros *
35;*******************************************************************************
36;; The offset of the XMM registers in X86FXSTATE.
37; Use define because I'm too lazy to convert the struct.
; Byte offsets into the 512-byte FXSAVE/FXRSTOR image (X86FXSTATE):
; XMM registers start at byte 160; FPU IP/CS/DS live in the image header.
38%define XMM_OFF_IN_X86FXSTATE 160
39%define IP_OFF_IN_X86FXSTATE 08h
40%define CS_OFF_IN_X86FXSTATE 0ch
41%define DS_OFF_IN_X86FXSTATE 14h
42
43;; For numeric expressions
; Defined as 1/0 (rather than merely defined/undefined) so it can be used
; inside %if expressions, e.g. "%if CPUMR0_IS_AMD64 || %3" below.
44%ifdef RT_ARCH_AMD64
45 %define CPUMR0_IS_AMD64 1
46%else
47 %define CPUMR0_IS_AMD64 0
48%endif
49
50
51;*******************************************************************************
52;* External Symbols *
53;*******************************************************************************
54%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
55extern NAME(SUPR0AbsIs64bit)
56extern NAME(SUPR0Abs64bitKernelCS)
57extern NAME(SUPR0Abs64bitKernelSS)
58extern NAME(SUPR0Abs64bitKernelDS)
59extern NAME(SUPR0AbsKernelCS)
60%endif
61
62
63;*******************************************************************************
64;* Global Variables *
65;*******************************************************************************
66%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
67BEGINDATA
68%if 0 ; Currently not used.
69g_r32_Zero: dd 0.0
70%endif
71
72;;
73; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
74; needing to clobber a register. (This trick doesn't quite work for PE btw.
75; but that's not relevant atm.)
; Tested throughout this file as "cmp byte [NAME(g_fCPUMIs64bitHost)], 0";
; presumably non-zero when the 32-bit kernel runs the CPU in 64-bit mode
; (value comes from the external SUPR0AbsIs64bit absolute symbol).
76GLOBALNAME g_fCPUMIs64bitHost
77 dd NAME(SUPR0AbsIs64bit)
78%endif
79
80
81BEGINCODE
82
83%if 0 ; Currently not used anywhere.
84;;
85; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
86;
87; Cleans the FPU state, if necessary, before restoring the FPU.
88;
89; This macro ASSUMES CR0.TS is not set!
90;
91; @param xDX Pointer to CPUMCPU.
92; @uses xAX, EFLAGS
93;
94; Changes here should also be reflected in CPUMRCA.asm's copy!
95;
; NOTE(review): the labels below are plain "." locals (not %%), so this macro
; could only be instantiated once per non-local label scope - irrelevant while
; the whole block is compiled out by the surrounding %if 0.
96%macro CLEANFPU 0
97 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
98 jz .nothing_to_clean
99
100 xor eax, eax
101 fnstsw ax ; FSW -> AX.
102 test eax, RT_BIT(7) ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
103 ; while clearing & loading the FPU bits in 'clean_fpu' below.
104 jz .clean_fpu
105 fnclex
106
107.clean_fpu:
108 ffree st7 ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs.
109 ; for the upcoming push (load)
110 fild dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
111.nothing_to_clean:
112%endmacro
113%endif ; Unused.
114
115
116;;
117; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
118;
119; This is used to avoid FPU exceptions when touching the FPU state.
120;
121; @param %1 Register to save the old CR0 in (pass to RESTORE_CR0).
122; @param %2 Temporary scratch register.
123; @uses EFLAGS, CR0
124;
; Contract: %1 == 0 on exit means CR0 was NOT modified (TS and EM were
; already clear); RESTORE_CR0 relies on this sentinel to skip the restore.
125%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
126 xor %1, %1
127 mov %2, cr0
128 test %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
129 jz %%skip_cr0_write
130 mov %1, %2 ; Save old CR0
131 and %2, ~(X86_CR0_TS | X86_CR0_EM)
132 mov cr0, %2
133%%skip_cr0_write:
134%endmacro
135
136;;
137; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS changed it.
138;
139; @param %1 The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
140;
141%macro RESTORE_CR0 1
142 cmp %1, 0 ; Zero = CR0 was not modified, nothing to restore.
143 je %%skip_cr0_restore
144 mov cr0, %1
145%%skip_cr0_restore:
146%endmacro
147
148
149;;
150; Saves the host state.
151;
152; @uses rax, rdx
153; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
154; @param pXState Define for the register containing the extended state pointer.
155;
; Picks XSAVE when CPUMCPU.Host.fXStateMask is non-zero, else FXSAVE.
; EAX (and EDX for the XSAVE branches) carry the XSAVE component mask.
156%macro CPUMR0_SAVE_HOST 0
157 ;
158 ; Load a couple of registers we'll use later in all branches.
159 ;
160 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
161 mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
162
163%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
164 ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
165 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
166 jz %%host_legacy_mode
; Hand-assembled far jump (opcode EAh + 32-bit offset + 16-bit selector)
; into the 64-bit kernel code segment; the dd below supplies the 16:32 target.
167 db 0xea ; jmp far .sixtyfourbit_mode
168 dd %%host_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
169BITS 64
170%%host_sixtyfourbit_mode:
171 or eax, eax ; fXStateMask low dword == 0 -> no XSAVE support.
172 jz %%host_sixtyfourbit_fxsave
173
174 ; XSAVE
175 mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
176 o64 xsave [pXState]
177 jmp %%host_sixtyfourbit_done
178
179 ; FXSAVE
180%%host_sixtyfourbit_fxsave:
181 o64 fxsave [pXState]
182
183%%host_sixtyfourbit_done:
; Far return to 32-bit mode via the kernel's 32-bit code selector.
184 jmp far [%%host_fpret wrt rip]
185%%host_fpret: ; 16:32 Pointer to %%host_done.
186 dd %%host_done, NAME(SUPR0AbsKernelCS)
187BITS 32
188
189%%host_legacy_mode:
190%endif
191
192 ;
193 ; XSAVE or FXSAVE?
194 ;
195 or eax, eax
196 jz %%host_fxsave
197
198 ; XSAVE
199 mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
200%ifdef RT_ARCH_AMD64
201 o64 xsave [pXState]
202%else
203 xsave [pXState]
204%endif
205 jmp %%host_done
206
207 ; FXSAVE
208%%host_fxsave:
209%ifdef RT_ARCH_AMD64
210 o64 fxsave [pXState] ; Use explicit REX prefix. See @bugref{6398}.
211%else
212 fxsave [pXState]
213%endif
214
215%%host_done:
216%endmacro ; CPUMR0_SAVE_HOST
217
218
219;;
220; Loads the host state.
221;
222; @uses rax, rdx
223; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
224; @param pXState Define for the register containing the extended state pointer.
225;
; Mirror image of CPUMR0_SAVE_HOST: picks XRSTOR when CPUMCPU.Host.fXStateMask
; is non-zero, else FXRSTOR. EDX:EAX carry the XSAVE component mask.
226%macro CPUMR0_LOAD_HOST 0
227 ;
228 ; Load a couple of registers we'll use later in all branches.
229 ;
230 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
231 mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
232
233%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
234 ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
235 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
236 jz %%host_legacy_mode
; Hand-assembled far jump into the 64-bit kernel code segment (see
; CPUMR0_SAVE_HOST for the encoding details).
237 db 0xea ; jmp far .sixtyfourbit_mode
238 dd %%host_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
239BITS 64
240%%host_sixtyfourbit_mode:
241 or eax, eax
242 jz %%host_sixtyfourbit_fxrstor
243
244 ; XRSTOR
245 mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
246 o64 xrstor [pXState]
247 jmp %%host_sixtyfourbit_done
248
249 ; FXRSTOR
250%%host_sixtyfourbit_fxrstor:
251 o64 fxrstor [pXState]
252
253%%host_sixtyfourbit_done:
254 jmp far [%%host_fpret wrt rip]
255%%host_fpret: ; 16:32 Pointer to %%host_done.
256 dd %%host_done, NAME(SUPR0AbsKernelCS)
257BITS 32
258
259%%host_legacy_mode:
260%endif
261
262 ;
263 ; XRSTOR or FXRSTOR?
264 ;
265 or eax, eax
266 jz %%host_fxrstor
267
268 ; XRSTOR
269 mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
270%ifdef RT_ARCH_AMD64
271 o64 xrstor [pXState]
272%else
273 xrstor [pXState]
274%endif
275 jmp %%host_done
276
277 ; FXRSTOR
278%%host_fxrstor:
279%ifdef RT_ARCH_AMD64
280 o64 fxrstor [pXState] ; Use explicit REX prefix. See @bugref{6398}.
281%else
282 fxrstor [pXState]
283%endif
284
285%%host_done:
286%endmacro ; CPUMR0_LOAD_HOST
287
288
289
290;; Macro for FXSAVE for the guest FPU but tries to figure out whether to
291; save the 32-bit FPU state or 64-bit FPU state.
292;
293; @param %1 Pointer to CPUMCPU.
294; @param %2 Pointer to XState.
295; @param %3 Force AMD64
296; @uses xAX, xDX, EFLAGS, 20h of stack.
297;
; On exit the X86_OFF_FXSTATE_RSVD dword tags which format was written
; (X86_FXSTATE_RSVD_32BIT_MAGIC for the 32-bit layout); RESTORE_32_OR_64_FPU
; reads that tag back to pick the matching FXRSTOR form.
298%macro SAVE_32_OR_64_FPU 3
299%if CPUMR0_IS_AMD64 || %3
300 ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
301 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
302 jnz short %%save_long_mode_guest
303%endif
304 fxsave [pXState]
305%if CPUMR0_IS_AMD64 || %3
306 jmp %%save_done_32bit_cs_ds
307
308%%save_long_mode_guest:
309 o64 fxsave [pXState]
310
; 64-bit FXSAVE stores a 64-bit FPU IP and leaves the CS/DS selector fields
; zero; if CS is zero, recover the selectors via FNSTENV so the image can be
; told apart from / restored as a 32-bit one.
311 xor edx, edx
312 cmp dword [pXState + CS_OFF_IN_X86FXSTATE], 0
313 jne short %%save_done
314
315 sub rsp, 20h ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
316 fnstenv [rsp]
317 movzx eax, word [rsp + 10h] ; FNSTENV image: FPU CS selector.
318 mov [pXState + CS_OFF_IN_X86FXSTATE], eax
319 movzx eax, word [rsp + 18h] ; FNSTENV image: FPU DS selector.
320 add rsp, 20h
321 mov [pXState + DS_OFF_IN_X86FXSTATE], eax
322%endif
323%%save_done_32bit_cs_ds:
324 mov edx, X86_FXSTATE_RSVD_32BIT_MAGIC
325%%save_done:
326 mov dword [pXState + X86_OFF_FXSTATE_RSVD], edx
327%endmacro ; SAVE_32_OR_64_FPU
328
329
330;;
331; Save the guest state.
332;
333; @uses rax, rdx
334; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
335; @param pXState Define for the register containing the extended state pointer.
336;
; Like CPUMR0_SAVE_HOST but operates on CPUMCPU.Guest and defers the FXSAVE
; path to SAVE_32_OR_64_FPU so 32-bit vs 64-bit guest images are tagged.
337%macro CPUMR0_SAVE_GUEST 0
338 ;
339 ; Load a couple of registers we'll use later in all branches.
340 ;
341 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
342 mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
343
344%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
345 ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
346 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
347 jz %%guest_legacy_mode
348 db 0xea ; jmp far .sixtyfourbit_mode
349 dd %%guest_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
350BITS 64
351%%guest_sixtyfourbit_mode:
352 or eax, eax
353 jz %%guest_sixtyfourbit_fxsave
354
355 ; XSAVE
356 mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
357 o64 xsave [pXState]
358 jmp %%guest_sixtyfourbit_done
359
360 ; FXSAVE
361%%guest_sixtyfourbit_fxsave:
362 SAVE_32_OR_64_FPU pCpumCpu, pXState, 1
363
364%%guest_sixtyfourbit_done:
365 jmp far [%%guest_fpret wrt rip]
366%%guest_fpret: ; 16:32 Pointer to %%guest_done.
367 dd %%guest_done, NAME(SUPR0AbsKernelCS)
368BITS 32
369
370%%guest_legacy_mode:
371%endif
372
373 ;
374 ; XSAVE or FXSAVE?
375 ;
376 or eax, eax
377 jz %%guest_fxsave
378
379 ; XSAVE
380 mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
381%ifdef RT_ARCH_AMD64
382 o64 xsave [pXState]
383%else
384 xsave [pXState]
385%endif
386 jmp %%guest_done
387
388 ; FXSAVE
389%%guest_fxsave:
390 SAVE_32_OR_64_FPU pCpumCpu, pXState, 0
391
392%%guest_done:
393%endmacro ; CPUMR0_SAVE_GUEST
394
395
396;;
397; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did.
398;
399; @param %1 Pointer to CPUMCPU.
400; @param %2 Pointer to XState.
401; @param %3 Force AMD64.
402; @uses xAX, xDX, EFLAGS
403;
; Reads the X86_OFF_FXSTATE_RSVD tag written by SAVE_32_OR_64_FPU to decide
; whether the image was saved with the 32-bit or 64-bit FXSAVE form.
404%macro RESTORE_32_OR_64_FPU 3
405%if CPUMR0_IS_AMD64 || %3
406 ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
407 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
408 jz %%restore_32bit_fpu
409 cmp dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
410 jne short %%restore_64bit_fpu
411%%restore_32bit_fpu:
412%endif
413 fxrstor [pXState]
414%if CPUMR0_IS_AMD64 || %3
415 ; TODO: Restore XMM8-XMM15!
416 jmp short %%restore_fpu_done
417%%restore_64bit_fpu:
418 o64 fxrstor [pXState]
419%%restore_fpu_done:
420%endif
421%endmacro ; RESTORE_32_OR_64_FPU
422
423
424;;
425; Loads the guest state.
426;
427; @uses rax, rdx
428; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
429; @param pXState Define for the register containing the extended state pointer.
430;
; Mirror image of CPUMR0_SAVE_GUEST: XRSTOR when CPUMCPU.Guest.fXStateMask is
; non-zero, otherwise RESTORE_32_OR_64_FPU picks the matching FXRSTOR form.
431%macro CPUMR0_LOAD_GUEST 0
432 ;
433 ; Load a couple of registers we'll use later in all branches.
434 ;
435 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
436 mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
437
438%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
439 ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
440 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
441 jz %%guest_legacy_mode
442 db 0xea ; jmp far .sixtyfourbit_mode
443 dd %%guest_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
444BITS 64
445%%guest_sixtyfourbit_mode:
446 or eax, eax
447 jz %%guest_sixtyfourbit_fxrstor
448
449 ; XRSTOR
450 mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
451 o64 xrstor [pXState]
452 jmp %%guest_sixtyfourbit_done
453
454 ; FXRSTOR
455%%guest_sixtyfourbit_fxrstor:
456 RESTORE_32_OR_64_FPU pCpumCpu, pXState, 1
457
458%%guest_sixtyfourbit_done:
459 jmp far [%%guest_fpret wrt rip]
460%%guest_fpret: ; 16:32 Pointer to %%guest_done.
461 dd %%guest_done, NAME(SUPR0AbsKernelCS)
462BITS 32
463
464%%guest_legacy_mode:
465%endif
466
467 ;
468 ; XRSTOR or FXRSTOR?
469 ;
470 or eax, eax
471 jz %%guest_fxrstor
472
473 ; XRSTOR
474 mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
475%ifdef RT_ARCH_AMD64
476 o64 xrstor [pXState]
477%else
478 xrstor [pXState]
479%endif
480 jmp %%guest_done
481
482 ; FXRSTOR
483%%guest_fxrstor:
484 RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0
485
486%%guest_done:
487%endmacro ; CPUMR0_LOAD_GUEST
488
489
490;;
491; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
492;
493; @returns 0
494; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
495;
; Runs with interrupts disabled and CR0.TS/EM temporarily cleared while the
; state is swapped; sets CPUM_USED_FPU(+_SINCE_REM) so the reverse function
; knows a restore is needed.
496align 16
497BEGINPROC cpumR0SaveHostRestoreGuestFPUState
498 ;
499 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
500 ;
501%ifdef RT_ARCH_AMD64
502 %ifdef RT_OS_WINDOWS
503 mov r11, rcx
504 %else
505 mov r11, rdi
506 %endif
507 %define pCpumCpu r11
508 %define pXState r10
509%else
510 push ebp
511 mov ebp, esp
512 push ebx
513 push esi
514 mov ebx, dword [ebp + 8]
515 %define pCpumCpu ebx
516 %define pXState esi
517%endif
518
519 pushf ; The darwin kernel can get upset or upset things if an
520 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
521
522 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
523
524 CPUMR0_SAVE_HOST
525 CPUMR0_LOAD_GUEST
526
527%ifdef VBOX_WITH_KERNEL_USING_XMM
528 ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
529 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
530 movdqa xmm6, [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
531 movdqa xmm7, [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
532 movdqa xmm8, [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
533 movdqa xmm9, [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
534 movdqa xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
535 movdqa xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
536 movdqa xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
537 movdqa xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
538 movdqa xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
539 movdqa xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
540%endif
541
542 RESTORE_CR0 xCX
543 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
544 popf
545
546%ifdef RT_ARCH_X86
547 pop esi
548 pop ebx
549 leave
550%endif
551 xor eax, eax
552 ret
; Undefine the register aliases so they don't leak into the rest of the file
; (consistent with the other FPU state functions in this file).
%undef pCpumCpu
%undef pXState
553ENDPROC cpumR0SaveHostRestoreGuestFPUState
554
555
556%ifndef RT_ARCH_AMD64
557%ifdef VBOX_WITH_64_BITS_GUESTS
558%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
559;;
560; Saves the host FPU/SSE/AVX state.
561;
562; @returns VINF_SUCCESS (0) in EAX
563; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
564;
; Only compiled on 32-bit hosts with 64-bit guest support and no hybrid
; kernel (see the %ifndef/%ifdef guards surrounding this function).
565align 16
566BEGINPROC cpumR0SaveHostFPUState
567 ;
568 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
569 ;
570%ifdef RT_ARCH_AMD64
571 %ifdef RT_OS_WINDOWS
572 mov r11, rcx
573 %else
574 mov r11, rdi
575 %endif
576 %define pCpumCpu r11
577 %define pXState r10
578%else
579 push ebp
580 mov ebp, esp
581 push ebx
582 push esi
583 mov ebx, dword [ebp + 8]
584 %define pCpumCpu ebx
585 %define pXState esi
586%endif
587
588 pushf ; The darwin kernel can get upset or upset things if an
589 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
590 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
591
592 CPUMR0_SAVE_HOST
593
594 RESTORE_CR0 xCX
595 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
596 popf
597
598%ifdef RT_ARCH_X86
599 pop esi
600 pop ebx
601 leave
602%endif
603 xor eax, eax ; VINF_SUCCESS
604 ret
605
606%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
607ALIGNCODE(16)
608BITS 64
609.sixtyfourbit_mode:
610 ; Save the host FPU (64-bit), preserves existing broken state. See @bugref{7138}.
611 o64 fxsave [pXState] ; Fixed: was 'pXstate' - %defines are case-sensitive, so the alias would not expand.
612 jmp far [.fpret wrt rip]
613.fpret: ; 16:32 Pointer to .the_end.
; NOTE(review): no '.done' label exists in this procedure, so this branch
; cannot assemble if VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 is defined together
; with the guards above - looks like dead code, confirm before enabling.
614 dd .done, NAME(SUPR0AbsKernelCS)
615BITS 32
616%endif
617%undef pCpumCpu
618%undef pXState
619ENDPROC cpumR0SaveHostFPUState
620%endif
621%endif
622%endif
623
624
625;;
626; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
627;
628; @returns VINF_SUCCESS (0) in eax.
629; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
630;
; No-op unless cpumR0SaveHostRestoreGuestFPUState previously set
; CPUM_USED_FPU; clears that flag again on the active path.
631align 16
632BEGINPROC cpumR0SaveGuestRestoreHostFPUState
633 ;
634 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
635 ;
636%ifdef RT_ARCH_AMD64
637 %ifdef RT_OS_WINDOWS
638 mov r11, rcx
639 %else
640 mov r11, rdi
641 %endif
642 %define pCpumCpu r11
643 %define pXState r10
644%else
645 push ebp
646 mov ebp, esp
647 push ebx
648 push esi
649 mov ebx, dword [ebp + 8]
650 %define pCpumCpu ebx
651 %define pXState esi
652%endif
653
654 ;
655 ; Only restore FPU if guest has used it.
656 ;
657 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
658 jz .fpu_not_used
659
660 pushf ; The darwin kernel can get upset or upset things if an
661 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
662 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
663
664 CPUMR0_SAVE_GUEST
665 CPUMR0_LOAD_HOST
666
667 RESTORE_CR0 xCX
668 and dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
669 popf
670
671.fpu_not_used:
672%ifdef RT_ARCH_X86
673 pop esi
674 pop ebx
675 leave
676%endif
677 xor eax, eax ; VINF_SUCCESS
678 ret
679%undef pCpumCpu
680%undef pXState
681ENDPROC cpumR0SaveGuestRestoreHostFPUState
682
683
684;;
685; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
686;
687; @returns 0
688; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
689;
; Like cpumR0SaveGuestRestoreHostFPUState but discards the guest state:
; only reloads the host state (and clears CPUM_USED_FPU) if the guest
; actually used the FPU.
690align 16
691BEGINPROC cpumR0RestoreHostFPUState
692 ;
693 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
694 ;
695%ifdef RT_ARCH_AMD64
696 %ifdef RT_OS_WINDOWS
697 mov r11, rcx
698 %else
699 mov r11, rdi
700 %endif
701 %define pCpumCpu r11
702 %define pXState r10
703%else
704 push ebp
705 mov ebp, esp
706 push ebx
707 push esi
708 mov ebx, dword [ebp + 8]
709 %define pCpumCpu ebx
710 %define pXState esi
711%endif
712
713 ;
714 ; Restore FPU if guest has used it.
715 ;
716 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
717 jz short .fpu_not_used
718
719 pushf ; The darwin kernel can get upset or upset things if an
720 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
721 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
722
723 CPUMR0_LOAD_HOST
724
725 RESTORE_CR0 xCX
726 and dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
727 popf
728
729.fpu_not_used:
730%ifdef RT_ARCH_X86
731 pop esi
732 pop ebx
733 leave
734%endif
735 xor eax, eax ; Return 0.
736 ret
737%undef pCpumCpu
738%undef pXState
739ENDPROC cpumR0RestoreHostFPUState
740
741
742%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
743;;
744; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
745;
; Reads DR0-DR3 into the caller-supplied 4-entry array. Interrupts are
; disabled around the reads; on hybrid 32-bit kernels the work is done from
; 64-bit mode so the full 64-bit register values are captured.
746ALIGNCODE(16)
747BEGINPROC cpumR0SaveDRx
748%ifdef RT_ARCH_AMD64
749 %ifdef ASM_CALL64_GCC
750 mov xCX, rdi ; MSC convention already has the pointer in rcx.
751 %endif
752%else
753 mov xCX, dword [esp + 4]
754%endif
755 pushf ; Just to be on the safe side.
756 cli
757%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
758 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
759 jz .legacy_mode
760 db 0xea ; jmp far .sixtyfourbit_mode
761 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
762.legacy_mode:
763%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
764
765 ;
766 ; Do the job.
767 ;
768 mov xAX, dr0
769 mov xDX, dr1
770 mov [xCX], xAX
771 mov [xCX + 8 * 1], xDX
772 mov xAX, dr2
773 mov xDX, dr3
774 mov [xCX + 8 * 2], xAX
775 mov [xCX + 8 * 3], xDX
776
777.done:
778 popf
779 ret
780
781%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
782ALIGNCODE(16)
783BITS 64
784.sixtyfourbit_mode:
785 and ecx, 0ffffffffh ; Zero-extend the 32-bit pointer passed from 32-bit code.
786
787 mov rax, dr0
788 mov rdx, dr1
789 mov r8, dr2
790 mov r9, dr3
791 mov [rcx], rax
792 mov [rcx + 8 * 1], rdx
793 mov [rcx + 8 * 2], r8
794 mov [rcx + 8 * 3], r9
795 jmp far [.fpret wrt rip] ; Far return to 32-bit mode at .done.
796.fpret: ; 16:32 Pointer to .the_end.
797 dd .done, NAME(SUPR0AbsKernelCS)
798BITS 32
799%endif
800ENDPROC cpumR0SaveDRx
801
802
803;;
804; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
805;
; Counterpart of cpumR0SaveDRx: loads DR0-DR3 from the caller-supplied
; 4-entry array, with interrupts disabled around the writes.
806ALIGNCODE(16)
807BEGINPROC cpumR0LoadDRx
808%ifdef RT_ARCH_AMD64
809 %ifdef ASM_CALL64_GCC
810 mov xCX, rdi ; MSC convention already has the pointer in rcx.
811 %endif
812%else
813 mov xCX, dword [esp + 4]
814%endif
815 pushf ; Just to be on the safe side.
816 cli
817%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
818 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
819 jz .legacy_mode
820 db 0xea ; jmp far .sixtyfourbit_mode
821 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
822.legacy_mode:
823%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
824
825 ;
826 ; Do the job.
827 ;
828 mov xAX, [xCX]
829 mov xDX, [xCX + 8 * 1]
830 mov dr0, xAX
831 mov dr1, xDX
832 mov xAX, [xCX + 8 * 2]
833 mov xDX, [xCX + 8 * 3]
834 mov dr2, xAX
835 mov dr3, xDX
836
837.done:
838 popf
839 ret
840
841%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
842ALIGNCODE(16)
843BITS 64
844.sixtyfourbit_mode:
845 and ecx, 0ffffffffh ; Zero-extend the 32-bit pointer passed from 32-bit code.
846
847 mov rax, [rcx]
848 mov rdx, [rcx + 8 * 1]
849 mov r8, [rcx + 8 * 2]
850 mov r9, [rcx + 8 * 3]
851 mov dr0, rax
852 mov dr1, rdx
853 mov dr2, r8
854 mov dr3, r9
855 jmp far [.fpret wrt rip] ; Far return to 32-bit mode at .done.
856.fpret: ; 16:32 Pointer to .the_end.
857 dd .done, NAME(SUPR0AbsKernelCS)
858BITS 32
859%endif
860ENDPROC cpumR0LoadDRx
861
862%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
863
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette