VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 61058

Last change on this file since 61058 was 61058, checked in by vboxsync, 9 years ago

CPUM,++: Split up CPUM_USED_FPU into CPUM_USED_FPU_GUEST & CPUM_USED_FPU_HOST.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.8 KB
Line 
1 ; $Id: CPUMR0A.asm 61058 2016-05-19 19:12:56Z vboxsync $
2;; @file
3; CPUM - Ring-0 Assembly Routines (supporting HM and IEM).
4;
5
6;
7; Copyright (C) 2006-2016 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18
19;*******************************************************************************
20;* Header Files *
21;*******************************************************************************
22%define RT_ASM_WITH_SEH64
23%include "iprt/asmdefs.mac"
24%include "VBox/asmdefs.mac"
25%include "VBox/vmm/vm.mac"
26%include "VBox/err.mac"
27%include "VBox/vmm/stam.mac"
28%include "CPUMInternal.mac"
29%include "iprt/x86.mac"
30%include "VBox/vmm/cpum.mac"
31
32
33;*******************************************************************************
34;* Defined Constants And Macros *
35;*******************************************************************************
36;; The offset of the XMM registers in X86FXSTATE.
37; Use define because I'm too lazy to convert the struct.
; NOTE(review): These are byte offsets into the fixed FXSAVE/FXRSTOR image.
; CS_OFF/DS_OFF are used by SAVE_32_OR_64_FPU below to patch the FPU CS/DS
; selector words of the saved image.
38%define XMM_OFF_IN_X86FXSTATE 160
39%define IP_OFF_IN_X86FXSTATE 08h
40%define CS_OFF_IN_X86FXSTATE 0ch
41%define DS_OFF_IN_X86FXSTATE 14h
42
43;; For numeric expressions
; CPUMR0_IS_AMD64 evaluates to 1 on AMD64 and 0 elsewhere so it can be used
; inside %if expressions (an %ifdef-ed symbol cannot).
44%ifdef RT_ARCH_AMD64
45 %define CPUMR0_IS_AMD64 1
46%else
47 %define CPUMR0_IS_AMD64 0
48%endif
49
50
51
52BEGINCODE
53
54%if 0 ; Currently not used anywhere.
55;;
56; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
57;
58; Cleans the FPU state, if necessary, before restoring the FPU.
59;
60; This macro ASSUMES CR0.TS is not set!
61;
62; @param xDX Pointer to CPUMCPU.
63; @uses xAX, EFLAGS
64;
65; Changes here should also be reflected in CPUMRCA.asm's copy!
66;
67%macro CLEANFPU 0
; Only do work when the leaky-FFXSR workaround was flagged for this CPU.
68 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
69 jz .nothing_to_clean
70
71 xor eax, eax
72 fnstsw ax ; FSW -> AX.
73 test eax, RT_BIT(7) ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
74 ; while clearing & loading the FPU bits in 'clean_fpu' below.
75 jz .clean_fpu
76 fnclex ; Clear pending x87 exceptions without trapping.
77
78.clean_fpu:
79 ffree st7 ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs.
80 ; for the upcoming push (load)
81 fild dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
82.nothing_to_clean:
83%endmacro
84%endif ; Unused.
85
86
87;;
88; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
89;
90; This is used to avoid FPU exceptions when touching the FPU state.
91;
92; @param %1 Register to save the old CR0 in (pass to RESTORE_CR0).
93;            Left as zero when CR0 did not need changing (RESTORE_CR0
;            relies on this sentinel to skip the restore).
94; @param %2 Temporary scratch register.
95; @uses EFLAGS, CR0
96;
96%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
97 xor %1, %1 ; Zero = "CR0 was not modified" sentinel for RESTORE_CR0.
98 mov %2, cr0
99 test %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
100 jz %%skip_cr0_write
101 mov %1, %2 ; Save old CR0
102 and %2, ~(X86_CR0_TS | X86_CR0_EM)
103 mov cr0, %2
104%%skip_cr0_write:
105%endmacro
106
107;;
108; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS changed it.
109;
110; @param %1 The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
111;
112%macro RESTORE_CR0 1
113 cmp %1, 0 ; Zero means SAVE_CR0_CLEAR_FPU_TRAPS left CR0 untouched.
114 je %%skip_cr0_restore
115 mov cr0, %1
116%%skip_cr0_restore:
117%endmacro
118
119
120;;
121; Saves the host state.
122;
123; @uses rax, rdx
124; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
125; @param pXState Define for the register containing the extended state pointer.
126;
127%macro CPUMR0_SAVE_HOST 0
128 ;
129 ; Load a couple of registers we'll use later in all branches.
130 ;
131 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
132 mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
133
134 ;
135 ; XSAVE or FXSAVE?
136 ;
; A zero fXStateMask (low dword) means XSAVE is not used for the host state.
137 or eax, eax
138 jz %%host_fxsave
139
140 ; XSAVE
; XSAVE takes its component mask in edx:eax; eax was loaded above.
141 mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
142%ifdef RT_ARCH_AMD64
143 o64 xsave [pXState]
144%else
145 xsave [pXState]
146%endif
147 jmp %%host_done
148
149 ; FXSAVE
150%%host_fxsave:
151%ifdef RT_ARCH_AMD64
152 o64 fxsave [pXState] ; Use explicit REX prefix. See @bugref{6398}.
153%else
154 fxsave [pXState]
155%endif
156
157%%host_done:
158%endmacro ; CPUMR0_SAVE_HOST
159
160
161;;
162; Loads the host state.
163;
164; @uses rax, rdx
165; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
166; @param pXState Define for the register containing the extended state pointer.
167;
168%macro CPUMR0_LOAD_HOST 0
169 ;
170 ; Load a couple of registers we'll use later in all branches.
171 ;
172 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
173 mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
174
175 ;
176 ; XRSTOR or FXRSTOR?
177 ;
; A zero fXStateMask (low dword) means XRSTOR is not used for the host state.
178 or eax, eax
179 jz %%host_fxrstor
180
181 ; XRSTOR
; XRSTOR takes its component mask in edx:eax; eax was loaded above.
182 mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
183%ifdef RT_ARCH_AMD64
184 o64 xrstor [pXState]
185%else
186 xrstor [pXState]
187%endif
188 jmp %%host_done
189
190 ; FXRSTOR
191%%host_fxrstor:
192%ifdef RT_ARCH_AMD64
193 o64 fxrstor [pXState] ; Use explicit REX prefix. See @bugref{6398}.
194%else
195 fxrstor [pXState]
196%endif
197
198%%host_done:
199%endmacro ; CPUMR0_LOAD_HOST
200
201
202
203;; Macro for FXSAVE for the guest FPU but tries to figure out whether to
204; save the 32-bit FPU state or 64-bit FPU state.
205;
; The reserved dword of the image (X86_OFF_FXSTATE_RSVD) is set to
; X86_FXSTATE_RSVD_32BIT_MAGIC for 32-bit saves and to zero for 64-bit
; saves, so RESTORE_32_OR_64_FPU can pick the matching FXRSTOR form.
206; @param %1 Pointer to CPUMCPU.
207; @param %2 Pointer to XState.
208; @param %3 Force AMD64
209; @uses xAX, xDX, EFLAGS, 20h of stack.
210;
211%macro SAVE_32_OR_64_FPU 3
212%if CPUMR0_IS_AMD64 || %3
213 ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
214 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
215 jnz short %%save_long_mode_guest
216%endif
217 fxsave [pXState]
218%if CPUMR0_IS_AMD64 || %3
219 jmp %%save_done_32bit_cs_ds
220
221%%save_long_mode_guest:
222 o64 fxsave [pXState]
223
224 xor edx, edx ; RSVD dword = 0 marks a 64-bit (o64 fxsave) image.
225 cmp dword [pXState + CS_OFF_IN_X86FXSTATE], 0
226 jne short %%save_done
227
; The image's FPU CS field is zero: recover the CS/DS selectors via fnstenv
; (32-bit protected-mode layout: FCS at 10h, FDS at 18h) and patch them in.
228 sub rsp, 20h ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
229 fnstenv [rsp]
230 movzx eax, word [rsp + 10h] ; FPU CS selector from the fnstenv image.
231 mov [pXState + CS_OFF_IN_X86FXSTATE], eax
232 movzx eax, word [rsp + 18h] ; FPU DS selector from the fnstenv image.
233 add rsp, 20h
234 mov [pXState + DS_OFF_IN_X86FXSTATE], eax
235%endif
236%%save_done_32bit_cs_ds:
237 mov edx, X86_FXSTATE_RSVD_32BIT_MAGIC
238%%save_done:
239 mov dword [pXState + X86_OFF_FXSTATE_RSVD], edx
240%endmacro ; SAVE_32_OR_64_FPU
241
242
243;;
244; Save the guest state.
245;
246; @uses rax, rdx
247; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
248; @param pXState Define for the register containing the extended state pointer.
249;
250%macro CPUMR0_SAVE_GUEST 0
251 ;
252 ; Load a couple of registers we'll use later in all branches.
253 ;
254 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
255 mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
256
257 ;
258 ; XSAVE or FXSAVE?
259 ;
; A zero fXStateMask (low dword) means XSAVE is not used for the guest state.
260 or eax, eax
261 jz %%guest_fxsave
262
263 ; XSAVE
; XSAVE takes its component mask in edx:eax; eax was loaded above.
264 mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
265%ifdef VBOX_WITH_KERNEL_USING_XMM
266 and eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
267%endif
268%ifdef RT_ARCH_AMD64
269 o64 xsave [pXState]
270%else
271 xsave [pXState]
272%endif
273 jmp %%guest_done
274
275 ; FXSAVE
276%%guest_fxsave:
277 SAVE_32_OR_64_FPU pCpumCpu, pXState, 0
278
279%%guest_done:
280%endmacro ; CPUMR0_SAVE_GUEST
281
282
283;;
284; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did.
285;
286; @param %1 Pointer to CPUMCPU.
287; @param %2 Pointer to XState.
288; @param %3 Force AMD64.
289; @uses xAX, xDX, EFLAGS
290;
291%macro RESTORE_32_OR_64_FPU 3
292%if CPUMR0_IS_AMD64 || %3
293 ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
294 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
295 jz %%restore_32bit_fpu
296 cmp dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC ; Marker written by SAVE_32_OR_64_FPU for 32-bit images.
297 jne short %%restore_64bit_fpu
298%%restore_32bit_fpu:
299%endif
300 fxrstor [pXState]
301%if CPUMR0_IS_AMD64 || %3
302 ; TODO: Restore XMM8-XMM15!
303 jmp short %%restore_fpu_done
304%%restore_64bit_fpu:
305 o64 fxrstor [pXState]
306%%restore_fpu_done:
307%endif
308%endmacro ; RESTORE_32_OR_64_FPU
309
310
311;;
312; Loads the guest state.
313;
314; @uses rax, rdx
315; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
316; @param pXState Define for the register containing the extended state pointer.
317;
318%macro CPUMR0_LOAD_GUEST 0
319 ;
320 ; Load a couple of registers we'll use later in all branches.
321 ;
322 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
323 mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
324
325 ;
326 ; XRSTOR or FXRSTOR?
327 ;
; A zero fXStateMask (low dword) means XRSTOR is not used for the guest state.
328 or eax, eax
329 jz %%guest_fxrstor
330
331 ; XRSTOR
; XRSTOR takes its component mask in edx:eax; eax was loaded above.
332 mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
333%ifdef VBOX_WITH_KERNEL_USING_XMM
334 and eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
335%endif
336%ifdef RT_ARCH_AMD64
337 o64 xrstor [pXState]
338%else
339 xrstor [pXState]
340%endif
341 jmp %%guest_done
342
343 ; FXRSTOR
344%%guest_fxrstor:
345 RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0
346
347%%guest_done:
348%endmacro ; CPUMR0_LOAD_GUEST
349
350
351;;
352; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
353;
354; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
355;
356align 16
357BEGINPROC cpumR0SaveHostRestoreGuestFPUState
358 push xBP
359 SEH64_PUSH_xBP
360 mov xBP, xSP
361 SEH64_SET_FRAME_xBP 0
362SEH64_END_PROLOGUE
363
364 ;
365 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
366 ;
367%ifdef RT_ARCH_AMD64
368 %ifdef RT_OS_WINDOWS
369 mov r11, rcx
370 %else
371 mov r11, rdi
372 %endif
373 %define pCpumCpu r11
374 %define pXState r10
375%else
376 push ebx
377 push esi
378 mov ebx, dword [ebp + 8]
379 %define pCpumCpu ebx
380 %define pXState esi
381%endif
382
383 pushf ; The darwin kernel can get upset or upset things if an
384 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
385
386%ifdef VBOX_WITH_KERNEL_USING_XMM
387 movaps xmm0, xmm0 ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
388%endif
389 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
390
391 ;
392 ; Save the host state (only once until the state is restored again).
393 ;
394 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
395 jnz .already_saved_host
396 CPUMR0_SAVE_HOST
397%ifdef VBOX_WITH_KERNEL_USING_XMM
398 jmp .load_guest
399%endif
400.already_saved_host:
401%ifdef VBOX_WITH_KERNEL_USING_XMM
402 ; If we didn't save the host state, we must save the non-volatile XMM registers.
403 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
404 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
405 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
406 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
407 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
408 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
409 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
410 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
411 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
412 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
413 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
414
415 ;
416 ; Load the guest state.
417 ;
418.load_guest:
419%endif
420 CPUMR0_LOAD_GUEST
421
422%ifdef VBOX_WITH_KERNEL_USING_XMM
423 ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
424 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
425 movdqa xmm6, [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
426 movdqa xmm7, [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
427 movdqa xmm8, [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
428 movdqa xmm9, [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
429 movdqa xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
430 movdqa xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
431 movdqa xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
432 movdqa xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
433 movdqa xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
434 movdqa xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
435%endif
436
437 ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
438 RESTORE_CR0 xCX
439 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM | CPUM_USED_FPU_HOST)
440 popf
441
442%ifdef RT_ARCH_X86
443 pop esi
444 pop ebx
445%endif
446 leave
447 ret
%undef pCpumCpu ; Added: siblings (cpumR0SaveHostFPUState, cpumR0SaveGuestRestoreHostFPUState)
%undef pXState  ; undef these so the defines don't leak past the procedure; do the same here.
448ENDPROC cpumR0SaveHostRestoreGuestFPUState
449
450
451;;
452; Saves the host FPU/SSE/AVX state.
453;
454; @returns VINF_SUCCESS (0) in EAX
455; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
456;
457align 16
458BEGINPROC cpumR0SaveHostFPUState
459 push xBP
460 SEH64_PUSH_xBP
461 mov xBP, xSP
462 SEH64_SET_FRAME_xBP 0
463SEH64_END_PROLOGUE
464
465 ;
466 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
467 ;
468%ifdef RT_ARCH_AMD64
469 %ifdef RT_OS_WINDOWS
470 mov r11, rcx
471 %else
472 mov r11, rdi
473 %endif
474 %define pCpumCpu r11
475 %define pXState r10
476%else
477 push ebx
478 push esi
479 mov ebx, dword [ebp + 8]
480 %define pCpumCpu ebx
481 %define pXState esi
482%endif
483
484 pushf ; The darwin kernel can get upset or upset things if an
485 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
486%ifdef VBOX_WITH_KERNEL_USING_XMM
487 movaps xmm0, xmm0 ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
488%endif
489 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
490
491 CPUMR0_SAVE_HOST
492 ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
493
494 RESTORE_CR0 xCX
495 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM) ; Latter is not necessarily true, but normally yes.
 xor eax, eax ; Return VINF_SUCCESS as documented. Fix: on the XSAVE path
              ; CPUMR0_SAVE_HOST left the low fXStateMask dword (non-zero)
              ; in eax, so the return value was only zero by accident on
              ; FXSAVE-only hosts.
496 popf
497
498%ifdef RT_ARCH_X86
499 pop esi
500 pop ebx
501%endif
502 leave
503 ret
504%undef pCpumCpu
505%undef pXState
506ENDPROC cpumR0SaveHostFPUState
507
508
509;;
510; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
511;
512; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
513;
514align 16
515BEGINPROC cpumR0SaveGuestRestoreHostFPUState
516 push xBP
517 SEH64_PUSH_xBP
518 mov xBP, xSP
519 SEH64_SET_FRAME_xBP 0
520SEH64_END_PROLOGUE
521
522 ;
523 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
524 ;
525%ifdef RT_ARCH_AMD64
526 %ifdef RT_OS_WINDOWS
527 mov r11, rcx
528 %else
529 mov r11, rdi
530 %endif
531 %define pCpumCpu r11
532 %define pXState r10
533%else
534 push ebx
535 push esi
536 mov ebx, dword [ebp + 8]
537 %define pCpumCpu ebx
538 %define pXState esi
539%endif
540 pushf ; The darwin kernel can get upset or upset things if an
541 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
542 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
543
544
545 %ifdef VBOX_WITH_KERNEL_USING_XMM
546 ;
547 ; Copy non-volatile XMM registers to the host state so we can use
548 ; them while saving the guest state (we've gotta do this anyway).
549 ;
550 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
551 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
552 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
553 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
554 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
555 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
556 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
557 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
558 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
559 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
560 movdqa [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
561 %endif
562
563 ;
564 ; Save the guest state if necessary.
565 ;
566 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
567 jz .load_only_host ; Guest FPU state is not in the CPU; nothing to save.
568
569 %ifdef VBOX_WITH_KERNEL_USING_XMM
570 ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
571 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
572 movdqa xmm0, [pXState + XMM_OFF_IN_X86FXSTATE + 000h]
573 movdqa xmm1, [pXState + XMM_OFF_IN_X86FXSTATE + 010h]
574 movdqa xmm2, [pXState + XMM_OFF_IN_X86FXSTATE + 020h]
575 movdqa xmm3, [pXState + XMM_OFF_IN_X86FXSTATE + 030h]
576 movdqa xmm4, [pXState + XMM_OFF_IN_X86FXSTATE + 040h]
577 movdqa xmm5, [pXState + XMM_OFF_IN_X86FXSTATE + 050h]
578 movdqa xmm6, [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
579 movdqa xmm7, [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
580 movdqa xmm8, [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
581 movdqa xmm9, [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
582 movdqa xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
583 movdqa xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
584 movdqa xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
585 movdqa xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
586 movdqa xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
587 movdqa xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
588 %endif
589 CPUMR0_SAVE_GUEST
590
591 ;
592 ; Load the host state.
593 ;
594.load_only_host:
595 CPUMR0_LOAD_HOST
596
597 ;; @todo Restore CR0 + XCR0 bits related to FPU, SSE and AVX* (for IEM).
598 RESTORE_CR0 xCX
599 and dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
600
601 popf
602%ifdef RT_ARCH_X86
603 pop esi
604 pop ebx
605%endif
606 leave
607 ret
608%undef pCpumCpu
609%undef pXState
610ENDPROC cpumR0SaveGuestRestoreHostFPUState
611
612
613%if ARCH_BITS == 32
614 %ifdef VBOX_WITH_64_BITS_GUESTS
615;;
616; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
617;
618; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
619;
620align 16
621BEGINPROC cpumR0RestoreHostFPUState
622 ;
623 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
624 ;
625 push ebp
626 mov ebp, esp
627 push ebx
628 push esi
629 mov ebx, dword [ebp + 8]
630 %define pCpumCpu ebx
631 %define pXState esi
632
633 ;
634 ; Restore host CPU state.
635 ;
636 pushf ; The darwin kernel can get upset or upset things if an
637 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
638 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
639
640 CPUMR0_LOAD_HOST
641
642 RESTORE_CR0 xCX
643 and dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
644 popf
645
646 pop esi
647 pop ebx
648 leave
649 ret
650 %undef pCpumCpu ; Fix: was '%undef pCpumCPu' (typo), which undef'ed a
                   ; nonexistent symbol and left pCpumCpu defined past ENDPROC.
651 %undef pXState
652ENDPROC cpumR0RestoreHostFPUState
653 %endif ; VBOX_WITH_64_BITS_GUESTS
654%endif ; ARCH_BITS == 32
655
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette