Changeset 61068 in vbox for trunk/src/VBox/VMM
Timestamp:
    May 20, 2016 1:24:53 AM
svn:sync-xref-src-repo-rev:
    107335
Location:
    trunk/src/VBox/VMM
Files:
    2 added, 6 edited

Legend for the diffs below: lines prefixed with '+' were added, lines prefixed with '-' were removed, unprefixed lines are unchanged context, and '…' marks a gap between hunks.
trunk/src/VBox/VMM/Makefile.kmk (r60398 → r61068)

         VMMRC/PATMRC.cpp \
        ,) \
+       VMMRZ/CPUMRZ.cpp \
+       VMMRZ/CPUMRZA.asm \
        VMMRZ/DBGFRZ.cpp \
        VMMRZ/PGMRZDynMap.cpp \
…
        VMMR0/TRPMR0A.asm \
        VMMR0/VMMR0.cpp \
+       VMMRZ/CPUMRZ.cpp \
+       VMMRZ/CPUMRZA.asm \
        VMMRZ/DBGFRZ.cpp \
        VMMRZ/VMMRZ.cpp \
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r61064 → r61068)

     CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
 #else
-    CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
-    /** @todo RZ: FIXME */
-//# error "Implement me"
+    CPUMRZFpuStatePrepareHostCpuForUse(IEMCPU_TO_VMCPU(pIemCpu));
 #endif
 }
…
     NOREF(pIemCpu);
 #else
-    /** @todo RZ: FIXME */
-//# error "Implement me"
+    CPUMRZFpuStateActualizeForRead(IEMCPU_TO_VMCPU(pIemCpu));
 #endif
 }
…
 {
 #ifdef IN_RING3
-    NOREF(pIemCpu);
+    CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
 #else
-    //CPUMRZFpuActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
-    iemFpuPrepareUsage(pIemCpu);
+    CPUMRZFpuStateActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
 #endif
 }
…
     NOREF(pIemCpu);
 #else
-    iemFpuActualizeStateForRead(pIemCpu);
+    CPUMRZFpuStateActualizeSseForRead(IEMCPU_TO_VMCPU(pIemCpu));
 #endif
 }
…
     CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
 #else
-    iemFpuActualizeStateForChange(pIemCpu);
+    CPUMRZFpuStateActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
 #endif
 }
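The same pattern repeats in every hunk above: in ring-3 IEM works on the in-memory guest FPU state and only needs to flag it as dirty for REM, while in ring-0 and raw-mode context ("RZ") the guest state may be live in the host CPU's registers, so the new CPUMRZFpuState* calls must load or flush it first. Those functions live in the newly added VMMRZ/CPUMRZ.cpp, which this page does not show; the sketch below is therefore only a conceptual illustration with hypothetical flag and helper names, not the actual VBox code.

    #include <stdio.h>

    /* Conceptual sketch only: the flag and helper names here are made up
     * and are NOT the CPUMRZ API added by this changeset. */
    typedef struct CPU
    {
        unsigned fUseFlags;
    } CPU;

    #define FPU_GUEST_LOADED 0x1    /* guest FPU state currently lives in host registers */

    static void SaveHostFpuState(CPU *pCpu)       { (void)pCpu; puts("save host FPU"); }
    static void SaveGuestFpuToMemory(CPU *pCpu)   { (void)pCpu; puts("flush guest FPU to memory"); }
    static void LoadGuestFpuFromMemory(CPU *pCpu) { (void)pCpu; puts("load guest FPU onto CPU"); }

    /* "Actualize for read": reads must observe current values, so if the
       guest state is live on the host CPU, flush it back to the buffer. */
    static void FpuStateActualizeForRead(CPU *pCpu)
    {
        if (pCpu->fUseFlags & FPU_GUEST_LOADED)
            SaveGuestFpuToMemory(pCpu);
    }

    /* "Prepare for use": before executing FPU instructions on the guest's
       behalf, save the host state once and load the guest state if needed. */
    static void FpuStatePrepareHostCpuForUse(CPU *pCpu)
    {
        if (!(pCpu->fUseFlags & FPU_GUEST_LOADED))
        {
            SaveHostFpuState(pCpu);
            LoadGuestFpuFromMemory(pCpu);
            pCpu->fUseFlags |= FPU_GUEST_LOADED;
        }
    }

    int main(void)
    {
        CPU cpu = { 0 };
        FpuStatePrepareHostCpuForUse(&cpu);  /* saves host, loads guest */
        FpuStateActualizeForRead(&cpu);      /* guest live: flush to memory */
        return 0;
    }

The point of the read/use split is that reads only need the memory copy to be current, while writes additionally require the host state to have been saved so it can be restored later.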
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r61061 → r61068)

     /* Save the host state if necessary. */
     if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST))
-        cpumR0SaveHostFPUState(&pVCpu->cpum.s);
+        cpumRZSaveHostFPUState(&pVCpu->cpum.s);

     /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r61058 → r61068)

 ; Use define because I'm too lazy to convert the struct.
 %define XMM_OFF_IN_X86FXSTATE   160
-%define IP_OFF_IN_X86FXSTATE    08h
-%define CS_OFF_IN_X86FXSTATE    0ch
-%define DS_OFF_IN_X86FXSTATE    14h
-
-;; For numeric expressions
-%ifdef RT_ARCH_AMD64
- %define CPUMR0_IS_AMD64 1
-%else
- %define CPUMR0_IS_AMD64 0
-%endif


…
 BEGINCODE

-%if 0 ; Currently not used anywhere.
-;;
-; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
-;
-; Cleans the FPU state, if necessary, before restoring the FPU.
-;
-; This macro ASSUMES CR0.TS is not set!
-;
-; @param    xDX     Pointer to CPUMCPU.
-; @uses     xAX, EFLAGS
-;
-; Changes here should also be reflected in CPUMRCA.asm's copy!
-;
-%macro CLEANFPU 0
-        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
-        jz      .nothing_to_clean
-
-        xor     eax, eax
-        fnstsw  ax                      ; FSW -> AX.
-        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
-                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
-        jz      .clean_fpu
-        fnclex
-
-.clean_fpu:
-        ffree   st7                     ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs.
-                                        ; for the upcoming push (load)
-        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
-.nothing_to_clean:
-%endmacro
-%endif ; Unused.
-
-
-;;
-; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
-;
-; This is used to avoid FPU exceptions when touching the FPU state.
-;
-; @param    %1      Register to save the old CR0 in (pass to RESTORE_CR0).
-; @param    %2      Temporary scratch register.
-; @uses     EFLAGS, CR0
-;
-%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
-        xor     %1, %1
-        mov     %2, cr0
-        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state.
-        jz      %%skip_cr0_write
-        mov     %1, %2                  ; Save old CR0
-        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
-        mov     cr0, %2
-%%skip_cr0_write:
-%endmacro
-
-;;
-; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS change it.
-;
-; @param    %1      The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
-;
-%macro RESTORE_CR0 1
-        cmp     %1, 0
-        je      %%skip_cr0_restore
-        mov     cr0, %1
-%%skip_cr0_restore:
-%endmacro
-
-
-;;
-; Saves the host state.
-;
-; @uses     rax, rdx
-; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the register containing the extended state pointer.
-;
-%macro CPUMR0_SAVE_HOST 0
-        ;
-        ; Load a couple of registers we'll use later in all branches.
-        ;
-        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
-        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
-
-        ;
-        ; XSAVE or FXSAVE?
-        ;
-        or      eax, eax
-        jz      %%host_fxsave
-
-        ; XSAVE
-        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
-%ifdef RT_ARCH_AMD64
-        o64 xsave [pXState]
-%else
-        xsave   [pXState]
-%endif
-        jmp     %%host_done
-
-        ; FXSAVE
-%%host_fxsave:
-%ifdef RT_ARCH_AMD64
-        o64 fxsave [pXState]            ; Use explicit REX prefix. See @bugref{6398}.
-%else
-        fxsave  [pXState]
-%endif
-
-%%host_done:
-%endmacro ; CPUMR0_SAVE_HOST
-
-
-;;
-; Loads the host state.
-;
-; @uses     rax, rdx
-; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the register containing the extended state pointer.
-;
-%macro CPUMR0_LOAD_HOST 0
-        ;
-        ; Load a couple of registers we'll use later in all branches.
-        ;
-        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
-        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
-
-        ;
-        ; XRSTOR or FXRSTOR?
-        ;
-        or      eax, eax
-        jz      %%host_fxrstor
-
-        ; XRSTOR
-        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
-%ifdef RT_ARCH_AMD64
-        o64 xrstor [pXState]
-%else
-        xrstor  [pXState]
-%endif
-        jmp     %%host_done
-
-        ; FXRSTOR
-%%host_fxrstor:
-%ifdef RT_ARCH_AMD64
-        o64 fxrstor [pXState]           ; Use explicit REX prefix. See @bugref{6398}.
-%else
-        fxrstor [pXState]
-%endif
-
-%%host_done:
-%endmacro ; CPUMR0_LOAD_HOST
-
-
-
-;; Macro for FXSAVE for the guest FPU but tries to figure out whether to
-; save the 32-bit FPU state or 64-bit FPU state.
-;
-; @param    %1      Pointer to CPUMCPU.
-; @param    %2      Pointer to XState.
-; @param    %3      Force AMD64
-; @uses     xAX, xDX, EFLAGS, 20h of stack.
-;
-%macro SAVE_32_OR_64_FPU 3
-%if CPUMR0_IS_AMD64 || %3
-        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
-        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
-        jnz     short %%save_long_mode_guest
-%endif
-        fxsave  [pXState]
-%if CPUMR0_IS_AMD64 || %3
-        jmp     %%save_done_32bit_cs_ds
-
-%%save_long_mode_guest:
-        o64 fxsave [pXState]
-
-        xor     edx, edx
-        cmp     dword [pXState + CS_OFF_IN_X86FXSTATE], 0
-        jne     short %%save_done
-
-        sub     rsp, 20h                ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
-        fnstenv [rsp]
-        movzx   eax, word [rsp + 10h]
-        mov     [pXState + CS_OFF_IN_X86FXSTATE], eax
-        movzx   eax, word [rsp + 18h]
-        add     rsp, 20h
-        mov     [pXState + DS_OFF_IN_X86FXSTATE], eax
-%endif
-%%save_done_32bit_cs_ds:
-        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
-%%save_done:
-        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
-%endmacro ; SAVE_32_OR_64_FPU
-
-
-;;
-; Save the guest state.
-;
-; @uses     rax, rdx
-; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the register containing the extended state pointer.
-;
-%macro CPUMR0_SAVE_GUEST 0
-        ;
-        ; Load a couple of registers we'll use later in all branches.
-        ;
-        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
-        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
-
-        ;
-        ; XSAVE or FXSAVE?
-        ;
-        or      eax, eax
-        jz      %%guest_fxsave
-
-        ; XSAVE
-        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
-%ifdef VBOX_WITH_KERNEL_USING_XMM
-        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
-%endif
-%ifdef RT_ARCH_AMD64
-        o64 xsave [pXState]
-%else
-        xsave   [pXState]
-%endif
-        jmp     %%guest_done
-
-        ; FXSAVE
-%%guest_fxsave:
-        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0
-
-%%guest_done:
-%endmacro ; CPUMR0_SAVE_GUEST
-
-
-;;
-; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did.
-;
-; @param    %1      Pointer to CPUMCPU.
-; @param    %2      Pointer to XState.
-; @param    %3      Force AMD64.
-; @uses     xAX, xDX, EFLAGS
-;
-%macro RESTORE_32_OR_64_FPU 3
-%if CPUMR0_IS_AMD64 || %3
-        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
-        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
-        jz      %%restore_32bit_fpu
-        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
-        jne     short %%restore_64bit_fpu
-%%restore_32bit_fpu:
-%endif
-        fxrstor [pXState]
-%if CPUMR0_IS_AMD64 || %3
-        ; TODO: Restore XMM8-XMM15!
-        jmp     short %%restore_fpu_done
-%%restore_64bit_fpu:
-        o64 fxrstor [pXState]
-%%restore_fpu_done:
-%endif
-%endmacro ; RESTORE_32_OR_64_FPU
-
-
-;;
-; Loads the guest state.
-;
-; @uses     rax, rdx
-; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the register containing the extended state pointer.
-;
-%macro CPUMR0_LOAD_GUEST 0
-        ;
-        ; Load a couple of registers we'll use later in all branches.
-        ;
-        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
-        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
-
-        ;
-        ; XRSTOR or FXRSTOR?
-        ;
-        or      eax, eax
-        jz      %%guest_fxrstor
-
-        ; XRSTOR
-        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
-%ifdef VBOX_WITH_KERNEL_USING_XMM
-        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
-%endif
-%ifdef RT_ARCH_AMD64
-        o64 xrstor [pXState]
-%else
-        xrstor  [pXState]
-%endif
-        jmp     %%guest_done
-
-        ; FXRSTOR
-%%guest_fxrstor:
-        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0
-
-%%guest_done:
-%endmacro ; CPUMR0_LOAD_GUEST

…
         ; If we didn't save the host state, we must save the non-volatile XMM registers.
         mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+        movdqa  [pXState + X86FXSTATE.xmm6 ], xmm6
+        movdqa  [pXState + X86FXSTATE.xmm7 ], xmm7
+        movdqa  [pXState + X86FXSTATE.xmm8 ], xmm8
+        movdqa  [pXState + X86FXSTATE.xmm9 ], xmm9
+        movdqa  [pXState + X86FXSTATE.xmm10], xmm10
+        movdqa  [pXState + X86FXSTATE.xmm11], xmm11
+        movdqa  [pXState + X86FXSTATE.xmm12], xmm12
+        movdqa  [pXState + X86FXSTATE.xmm13], xmm13
+        movdqa  [pXState + X86FXSTATE.xmm14], xmm14
+        movdqa  [pXState + X86FXSTATE.xmm15], xmm15

         ;
…
         ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
         mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
-        movdqa  xmm6,  [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
-        movdqa  xmm7,  [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
-        movdqa  xmm8,  [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
-        movdqa  xmm9,  [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
-        movdqa  xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
-        movdqa  xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
-        movdqa  xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
-        movdqa  xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
-        movdqa  xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
-        movdqa  xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
+        movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
+        movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]
+        movdqa  xmm8,  [pXState + X86FXSTATE.xmm8]
+        movdqa  xmm9,  [pXState + X86FXSTATE.xmm9]
+        movdqa  xmm10, [pXState + X86FXSTATE.xmm10]
+        movdqa  xmm11, [pXState + X86FXSTATE.xmm11]
+        movdqa  xmm12, [pXState + X86FXSTATE.xmm12]
+        movdqa  xmm13, [pXState + X86FXSTATE.xmm13]
+        movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
+        movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
 %endif
…
         ret
 ENDPROC   cpumR0SaveHostRestoreGuestFPUState
-
-
-;;
-; Saves the host FPU/SSE/AVX state.
-;
-; @returns  VINF_SUCCESS (0) in EAX
-; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
-;
-align 16
-BEGINPROC cpumR0SaveHostFPUState
-        push    xBP
-        SEH64_PUSH_xBP
-        mov     xBP, xSP
-        SEH64_SET_FRAME_xBP 0
-SEH64_END_PROLOGUE
-
-        ;
-        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
-        ;
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-        mov     r11, rcx
- %else
-        mov     r11, rdi
- %endif
- %define pCpumCpu   r11
- %define pXState    r10
-%else
-        push    ebx
-        push    esi
-        mov     ebx, dword [ebp + 8]
- %define pCpumCpu   ebx
- %define pXState    esi
-%endif
-
-        pushf                           ; The darwin kernel can get upset or upset things if an
-        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
-%ifdef VBOX_WITH_KERNEL_USING_XMM
-        movaps  xmm0, xmm0              ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
-%endif
-        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
-
-        CPUMR0_SAVE_HOST
-        ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
-
-        RESTORE_CR0 xCX
-        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM) ; Latter is not necessarily true, but normally yes.
-        popf
-
-%ifdef RT_ARCH_X86
-        pop     esi
-        pop     ebx
-%endif
-        leave
-        ret
-%undef pCpumCpu
-%undef pXState
-ENDPROC   cpumR0SaveHostFPUState

…
         ;
         mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
-        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+        movdqa  [pXState + X86FXSTATE.xmm6], xmm6
+        movdqa  [pXState + X86FXSTATE.xmm7], xmm7
+        movdqa  [pXState + X86FXSTATE.xmm8], xmm8
+        movdqa  [pXState + X86FXSTATE.xmm9], xmm9
+        movdqa  [pXState + X86FXSTATE.xmm10], xmm10
+        movdqa  [pXState + X86FXSTATE.xmm11], xmm11
+        movdqa  [pXState + X86FXSTATE.xmm12], xmm12
+        movdqa  [pXState + X86FXSTATE.xmm13], xmm13
+        movdqa  [pXState + X86FXSTATE.xmm14], xmm14
+        movdqa  [pXState + X86FXSTATE.xmm15], xmm15
 %endif

…
         ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
         mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
-        movdqa  xmm0,  [pXState + XMM_OFF_IN_X86FXSTATE + 000h]
-        movdqa  xmm1,  [pXState + XMM_OFF_IN_X86FXSTATE + 010h]
-        movdqa  xmm2,  [pXState + XMM_OFF_IN_X86FXSTATE + 020h]
-        movdqa  xmm3,  [pXState + XMM_OFF_IN_X86FXSTATE + 030h]
-        movdqa  xmm4,  [pXState + XMM_OFF_IN_X86FXSTATE + 040h]
-        movdqa  xmm5,  [pXState + XMM_OFF_IN_X86FXSTATE + 050h]
-        movdqa  xmm6,  [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
-        movdqa  xmm7,  [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
-        movdqa  xmm8,  [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
-        movdqa  xmm9,  [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
-        movdqa  xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
-        movdqa  xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
-        movdqa  xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
-        movdqa  xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
-        movdqa  xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
-        movdqa  xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
+        movdqa  xmm0,  [pXState + X86FXSTATE.xmm0]
+        movdqa  xmm1,  [pXState + X86FXSTATE.xmm1]
+        movdqa  xmm2,  [pXState + X86FXSTATE.xmm2]
+        movdqa  xmm3,  [pXState + X86FXSTATE.xmm3]
+        movdqa  xmm4,  [pXState + X86FXSTATE.xmm4]
+        movdqa  xmm5,  [pXState + X86FXSTATE.xmm5]
+        movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
+        movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]
+        movdqa  xmm8,  [pXState + X86FXSTATE.xmm8]
+        movdqa  xmm9,  [pXState + X86FXSTATE.xmm9]
+        movdqa  xmm10, [pXState + X86FXSTATE.xmm10]
+        movdqa  xmm11, [pXState + X86FXSTATE.xmm11]
+        movdqa  xmm12, [pXState + X86FXSTATE.xmm12]
+        movdqa  xmm13, [pXState + X86FXSTATE.xmm13]
+        movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
+        movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
 %endif
         CPUMR0_SAVE_GUEST
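The mechanical part of this diff swaps hand-computed byte offsets into the 512-byte FXSAVE image for named X86FXSTATE fields. Those offsets are architectural (Intel SDM: FPU IP at 08h, FPU CS at 0ch, FPU DS at 14h, XMM registers from 0a0h), so the equivalence is easy to check. The struct below is an illustrative layout written for this note, not VBox's actual X86FXSTATE declaration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative rendering of the architectural 512-byte FXSAVE image,
     * showing that the removed numeric defines and the new field names
     * denote the same offsets. NOT the VBox X86FXSTATE definition. */
    typedef struct FXSAVEAREA
    {
        uint16_t FCW, FSW;
        uint8_t  FTW, bRsvd;
        uint16_t FOP;
        uint32_t FPUIP;             /* 0x08: the removed IP_OFF_IN_X86FXSTATE */
        uint16_t FPUCS, Rsrvd1;     /* 0x0c: the removed CS_OFF_IN_X86FXSTATE */
        uint32_t FPUDP;
        uint16_t FPUDS, Rsrvd2;     /* 0x14: the removed DS_OFF_IN_X86FXSTATE */
        uint32_t MXCSR, MXCSR_MASK;
        uint8_t  aRegs[8][16];      /* ST0..ST7 / MM0..MM7 at 0x20 */
        uint8_t  aXMM[16][16];      /* XMM0..XMM15 at 0xa0 (decimal 160) */
        uint8_t  au8Rsvd[96];       /* reserved/available tail up to 512 */
    } FXSAVEAREA;

    int main(void)
    {
        assert(offsetof(FXSAVEAREA, FPUIP) == 0x08);
        assert(offsetof(FXSAVEAREA, FPUCS) == 0x0c);
        assert(offsetof(FXSAVEAREA, FPUDS) == 0x14);
        /* XMM_OFF_IN_X86FXSTATE was 160; "+ 060h" addressed xmm6: */
        assert(offsetof(FXSAVEAREA, aXMM)    == 160);
        assert(offsetof(FXSAVEAREA, aXMM[6]) == 160 + 0x60);
        assert(sizeof(FXSAVEAREA) == 512);
        return 0;
    }

With named fields the assembler computes the offsets from the struct definition, so a layout mistake becomes a build-time error rather than silent state corruption, which is presumably the motivation for the cleanup.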
trunk/src/VBox/VMM/include/CPUMInternal.h (r61058 → r61068)

 DECLASM(void)      cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
 DECLASM(void)      cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
-DECLASM(void)      cpumR0SaveHostFPUState(PCPUMCPU pCPUM);
 # if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
 DECLASM(void)      cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
…
 # endif

+# if defined(IN_RC) || defined(IN_RING0)
+DECLASM(void)      cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
+DECLASM(void)      cpumRZSaveGuestFpuState(PCPUMCPU pCPUM);
+DECLASM(void)      cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
+# endif
+
 RT_C_DECLS_END
 #endif /* !VBOX_FOR_DTRACE_LIB */
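The three cpumRZ* workers declared here are shared between ring-0 and raw-mode context; cpumRZSaveHostFPUState replaces the ring-0-only cpumR0SaveHostFPUState removed above. Only the CPUM_USED_FPU_HOST guard and the call itself are taken from the CPUMR0.cpp hunk earlier on this page; everything else in the snippet below, including the stub bodies and types, is a made-up harness so the calling pattern compiles on its own:

    #include <stdio.h>

    /* Stand-in type and flag so the pattern is self-contained; NOT VBox code. */
    #define CPUM_USED_FPU_HOST 0x01u

    typedef struct CPUMCPU { unsigned fUseFlags; } CPUMCPU;

    static void cpumRZSaveHostFPUState(CPUMCPU *pCpum)   /* stub, real one is in CPUMRZA.asm */
    {
        puts("host FPU state saved");
        pCpum->fUseFlags |= CPUM_USED_FPU_HOST;
    }

    int main(void)
    {
        CPUMCPU cpum = { 0 };

        /* Save the host state if necessary (cf. the CPUMR0.cpp hunk above). */
        if (!(cpum.fUseFlags & CPUM_USED_FPU_HOST))
            cpumRZSaveHostFPUState(&cpum);

        /* Second request is a no-op thanks to the flag: */
        if (!(cpum.fUseFlags & CPUM_USED_FPU_HOST))
            cpumRZSaveHostFPUState(&cpum);
        return 0;
    }

The flag makes the save idempotent: however many times a world-switch path asks for the host state, the expensive fxsave/xsave happens once.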
trunk/src/VBox/VMM/include/CPUMInternal.mac (r61058 → r61068)

 %include "VBox/asmdefs.mac"
 %include "VBox/vmm/cpum.mac"
+
+
+;; For numeric expressions
+%ifdef RT_ARCH_AMD64
+ %define CPUM_IS_AMD64 1
+%else
+ %define CPUM_IS_AMD64 0
+%endif
+

 ;;
…
 %endmacro

+
+
+%if 0 ; Currently not used anywhere.
+;;
+; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
+;
+; Cleans the FPU state, if necessary, before restoring the FPU.
+;
+; This macro ASSUMES CR0.TS is not set!
+;
+; @param    xDX     Pointer to CPUMCPU.
+; @uses     xAX, EFLAGS
+;
+; Changes here should also be reflected in CPUMRCA.asm's copy!
+;
+%macro CLEANFPU 0
+        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
+        jz      .nothing_to_clean
+
+        xor     eax, eax
+        fnstsw  ax                      ; FSW -> AX.
+        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
+                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
+        jz      .clean_fpu
+        fnclex
+
+.clean_fpu:
+        ffree   st7                     ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs.
+                                        ; for the upcoming push (load)
+        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
+.nothing_to_clean:
+%endmacro
+%endif ; Unused.
+
+
+;;
+; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
+;
+; This is used to avoid FPU exceptions when touching the FPU state.
+;
+; @param    %1      Register to save the old CR0 in (pass to RESTORE_CR0).
+; @param    %2      Temporary scratch register.
+; @uses     EFLAGS, CR0
+;
+%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
+        xor     %1, %1
+        mov     %2, cr0
+        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state.
+        jz      %%skip_cr0_write
+        mov     %1, %2                  ; Save old CR0
+        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
+        mov     cr0, %2
+%%skip_cr0_write:
+%endmacro
+
+;;
+; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS change it.
+;
+; @param    %1      The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
+;
+%macro RESTORE_CR0 1
+        cmp     %1, 0
+        je      %%skip_cr0_restore
+        mov     cr0, %1
+%%skip_cr0_restore:
+%endmacro
+
+
+;;
+; Saves the host state.
+;
+; @uses     rax, rdx
+; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_SAVE_HOST 0
+        ;
+        ; Load a couple of registers we'll use later in all branches.
+        ;
+        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
+        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
+
+        ;
+        ; XSAVE or FXSAVE?
+        ;
+        or      eax, eax
+        jz      %%host_fxsave
+
+        ; XSAVE
+        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
+%ifdef RT_ARCH_AMD64
+        o64 xsave [pXState]
+%else
+        xsave   [pXState]
+%endif
+        jmp     %%host_done
+
+        ; FXSAVE
+%%host_fxsave:
+%ifdef RT_ARCH_AMD64
+        o64 fxsave [pXState]            ; Use explicit REX prefix. See @bugref{6398}.
+%else
+        fxsave  [pXState]
+%endif
+
+%%host_done:
+%endmacro ; CPUMR0_SAVE_HOST
+
+
+;;
+; Loads the host state.
+;
+; @uses     rax, rdx
+; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_LOAD_HOST 0
+        ;
+        ; Load a couple of registers we'll use later in all branches.
+        ;
+        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
+        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
+
+        ;
+        ; XRSTOR or FXRSTOR?
+        ;
+        or      eax, eax
+        jz      %%host_fxrstor
+
+        ; XRSTOR
+        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
+%ifdef RT_ARCH_AMD64
+        o64 xrstor [pXState]
+%else
+        xrstor  [pXState]
+%endif
+        jmp     %%host_done
+
+        ; FXRSTOR
+%%host_fxrstor:
+%ifdef RT_ARCH_AMD64
+        o64 fxrstor [pXState]           ; Use explicit REX prefix. See @bugref{6398}.
+%else
+        fxrstor [pXState]
+%endif
+
+%%host_done:
+%endmacro ; CPUMR0_LOAD_HOST
+
+
+
+;; Macro for FXSAVE for the guest FPU but tries to figure out whether to
+; save the 32-bit FPU state or 64-bit FPU state.
+;
+; @param    %1      Pointer to CPUMCPU.
+; @param    %2      Pointer to XState.
+; @param    %3      Force AMD64
+; @uses     xAX, xDX, EFLAGS, 20h of stack.
+;
+%macro SAVE_32_OR_64_FPU 3
+%if CPUM_IS_AMD64 || %3
+        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
+        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
+        jnz     short %%save_long_mode_guest
+%endif
+        fxsave  [pXState]
+%if CPUM_IS_AMD64 || %3
+        jmp     %%save_done_32bit_cs_ds
+
+%%save_long_mode_guest:
+        o64 fxsave [pXState]
+
+        xor     edx, edx
+        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
+        jne     short %%save_done
+
+        sub     rsp, 20h                ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
+        fnstenv [rsp]
+        movzx   eax, word [rsp + 10h]
+        mov     [pXState + X86FXSTATE.FPUCS], eax
+        movzx   eax, word [rsp + 18h]
+        add     rsp, 20h
+        mov     [pXState + X86FXSTATE.FPUDS], eax
+%endif
+%%save_done_32bit_cs_ds:
+        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
+%%save_done:
+        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
+%endmacro ; SAVE_32_OR_64_FPU
+
+
+;;
+; Save the guest state.
+;
+; @uses     rax, rdx
+; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_SAVE_GUEST 0
+        ;
+        ; Load a couple of registers we'll use later in all branches.
+        ;
+        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
+        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
+
+        ;
+        ; XSAVE or FXSAVE?
+        ;
+        or      eax, eax
+        jz      %%guest_fxsave
+
+        ; XSAVE
+        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
+%endif
+%ifdef RT_ARCH_AMD64
+        o64 xsave [pXState]
+%else
+        xsave   [pXState]
+%endif
+        jmp     %%guest_done
+
+        ; FXSAVE
+%%guest_fxsave:
+        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0
+
+%%guest_done:
+%endmacro ; CPUMR0_SAVE_GUEST
+
+
+;;
+; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did.
+;
+; @param    %1      Pointer to CPUMCPU.
+; @param    %2      Pointer to XState.
+; @param    %3      Force AMD64.
+; @uses     xAX, xDX, EFLAGS
+;
+%macro RESTORE_32_OR_64_FPU 3
+%if CPUM_IS_AMD64 || %3
+        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
+        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
+        jz      %%restore_32bit_fpu
+        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
+        jne     short %%restore_64bit_fpu
+%%restore_32bit_fpu:
+%endif
+        fxrstor [pXState]
+%if CPUM_IS_AMD64 || %3
+        ; TODO: Restore XMM8-XMM15!
+        jmp     short %%restore_fpu_done
+%%restore_64bit_fpu:
+        o64 fxrstor [pXState]
+%%restore_fpu_done:
+%endif
+%endmacro ; RESTORE_32_OR_64_FPU
+
+
+;;
+; Loads the guest state.
+;
+; @uses     rax, rdx
+; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_LOAD_GUEST 0
+        ;
+        ; Load a couple of registers we'll use later in all branches.
+        ;
+        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
+        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
+
+        ;
+        ; XRSTOR or FXRSTOR?
+        ;
+        or      eax, eax
+        jz      %%guest_fxrstor
+
+        ; XRSTOR
+        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
+%endif
+%ifdef RT_ARCH_AMD64
+        o64 xrstor [pXState]
+%else
+        xrstor  [pXState]
+%endif
+        jmp     %%guest_done
+
+        ; FXRSTOR
+%%guest_fxrstor:
+        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0
+
+%%guest_done:
+%endmacro ; CPUMR0_LOAD_GUEST
+
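Of the macros moved into this file (they previously lived in CPUMR0A.asm, see the removals above), SAVE_CR0_CLEAR_FPU_TRAPS and RESTORE_CR0 carry the subtlest logic: executing fxsave/fxrstor with CR0.TS or CR0.EM set raises #NM, so the pair clears those bits first and remembers the old CR0 only when a write was actually needed, with the zero return value doubling as a "nothing to restore" marker. Below is a user-mode C rendering of the same logic; ReadCr0/WriteCr0 are simulated stand-ins for the privileged CR0 accesses, so the sketch can actually run:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_EM  0x00000004u   /* bit 2: trap all FPU instructions (#NM) */
    #define X86_CR0_TS  0x00000008u   /* bit 3: trap on next FPU use (lazy switching) */

    static uint32_t g_uCr0 = X86_CR0_TS;          /* pretend TS is currently set */
    static uint32_t ReadCr0(void)        { return g_uCr0; }
    static void     WriteCr0(uint32_t u) { g_uCr0 = u; }

    /* SAVE_CR0_CLEAR_FPU_TRAPS: clear CR0.TS/CR0.EM so fxsave & friends
       don't fault; return the old CR0 if a write was needed, else 0. */
    static uint32_t SaveCr0ClearFpuTraps(void)
    {
        uint32_t uOldCr0 = 0;                     /* 0 == "nothing to restore" */
        uint32_t uCr0    = ReadCr0();
        if (uCr0 & (X86_CR0_TS | X86_CR0_EM))
        {
            uOldCr0 = uCr0;                       /* save old CR0 */
            WriteCr0(uCr0 & ~(X86_CR0_TS | X86_CR0_EM));
        }
        return uOldCr0;
    }

    /* RESTORE_CR0: write CR0 back only if SAVE_CR0_CLEAR_FPU_TRAPS changed it. */
    static void RestoreCr0(uint32_t uOldCr0)
    {
        if (uOldCr0 != 0)
            WriteCr0(uOldCr0);
    }

    int main(void)
    {
        uint32_t uSaved = SaveCr0ClearFpuTraps();
        /* ... fxsave/fxrstor would be safe to execute here ... */
        RestoreCr0(uSaved);
        printf("CR0 restored to %#x\n", (unsigned)g_uCr0);
        return 0;
    }

Skipping the CR0 write when neither trap bit is set avoids an expensive serializing control-register write on the common path.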