Changeset 49019 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Oct 10, 2013 8:45:11 AM
- svn:sync-xref-src-repo-rev: 89768
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 4 edited
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
--- r48683
+++ r49019

@@ -117 +117 @@
  * for the guest which do not exist on this CPU. We have seen systems where the
  * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
- * @ {bugref5436}.
+ * @bugref{5436}.
  *
  * @note This function might be called simultaneously on more than one CPU!
@@ -235 +235 @@
     uint32_t cExt = 0;
     ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
-    if (    cExt >= 0x80000001
-        &&  cExt <= 0x8000ffff)
+    if (ASMIsValidExtRange(cExt))
     {
         uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
@@ -280 +279 @@
 
 /**
- * Lazily sync the guest-FPU/XMM state if possible.
- *
- * Loads the guest-FPU state, if it isn't already loaded, into the CPU if the
- * guest is not expecting a #NM trap.
+ * Trap handler for device-not-available fault (#NM).
+ * Device not available, FP or (F)WAIT instruction.
  *
  * @returns VBox status code.
@@ -289 +286 @@
  * @retval VINF_EM_RAW_GUEST_TRAP if it is a guest trap.
  *
- * @remarks This relies on CPUMIsGuestFPUStateActive() reflecting reality.
- */
-VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
@@ -299 +298 @@
     if (CPUMIsGuestFPUStateActive(pVCpu))
     {
-        Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM))
-               ||  ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)));
+        Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
+               ||  ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
         return VINF_EM_RAW_GUEST_TRAP;
     }
@@ -338 +337 @@
     }
 
+    return CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
+}
+
+
+/**
+ * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
+ *
+ * @returns VBox status code.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     if (CPUMIsGuestInLongModeEx(pCtx))
@@ -346 +361 @@
         cpumR0SaveHostFPUState(&pVCpu->cpum.s);
 
-        /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
+        /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
         pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
     }
@@ -352 +367 @@
 #endif
     {
-#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
-# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
         Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
         /** @todo Move the FFXR handling down into
-         * cpumR0SaveHostRestoreguestFPUState to optimize the
+         * cpumR0SaveHostRestoreGuestFPUState to optimize the
          * VBOX_WITH_KERNEL_USING_XMM handling. */
         /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
-        uint64_t SavedEFER = 0;
+        uint64_t uHostEfer    = 0;
+        bool     fRestoreEfer = false;
         if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
         {
-            SavedEFER = ASMRdMsr(MSR_K6_EFER);
-            if (SavedEFER & MSR_K6_EFER_FFXSR)
+            uHostEfer = ASMRdMsr(MSR_K6_EFER);
+            if (uHostEfer & MSR_K6_EFER_FFXSR)
             {
-                ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
+                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
                 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
+                fRestoreEfer = true;
             }
         }
@@ -374 +389 @@
 
         /* Restore EFER. */
-        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
-            ASMWrMsr(MSR_K6_EFER, SavedEFER);
-
-# else
-        uint64_t oldMsrEFERHost = 0;
-        uint32_t oldCR0 = ASMGetCR0();
-
-        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
-        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
-        {
-            /** @todo Do we really need to read this every time?? The host could change this on the fly though.
-             * bird: what about starting by skipping the ASMWrMsr below if we didn't
-             *       change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
-            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
-            if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
-            {
-                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
-                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
-            }
-        }
-
-        /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
-        int rc = CPUMHandleLazyFPU(pVCpu);
-        AssertRC(rc);
-        Assert(CPUMIsGuestFPUStateActive(pVCpu));
-
-        /* Restore EFER MSR */
-        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
-            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
-
-        /* CPUMHandleLazyFPU could have changed CR0; restore it. */
-        ASMSetCR0(oldCR0);
-# endif
-
-#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
-
-        /*
-         * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
-         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
-         */
-        pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
-        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
-            pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
-
-        cpumR0LoadFPU(pCtx);
-
-        /*
-         * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
-         *
-         * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstore: the XMM state isn't saved/restored
-         */
-        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
-        {
-            /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
-            uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);
-
-            if (msrEFERHost & MSR_K6_EFER_FFXSR)
-            {
-                /* fxrstor doesn't restore the XMM state! */
-                cpumR0LoadXMM(pCtx);
-                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
-            }
-        }
-
-#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
+        if (fRestoreEfer)
+            ASMWrMsr(MSR_K6_EFER, uHostEfer);
     }
@@ -474 +426 @@
 #endif
     {
-#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
-# ifdef VBOX_WITH_KERNEL_USING_XMM
+#ifdef VBOX_WITH_KERNEL_USING_XMM
         /*
          * We've already saved the XMM registers in the assembly wrapper, so
@@ -486 +437 @@
         uint128_t aGuestXmmRegs[16];
         memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
-# endif
+#endif
 
         /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
-        uint64_t oldMsrEFERHost = 0;
-        bool fRestoreEfer = false;
+        uint64_t uHostEfer    = 0;
+        bool     fRestoreEfer = false;
         if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
         {
-            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
-            if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
+            uHostEfer = ASMRdMsr(MSR_K6_EFER);
+            if (uHostEfer & MSR_K6_EFER_FFXSR)
             {
-                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
+                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
                 fRestoreEfer = true;
             }
         }
+
         cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
 
         /* Restore EFER MSR */
         if (fRestoreEfer)
-            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
-
-# ifdef VBOX_WITH_KERNEL_USING_XMM
+            ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
+
+#ifdef VBOX_WITH_KERNEL_USING_XMM
         memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
-# endif
-
-#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
-# ifdef VBOX_WITH_KERNEL_USING_XMM
-#  error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
-# endif
-        cpumR0SaveFPU(pCtx);
-        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
-        {
-            /* fxsave doesn't save the XMM state! */
-            cpumR0SaveXMM(pCtx);
-        }
-
-        /*
-         * Restore the original FPU control word and MXCSR.
-         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
-         */
-        cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
-        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
-            cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
-#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
+#endif
     }
@@ -591 +523 @@
 VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
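The core of the CPUMR0.cpp change is the split of the old lazy-load routine into CPUMR0Trap07Handler (which decides whether the #NM really belongs to the guest) and CPUMR0LoadGuestFPU (which saves the host FPU state and loads the guest's), plus a tidier EFER.FFXSR workaround: on hosts with Fast FXSAVE/FXRSTOR enabled, fxsave/fxrstor skip the XMM registers, so the bit is cleared around the state switch and written back only when it was actually changed. The sketch below illustrates that save/clear/restore pattern in isolation; the helper names, the fake MSR variable and main() are stand-ins for this example, not VirtualBox APIs (the real code uses ASMRdMsr/ASMWrMsr on MSR_K6_EFER).

    /* Minimal sketch of the EFER.FFXSR workaround around an FPU state switch.
     * Everything here is illustrative: a fake MSR variable stands in for the
     * real rdmsr/wrmsr so the example compiles and runs on its own. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_FFXSR  (UINT64_C(1) << 14)        /* Fast FXSAVE/FXRSTOR enable bit in EFER. */

    static uint64_t g_uFakeEfer = EFER_FFXSR;      /* Pretend the host runs with FFXSR set. */

    static uint64_t read_efer(void)            { return g_uFakeEfer; }
    static void     write_efer(uint64_t uEfer) { g_uFakeEfer = uEfer; }
    static void     load_guest_fpu_state(void) { /* fxrstor of the guest save area would go here. */ }

    static void load_guest_fpu_with_ffxsr_workaround(bool fHostHasFfxsr)
    {
        uint64_t uHostEfer    = 0;
        bool     fRestoreEfer = false;

        /* With FFXSR set, fxsave/fxrstor skip the XMM registers, so drop the
         * bit for the duration of the switch and remember that we did. */
        if (fHostHasFfxsr)
        {
            uHostEfer = read_efer();
            if (uHostEfer & EFER_FFXSR)
            {
                write_efer(uHostEfer & ~EFER_FFXSR);
                fRestoreEfer = true;
            }
        }

        load_guest_fpu_state();

        /* Put EFER back only if we actually changed it. */
        if (fRestoreEfer)
            write_efer(uHostEfer);
    }

    int main(void)
    {
        load_guest_fpu_with_ffxsr_workaround(true);
        printf("EFER after the switch: %#llx\n", (unsigned long long)g_uFakeEfer);
        return 0;
    }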
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
--- r48567
+++ r49019

@@ -67 +67 @@
 BEGINCODE
 
+;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
+; Cleans the FPU state, if necessary, before restoring the FPU.
+;
+; This macro ASSUMES CR0.TS is not set!
+; @remarks Trashes xAX!!
+; Changes here should also be reflected in CPUMAllA.asm's copy!
+%macro CLEANFPU 0
+    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
+    jz      .nothing_to_clean
+
+    xor     eax, eax
+    fnstsw  ax                      ; Get FSW
+    test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
+                                    ; while clearing & loading the FPU bits in 'clean_fpu'
+    jz      .clean_fpu
+    fnclex
+
+.clean_fpu:
+    ffree   st7                     ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
+                                    ; for the upcoming push (load)
+    fild    dword [xDX + CPUMCPU.Guest.fpu] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
+
+.nothing_to_clean:
+%endmacro
+
+;; Macro to save and modify CR0 (if necessary) before touching the FPU state
+; so as to not cause any FPU exceptions.
+;
+; @remarks Uses xCX for backing-up CR0 (if CR0 needs to be modified) otherwise clears xCX.
+; @remarks Trashes xAX.
+%macro SAVE_CR0_CLEAR_FPU_TRAPS 0
+    xor     ecx, ecx
+    mov     xAX, cr0
+    test    eax, X86_CR0_TS | X86_CR0_EM    ; Make sure its safe to access the FPU state.
+    jz      %%skip_cr0_write
+    mov     xCX, xAX                        ; Save old CR0
+    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
+    mov     cr0, xAX
+%%skip_cr0_write:
+%endmacro
+
+;; Macro to restore CR0 from xCX if necessary.
+;
+; @remarks xCX should contain the CR0 value to restore or 0 if no restoration is needed.
+%macro RESTORE_CR0 0
+    cmp     ecx, 0
+    je      %%skip_cr0_restore
+    mov     cr0, xCX
+%%skip_cr0_restore:
+%endmacro
 
 ;;
@@ -91 +141 @@
     or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
 
-    mov     xAX, cr0                ; Make sure its safe to access the FPU state.
-    mov     xCX, xAX                ; save old CR0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX                ;; @todo optimize this.
+    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
+    SAVE_CR0_CLEAR_FPU_TRAPS
+    ; Do NOT use xCX from this point!
 
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
@@ -129 +178 @@
 .done:
-    mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
+    ; Restore CR0 from xCX if it was previously saved.
+    RESTORE_CR0
     popf
     xor     eax, eax
@@ -167 +217 @@
     or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
 
-    mov     xAX, cr0                ; Make sure its safe to access the FPU state.
-    mov     xCX, xAX                ; save old CR0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX                ;; @todo optimize this.
+    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
+    SAVE_CR0_CLEAR_FPU_TRAPS
+    ; Do NOT use xCX from this point!
 
     fxsave  [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
 
-    mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
+    ; Restore CR0 from xCX if it was saved previously.
+    RESTORE_CR0
+
     popf
     xor     eax, eax
@@ -210 +261 @@
     cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
 
-    mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
-    mov     xCX, xAX                ; save old CR0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX                ;; @todo optimize this.
+    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
+    SAVE_CR0_CLEAR_FPU_TRAPS
+    ; Do NOT use xCX from this point!
 
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
@@ -233 +283 @@
 .done:
-    mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
+    ; Restore CR0 from xCX if it was previously saved.
+    RESTORE_CR0
     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
     popf
@@ -281 +332 @@
     cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
 
-    mov     xAX, cr0
-    mov     xCX, xAX                ; save old CR0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX
+    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
+    SAVE_CR0_CLEAR_FPU_TRAPS
+    ; Do NOT use xCX from this point!
 
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
@@ -301 +351 @@
 .done:
-    mov     cr0, xCX                ; and restore old CR0 again
+    ; Restore CR0 from xCX if it was previously saved.
+    RESTORE_CR0
     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
     popf
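The assembly change replaces four copies of the inline CR0 fiddling with the SAVE_CR0_CLEAR_FPU_TRAPS / RESTORE_CR0 pair, which also addresses the old "@todo optimize this" by skipping the CR0 writes entirely when TS and EM are already clear. A rough C rendering of the same logic is shown below; a fake CR0 variable stands in for the real register so the example compiles and runs, the bit positions are standard x86, and all names are invented for the illustration.

    /* C sketch of what the new macro pair does: clear CR0.TS/CR0.EM only when
     * one of them is set, remember the old value (xCX in the macros), and write
     * CR0 back only if it was touched. Not VirtualBox code. */
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_EM  (UINT64_C(1) << 2)   /* x86 CR0.EM */
    #define CR0_TS  (UINT64_C(1) << 3)   /* x86 CR0.TS */

    static uint64_t g_uFakeCr0 = CR0_TS;             /* Pretend TS is set on entry. */
    static uint64_t read_cr0(void)        { return g_uFakeCr0; }
    static void     write_cr0(uint64_t u) { g_uFakeCr0 = u; }

    /* SAVE_CR0_CLEAR_FPU_TRAPS: returns the old CR0 if it had to be modified, 0 otherwise. */
    static uint64_t save_cr0_clear_fpu_traps(void)
    {
        uint64_t uCr0 = read_cr0();
        if (!(uCr0 & (CR0_TS | CR0_EM)))
            return 0;                                /* Already safe to touch the FPU. */
        write_cr0(uCr0 & ~(CR0_TS | CR0_EM));
        return uCr0;
    }

    /* RESTORE_CR0: only write CR0 back when the save step actually changed it. */
    static void restore_cr0(uint64_t uSavedCr0)
    {
        if (uSavedCr0 != 0)
            write_cr0(uSavedCr0);
    }

    int main(void)
    {
        uint64_t uSaved = save_cr0_clear_fpu_traps();
        /* ... fxsave/fxrstor of the FPU state would happen here ... */
        restore_cr0(uSaved);
        printf("CR0 after restore: %#llx\n", (unsigned long long)g_uFakeCr0);
        return 0;
    }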
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- r49003
+++ r49019

@@ -4833 +4833 @@
     Assert(!pSvmTransient->fWasGuestFPUStateActive);
 #endif
-    /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
-    rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
+    rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
@@ -4843 +4842 @@
     if (rc == VINF_SUCCESS)
     {
+        /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
         VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r49004
+++ r49019

@@ -10472 +10472 @@
     Assert(!pVmxTransient->fWasGuestFPUStateActive);
 #endif
-    /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
-    rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
+    rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
@@ -10482 +10481 @@
     if (rc == VINF_SUCCESS)
     {
+        /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
         VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
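Both the SVM and VMX #NM exit handlers now funnel through CPUMR0Trap07Handler and use its status code to decide between reflecting the fault into the guest and continuing after the guest FPU state has been loaded, in which case CR0 is flagged as changed so the intercepts are refreshed before re-entry. The toy dispatch below sketches that control flow only; the enum, names, and printouts are placeholders, not the real VBox types or macros.

    /* Illustrative control flow for the #NM VM-exit path shared by the SVM and
     * VMX handlers after this change. Placeholder names throughout. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { STATUS_SUCCESS, STATUS_RAW_GUEST_TRAP } nm_status_t;

    /* Stand-in for CPUMR0Trap07Handler: the guest owns the fault only if its FPU
     * state is already active on the CPU. */
    static nm_status_t trap07_handler_sketch(bool fGuestFpuActive)
    {
        return fGuestFpuActive ? STATUS_RAW_GUEST_TRAP : STATUS_SUCCESS;
    }

    static void handle_nm_exit_sketch(bool fGuestFpuActive)
    {
        nm_status_t rc = trap07_handler_sketch(fGuestFpuActive);
        if (rc == STATUS_SUCCESS)
        {
            /* Guest FPU state is now loaded: mark CR0 as changed so the CR0
             * intercepts/shadow are re-evaluated before the next VM entry. */
            printf("guest FPU activated -> flag CR0 changed, resume guest\n");
        }
        else
        {
            /* The guest really owns this #NM: reflect the exception into the guest. */
            printf("reflect #NM to the guest\n");
        }
    }

    int main(void)
    {
        handle_nm_exit_sketch(false);   /* lazy-load path */
        handle_nm_exit_sketch(true);    /* guest trap path */
        return 0;
    }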