Changeset 14859 in vbox for trunk/src/VBox
- Timestamp: Dec 1, 2008 2:01:55 PM
- Location: trunk/src/VBox/VMM
- Files: 7 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with '+'
- Removed: prefixed with '-'
trunk/src/VBox/VMM/CPUMInternal.h
(r14785 → r14859)

      /** The XMM state was manually restored. (AMD only) */
      #define CPUM_MANUAL_XMM_RESTORE     RT_BIT(6)
+     /** Sync the FPU state on entry (32->64 switcher only). */
+     #define CPUM_SYNC_FPU_STATE         RT_BIT(7)
+     /** Sync the debug state on entry (32->64 switcher only). */
+     #define CPUM_SYNC_DEBUG_STATE       RT_BIT(8)
      /** @} */

  …

      DECLASM(int)  CPUMHandleLazyFPUAsm(PCPUMCPU pCPUM);
+     DECLASM(int)  CPUMSaveGuestRestoreHostFPUStateAsm(PCPUMCPU pCPUM);
      DECLASM(int)  CPUMRestoreHostFPUStateAsm(PCPUMCPU pCPUM);
      DECLASM(void) CPUMLoadFPUAsm(PCPUMCTX pCtx);
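The two new flags implement a deferral: on a 32-bit host running a 64-bit guest, the full FPU or debug state can only be touched once the 32->64 switcher is back in 64-bit mode, so the flag is set now and consumed on entry. A minimal, self-contained C sketch of this set-then-consume flag pattern (all names here are illustrative, not VirtualBox source):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_BIT(n)        (UINT32_C(1) << (n))
    #define SYNC_FPU_STATE   MY_BIT(7)   /* mirrors CPUM_SYNC_FPU_STATE */
    #define SYNC_DEBUG_STATE MY_BIT(8)   /* mirrors CPUM_SYNC_DEBUG_STATE */

    typedef struct { uint32_t fUseFlags; } VCPUSTATE;

    /* The 32-bit caller cannot touch the 64-bit guest state now; schedule it. */
    static void scheduleFpuSync(VCPUSTATE *pState)
    {
        pState->fUseFlags |= SYNC_FPU_STATE;
    }

    /* Switcher entry, now running in 64-bit mode: consume the flag. */
    static void onSwitcherEntry(VCPUSTATE *pState)
    {
        if (pState->fUseFlags & SYNC_FPU_STATE)
        {
            /* ...the deferred FPU state load would happen here... */
            pState->fUseFlags &= ~SYNC_FPU_STATE;
        }
    }

    int main(void)
    {
        VCPUSTATE st = { 0 };
        scheduleFpuSync(&st);
        onSwitcherEntry(&st);
        printf("flags after entry: %#x\n", (unsigned)st.fUseFlags);  /* 0 */
        return 0;
    }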
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm
(r13960 → r14859)

      ;
      align 16
- BEGINPROC CPUMRestoreHostFPUStateAsm
+ BEGINPROC CPUMSaveGuestRestoreHostFPUStateAsm
  %ifdef RT_ARCH_AMD64
   %ifdef RT_OS_WINDOWS

  …

          xor     eax, eax
          ret
+ ENDPROC CPUMSaveGuestRestoreHostFPUStateAsm
+
+ ;;
+ ; Sets the host's FPU/XMM state
+ ;
+ ; @returns  0
+ ; @param    pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
+ ;
+ align 16
+ BEGINPROC CPUMRestoreHostFPUStateAsm
+ %ifdef RT_ARCH_AMD64
+  %ifdef RT_OS_WINDOWS
+     mov     xDX, rcx
+  %else
+     mov     xDX, rdi
+  %endif
+ %else
+     mov     xDX, dword [esp + 4]
+ %endif
+
+     ; Restore FPU if guest has used it.
+     ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
+     test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
+     jz short gth_fpu_no_2
+
+     mov     xAX, cr0
+     mov     xCX, xAX                    ; save old CR0
+     and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
+     mov     cr0, xAX
+
+     fxrstor [xDX + CPUMCPU.Host.fpu]
+
+     mov     cr0, xCX                    ; and restore old CR0 again
+     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
+ gth_fpu_no_2:
+     xor     eax, eax
+     ret
  ENDPROC CPUMRestoreHostFPUStateAsm

  ;;
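The new CPUMRestoreHostFPUStateAsm body briefly clears CR0.TS and CR0.EM so the fxrstor cannot raise #NM or #UD in ring-0, then puts the original CR0 back so the host OS's lazy-FPU bookkeeping is undisturbed. A user-space C simulation of that sequence (CR0 is modelled as a plain variable here, since real control-register access is privileged):

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_TS  UINT32_C(0x00000008)  /* task-switched: next FPU insn raises #NM */
    #define X86_CR0_EM  UINT32_C(0x00000004)  /* emulate-FPU: FPU insns raise #UD */

    static uint32_t g_cr0 = X86_CR0_TS;       /* pretend the host left TS set */

    static void restoreHostFpu(void)
    {
        uint32_t oldCr0 = g_cr0;              /* save old CR0 */
        g_cr0 &= ~(X86_CR0_TS | X86_CR0_EM);  /* make the fxrstor fault-free */
        /* fxrstor [CPUMCPU.Host.fpu] would execute here without #NM/#UD */
        g_cr0 = oldCr0;                       /* hand CR0 back exactly as found */
    }

    int main(void)
    {
        restoreHostFpu();
        printf("CR0 after restore: %#x\n", (unsigned)g_cr0);  /* TS still set */
        return 0;
    }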
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r14704 → r14859)

       * @param   pVCpu       VMCPU handle
       */
+ VMMDECL(int) CPUMSaveGuestRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
+ {
+     Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
+     return CPUMSaveGuestRestoreHostFPUStateAsm(&pVCpu->cpum.s);
+ }
+
+ /**
+  * Set host FPU/XMM state
+  *
+  * @returns VBox status code.
+  * @param   pVM         VM handle.
+  * @param   pVCpu       VMCPU handle
+  */
  VMMDECL(int) CPUMRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
  {
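The new wrapper asserts FXSR support (a cached CPUID feature bit) before reaching the fxsave/fxrstor-based assembly. The same feature, CPUID leaf 1, EDX bit 24, can be checked from user space; a runnable example using GCC/Clang's cpuid.h:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;                                  /* CPUID leaf 1 unavailable */
        /* CPUID.01H:EDX bit 24 = FXSR: fxsave/fxrstor are supported. */
        printf("FXSR supported: %s\n", (edx & (1u << 24)) ? "yes" : "no");
        return 0;
    }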
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(r13960 → r14859)

(Unmarked lines inside the new braces are pre-existing code that the changeset merely re-indents.)

  }

+ #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+     if (CPUMIsGuestInLongModeEx(pCtx))
+     {
+         /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
+         pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
+     }
+     else
+ #endif
+     {
  #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
          uint64_t oldMsrEFERHost;
          uint32_t oldCR0 = ASMGetCR0();

          /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
          if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
          {
              /** @todo Do we really need to read this every time?? The host could change this on the fly though.
               * bird: what about starting by skipping the ASMWrMsr below if we didn't
               * change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
              oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
              if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
              {
                  ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
                  pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
              }
          }

          /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
          int rc = CPUMHandleLazyFPU(pVM, pVCpu);
          AssertRC(rc);
          Assert(CPUMIsGuestFPUStateActive(pVCpu));

          /* Restore EFER MSR */
          if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
              ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);

          /* CPUMHandleLazyFPU could have changed CR0; restore it. */
          ASMSetCR0(oldCR0);

  #else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */

          /*
           * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
           * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
           */
          pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
          if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
              pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();

          CPUMLoadFPUAsm(pCtx);

          /*
           * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
           *
           * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstore: the XMM state isn't saved/restored
           */
          if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
          {
              /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
              uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);

              if (msrEFERHost & MSR_K6_EFER_FFXSR)
              {
                  /* fxrstor doesn't restore the XMM state! */
                  CPUMLoadXMMAsm(pCtx);
                  pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
              }
          }
  #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
+     }

      pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU;

  …

      AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);

+ #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+     if (CPUMIsGuestInLongModeEx(pCtx))
+     {
+         Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
+         HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
+         CPUMRestoreHostFPUState(pVCpu);
+     }
+     else
+ #endif
+     {
  #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
          uint64_t oldMsrEFERHost;

          /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
          if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
          {
              oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
              ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
          }
-         CPUMRestoreHostFPUState(pVM, pVCpu);
+         CPUMSaveGuestRestoreHostFPUState(pVM, pVCpu);

          /* Restore EFER MSR */
          if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
              ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);

  #else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
          CPUMSaveFPUAsm(pCtx);
          if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
          {
              /* fxsave doesn't save the XMM state! */
              CPUMSaveXMMAsm(pCtx);
          }

          /*
           * Restore the original FPU control word and MXCSR.
           * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
           */
          CPUMSetFCW(pVCpu->cpum.s.Host.fpu.FCW);
          if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
              CPUMSetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
  #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
+     }

      pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_MANUAL_XMM_RESTORE);

  …

      /* Save the guest's debug state. The caller is responsible for DR7. */
+ #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+     if (CPUMIsGuestInLongModeEx(pCtx))
+     {
+         Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_STATE));
+         HWACCMR0SaveDebugState(pVM, pVCpu, pCtx, fDR6);
+     }
+     else
+ #endif
+     {
          pCtx->dr[0] = ASMGetDR0();
          pCtx->dr[1] = ASMGetDR1();
          pCtx->dr[2] = ASMGetDR2();
          pCtx->dr[3] = ASMGetDR3();
          if (fDR6)
              pCtx->dr[6] = ASMGetDR6();
+     }

  …

      /* Activate the guest state DR0-3; DR7 is left to the caller. */
+ #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+     if (CPUMIsGuestInLongModeEx(pCtx))
+     {
+         /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
+         pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_STATE;
+     }
+     else
+ #endif
+     {
          ASMSetDR0(pCtx->dr[0]);
          ASMSetDR1(pCtx->dr[1]);
          ASMSetDR2(pCtx->dr[2]);
          ASMSetDR3(pCtx->dr[3]);
          if (fDR6)
              ASMSetDR6(pCtx->dr[6]);
+     }

      pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
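Both #ifndef branches above work around AMD's EFER.FFXSR ("fast FXSAVE/FXRSTOR"), which makes fxsave/fxrstor skip the XMM registers: the MSR bit is cleared around the operation and restored afterwards. A user-space simulation of that read-clear-restore dance (MSR access is privileged, so the MSR is modelled as a variable; names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_FFXSR  (UINT64_C(1) << 14)   /* fast FXSAVE/FXRSTOR: XMM regs skipped */

    static uint64_t g_msrEfer = EFER_FFXSR;   /* pretend the host enabled the feature */

    static uint64_t rdmsrEfer(void)           { return g_msrEfer; }
    static void     wrmsrEfer(uint64_t uVal)  { g_msrEfer = uVal; }

    /* With FFXSR clear, fxsave stores x87 *and* XMM state. */
    static void fxsaveAll(bool *pfXmmSaved)
    {
        *pfXmmSaved = !(g_msrEfer & EFER_FFXSR);
    }

    int main(void)
    {
        bool     fXmmSaved;
        uint64_t uOldEfer = rdmsrEfer();
        if (uOldEfer & EFER_FFXSR)
            wrmsrEfer(uOldEfer & ~EFER_FFXSR);  /* disable around the state save */
        fxsaveAll(&fXmmSaved);
        wrmsrEfer(uOldEfer);                    /* put the host's setting back */
        printf("XMM state captured: %s\n", fXmmSaved ? "yes" : "no");
        return 0;
    }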
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
(r14845 → r14859)

      return rc;
  }
+
+
+ #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+ /**
+  * Save guest FPU/XMM state
+  *
+  * @returns VBox status code.
+  * @param   pVM         VM handle.
+  * @param   pVCpu       VMCPU handle.
+  * @param   pCtx        CPU context
+  */
+ VMMR0DECL(int) HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+ {
+     return VINF_SUCCESS;
+ }
+
+ /**
+  * Save guest debug state (64 bits guest mode & 32 bits host only)
+  *
+  * @returns VBox status code.
+  * @param   pVM         VM handle.
+  * @param   pVCpu       VMCPU handle.
+  * @param   pCtx        CPU context
+  * @param   fDR6        Include DR6 or not
+  */
+ VMMR0DECL(int) HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
+ {
+     return VINF_SUCCESS;
+ }
+ #endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */

  /**
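These two entry points exist only in builds for a 32-bit host with 64-bit guest support, gated on HC_ARCH_BITS and VBOX_WITH_64_BITS_GUESTS, and for now they are stubs returning VINF_SUCCESS. A minimal sketch of compiling an API in or out with such preprocessor gates (macro names here are made up, not the VirtualBox build flags):

    #include <stdio.h>

    /* Pretend build configuration; the real flags come from the build system. */
    #define HOST_BITS           32
    #define WITH_64_BIT_GUESTS  1

    #if HOST_BITS == 32 && defined(WITH_64_BIT_GUESTS)
    /* Only 32-bit hosts running 64-bit guests get this helper compiled in. */
    static int saveWideGuestState(void)
    {
        return 0;   /* stub, like the new HWACCMR0 entry points */
    }
    #endif

    int main(void)
    {
    #if HOST_BITS == 32 && defined(WITH_64_BIT_GUESTS)
        printf("helper compiled in, rc=%d\n", saveWideGuestState());
    #else
        printf("helper compiled out\n");
    #endif
        return 0;
    }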
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
(r14580 → r14859)

  DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx)
  {
+     /* @todo This code is not guest SMP safe (hyper context) */
+     AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
      return VERR_NOT_IMPLEMENTED;
  }
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r14845 → r14859)

      RTHCPHYS pPageCpuPhys;

+     /* @todo This code is not guest SMP safe (hyper context) */
+     AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
+
      pCpu = HWACCMR0GetCurrentCpuEx(pVCpu->idCpu);
      pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
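Both 64-bit switcher run functions now refuse to execute for SMP guests, since the hyper context they use is kept per VM rather than per VCPU. A self-contained sketch of this AssertReturn-style guard clause (the macro and error value below are simplified stand-ins for the IPRT originals):

    #include <stdio.h>

    #define ERR_ACCESS_DENIED  (-38)   /* illustrative stand-in for a VERR_ value */

    /* Simplified stand-in for IPRT's AssertReturn: bail out with rc on failure. */
    #define ASSERT_RETURN(expr, rc) do { if (!(expr)) return (rc); } while (0)

    struct vm { unsigned cCpus; };

    static int switcherRun64(const struct vm *pVM)
    {
        /* The 64-bit switcher keeps per-VM hyper context, so it is not safe
         * with more than one guest CPU; refuse up front. */
        ASSERT_RETURN(pVM->cCpus == 1, ERR_ACCESS_DENIED);
        return 0;
    }

    int main(void)
    {
        struct vm uni = { 1 }, smp = { 2 };
        printf("uniprocessor: %d, SMP: %d\n", switcherRun64(&uni), switcherRun64(&smp));
        return 0;
    }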