Changeset 14870 in vbox for trunk/src/VBox
- Timestamp: Dec 1, 2008, 3:28:54 PM
- Location: trunk/src/VBox/VMM
- Files: 1 added, 6 edited
trunk/src/VBox/VMM/CPUMInternal.h
(diff r14859 -> r14870)

 DECLASM(int) CPUMHandleLazyFPUAsm(PCPUMCPU pCPUM);
-DECLASM(int) CPUMSaveGuestRestoreHostFPUStateAsm(PCPUMCPU pCPUM);
-DECLASM(int) CPUMRestoreHostFPUStateAsm(PCPUMCPU pCPUM);
-DECLASM(void) CPUMLoadFPUAsm(PCPUMCTX pCtx);
-DECLASM(void) CPUMSaveFPUAsm(PCPUMCTX pCtx);
-DECLASM(void) CPUMLoadXMMAsm(PCPUMCTX pCtx);
-DECLASM(void) CPUMSaveXMMAsm(PCPUMCTX pCtx);
-DECLASM(void) CPUMSetFCW(uint16_t u16FCW);
-DECLASM(uint16_t) CPUMGetFCW();
-DECLASM(void) CPUMSetMXCSR(uint32_t u32MXCSR);
-DECLASM(uint32_t) CPUMGetMXCSR();
+
+#ifdef IN_RING0
+DECLASM(int) CPUMR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
+DECLASM(int) CPUMR0RestoreHostFPUState(PCPUMCPU pCPUM);
+DECLASM(void) CPUMR0LoadFPU(PCPUMCTX pCtx);
+DECLASM(void) CPUMR0SaveFPU(PCPUMCTX pCtx);
+DECLASM(void) CPUMR0LoadXMM(PCPUMCTX pCtx);
+DECLASM(void) CPUMR0SaveXMM(PCPUMCTX pCtx);
+DECLASM(void) CPUMR0SetFCW(uint16_t u16FCW);
+DECLASM(uint16_t) CPUMR0GetFCW();
+DECLASM(void) CPUMR0SetMXCSR(uint32_t u32MXCSR);
+DECLASM(uint32_t) CPUMR0GetMXCSR();
+#endif

 __END_DECLS
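The assembly helpers are thus both renamed with a CPUMR0 prefix and fenced off with #ifdef IN_RING0, so ring-3 and raw-mode code can no longer reference them. A minimal standalone sketch of that visibility pattern (illustration only, not VirtualBox code; it assumes the usual convention that IN_RING0 is defined only when compiling ring-0 sources, and the names below are hypothetical):

    /* Compile with -DIN_RING0 for the "ring-0" variant; without it the helper
     * and its call site simply do not exist in the translation unit. */
    #ifdef IN_RING0
    static void OnlyRing0Helper(void)
    {
        /* ring-0-only work would go here */
    }
    #endif

    void SomeCaller(void)
    {
    #ifdef IN_RING0
        OnlyRing0Helper();   /* present only in the ring-0 build */
    #endif
    }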
trunk/src/VBox/VMM/Makefile.kmk
(diff r14833 -> r14870)

 VMMR0_SOURCES = \
 	VMMR0/CPUMR0.cpp \
+	VMMR0/CPUMR0A.asm \
 	VMMR0/DBGFR0.cpp \
 	VMMR0/GMMR0.cpp \
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm
(diff r14859 -> r14870; the hunk below only removes code from this file)

-;;
-; Restores the host's FPU/XMM state
-;
-; @returns 0
-; @param  pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
-;
-align 16
-BEGINPROC CPUMSaveGuestRestoreHostFPUStateAsm
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
- %else
-    mov     xDX, rdi
- %endif
-%else
-    mov     xDX, dword [esp + 4]
-%endif
-
-    ; Restore FPU if guest has used it.
-    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
-    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz short gth_fpu_no
-
-    mov     xAX, cr0
-    mov     xCX, xAX                    ; save old CR0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX
-
-    fxsave  [xDX + CPUMCPU.Guest.fpu]
-    fxrstor [xDX + CPUMCPU.Host.fpu]
-
-    mov     cr0, xCX                    ; and restore old CR0 again
-    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-gth_fpu_no:
-    xor     eax, eax
-    ret
-ENDPROC   CPUMSaveGuestRestoreHostFPUStateAsm
-
-;;
-; Sets the host's FPU/XMM state
-;
-; @returns 0
-; @param  pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
-;
-align 16
-BEGINPROC CPUMRestoreHostFPUStateAsm
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
- %else
-    mov     xDX, rdi
- %endif
-%else
-    mov     xDX, dword [esp + 4]
-%endif
-
-    ; Restore FPU if guest has used it.
-    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
-    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz short gth_fpu_no_2
-
-    mov     xAX, cr0
-    mov     xCX, xAX                    ; save old CR0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX
-
-    fxrstor [xDX + CPUMCPU.Host.fpu]
-
-    mov     cr0, xCX                    ; and restore old CR0 again
-    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-gth_fpu_no_2:
-    xor     eax, eax
-    ret
-ENDPROC   CPUMRestoreHostFPUStateAsm
-
-;;
-; Restores the guest's FPU/XMM state
-;
-; @param  pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
-;
-align 16
-BEGINPROC CPUMLoadFPUAsm
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
- %else
-    mov     xDX, rdi
- %endif
-%else
-    mov     xDX, dword [esp + 4]
-%endif
-    fxrstor [xDX + CPUMCTX.fpu]
-    ret
-ENDPROC   CPUMLoadFPUAsm
-
-
-;;
-; Restores the guest's FPU/XMM state
-;
-; @param  pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
-;
-align 16
-BEGINPROC CPUMSaveFPUAsm
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
- %else
-    mov     xDX, rdi
- %endif
-%else
-    mov     xDX, dword [esp + 4]
-%endif
-    fxsave  [xDX + CPUMCTX.fpu]
-    ret
-ENDPROC   CPUMSaveFPUAsm
-
-
-;;
-; Restores the guest's XMM state
-;
-; @param  pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
-;
-align 16
-BEGINPROC CPUMLoadXMMAsm
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
- %else
-    mov     xDX, rdi
- %endif
-%else
-    mov     xDX, dword [esp + 4]
-%endif
-    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
-    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
-    movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
-    movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
-    movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
-    movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
-    movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
-    movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
-
-%ifdef RT_ARCH_AMD64
-    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-    jz      CPUMLoadXMMAsm_done
-
-    movdqa  xmm8,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
-    movdqa  xmm9,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
-    movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
-    movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
-    movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
-    movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
-    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
-    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
-CPUMLoadXMMAsm_done:
-%endif
-
-    ret
-ENDPROC   CPUMLoadXMMAsm
-
-
-;;
-; Restores the guest's XMM state
-;
-; @param  pCtx  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCTX pointer
-;
-align 16
-BEGINPROC CPUMSaveXMMAsm
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
- %else
-    mov     xDX, rdi
- %endif
-%else
-    mov     xDX, dword [esp + 4]
-%endif
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
-
-%ifdef RT_ARCH_AMD64
-    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-    jz      CPUMSaveXMMAsm_done
-
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
-    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
-
-CPUMSaveXMMAsm_done:
-%endif
-    ret
-ENDPROC   CPUMSaveXMMAsm
-
-
-;;
-; Set the FPU control word; clearing exceptions first
-;
-; @param  u16FCW  x86:[esp+4] GCC:rdi MSC:rcx  New FPU control word
-align 16
-BEGINPROC CPUMSetFCW
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xAX, rcx
- %else
-    mov     xAX, rdi
- %endif
-%else
-    mov     xAX, dword [esp + 4]
-%endif
-    fnclex
-    push    xAX
-    fldcw   [xSP]
-    pop     xAX
-    ret
-ENDPROC   CPUMSetFCW
-
-
-;;
-; Get the FPU control word
-;
-align 16
-BEGINPROC CPUMGetFCW
-    fnstcw  [xSP - 8]
-    mov     ax, word [xSP - 8]
-    ret
-ENDPROC   CPUMGetFCW
-
-
-;;
-; Set the MXCSR;
-;
-; @param  u32MXCSR  x86:[esp+4] GCC:rdi MSC:rcx  New MXCSR
-align 16
-BEGINPROC CPUMSetMXCSR
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-    mov     xAX, rcx
- %else
-    mov     xAX, rdi
- %endif
-%else
-    mov     xAX, dword [esp + 4]
-%endif
-    push    xAX
-    ldmxcsr [xSP]
-    pop     xAX
-    ret
-ENDPROC   CPUMSetMXCSR
-
-
-;;
-; Get the MXCSR
-;
-align 16
-BEGINPROC CPUMGetMXCSR
-    stmxcsr [xSP - 8]
-    mov     eax, dword [xSP - 8]
-    ret
-ENDPROC   CPUMGetMXCSR
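The last four routines removed above are thin wrappers around single control-register instructions. As a standalone illustration of what they do (not VirtualBox code; it assumes a GCC/Clang toolchain on x86 or x86-64, and the helper names below are made up), the same fnstcw/fldcw and stmxcsr/ldmxcsr round-trips can be written in C with inline assembly:

    /* Illustration of the FCW/MXCSR get/set helpers above. fnclex clears any
     * pending x87 exceptions before the new control word is loaded, matching
     * the "clearing exceptions first" comment on CPUMSetFCW. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t GetFCW(void)
    {
        uint16_t fcw;
        __asm__ __volatile__("fnstcw %0" : "=m" (fcw));
        return fcw;
    }

    static void SetFCW(uint16_t fcw)
    {
        __asm__ __volatile__("fnclex; fldcw %0" : : "m" (fcw));
    }

    static uint32_t GetMXCSR(void)
    {
        uint32_t mxcsr;
        __asm__ __volatile__("stmxcsr %0" : "=m" (mxcsr));
        return mxcsr;
    }

    static void SetMXCSR(uint32_t mxcsr)
    {
        __asm__ __volatile__("ldmxcsr %0" : : "m" (mxcsr));
    }

    int main(void)
    {
        printf("FCW=%#06x MXCSR=%#010x\n", (unsigned)GetFCW(), (unsigned)GetMXCSR());
        SetFCW(GetFCW());         /* round trip, state unchanged */
        SetMXCSR(GetMXCSR());
        return 0;
    }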
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(diff r14859 -> r14870)

 }

-
-/**
- * Restore host FPU/XMM state
- *
- * @returns VBox status code.
- * @param   pVM     VM handle.
- * @param   pVCpu   VMCPU handle
- */
-VMMDECL(int) CPUMSaveGuestRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
-{
-    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
-    return CPUMSaveGuestRestoreHostFPUStateAsm(&pVCpu->cpum.s);
-}
-
-/**
- * Set host FPU/XMM state
- *
- * @returns VBox status code.
- * @param   pVM     VM handle.
- * @param   pVCpu   VMCPU handle
- */
-VMMDECL(int) CPUMRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
-{
-    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
-    return CPUMRestoreHostFPUStateAsm(&pVCpu->cpum.s);
-}
-
 #endif /* !IN_RING3 */
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(diff r14859 -> r14870)

     pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();

-    CPUMLoadFPUAsm(pCtx);
+    CPUMR0LoadFPU(pCtx);

     /*
…
     {
         /* fxrstor doesn't restore the XMM state! */
-        CPUMLoadXMMAsm(pCtx);
+        CPUMR0LoadXMM(pCtx);
         pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
     }
…
         Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
         HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
-        CPUMRestoreHostFPUState(pVCpu);
+        CPUMR0RestoreHostFPUState(&pVCpu->cpum.s);
     }
     else
…
             ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
         }
-        CPUMSaveGuestRestoreHostFPUState(pVM, pVCpu);
+        CPUMR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);

         /* Restore EFER MSR */
…

 #else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
-    CPUMSaveFPUAsm(pCtx);
+    CPUMR0SaveFPU(pCtx);
     if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     {
         /* fxsave doesn't save the XMM state! */
-        CPUMSaveXMMAsm(pCtx);
+        CPUMR0SaveXMM(pCtx);
     }
…
      * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
      */
-    CPUMSetFCW(pVCpu->cpum.s.Host.fpu.FCW);
+    CPUMR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
     if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
-        CPUMSetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
+        CPUMR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
 #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
 }
…
 }

+
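The CPUMR0LoadFPU/CPUMR0SaveFPU style helpers called above come down to fxsave/fxrstor on a 512-byte, 16-byte-aligned save area. A minimal user-mode sketch of that mechanism (GCC/Clang inline assembly on x86/x86-64; illustration only, since the real ring-0 code additionally clears CR0.TS/CR0.EM and tracks the CPUM_USED_FPU flag, as the removed assembly above shows):

    /* Save the current FPU/SSE state, disturb it, then restore it from the
     * fxsave image -- the same save/restore pair the ring-0 helpers perform
     * on the guest and host state areas. */
    #include <stdio.h>

    struct __attribute__((aligned(16))) FXSAVEAREA { unsigned char ab[512]; };
    static struct FXSAVEAREA g_SavedState;

    int main(void)
    {
        __asm__ __volatile__("fxsave %0" : "=m" (g_SavedState));

        volatile double d = 1.0 / 3.0;   /* touch the FPU/SSE state */
        (void)d;

        __asm__ __volatile__("fxrstor %0" : : "m" (g_SavedState));
        printf("FPU/SSE state saved and restored via a %u-byte area\n",
               (unsigned)sizeof(g_SavedState));
        return 0;
    }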
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
(diff r14859 -> r14870)

 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
 /**
- * Save guest FPU/XMM state
+ * Save guest FPU/XMM state (64 bits guest mode & 32 bits host only)
  *
  * @returns VBox status code.
…
 VMMR0DECL(int) HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    if (pVM->hwaccm.s.vmx.fSupported)
+    {
+    }
+    else
+    {
+    }
     return VINF_SUCCESS;
 }
…
 VMMR0DECL(int) HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
 {
+    if (pVM->hwaccm.s.vmx.fSupported)
+    {
+    }
+    else
+    {
+    }
     return VINF_SUCCESS;
 }
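The bodies added to HWACCMR0SaveFPUState and HWACCMR0SaveDebugState are still empty placeholders: they only introduce a dispatch on pVM->hwaccm.s.vmx.fSupported, i.e. a VT-x branch and, presumably, an AMD-V branch for the 32-bit-host / 64-bit-guest case. A hedged standalone sketch of that shape (the names below are hypothetical, not the VirtualBox API):

    /* Placeholder dispatch pattern mirroring the hunks above; the real code
     * receives PVM/PVMCPU/PCPUMCTX and returns a VBox status code. */
    int SaveGuestState64(int fVmxInUse)
    {
        if (fVmxInUse)
        {
            /* VT-x specific 64-bit guest state handling would go here. */
        }
        else
        {
            /* AMD-V specific handling would go here. */
        }
        return 0;   /* VINF_SUCCESS */
    }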