- Timestamp:
- Oct 19, 2007 4:45:07 PM (17 years ago)
- Location:
- trunk/src/VBox/VMM/VMMAll
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm
(r4071 → r5389; comment typo fixes only)

@@ -45,7 +45,7 @@
 
 ;;
-; Handles lazy FPU saveing and restoring.
+; Handles lazy FPU saving and restoring.
 ;
 ; This handler will implement lazy fpu (sse/mmx/stuff) saving.
…
@@ -67,7 +67,7 @@
 ; 2. Generate guest trap.
 ;
-; When entering the hypvervisor we'll always enable MP (for proper wait
+; When entering the hypervisor we'll always enable MP (for proper wait
 ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
 ; is taken from the guest OS in order to get proper SSE handling.
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r5285 → r5389; adds a doc comment and, in GC, lazy mirroring of the guest's TS/EM/MP bits into the hypervisor CR0)

@@ -441,8 +441,74 @@
 
 
+/**
+ * Set the guest CR0.
+ *
+ * When called in GC, the hyper CR0 may be updated if that is
+ * required. The caller only has to take special action if AM,
+ * WP, PG or PE changes.
+ *
+ * @returns VINF_SUCCESS (consider it void).
+ * @param   pVM     Pointer to the shared VM structure.
+ * @param   cr0     The new CR0 value.
+ */
 CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint32_t cr0)
 {
+#ifdef IN_GC
+    /*
+     * Check if we need to change hypervisor CR0 because
+     * of math stuff.
+     */
+    if (    (cr0                   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
+        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
+    {
+        if (!(pVM->cpum.s.fUseFlags & CPUM_USED_FPU))
+        {
+            /*
+             * We haven't saved the host FPU state yet, so TS and MT are both set
+             * and EM should be reflecting the guest EM (it always does this).
+             */
+            if ((cr0 & X86_CR0_EM) != (pVM->cpum.s.Guest.cr0 & X86_CR0_EM))
+            {
+                uint32_t HyperCR0 = ASMGetCR0();
+                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
+                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
+                HyperCR0 &= ~X86_CR0_EM;
+                HyperCR0 |= cr0 & X86_CR0_EM;
+                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
+                ASMSetCR0(HyperCR0);
+            }
+#ifdef VBOX_STRICT
+            else
+            {
+                uint32_t HyperCR0 = ASMGetCR0();
+                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
+                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
+            }
+#endif
+        }
+        else
+        {
+            /*
+             * Already saved the state, so we're just mirroring
+             * the guest flags.
+             */
+            uint32_t HyperCR0 = ASMGetCR0();
+            AssertMsg(    (HyperCR0                & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
+                      ==  (pVM->cpum.s.Guest.cr0   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
+                      ("%#x %#x\n", HyperCR0, pVM->cpum.s.Guest.cr0));
+            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
+            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
+            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
+            ASMSetCR0(HyperCR0);
+        }
+    }
+#endif
+
+    /*
+     * Check for changes causing TLB flushes (for REM).
+     * The caller is responsible for calling PGM when appropriate.
+     */
     if (    (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
         !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
         pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
     pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;
+
     pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
     return VINF_SUCCESS;
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
(r5384 → r5389; ring-3 reschedule is now only forced for AM/WP changes — the TS/EM/MP hyper-CR0 sync moved into CPUMSetGuestCR0)

@@ -1526,7 +1526,7 @@
 #ifdef IN_GC
     /* Need to change the hyper CR0? Doing it the lazy way then. */
-    if (    (OldCr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | X86_CR0_AM | X86_CR0_WP))
-        !=  (NewCr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | X86_CR0_AM | X86_CR0_WP)))
+    if (    (OldCr0 & (X86_CR0_AM | X86_CR0_WP))
+        !=  (NewCr0 & (X86_CR0_AM | X86_CR0_WP)))
     {
         Log(("EMInterpretLMSW: CR0: %#x->%#x => R3\n", OldCr0, NewCr0));
…
@@ -1547,13 +1547,7 @@
 EMDECL(int) EMInterpretCLTS(PVM pVM)
 {
-    uint32_t Cr0 = CPUMGetGuestCR0(pVM);
-    if (!(Cr0 & X86_CR0_TS))
+    uint32_t cr0 = CPUMGetGuestCR0(pVM);
+    if (!(cr0 & X86_CR0_TS))
         return VINF_SUCCESS;
-
-#ifdef IN_GC
-    /* Need to change the hyper CR0? Doing it the lazy way then. */
-    Log(("EMInterpretCLTS: CR0: %#x->%#x => R3\n", Cr0, Cr0 & ~X86_CR0_TS));
-    VM_FF_SET(pVM, VM_FF_TO_R3);
-#endif
-    return CPUMSetGuestCR0(pVM, Cr0 & ~X86_CR0_TS);
+    return CPUMSetGuestCR0(pVM, cr0 & ~X86_CR0_TS);
 }
…
@@ -1588,22 +1582,13 @@
     oldval = CPUMGetGuestCR0(pVM);
 #ifndef IN_RING3
-    /* CR0.WP changes require a reschedule run in ring 3. */
-    if ((val32 & X86_CR0_WP) != (oldval & X86_CR0_WP))
-        return VERR_EM_INTERPRETER;
-#endif
-    rc = CPUMSetGuestCR0(pVM, val32); AssertRC(rc); /** @todo CPUSetGuestCR0 stuff should be void, this is silly. */
+    /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
+    if (    (val32  & (X86_CR0_WP | X86_CR0_AM))
+        !=  (oldval & (X86_CR0_WP | X86_CR0_AM)))
+        return VERR_EM_INTERPRETER;
+#endif
+    CPUMSetGuestCR0(pVM, val32);
     val32 = CPUMGetGuestCR0(pVM);
-    if (    (oldval & (X86_CR0_PG|X86_CR0_WP|X86_CR0_PE))
-        !=  (val32  & (X86_CR0_PG|X86_CR0_WP|X86_CR0_PE)))
+    if (    (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
+        !=  (val32  & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
     {
         /* global flush */
         …
         AssertRCReturn(rc, rc);
     }
-#ifdef IN_GC
-    /* Feeling extremely lazy. */
-    if (    (oldval & (X86_CR0_TS|X86_CR0_EM|X86_CR0_MP|X86_CR0_AM))
-        !=  (val32  & (X86_CR0_TS|X86_CR0_EM|X86_CR0_MP|X86_CR0_AM)))
-    {
-        Log(("emInterpretMovCRx: CR0: %#x->%#x => R3\n", oldval, val32));
-        VM_FF_SET(pVM, VM_FF_TO_R3);
-    }
-#endif
     return PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR4(pVM), 0);
Note:
See TracChangeset
for help on using the changeset viewer.