Changeset 43379 in vbox for trunk/src/VBox/VMM
- Timestamp: Sep 20, 2012 11:29:12 PM (12 years ago)
- svn:sync-xref-src-repo-rev: 80845
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r43376 → r43379)

  * @returns VBox status code.
  * @param   pVM             Pointer to the VM.
+ * @param   enmSwitcher     The switcher we're about to use.
  * @param   pfVTxDisabled   Where to store whether VT-x was disabled or not.
  */
-VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
+VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled)
 {
     Assert(!(ASMGetFlags() & X86_EFL_IF) || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
     *pfVTxDisabled = false;

-    if (   !g_HvmR0.fEnabled
-        || !g_HvmR0.vmx.fSupported      /* no such issues with AMD-V */
-        || !g_HvmR0.fGlobalInit         /* Local init implies the CPU is currently not in VMX root mode. */)
-        return VINF_SUCCESS;    /* nothing to do */
-
-    switch (VMMGetSwitcher(pVM))
+    /* No such issues with AMD-V */
+    if (!g_HvmR0.vmx.fSupported)
+        return VINF_SUCCESS;
+
+    /* Check if the switching we're up to is safe. */
+    switch (enmSwitcher)
     {
         case VMMSWITCHER_32_TO_32:
…
     }

+    /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
+       regardless of whether we're currently using VT-x or not. */
+    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    {
+        *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
+        return VINF_SUCCESS;
+    }
+
+    /** @todo Check if this code is presumptive wrt other VT-x users on the
+     *        system... */
+
+    /* Nothing to do if we haven't enabled VT-x. */
+    if (!g_HvmR0.fEnabled)
+        return VINF_SUCCESS;
+
+    /* Local init implies the CPU is currently not in VMX root mode. */
+    if (!g_HvmR0.fGlobalInit)
+        return VINF_SUCCESS;
+
+    /* Ok, disable VT-x. */
     PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
     AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);
…
  * switcher turned off paging.
  *
- * @returns VBox status code.
  * @param   pVM             Pointer to the VM.
  * @param   fVTxDisabled    Whether VT-x was disabled or not.
  */
-VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
+VMMR0DECL(void) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
 {
     Assert(!(ASMGetFlags() & X86_EFL_IF));

     if (!fVTxDisabled)
-        return VINF_SUCCESS;    /* nothing to do */
-
-    Assert(g_HvmR0.fEnabled);
+        return;         /* nothing to do */
+
     Assert(g_HvmR0.vmx.fSupported);
-    Assert(g_HvmR0.fGlobalInit);
-
-    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
-    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);
-
-    void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
-    RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-    return VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+        SUPR0ResumeVTxOnCpu(fVTxDisabled);
+    else
+    {
+        Assert(g_HvmR0.fEnabled);
+        Assert(g_HvmR0.fGlobalInit);
+
+        PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
+        AssertReturnVoid(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ);
+
+        void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
+        RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
+        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+    }
 }
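The notable behavioural change in HWACCMR0EnterSwitcher is the new fUsingSUPR0EnableVTx branch: instead of tearing VT-x down itself, the code asks the support driver to suspend it and only records, via *pfVTxDisabled, whether anything has to be undone later. A minimal sketch of that suspend/resume contract, assuming only the two SUPR0 calls used in the diff and the VBox/sup.h declarations the ring-0 code already pulls in (the wrapper function itself is purely illustrative, not part of the changeset):

    #include <VBox/sup.h>   /* assumed to declare SUPR0SuspendVTxOnCpu / SUPR0ResumeVTxOnCpu */

    /* Illustrative sketch only: the token pattern used when the support driver
       owns VT-x (g_HvmR0.vmx.fUsingSUPR0EnableVTx in the diff above). */
    static void sketchSuspendResumeVTx(void)
    {
        /* Ask the host/support driver to leave VMX root mode on this CPU if it
           is in it; the returned flag remembers whether it actually did. */
        bool fVTxWasDisabled = SUPR0SuspendVTxOnCpu();

        /* ... run code that must execute with VT-x off, e.g. a raw-mode
           switcher that turns off paging ... */

        /* Hand the flag straight back; passing false makes this a no-op. */
        SUPR0ResumeVTxOnCpu(fVTxWasDisabled);
    }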
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r43361 → r43379)

  */

-    if (ASMGetCR4() & X86_CR4_VMXE)
-        return VERR_VMX_IN_VMX_ROOT_MODE;
-
-    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);    /* Make sure the VMX instructions don't cause #UD faults. */
+    /** @todo r=bird: Why is this code different than the probing code earlier
+     *        on?  It just sets VMXE if needed and doesn't check that it isn't
+     *        set.  Mac OS X host_vmxoff may leave this set and we'll fail here
+     *        and debug-assert in the calling code.  This is what caused the
+     *        "regression" after backing out the SUPR0EnableVTx code hours before
+     *        4.2.0GA (reboot fixed the issue).  I've changed here to do the same
+     *        as the init code. */
+    uint64_t uCr4 = ASMGetCR4();
+    if (!(uCr4 & X86_CR4_VMXE))
+        ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);    /* Make sure the VMX instructions don't cause #UD faults. */

     /*
…
     if (RT_FAILURE(rc))
     {
-        ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
+        ASMSetCR4(uCr4);
         return VERR_VMX_VMXON_FAILED;
     }
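The fix replaces the old hard failure when CR4.VMXE was already set with the save-and-restore pattern the probing/init code uses: remember CR4, set VMXE only if it is clear, and put the exact old value back if VMXON fails. A stripped-down sketch of that pattern, with the actual VMXON reduced to a hypothetical placeholder and header names assumed from the IPRT/VBox layout this file already uses:

    #include <iprt/types.h>             /* RTHCPHYS, fixed-width integers */
    #include <iprt/asm-amd64-x86.h>     /* ASMGetCR4, ASMSetCR4 */
    #include <iprt/x86.h>               /* X86_CR4_VMXE */
    #include <VBox/err.h>               /* status codes and RT status macros used below */

    static int hypotheticalVmxOn(RTHCPHYS HCPhysVmxonRegion);  /* placeholder for the real VMXON helper */

    /* Illustrative sketch only: set CR4.VMXE if (and only if) it is not set yet,
       and restore CR4 to exactly its previous value if entering VMX root mode
       fails, instead of blindly clearing a VMXE bit the host may rely on. */
    static int sketchEnterVmxRootMode(RTHCPHYS HCPhysVmxonRegion)
    {
        uint64_t const uOldCr4 = ASMGetCR4();
        if (!(uOldCr4 & X86_CR4_VMXE))
            ASMSetCR4(uOldCr4 | X86_CR4_VMXE);      /* avoid #UD on the VMX instructions */

        int rc = hypotheticalVmxOn(HCPhysVmxonRegion);
        if (RT_FAILURE(rc))
        {
            ASMSetCR4(uOldCr4);                     /* put things back the way we found them */
            return VERR_VMX_VMXON_FAILED;
        }
        return VINF_SUCCESS;
    }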
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r43303 → r43379)

     /* We might need to disable VT-x if the active switcher turns off paging. */
     bool fVTxDisabled;
-    int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
+    int rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     if (RT_SUCCESS(rc))
     {
…
     /* We might need to disable VT-x if the active switcher turns off paging. */
-    rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
+    rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     if (RT_FAILURE(rc))
         return rc;
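Both VMMR0.cpp call sites now follow the same shape: pass the configured switcher (pVM->vmm.s.enmSwitcher) into HWACCMR0EnterSwitcher, and call the now-void HWACCMR0LeaveSwitcher unconditionally afterwards. A condensed sketch of that pairing, assuming the internal VMM headers VMMR0.cpp already includes; the wrapper function and its callback parameter are illustrative, not part of the changeset:

    /* Illustrative sketch only: how a world switch is bracketed after this
       change.  pfnDoSwitch stands in for the actual switcher invocation. */
    static int sketchRunThroughSwitcher(PVM pVM, int (*pfnDoSwitch)(PVM pVM))
    {
        /* We might need to disable VT-x if the active switcher turns off paging. */
        bool fVTxDisabled;
        int rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
        if (RT_FAILURE(rc))
            return rc;

        rc = pfnDoSwitch(pVM);                      /* the world switch happens here */

        /* HWACCMR0LeaveSwitcher returns void now, so there is no second status
           to merge; it simply re-enables VT-x if Enter had to disable it. */
        HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
        return rc;
    }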