Changeset 13069 in vbox
- Timestamp: Oct 8, 2008 11:36:59 AM
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/HWACCM.cpp
(r13050 → r13069)

         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
             pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
 #endif
+
+#ifdef HWACCM_VTX_WITH_VPID
         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
             pVM->hwaccm.s.vmx.fVPID = true;
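The hunk above gates the VPID capability check behind a HWACCM_VTX_WITH_VPID build switch, mirroring the existing EPT guard. As a minimal, self-contained sketch of that gating pattern (hypothetical names throughout, not the VirtualBox API), compile-time switches decide whether a CPU-reported capability bit may turn a feature on:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability bits standing in for the VMX "allowed-1" mask. */
#define CAP_EPT   (1u << 1)
#define CAP_VPID  (1u << 5)

typedef struct { int fNestedPaging; int fVPID; } FEATURES;

/* Enable a feature only when the CPU reports it AND the build enables it. */
static void decide_features(uint32_t allowed1, FEATURES *pFeat)
{
    (void)allowed1; /* unused if neither switch is defined */
    pFeat->fNestedPaging = 0;
    pFeat->fVPID = 0;
#ifdef WITH_EPT   /* compile-time switch, in the spirit of HWACCM_VTX_WITH_EPT */
    if (allowed1 & CAP_EPT)
        pFeat->fNestedPaging = 1;
#endif
#ifdef WITH_VPID  /* compile-time switch, in the spirit of HWACCM_VTX_WITH_VPID */
    if (allowed1 & CAP_VPID)
        pFeat->fVPID = 1;
#endif
}

int main(void)
{
    FEATURES feat;
    decide_features(CAP_EPT | CAP_VPID, &feat);
    printf("nested paging: %d, vpid: %d\n", feat.fNestedPaging, feat.fVPID);
    return 0;
}

Building the sketch with -DWITH_EPT -DWITH_VPID enables both paths; leaving a define out compiles the corresponding check away entirely.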
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r13051 → r13069)

 #include "HWVMXR0.h"
 
-
 /*******************************************************************************
 *   Global Variables                                                           *
…
 static uint32_t const g_aIOSize[4] = {1, 2, 0, 4};
 static uint32_t const g_aIOOpAnd[4] = {0xff, 0xffff, 0, 0xffffffff};
+
+/*******************************************************************************
+*   Local Functions                                                            *
+*******************************************************************************/
+#ifdef VBOX_STRICT
+static void VMXR0ReportWorldSwitchError(PVM pVM, int rc, PCPUMCTX pCtx);
+#else
+#define VMXR0ReportWorldSwitchError(a, b, c) do { } while (0);
+#endif /* VBOX_STRICT */
 
 
…
 
 /**
+ * Syncs back the guest state
+ *
+ * @returns VBox status code.
+ * @param pVM The VM to operate on.
+ * @param pCtx Guest context
+ */
+DECLINLINE(int) VMXR0SaveGuestState(PVM pVM, CPUMCTX *pCtx)
+{
+    RTCCUINTREG val, valShadow;
+    RTGCUINTPTR uInterruptState;
+    int rc;
+
+    /* Let's first sync back eip, esp, and eflags. */
+    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
+    AssertRC(rc);
+    pCtx->rip = val;
+    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
+    AssertRC(rc);
+    pCtx->rsp = val;
+    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
+    AssertRC(rc);
+    pCtx->eflags.u32 = val;
+
+    /* Take care of instruction fusing (sti, mov ss) */
+    rc |= VMXReadVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, &val);
+    uInterruptState = val;
+    if (uInterruptState != 0)
+    {
+        Assert(uInterruptState <= 2); /* only sti & mov ss */
+        Log(("uInterruptState %x eip=%VGv\n", uInterruptState, pCtx->rip));
+        EMSetInhibitInterruptsPC(pVM, pCtx->rip);
+    }
+    else
+        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
+
+    /* Control registers. */
+    VMXReadVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
+    VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
+    val = (valShadow & pVM->hwaccm.s.vmx.cr0_mask) | (val & ~pVM->hwaccm.s.vmx.cr0_mask);
+    CPUMSetGuestCR0(pVM, val);
+
+    VMXReadVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
+    VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
+    val = (valShadow & pVM->hwaccm.s.vmx.cr4_mask) | (val & ~pVM->hwaccm.s.vmx.cr4_mask);
+    CPUMSetGuestCR4(pVM, val);
+
+    /* Can be updated behind our back in the nested paging case. */
+    CPUMSetGuestCR2(pVM, ASMGetCR2());
+
+    /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
+    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
+    if (pVM->hwaccm.s.fNestedPaging)
+    {
+        VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
+
+        if (val != pCtx->cr3)
+        {
+            CPUMSetGuestCR3(pVM, val);
+            PGMUpdateCR3(pVM, val);
+        }
+    }
+
+    /* Sync back DR7 here. */
+    VMXReadVMCS(VMX_VMCS_GUEST_DR7, &val);
+    pCtx->dr[7] = val;
+
+    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
+    VMX_READ_SELREG(ES, es);
+    VMX_READ_SELREG(SS, ss);
+    VMX_READ_SELREG(CS, cs);
+    VMX_READ_SELREG(DS, ds);
+    VMX_READ_SELREG(FS, fs);
+    VMX_READ_SELREG(GS, gs);
+
+    /*
+     * System MSRs
+     */
+    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_CS, &val);
+    pCtx->SysEnter.cs = val;
+    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
+    pCtx->SysEnter.eip = val;
+    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
+    pCtx->SysEnter.esp = val;
+
+    /* Misc. registers; must sync everything otherwise we can get out of sync when jumping to ring 3. */
+    VMX_READ_SELREG(LDTR, ldtr);
+
+    VMXReadVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, &val);
+    pCtx->gdtr.cbGdt = val;
+    VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
+    pCtx->gdtr.pGdt = val;
+
+    VMXReadVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, &val);
+    pCtx->idtr.cbIdt = val;
+    VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
+    pCtx->idtr.pIdt = val;
+
+#ifdef HWACCM_VMX_EMULATE_REALMODE
+    /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
+    if (CPUMIsGuestInRealModeEx(pCtx))
+    {
+        /* Hide our emulation flags */
+        pCtx->eflags.Bits.u1VM = 0;
+        pCtx->eflags.Bits.u2IOPL = 0;
+
+        /* Force a TR resync every time in case we switch modes. */
+        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
+    }
+    else
+#endif /* HWACCM_VMX_EMULATE_REALMODE */
+    {
+        /* In real mode we have a fake TSS, so only sync it back when it's supposed to be valid. */
+        VMX_READ_SELREG(TR, tr);
+    }
+    return VINF_SUCCESS;
+}
+
+/**
  * Runs guest code in a VT-x VM.
  *
…
 {
     int rc = VINF_SUCCESS;
-    RTCCUINTREG val, valShadow;
+    RTCCUINTREG val;
     RTCCUINTREG exitReason, instrError, cbInstr;
     RTGCUINTPTR exitQualification;
     RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
-    RTGCUINTPTR errCode, instrInfo, uInterruptState;
+    RTGCUINTPTR errCode, instrInfo;
     bool fSyncTPR = false;
     PHWACCM_CPUINFO pCpu = 0;
…
     }
 
-    /* Deal with tagged TLBs if VPID is supported. */
-    if (pVM->hwaccm.s.vmx.fVPID)
+#ifdef HWACCM_VTX_WITH_EPT
+    /* Deal with tagged TLBs if VPID or EPT is supported. */
+    if (    pVM->hwaccm.s.fNestedPaging
+        ||  pVM->hwaccm.s.vmx.fVPID)
     {
         pCpu = HWACCMR0GetCurrentCpu();
…
         AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
 
-        rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVM->hwaccm.s.uCurrentASID);
-        AssertRC(rc);
+        if (pVM->hwaccm.s.fVPID)
+        {
+            rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVM->hwaccm.s.uCurrentASID);
+            AssertRC(rc);
+        }
 
         if (pVM->hwaccm.s.fForceTLBFlush)
…
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatNoFlushTLBWorldSwitch);
 #endif
-
-    }
+    }
+#endif /* HWACCM_VTX_WITH_EPT */
 
     /* Non-register state Guest Context */
…
     STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);
 
-    switch (rc)
-    {
-    case VINF_SUCCESS:
-        break;
-
-    case VERR_VMX_INVALID_VMXON_PTR:
-        AssertFailed();
-        goto end;
-
-    case VERR_VMX_UNABLE_TO_START_VM:
-    case VERR_VMX_UNABLE_TO_RESUME_VM:
-    {
-#ifdef VBOX_STRICT
-        int rc1;
-
-        rc1 = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
-        rc1 |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
-        AssertRC(rc1);
-        if (rc1 == VINF_SUCCESS)
-        {
-            RTGDTR gdtr;
-            PX86DESCHC pDesc;
-
-            ASMGetGDTR(&gdtr);
-
-            Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
-            Log(("Current stack %08x\n", &rc1));
-
-
-            VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
-            Log(("Old eip %VGv new %VGv\n", pCtx->rip, (RTGCPTR)val));
-            VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
-            Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
-            VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
-            Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
-            VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
-            Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
-            VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
-            Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
-            Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
-            Log(("VMX_VMCS_HOST_CR3 %VHp\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
-            Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_CS, &val);
-            Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
-
-            VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
-            Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
-
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_DS, &val);
-            Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_ES, &val);
-            Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_FS, &val);
-            Log(("VMX_VMCS_HOST_FIELD_FS %08x\n", val));
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_GS, &val);
-            Log(("VMX_VMCS_HOST_FIELD_GS %08x\n", val));
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_SS, &val);
-            Log(("VMX_VMCS_HOST_FIELD_SS %08x\n", val));
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_FIELD_TR, &val);
-            Log(("VMX_VMCS_HOST_FIELD_TR %08x\n", val));
-            if (val < gdtr.cbGdt)
-            {
-                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
-                HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
-            }
-
-            VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
-            Log(("VMX_VMCS_HOST_TR_BASE %VHv\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
-            Log(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", val));
-            VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
-            Log(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_CS, &val);
-            Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
-            Log(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
-            Log(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", val));
-
-            VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
-            Log(("VMX_VMCS_HOST_RSP %VHv\n", val));
-            VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
-            Log(("VMX_VMCS_HOST_RIP %VHv\n", val));
-
-#if HC_ARCH_BITS == 64
-            Log(("MSR_K6_EFER = %VX64\n", ASMRdMsr(MSR_K6_EFER)));
-            Log(("MSR_K6_STAR = %VX64\n", ASMRdMsr(MSR_K6_STAR)));
-            Log(("MSR_K8_LSTAR = %VX64\n", ASMRdMsr(MSR_K8_LSTAR)));
-            Log(("MSR_K8_CSTAR = %VX64\n", ASMRdMsr(MSR_K8_CSTAR)));
-            Log(("MSR_K8_SF_MASK = %VX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
-#endif
-        }
-#endif /* VBOX_STRICT */
-        goto end;
-    }
-
-    default:
-        /* impossible */
-        AssertFailed();
+    if (rc != VINF_SUCCESS)
+    {
+        VMXR0ReportWorldSwitchError(pVM, rc, pCtx);
         goto end;
     }
…
     AssertRC(rc);
 
-    /* Let's first sync back eip, esp, and eflags. */
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
+    /* Sync back the guest state */
+    rc = VMXR0SaveGuestState(pVM, pCtx);
     AssertRC(rc);
-    pCtx->rip = val;
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
-    AssertRC(rc);
-    pCtx->rsp = val;
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
-    AssertRC(rc);
-    pCtx->eflags.u32 = val;
-
-    /* Take care of instruction fusing (sti, mov ss) */
-    rc |= VMXReadVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, &val);
-    uInterruptState = val;
-    if (uInterruptState != 0)
-    {
-        Assert(uInterruptState <= 2); /* only sti & mov ss */
-        Log(("uInterruptState %x eip=%VGv\n", uInterruptState, pCtx->rip));
-        EMSetInhibitInterruptsPC(pVM, pCtx->rip);
-    }
-    else
-        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
-
-    /* Control registers. */
-    VMXReadVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
-    VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
-    val = (valShadow & pVM->hwaccm.s.vmx.cr0_mask) | (val & ~pVM->hwaccm.s.vmx.cr0_mask);
-    CPUMSetGuestCR0(pVM, val);
-
-    VMXReadVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
-    VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
-    val = (valShadow & pVM->hwaccm.s.vmx.cr4_mask) | (val & ~pVM->hwaccm.s.vmx.cr4_mask);
-    CPUMSetGuestCR4(pVM, val);
-
-    /* Can be updated behind our back in the nested paging case. */
-    CPUMSetGuestCR2(pVM, ASMGetCR2());
-
-    /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
-    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
-    if (pVM->hwaccm.s.fNestedPaging)
-    {
-        VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
-
-        if (val != pCtx->cr3)
-        {
-            CPUMSetGuestCR3(pVM, val);
-            PGMUpdateCR3(pVM, val);
-        }
-    }
-
-    /* Sync back DR7 here. */
-    VMXReadVMCS(VMX_VMCS_GUEST_DR7, &val);
-    pCtx->dr[7] = val;
-
-    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
-    VMX_READ_SELREG(ES, es);
-    VMX_READ_SELREG(SS, ss);
-    VMX_READ_SELREG(CS, cs);
-    VMX_READ_SELREG(DS, ds);
-    VMX_READ_SELREG(FS, fs);
-    VMX_READ_SELREG(GS, gs);
-
-    /*
-     * System MSRs
-     */
-    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_CS, &val);
-    pCtx->SysEnter.cs = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
-    pCtx->SysEnter.eip = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
-    pCtx->SysEnter.esp = val;
-
-    /* Misc. registers; must sync everything otherwise we can get out of sync when jumping to ring 3. */
-    VMX_READ_SELREG(LDTR, ldtr);
-
-    VMXReadVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, &val);
-    pCtx->gdtr.cbGdt = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
-    pCtx->gdtr.pGdt = val;
-
-    VMXReadVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, &val);
-    pCtx->idtr.cbIdt = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
-    pCtx->idtr.pIdt = val;
-
-#ifdef HWACCM_VMX_EMULATE_REALMODE
-    /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
-    if (CPUMIsGuestInRealModeEx(pCtx))
-    {
-        /* Hide our emulation flags */
-        pCtx->eflags.Bits.u1VM = 0;
-        pCtx->eflags.Bits.u2IOPL = 0;
-
-        /* Force a TR resync every time in case we switch modes. */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
-    }
-    else
-#endif /* HWACCM_VMX_EMULATE_REALMODE */
-    {
-        /* In real mode we have a fake TSS, so only sync it back when it's supposed to be valid. */
-        VMX_READ_SELREG(TR, tr);
-    }
 
     /* Note! NOW IT'S SAFE FOR LOGGING! */
…
     return VINF_SUCCESS;
 }
+
+#ifdef VBOX_STRICT
+/**
+ * Report world switch error and dump some useful debug info
+ *
+ * @param pVM The VM to operate on.
+ * @param rc Return code
+ * @param pCtx Current CPU context (not updated)
+ */
+static void VMXR0ReportWorldSwitchError(PVM pVM, int rc, PCPUMCTX pCtx)
+{
+    switch (rc)
+    {
+    case VERR_VMX_INVALID_VMXON_PTR:
+        AssertFailed();
+        break;
+
+    case VERR_VMX_UNABLE_TO_START_VM:
+    case VERR_VMX_UNABLE_TO_RESUME_VM:
+    {
+        int rc;
+        RTCCUINTREG exitReason, instrError, val;
+
+        rc = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
+        rc |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
+        AssertRC(rc);
+        if (rc == VINF_SUCCESS)
+        {
+            RTGDTR gdtr;
+            PX86DESCHC pDesc;
+
+            ASMGetGDTR(&gdtr);
+
+            Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
+            Log(("Current stack %08x\n", &rc));
+
+
+            VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
+            Log(("Old eip %VGv new %VGv\n", pCtx->rip, (RTGCPTR)val));
+            VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
+            Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
+            VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
+            Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
+            VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
+            Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
+            VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
+            Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
+            Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
+            Log(("VMX_VMCS_HOST_CR3 %VHp\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
+            Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_CS, &val);
+            Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
+
+            VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
+            Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
+
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_DS, &val);
+            Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_ES, &val);
+            Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_FS, &val);
+            Log(("VMX_VMCS_HOST_FIELD_FS %08x\n", val));
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_GS, &val);
+            Log(("VMX_VMCS_HOST_FIELD_GS %08x\n", val));
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_SS, &val);
+            Log(("VMX_VMCS_HOST_FIELD_SS %08x\n", val));
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_FIELD_TR, &val);
+            Log(("VMX_VMCS_HOST_FIELD_TR %08x\n", val));
+            if (val < gdtr.cbGdt)
+            {
+                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
+                HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
+            }
+
+            VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
+            Log(("VMX_VMCS_HOST_TR_BASE %VHv\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
+            Log(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", val));
+            VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
+            Log(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_CS, &val);
+            Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
+            Log(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
+            Log(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", val));
+
+            VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
+            Log(("VMX_VMCS_HOST_RSP %VHv\n", val));
+            VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
+            Log(("VMX_VMCS_HOST_RIP %VHv\n", val));
+
+#if HC_ARCH_BITS == 64
+            Log(("MSR_K6_EFER = %VX64\n", ASMRdMsr(MSR_K6_EFER)));
+            Log(("MSR_K6_STAR = %VX64\n", ASMRdMsr(MSR_K6_STAR)));
+            Log(("MSR_K8_LSTAR = %VX64\n", ASMRdMsr(MSR_K8_LSTAR)));
+            Log(("MSR_K8_CSTAR = %VX64\n", ASMRdMsr(MSR_K8_CSTAR)));
+            Log(("MSR_K8_SF_MASK = %VX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
+#endif
+        }
+        break;
+    }
+
+    default:
+        /* impossible */
+        AssertFailed();
+        break;
+    }
+}
+#endif /* VBOX_STRICT */
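The new VMXR0SaveGuestState helper reads CR0 and CR4 back by combining the guest's read shadow with the real VMCS value under cr0_mask/cr4_mask: bits covered by the mask are reported from the shadow (what the guest believes it wrote), the rest from the hardware value. A minimal sketch of that merge with made-up values (only the bit arithmetic is taken from the diff):

#include <stdint.h>
#include <stdio.h>

/* Combine a control register the way the new VMXR0SaveGuestState does:
   bits covered by 'mask' come from the read shadow (the value the guest
   believes it wrote), all other bits come from the real VMCS value. */
static uint64_t merge_cr(uint64_t shadow, uint64_t real, uint64_t mask)
{
    return (shadow & mask) | (real & ~mask);
}

int main(void)
{
    uint64_t mask   = 0x20;        /* hypothetical host-owned bit(s) */
    uint64_t shadow = 0x11;        /* what the guest thinks it wrote */
    uint64_t real   = 0x80000031;  /* what the VMCS actually holds */

    /* Prints 0x80000011: the masked bit is taken from the shadow, the rest is real. */
    printf("guest-visible value = %#llx\n",
           (unsigned long long)merge_cr(shadow, real, mask));
    return 0;
}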