Changeset 72412 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 1, 2018 2:02:49 PM
- svn:sync-xref-src-repo-rev: 122883
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r72403 r72412 737 737 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 738 738 { 739 GET_SEG(pCtx->es, WHvX64RegisterEs); 740 GET_SEG(pCtx->cs, WHvX64RegisterCs); 741 GET_SEG(pCtx->ss, WHvX64RegisterSs); 742 GET_SEG(pCtx->ds, WHvX64RegisterDs); 743 GET_SEG(pCtx->fs, WHvX64RegisterFs); 744 GET_SEG(pCtx->gs, WHvX64RegisterGs); 739 if (fWhat & CPUMCTX_EXTRN_ES) 740 GET_SEG(pCtx->es, WHvX64RegisterEs); 741 if (fWhat & CPUMCTX_EXTRN_CS) 742 GET_SEG(pCtx->cs, WHvX64RegisterCs); 743 if (fWhat & CPUMCTX_EXTRN_SS) 744 GET_SEG(pCtx->ss, WHvX64RegisterSs); 745 if (fWhat & CPUMCTX_EXTRN_DS) 746 GET_SEG(pCtx->ds, WHvX64RegisterDs); 747 if (fWhat & CPUMCTX_EXTRN_FS) 748 GET_SEG(pCtx->fs, WHvX64RegisterFs); 749 if (fWhat & CPUMCTX_EXTRN_GS) 750 GET_SEG(pCtx->gs, WHvX64RegisterGs); 745 751 } 746 752 … … 1152 1158 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API */ 1153 1159 1160 1154 1161 #ifdef LOG_ENABLED 1155 1156 1162 /** 1157 1163 * Logs the current CPU state. … … 1201 1207 } 1202 1208 } 1203 1204 1209 #endif /* LOG_ENABLED */ 1210 1211 1212 #ifdef LOG_ENABLED 1213 /** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */ 1214 # define SWITCH_IT(a_szPrefix) \ 1215 do \ 1216 switch (u)\ 1217 { \ 1218 case 0x00: return a_szPrefix ""; \ 1219 case 0x01: return a_szPrefix ",Pnd"; \ 1220 case 0x02: return a_szPrefix ",Dbg"; \ 1221 case 0x03: return a_szPrefix ",Pnd,Dbg"; \ 1222 case 0x04: return a_szPrefix ",Shw"; \ 1223 case 0x05: return a_szPrefix ",Pnd,Shw"; \ 1224 case 0x06: return a_szPrefix ",Shw,Dbg"; \ 1225 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \ 1226 default: AssertFailedReturn("WTF?"); \ 1227 } \ 1228 while (0) 1229 1230 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1205 1231 /** 1206 * Translates the execution stat bitfield into a short log string .1232 * Translates the execution stat bitfield into a short log string, VID version. 1207 1233 * 1208 1234 * @returns Read-only log string. … … 1214 1240 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1) 1215 1241 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2); 1216 # define SWITCH_IT(a_szPrefix) \1217 do \1218 switch (u)\1219 { \1220 case 0x00: return a_szPrefix ""; \1221 case 0x01: return a_szPrefix ",Pnd"; \1222 case 0x02: return a_szPrefix ",Dbg"; \1223 case 0x03: return a_szPrefix ",Pnd,Dbg"; \1224 case 0x04: return a_szPrefix ",Shw"; \1225 case 0x05: return a_szPrefix ",Pnd,Shw"; \1226 case 0x06: return a_szPrefix ",Shw,Dbg"; \1227 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \1228 default: AssertFailedReturn("WTF?"); \1229 } \1230 while (0)1231 1232 1242 if (pMsgHdr->ExecutionState.EferLma) 1233 1243 SWITCH_IT("LM"); … … 1236 1246 else 1237 1247 SWITCH_IT("RM"); 1248 } 1249 # elif defined(IN_RING3) 1250 /** 1251 * Translates the execution stat bitfield into a short log string, WinHv version. 1252 * 1253 * @returns Read-only log string. 1254 * @param pExitCtx The exit context which state to summarize. 
1255 */ 1256 static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx) 1257 { 1258 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending 1259 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1) 1260 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2); 1261 if (pExitCtx->ExecutionState.EferLma) 1262 SWITCH_IT("LM"); 1263 else if (pExitCtx->ExecutionState.Cr0Pe) 1264 SWITCH_IT("PM"); 1265 else 1266 SWITCH_IT("RM"); 1267 } 1268 # endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 1238 1269 # undef SWITCH_IT 1239 }1240 1241 1270 #endif /* LOG_ENABLED */ 1242 1271 1272 1273 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1243 1274 /** 1244 * Advances the guest RIP and clear EFLAGS.RF .1275 * Advances the guest RIP and clear EFLAGS.RF, VID version. 1245 1276 * 1246 1277 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS. … … 1265 1296 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1266 1297 } 1267 1268 1269 NEM_TMPL_STATIC VBOXSTRICTRC 1270 nemHCWinHandleHalt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 1271 { 1272 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); 1273 LogFlow(("nemHCWinHandleHalt\n")); 1274 return VINF_EM_HALT; 1275 } 1298 #elif defined(IN_RING3) 1299 /** 1300 * Advances the guest RIP and clear EFLAGS.RF, WinHv version. 1301 * 1302 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS. 1303 * 1304 * @param pVCpu The cross context virtual CPU structure. 1305 * @param pCtx The CPU context to update. 1306 * @param pExitCtx The exit context. 1307 */ 1308 DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx) 1309 { 1310 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))); 1311 1312 /* Advance the RIP. */ 1313 Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16); 1314 pCtx->rip += pExitCtx->InstructionLength; 1315 pCtx->rflags.Bits.u1RF = 0; 1316 1317 /* Update interrupt inhibition. */ 1318 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1319 { /* likely */ } 1320 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) 1321 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1322 } 1323 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 1324 1276 1325 1277 1326 … … 1452 1501 1453 1502 default: 1454 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_ INTERNAL_ERROR_3);1503 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4); 1455 1504 } 1456 1505 … … 1495 1544 1496 1545 1497 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1498 1499 # ifdef IN_RING0 1546 1547 #if defined(IN_RING0) && defined(NEM_WIN_USE_OUR_OWN_RUN_API) 1500 1548 /** 1501 1549 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE and … … 1526 1574 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc); 1527 1575 } 1528 # endif /* IN_RING0*/1529 1530 1576 #endif /* IN_RING0 && NEM_WIN_USE_OUR_OWN_RUN_API*/ 1577 1578 #if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) 1531 1579 /** 1532 1580 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV. 
… … 1546 1594 if (pCtx->fExtrn & fWhat) 1547 1595 { 1548 # 1596 #ifdef IN_RING0 1549 1597 RT_NOREF(pVCpu); 1550 1598 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller); 1551 # 1599 #else 1552 1600 RT_NOREF(pGVCpu, pszCaller); 1553 1601 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 1554 1602 AssertRCReturn(rc, rc); 1555 # 1603 #endif 1556 1604 } 1557 1605 return VINF_SUCCESS; 1558 1606 } 1559 1560 1607 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API || IN_RING3 */ 1608 1609 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1561 1610 /** 1562 1611 * Copies register state from the X64 intercept message header. … … 1567 1616 * @param pCtx The registe rcontext. 1568 1617 * @param pHdr The X64 intercept message header. 1618 * @sa nemR3WinCopyStateFromX64Header 1569 1619 */ 1570 1620 DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr) … … 1589 1639 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT); 1590 1640 } 1591 1592 1641 #elif defined(IN_RING3) 1642 /** 1643 * Copies register state from the (common) exit context. 1644 * 1645 * ASSUMES no state copied yet. 1646 * 1647 * @param pVCpu The cross context per CPU structure. 1648 * @param pCtx The registe rcontext. 1649 * @param pExitCtx The common exit context. 1650 * @sa nemHCWinCopyStateFromX64Header 1651 */ 1652 DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx) 1653 { 1654 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)) 1655 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)); 1656 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pExitCtx->Cs); 1657 pCtx->rip = pExitCtx->Rip; 1658 pCtx->rflags.u = pExitCtx->Rflags; 1659 1660 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow; 1661 if (!pExitCtx->ExecutionState.InterruptShadow) 1662 { 1663 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1664 { /* likely */ } 1665 else 1666 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1667 } 1668 else 1669 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip); 1670 1671 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT); 1672 } 1673 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 1674 1675 1676 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1593 1677 /** 1594 1678 * Deals with memory intercept message. … … 1600 1684 * @param pCtx The register context. 1601 1685 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 1686 * @sa nemR3WinHandleExitMemory 1602 1687 */ 1603 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg,1604 1688 NEM_TMPL_STATIC VBOXSTRICTRC 1689 nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu) 1605 1690 { 1606 1691 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ … … 1697 1782 /** @todo do we need to do anything wrt debugging here? */ 1698 1783 return rcStrict; 1699 1700 } 1701 1702 1784 } 1785 #elif defined(IN_RING3) 1786 /** 1787 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess). 1788 * 1789 * @returns Strict VBox status code. 1790 * @param pVM The cross context VM structure. 
1791 * @param pVCpu The cross context per CPU structure. 1792 * @param pExit The VM exit information to handle. 1793 * @param pCtx The register context. 1794 * @sa nemHCWinHandleMessageMemory 1795 */ 1796 NEM_TMPL_STATIC VBOXSTRICTRC 1797 nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 1798 { 1799 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3); 1800 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength)); 1801 1802 /* 1803 * Whatever we do, we must clear pending event injection upon resume. 1804 */ 1805 if (pExit->VpContext.ExecutionState.InterruptionPending) 1806 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; 1807 1808 /* 1809 * Ask PGM for information about the given GCPhys. We need to check if we're 1810 * out of sync first. 1811 */ 1812 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false }; 1813 PGMPHYSNEMPAGEINFO Info; 1814 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info, 1815 nemHCWinHandleMemoryAccessPageCheckerCallback, &State); 1816 if (RT_SUCCESS(rc)) 1817 { 1818 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite 1819 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ)) 1820 { 1821 if (State.fCanResume) 1822 { 1823 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n", 1824 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 1825 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 1826 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 1827 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType])); 1828 return VINF_SUCCESS; 1829 } 1830 } 1831 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n", 1832 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 1833 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 1834 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 1835 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType])); 1836 } 1837 else 1838 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n", 1839 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 1840 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "", 1841 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType])); 1842 1843 /* 1844 * Emulate the memory access, either access handler or special memory. 
1845 */ 1846 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 1847 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 1848 AssertRCReturn(rc, rc); 1849 1850 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1) 1851 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1)); 1852 //if (pMsg->InstructionByteCount > 0) 1853 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes)); 1854 1855 VBOXSTRICTRC rcStrict; 1856 if (pExit->MemoryAccess.InstructionByteCount > 0) 1857 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip, 1858 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount); 1859 else 1860 rcStrict = IEMExecOne(pVCpu); 1861 /** @todo do we need to do anything wrt debugging here? */ 1862 return rcStrict; 1863 } 1864 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 1865 1866 1867 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1703 1868 /** 1704 1869 * Deals with I/O port intercept message. … … 1711 1876 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 1712 1877 */ 1713 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg,1714 1878 NEM_TMPL_STATIC VBOXSTRICTRC 1879 nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu) 1715 1880 { 1716 1881 Assert( pMsg->AccessInfo.AccessSize == 1 … … 1815 1980 * Do debug checks. 1816 1981 */ 1817 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive this only refle xt DR7? */1982 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive this only reflect DR7? */ 1818 1983 || (pMsg->Header.Rflags & X86_EFL_TF) 1819 1984 || DBGFBpIsHwIoArmed(pVM) ) … … 1824 1989 return rcStrict; 1825 1990 } 1826 1827 1991 #elif defined(IN_RING3) 1992 /** 1993 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess). 1994 * 1995 * @returns Strict VBox status code. 1996 * @param pVM The cross context VM structure. 1997 * @param pVCpu The cross context per CPU structure. 1998 * @param pExit The VM exit information to handle. 1999 * @param pCtx The register context. 2000 * @sa nemHCWinHandleMessageIoPort 2001 */ 2002 NEM_TMPL_STATIC VBOXSTRICTRC 2003 nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 2004 { 2005 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1 2006 || pExit->IoPortAccess.AccessInfo.AccessSize == 2 2007 || pExit->IoPortAccess.AccessInfo.AccessSize == 4); 2008 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength)); 2009 2010 /* 2011 * Whatever we do, we must clear pending event injection upon resume. 2012 */ 2013 if (pExit->VpContext.ExecutionState.InterruptionPending) 2014 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; 2015 2016 VBOXSTRICTRC rcStrict; 2017 if (!pExit->IoPortAccess.AccessInfo.StringOp) 2018 { 2019 /* 2020 * Simple port I/O. 
2021 */ 2022 static uint32_t const s_fAndMask[8] = 2023 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX }; 2024 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize]; 2025 if (pExit->IoPortAccess.AccessInfo.IsWrite) 2026 { 2027 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask, 2028 pExit->IoPortAccess.AccessInfo.AccessSize); 2029 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n", 2030 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2031 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask, 2032 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) )); 2033 if (IOM_SUCCESS(rcStrict)) 2034 { 2035 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2036 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext); 2037 } 2038 } 2039 else 2040 { 2041 uint32_t uValue = 0; 2042 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue, pExit->IoPortAccess.AccessInfo.AccessSize); 2043 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n", 2044 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2045 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2046 if (IOM_SUCCESS(rcStrict)) 2047 { 2048 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4) 2049 pCtx->rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask); 2050 else 2051 pCtx->rax = uValue; 2052 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX; 2053 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pCtx->rax)); 2054 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2055 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext); 2056 } 2057 } 2058 } 2059 else 2060 { 2061 /* 2062 * String port I/O. 2063 */ 2064 /** @todo Someone at Microsoft please explain how we can get the address mode 2065 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for 2066 * getting the default mode, it can always be overridden by a prefix. This 2067 * forces us to interpret the instruction from opcodes, which is suboptimal. 2068 * Both AMD-V and VT-x includes the address size in the exit info, at least on 2069 * CPUs that are reasonably new. 2070 * 2071 * Of course, it's possible this is an undocumented and we just need to do some 2072 * experiments to figure out how it's communicated. Alternatively, we can scan 2073 * the opcode bytes for possible evil prefixes. 
2074 */ 2075 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2076 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI 2077 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES); 2078 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds); 2079 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es); 2080 pCtx->rax = pExit->IoPortAccess.Rax; 2081 pCtx->rcx = pExit->IoPortAccess.Rcx; 2082 pCtx->rdi = pExit->IoPortAccess.Rdi; 2083 pCtx->rsi = pExit->IoPortAccess.Rsi; 2084 # ifdef IN_RING0 2085 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit"); 2086 if (rcStrict != VINF_SUCCESS) 2087 return rcStrict; 2088 # else 2089 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 2090 AssertRCReturn(rc, rc); 2091 # endif 2092 2093 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n", 2094 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2095 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "", 2096 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS", 2097 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize )); 2098 rcStrict = IEMExecOne(pVCpu); 2099 } 2100 if (IOM_SUCCESS(rcStrict)) 2101 { 2102 /* 2103 * Do debug checks. 2104 */ 2105 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive this only reflect DR7? */ 2106 || (pExit->VpContext.Rflags & X86_EFL_TF) 2107 || DBGFBpIsHwIoArmed(pVM) ) 2108 { 2109 /** @todo Debugging. */ 2110 } 2111 } 2112 return rcStrict; 2113 2114 } 2115 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 2116 2117 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1828 2118 /** 1829 2119 * Deals with interrupt window message. … … 1835 2125 * @param pCtx The register context. 1836 2126 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 2127 * @sa nemR3WinHandleExitInterruptWindow 1837 2128 */ 1838 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu,1839 1840 2129 NEM_TMPL_STATIC VBOXSTRICTRC 2130 nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg, 2131 PCPUMCTX pCtx, PGVMCPU pGVCpu) 1841 2132 { 1842 2133 /* … … 1861 2152 return VINF_SUCCESS; 1862 2153 } 1863 1864 2154 #elif defined(IN_RING3) 2155 /** 2156 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow). 2157 * 2158 * @returns Strict VBox status code. 2159 * @param pVM The cross context VM structure. 2160 * @param pVCpu The cross context per CPU structure. 2161 * @param pExit The VM exit information to handle. 2162 * @param pCtx The register context. 2163 * @sa nemHCWinHandleMessageInterruptWindow 2164 */ 2165 NEM_TMPL_STATIC VBOXSTRICTRC 2166 nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 2167 { 2168 /* 2169 * Assert message sanity. 2170 */ 2171 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength)); 2172 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt 2173 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi, 2174 ("%#x\n", pExit->InterruptWindow.DeliverableType)); 2175 2176 /* 2177 * Just copy the state we've got and handle it in the loop for now. 
2178 */ 2179 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2180 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n", 2181 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2182 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF), 2183 pExit->VpContext.ExecutionState.InterruptShadow)); 2184 2185 /** @todo call nemHCWinHandleInterruptFF */ 2186 RT_NOREF(pVM); 2187 return VINF_SUCCESS; 2188 } 2189 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 2190 2191 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1865 2192 /** 1866 2193 * Deals with CPUID intercept message. … … 1906 2233 return VINF_SUCCESS; 1907 2234 } 1908 1909 2235 #elif defined(IN_RING3) 2236 /** 2237 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid). 2238 * 2239 * @returns Strict VBox status code. 2240 * @param pVM The cross context VM structure. 2241 * @param pVCpu The cross context per CPU structure. 2242 * @param pExit The VM exit information to handle. 2243 * @param pCtx The register context. 2244 * @sa nemHCWinHandleMessageInterruptWindow 2245 */ 2246 NEM_TMPL_STATIC VBOXSTRICTRC 2247 nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 2248 { 2249 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength)); 2250 2251 /* 2252 * Soak up state and execute the instruction. 2253 * 2254 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId 2255 * function and make everyone use it. 2256 */ 2257 /** @todo Combine implementations into IEMExecDecodedCpuId as this will 2258 * only get weirder with nested VT-x and AMD-V support. */ 2259 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2260 2261 /* Copy in the low register values (top is always cleared). */ 2262 pCtx->rax = (uint32_t)pExit->CpuidAccess.Rax; 2263 pCtx->rcx = (uint32_t)pExit->CpuidAccess.Rcx; 2264 pCtx->rdx = (uint32_t)pExit->CpuidAccess.Rdx; 2265 pCtx->rbx = (uint32_t)pExit->CpuidAccess.Rbx; 2266 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX); 2267 2268 /* Get the correct values. */ 2269 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx); 2270 2271 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n", 2272 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2273 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx, 2274 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx, 2275 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx)); 2276 2277 /* Move RIP and we're done. */ 2278 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext); 2279 2280 RT_NOREF_PV(pVM); 2281 return VINF_SUCCESS; 2282 } 2283 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 2284 2285 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 1910 2286 /** 1911 2287 * Deals with MSR intercept message. … … 1916 2292 * @param pCtx The register context. 1917 2293 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 
2294 * @sa nemR3WinHandleExitMsr 1918 2295 */ 1919 2296 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg, … … 1931 2308 */ 1932 2309 VBOXSTRICTRC rcStrict; 1933 if (pMsg->Header. CsSegment.DescriptorPrivilegeLevel == 0)2310 if (pMsg->Header.ExecutionState.Cpl == 0) 1934 2311 { 1935 2312 /* … … 1938 2315 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE) 1939 2316 { 1940 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNum mber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));2317 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx)); 1941 2318 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", 1942 2319 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1943 pMsg->MsrNum mber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));2320 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) )); 1944 2321 if (rcStrict == VINF_SUCCESS) 1945 2322 { … … 1948 2325 return VINF_SUCCESS; 1949 2326 } 1950 # ifndef IN_RING32327 # ifndef IN_RING3 1951 2328 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */ 1952 2329 if (rcStrict == VERR_CPUM_RAISE_GP_0) 1953 2330 rcStrict = VINF_CPUM_R3_MSR_WRITE; 1954 2331 return rcStrict; 1955 # else2332 # else 1956 2333 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", 1957 2334 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1958 pMsg->MsrNum mber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));1959 # endif2335 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) )); 2336 # endif 1960 2337 } 1961 2338 /* … … 1965 2342 { 1966 2343 uint64_t uValue = 0; 1967 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNum mber, &uValue);2344 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue); 1968 2345 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", 1969 2346 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1970 pMsg->MsrNum mber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));2347 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 1971 2348 if (rcStrict == VINF_SUCCESS) 1972 2349 { 1973 2350 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header); 2351 pCtx->rax = (uint32_t)uValue; 2352 pCtx->rdx = uValue >> 32; 2353 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); 1974 2354 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header); 1975 2355 return VINF_SUCCESS; 1976 2356 } 1977 # ifndef IN_RING32357 # ifndef IN_RING3 1978 2358 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. 
*/ 1979 2359 if (rcStrict == VERR_CPUM_RAISE_GP_0) 1980 2360 rcStrict = VINF_CPUM_R3_MSR_READ; 1981 2361 return rcStrict; 1982 # else2362 # else 1983 2363 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", 1984 2364 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1985 pMsg->MsrNum mber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));1986 # endif2365 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2366 # endif 1987 2367 } 1988 2368 } … … 1990 2370 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", 1991 2371 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1992 pMsg->Header. CsSegment.DescriptorPrivilegeLevel, pMsg->MsrNummber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));2372 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx )); 1993 2373 else 1994 2374 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", 1995 2375 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1996 pMsg->Header. CsSegment.DescriptorPrivilegeLevel, pMsg->MsrNummber));2376 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber)); 1997 2377 1998 2378 /* … … 2010 2390 return rcStrict; 2011 2391 } 2012 2013 2392 #elif defined(IN_RING3) 2393 /** 2394 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess). 2395 * 2396 * @returns Strict VBox status code. 2397 * @param pVM The cross context VM structure. 2398 * @param pVCpu The cross context per CPU structure. 2399 * @param pExit The VM exit information to handle. 2400 * @param pCtx The register context. 2401 * @sa nemHCWinHandleMessageMsr 2402 */ 2403 NEM_TMPL_STATIC VBOXSTRICTRC 2404 nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 2405 { 2406 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength)); 2407 2408 /* 2409 * Check CPL as that's common to both RDMSR and WRMSR. 2410 */ 2411 VBOXSTRICTRC rcStrict; 2412 if (pExit->VpContext.ExecutionState.Cpl == 0) 2413 { 2414 /* 2415 * Handle writes. 2416 */ 2417 if (pExit->MsrAccess.AccessInfo.IsWrite) 2418 { 2419 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, 2420 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx)); 2421 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", 2422 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2423 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) )); 2424 if (rcStrict == VINF_SUCCESS) 2425 { 2426 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2427 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext); 2428 return VINF_SUCCESS; 2429 } 2430 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", 2431 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2432 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) )); 2433 } 2434 /* 2435 * Handle reads. 
2436 */ 2437 else 2438 { 2439 uint64_t uValue = 0; 2440 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue); 2441 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", 2442 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2443 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2444 if (rcStrict == VINF_SUCCESS) 2445 { 2446 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2447 pCtx->rax = (uint32_t)uValue; 2448 pCtx->rdx = uValue >> 32; 2449 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); 2450 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext); 2451 return VINF_SUCCESS; 2452 } 2453 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", 2454 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2455 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2456 } 2457 } 2458 else if (pExit->MsrAccess.AccessInfo.IsWrite) 2459 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", 2460 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2461 pExit->VpContext.ExecutionState.Cpl, pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx )); 2462 else 2463 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", 2464 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), 2465 pExit->VpContext.ExecutionState.Cpl, pExit->MsrAccess.MsrNumber)); 2466 2467 /* 2468 * If we get down here, we're supposed to #GP(0). 2469 */ 2470 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "NMI"); 2471 if (rcStrict == VINF_SUCCESS) 2472 { 2473 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0); 2474 if (rcStrict == VINF_IEM_RAISED_XCPT) 2475 rcStrict = VINF_SUCCESS; 2476 else if (rcStrict != VINF_SUCCESS) 2477 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) )); 2478 } 2479 2480 RT_NOREF_PV(pVM); 2481 return rcStrict; 2482 } 2483 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 2484 2485 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2014 2486 /** 2015 2487 * Deals with unrecoverable exception (triple fault). … … 2023 2495 * @param pCtx The register context. 2024 2496 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 2497 * @sa nemR3WinHandleExitUnrecoverableException 2025 2498 */ 2026 2499 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu, … … 2028 2501 PCPUMCTX pCtx, PGVMCPU pGVCpu) 2029 2502 { 2030 /*2031 * Assert message sanity.2032 */2033 //Assert( pMsgHdr->InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE2034 // || pMsgHdr->InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here2035 // || pMsgHdr->InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);2036 2503 AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength)); 2037 2504 2038 # if 02505 # if 0 2039 2506 /* 2040 2507 * Just copy the state we've got and handle it in the loop for now. … … 2044 2511 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsgHdr->Rflags)); 2045 2512 return VINF_EM_TRIPLE_FAULT; 2046 # else2513 # else 2047 2514 /* 2048 2515 * Let IEM decide whether this is really it. 
2049 2516 */ 2050 /** @todo check if this happens becaused of incorrectly pending interrupts of smth. */2051 2517 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr); 2052 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit"); 2518 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, 2519 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit"); 2053 2520 if (rcStrict == VINF_SUCCESS) 2054 2521 { … … 2072 2539 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 2073 2540 return rcStrict; 2074 #endif 2075 } 2076 2077 2541 # endif 2542 } 2543 #elif defined(IN_RING3) 2544 /** 2545 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException). 2546 * 2547 * @returns Strict VBox status code. 2548 * @param pVM The cross context VM structure. 2549 * @param pVCpu The cross context per CPU structure. 2550 * @param pExit The VM exit information to handle. 2551 * @param pCtx The register context. 2552 * @sa nemHCWinHandleMessageUnrecoverableException 2553 */ 2554 NEM_TMPL_STATIC VBOXSTRICTRC 2555 nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 2556 { 2557 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength)); 2558 2559 # if 0 2560 /* 2561 * Just copy the state we've got and handle it in the loop for now. 2562 */ 2563 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2564 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector, 2565 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags)); 2566 RT_NOREF_PV(pVM); 2567 return VINF_EM_TRIPLE_FAULT; 2568 # else 2569 /* 2570 * Let IEM decide whether this is really it. 2571 */ 2572 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext); 2573 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, 2574 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit"); 2575 if (rcStrict == VINF_SUCCESS) 2576 { 2577 rcStrict = IEMExecOne(pVCpu); 2578 if (rcStrict == VINF_SUCCESS) 2579 { 2580 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector, 2581 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags)); 2582 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). 
*/ 2583 return VINF_SUCCESS; 2584 } 2585 if (rcStrict == VINF_EM_TRIPLE_FAULT) 2586 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector, 2587 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 2588 else 2589 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector, 2590 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 2591 } 2592 else 2593 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector, 2594 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 2595 RT_NOREF_PV(pVM); 2596 return rcStrict; 2597 # endif 2598 2599 } 2600 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 2601 2602 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2078 2603 /** 2079 2604 * Handles messages (VM exits). … … 2085 2610 * @param pCtx The register context. 2086 2611 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 2612 * @sa nemR3WinHandleExit 2087 2613 */ 2088 2614 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, … … 2140 2666 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg)); 2141 2667 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg), 2142 VERR_ INTERNAL_ERROR_2);2668 VERR_NEM_IPE_3); 2143 2669 2144 2670 case HvMessageTypeX64ExceptionIntercept: … … 2152 2678 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg)); 2153 2679 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType), 2154 VERR_ INTERNAL_ERROR_2);2680 VERR_NEM_IPE_3); 2155 2681 2156 2682 default: 2157 2683 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg)); 2158 2684 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType), 2159 VERR_ INTERNAL_ERROR_2);2685 VERR_NEM_IPE_3); 2160 2686 } 2161 2687 } … … 2163 2689 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n", 2164 2690 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage), 2165 VERR_INTERNAL_ERROR_3); 2166 } 2167 2168 2691 VERR_NEM_IPE_4); 2692 } 2693 #elif defined(IN_RING3) 2694 /** 2695 * Handles VM exits. 2696 * 2697 * @returns Strict VBox status code. 2698 * @param pVM The cross context VM structure. 2699 * @param pVCpu The cross context per CPU structure. 2700 * @param pExit The VM exit information to handle. 2701 * @param pCtx The register context. 
2702 * @sa nemHCWinHandleMessage 2703 */ 2704 NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx) 2705 { 2706 switch (pExit->ExitReason) 2707 { 2708 case WHvRunVpExitReasonMemoryAccess: 2709 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped); 2710 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit, pCtx); 2711 2712 case WHvRunVpExitReasonX64IoPortAccess: 2713 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo); 2714 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit, pCtx); 2715 2716 case WHvRunVpExitReasonX64Halt: 2717 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt); 2718 Log4(("HaltExit\n")); 2719 return VINF_EM_HALT; 2720 2721 case WHvRunVpExitReasonCanceled: 2722 return VINF_SUCCESS; 2723 2724 case WHvRunVpExitReasonX64InterruptWindow: 2725 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow); 2726 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit, pCtx); 2727 2728 case WHvRunVpExitReasonX64Cpuid: 2729 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId); 2730 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit, pCtx); 2731 2732 case WHvRunVpExitReasonX64MsrAccess: 2733 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr); 2734 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit, pCtx); 2735 2736 case WHvRunVpExitReasonUnrecoverableException: 2737 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable); 2738 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit, pCtx); 2739 2740 case WHvRunVpExitReasonException: /* needs configuring */ 2741 case WHvRunVpExitReasonUnsupportedFeature: 2742 case WHvRunVpExitReasonInvalidVpRegisterValue: 2743 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit)); 2744 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n", 2745 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3); 2746 2747 /* Undesired exits: */ 2748 case WHvRunVpExitReasonNone: 2749 default: 2750 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit)); 2751 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3); 2752 } 2753 } 2754 #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */ 2755 2756 #ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2169 2757 /** 2170 2758 * Worker for nemHCWinRunGC that stops the execution on the way out. … … 2219 2807 # ifdef IN_RING0 2220 2808 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt), 2221 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2809 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2222 2810 # else 2223 2811 DWORD dwErr = RTNtLastErrorValue(); 2224 2812 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr), 2225 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2813 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2226 2814 # endif 2227 2815 Log8(("nemHCWinStopCpu: Stopping CPU pending...\n")); … … 2241 2829 NULL, 0); 2242 2830 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 2243 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2831 RT_SUCCESS(rcStrict) ? 
VERR_NEM_IPE_5 : rcStrict); 2244 2832 # else 2245 2833 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 2246 2834 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 2247 2835 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 2248 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2836 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2249 2837 # endif 2250 2838 … … 2254 2842 ("Unexpected 1st message following ERROR_VID_STOP_PENDING: %#x LB %#x\n", 2255 2843 enmVidMsgType, pMappingHeader->cbMessage), 2256 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2844 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2257 2845 2258 2846 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu); … … 2273 2861 NULL, 0); 2274 2862 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("2st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 2275 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2863 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2276 2864 # else 2277 2865 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 2278 2866 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 2279 2867 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 2280 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2868 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2281 2869 # endif 2282 2870 … … 2286 2874 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n", 2287 2875 enmVidMsgType, pMappingHeader->cbMessage), 2288 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2876 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2289 2877 2290 2878 /* Mark this as handled. */ … … 2298 2886 NULL, 0); 2299 2887 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 2300 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2888 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2301 2889 # else 2302 2890 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/); 2303 2891 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 2304 RT_SUCCESS(rcStrict) ? VERR_ INTERNAL_ERROR_3: rcStrict);2892 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 2305 2893 # endif 2306 2894 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) )); 2307 2895 return rcStrict; 2308 2896 } 2309 2897 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API */ 2898 2899 #if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) 2310 2900 2311 2901 /** … … 2319 2909 * @param pfInterruptWindows Where to return interrupt window flags. 2320 2910 */ 2321 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows) 2911 NEM_TMPL_STATIC VBOXSTRICTRC 2912 nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows) 2322 2913 { 2323 2914 Assert(!TRPMHasTrap(pVCpu)); … … 2338 2929 * We don't currently implement SMIs. 
2339 2930 */ 2340 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), V INF_NEM_IPE_0);2931 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0); 2341 2932 2342 2933 /* … … 2414 3005 2415 3006 3007 /** 3008 * Inner NEM runloop for windows. 3009 * 3010 * @returns Strict VBox status code. 3011 * @param pVM The cross context VM structure. 3012 * @param pVCpu The cross context per CPU structure. 3013 * @param pGVM The ring-0 VM structure (NULL in ring-3). 3014 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3). 3015 */ 2416 3016 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu) 2417 3017 { … … 2441 3041 * everything every time. This will be optimized later. 2442 3042 */ 3043 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2443 3044 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping; 2444 3045 uint32_t cMillies = 5000; /** @todo lower this later... */ 3046 # endif 2445 3047 const bool fSingleStepping = DBGFIsStepping(pVCpu); 2446 3048 // const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK … … 2450 3052 for (unsigned iLoop = 0;; iLoop++) 2451 3053 { 3054 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 3055 /* 3056 * Hack alert! 3057 */ 3058 uint32_t const cMappedPages = pVM->nem.s.cMappedPages; 3059 if (cMappedPages >= 4000) 3060 { 3061 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL); 3062 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages)); 3063 } 3064 # endif 3065 2452 3066 /* 2453 3067 * Pending interrupts or such? Need to check and deal with this prior … … 2458 3072 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI)) 2459 3073 { 3074 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2460 3075 /* Make sure the CPU isn't executing. */ 2461 3076 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE) … … 2472 3087 } 2473 3088 } 3089 # endif 2474 3090 2475 3091 /* Try inject interrupt. */ … … 2495 3111 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows) 2496 3112 { 3113 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2497 3114 Assert(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */); 3115 # endif 2498 3116 # ifdef IN_RING0 2499 3117 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx); … … 2511 3129 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK)) 2512 3130 { 3131 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2513 3132 if (pVCpu->nem.s.fHandleAndGetFlags) 2514 3133 { /* Very likely that the CPU does NOT need starting (pending msg, running). 
*/ } 2515 3134 else 2516 3135 { 2517 # ifdef IN_RING03136 # ifdef IN_RING0 2518 3137 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu; 2519 3138 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, … … 2522 3141 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt)); 2523 3142 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt), 2524 VERR_ INTERNAL_ERROR_3);2525 # else3143 VERR_NEM_IPE_5); 3144 # else 2526 3145 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu), 2527 3146 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n", 2528 3147 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()), 2529 VERR_ INTERNAL_ERROR_3);2530 # endif3148 VERR_NEM_IPE_5); 3149 # endif 2531 3150 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; 2532 3151 } 3152 # endif /* NEM_WIN_USE_OUR_OWN_RUN_API */ 2533 3153 2534 3154 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM)) 2535 3155 { 2536 # ifdef IN_RING0 3156 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 3157 # ifdef IN_RING0 2537 3158 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu; 2538 3159 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags; … … 2544 3165 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 2545 3166 if (rcNt == STATUS_SUCCESS) 2546 # else3167 # else 2547 3168 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 2548 3169 pVCpu->nem.s.fHandleAndGetFlags, cMillies); 2549 3170 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 2550 3171 if (fRet) 3172 # endif 3173 # else 3174 WHV_RUN_VP_EXIT_CONTEXT ExitReason; 3175 RT_ZERO(ExitReason); 3176 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason)); 3177 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 3178 if (SUCCEEDED(hrc)) 2551 3179 # endif 2552 3180 { … … 2554 3182 * Deal with the message. 2555 3183 */ 3184 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2556 3185 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu); 2557 3186 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE; 3187 # else 3188 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason, pCtx); 3189 # endif 2558 3190 if (rcStrict == VINF_SUCCESS) 2559 3191 { /* hopefully likely */ } … … 2567 3199 else 2568 3200 { 3201 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 3202 2569 3203 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT, 2570 3204 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah, 2571 3205 the error code conversion is into WAIT_XXX, i.e. NT status codes. 
*/ 2572 # ifndef IN_RING03206 # ifndef IN_RING0 2573 3207 DWORD rcNt = GetLastError(); 2574 # endif3208 # endif 2575 3209 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt)); 2576 3210 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT 2577 3211 || rcNt == STATUS_ALERTED /* just in case */ 2578 3212 || rcNt == STATUS_USER_APC /* ditto */ 2579 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n", pVCpu->idCpu, rcNt, rcNt), 2580 VERR_INTERNAL_ERROR_3); 3213 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n", 3214 pVCpu->idCpu, rcNt, rcNt), 3215 VERR_NEM_IPE_0); 2581 3216 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; 2582 3217 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout); 3218 # else 3219 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n", 3220 pVCpu->idCpu, hrc, GetLastError()), 3221 VERR_NEM_IPE_0); 3222 3223 # endif 2583 3224 } 2584 3225 … … 2615 3256 * state and return to EM. 2616 3257 */ 3258 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API 2617 3259 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE) 2618 3260 { … … 2620 3262 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu); 2621 3263 } 3264 # endif 2622 3265 2623 3266 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM)) … … 2657 3300 } 2658 3301 2659 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API */ 2660 3302 #endif /* defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) */ 2661 3303 2662 3304 /** … … 2690 3332 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n", 2691 3333 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 2692 return VERR_ INTERNAL_ERROR_2;3334 return VERR_NEM_IPE_2; 2693 3335 #endif 2694 3336 } … … 3011 3653 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n", 3012 3654 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 3013 return VERR_ INTERNAL_ERROR_3;3655 return VERR_NEM_IPE_6; 3014 3656 #endif 3015 3657 } -
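The bulk of the template changes above pair each VID-message handler (compiled when NEM_WIN_USE_OUR_OWN_RUN_API is defined) with a WinHv exit handler (compiled for plain ring-3 builds), selected by #ifdef/#elif so that only one variant ever exists in a given object file. A minimal standalone sketch of that compile-time pairing pattern, using hypothetical names (MYVM, MyMsg, MyExit, myHandleHalt) rather than the real VirtualBox types:

    /* Sketch only: two variants of one handler, chosen at compile time like the
       nemHCWin.../nemR3Win... pairs above.  All types and names are made up. */
    #include <stdio.h>

    /*#define MY_USE_OWN_RUN_API*/     /* define this to get the VID-style variant */
    #define MY_IN_RING3                /* otherwise build the WinHv-style variant  */

    typedef struct MYVM   { int      idCpu;        } MYVM;
    typedef struct MyMsg  { unsigned uMessageType; } MyMsg;
    typedef struct MyExit { unsigned uExitReason;  } MyExit;

    #ifdef MY_USE_OWN_RUN_API
    /* VID version: consumes a hypervisor message. */
    static int myHandleHalt(MYVM *pVM, MyMsg const *pMsg)
    {
        printf("halt via VID message %u on CPU %d\n", pMsg->uMessageType, pVM->idCpu);
        return 0;
    }
    #elif defined(MY_IN_RING3)
    /* WinHv version: consumes a WHvRunVirtualProcessor exit record. */
    static int myHandleHalt(MYVM *pVM, MyExit const *pExit)
    {
        printf("halt via WinHv exit %u on CPU %d\n", pExit->uExitReason, pVM->idCpu);
        return 0;
    }
    #endif

    int main(void)
    {
        MYVM Vm = { 0 };
    #ifdef MY_USE_OWN_RUN_API
        MyMsg Msg = { 7 };
        return myHandleHalt(&Vm, &Msg);
    #else
        MyExit Exit = { 8 };
        return myHandleHalt(&Vm, &Exit);
    #endif
    }

In the real template the two variants keep distinct names (nemHCWinHandleMessage* vs. nemR3WinHandleExit*) and cross-reference each other via @sa tags, but the selection mechanism is the same #ifdef NEM_WIN_USE_OUR_OWN_RUN_API / #elif defined(IN_RING3) split shown here.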
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r72403 → r72412

          LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
                  uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
 -        rc = VINF_NEM_IPE_0;
 +        rc = VERR_NEM_IPE_0;
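The one-line change above is more than cosmetic: by IPRT convention, VINF_* status codes are non-negative and therefore count as success under RT_SUCCESS(), so returning an informational code from this failure path would let callers treat the bad HvCallGetMemoryBalance result as if it had succeeded. A small self-contained sketch of that behaviour, with a stand-in success macro and made-up numeric values (the real VERR_NEM_IPE_* constants live in VirtualBox's status-code headers):

    /* Sketch only: why an error path must return a negative (VERR) code.
       MY_RT_SUCCESS mimics IPRT's RT_SUCCESS(); the numeric values are invented. */
    #include <stdio.h>

    #define MY_RT_SUCCESS(rc)   ((rc) >= 0)    /* informational codes pass this check */
    #define MY_VINF_NEM_IPE_0   1              /* informational: positive             */
    #define MY_VERR_NEM_IPE_0   (-2500)        /* error: negative                     */

    int main(void)
    {
        int rcBefore = MY_VINF_NEM_IPE_0;   /* what the old code handed back     */
        int rcAfter  = MY_VERR_NEM_IPE_0;   /* what the changeset hands back now */

        printf("old code seen as success: %s\n", MY_RT_SUCCESS(rcBefore) ? "yes (bug)" : "no");
        printf("new code seen as success: %s\n", MY_RT_SUCCESS(rcAfter)  ? "yes" : "no (correct)");
        return 0;
    }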
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r72392 → r72412

      }

 -    #ifndef NEM_WIN_USE_OUR_OWN_RUN_API
 +    #if 0 //ndef NEM_WIN_USE_OUR_OWN_RUN_API - migrating to NEMAllNativeTemplate-win.cpp.h */

      # ifdef LOG_ENABLED
      …
      }
      # endif /* LOG_ENABLED */
 -
 -
 -    /**
 -     * Advances the guest RIP and clear EFLAGS.RF.
 -     *
 -     * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
 -     *
 -     * @param pVCpu The cross context virtual CPU structure.
 -     * @param pCtx The CPU context to update.
 -     * @param pExitCtx The exit context.
 -     */
 -    DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
 -    {
 -        /* Advance the RIP. */
 -        Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
 -        pCtx->rip += pExitCtx->InstructionLength;
 -        pCtx->rflags.Bits.u1RF = 0;
 -
 -        /* Update interrupt inhibition. */
 -        if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 -        { /* likely */ }
 -        else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
 -            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
 -    }
 -

      …
      }

 -    #endif /* !NEM_WIN_USE_OUR_OWN_RUN_API */
 +    #endif /* !NEM_WIN_USE_OUR_OWN_RUN_API - migrating to NEMAllNativeTemplate-win.cpp.h*/


      VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
      {
 -    #ifndef NEM_WIN_USE_OUR_OWN_RUN_API
 -        return nemR3WinWHvRunGC(pVM, pVCpu);
 -    #elif 0
 +    #if !defined(NEM_WIN_USE_OUR_OWN_RUN_API) || 0
          return nemHCWinRunGC(pVM, pVCpu, NULL /*pGVM*/, NULL /*pGVCpu*/);
      #else
trunk/src/VBox/VMM/include/NEMInternal.h
r72392 → r72412

      # if defined(NEM_WIN_USE_OUR_OWN_RUN_API) && !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
      #  error "NEM_WIN_USE_OUR_OWN_RUN_API requires NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS"
 +    # endif
 +    # if defined(NEM_WIN_USE_OUR_OWN_RUN_API) && !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)
 +    #  error "NEM_WIN_USE_OUR_OWN_RUN_API requires NEM_WIN_USE_HYPERCALLS_FOR_PAGES"
      # endif
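The added lines extend the existing configuration sanity check so that an unsupported combination of build options is rejected by the preprocessor instead of surfacing as odd behaviour at run time. A generic sketch of the pattern with placeholder feature macros (MY_FEATURE_A, MY_FEATURE_B), not the real NEM_WIN_* options:

    /* Sketch only: fail the build, not the VM, when an optional fast path is
       enabled without the facility it depends on.  Macro names are placeholders. */
    #define MY_FEATURE_A    /* the optional fast path               */
    #define MY_FEATURE_B    /* the facility the fast path relies on */

    #if defined(MY_FEATURE_A) && !defined(MY_FEATURE_B)
    # error "MY_FEATURE_A requires MY_FEATURE_B"
    #endif

    int main(void) { return 0; }   /* compiles only when the combination is legal */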