Changeset 93352 in vbox for trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
- Timestamp:
- Jan 20, 2022 12:41:56 AM (3 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r93351 r93352 15 15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. 16 16 */ 17 18 #ifndef IN_RING3 19 # error "This is ring-3 only now" 20 #endif 17 21 18 22 … … 35 39 #if 0 36 40 # define NEMWIN_NEED_GET_REGISTER 37 # if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 38 # define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \ 39 do { \ 40 HV_REGISTER_VALUE TmpVal; \ 41 nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \ 42 AssertMsg(a_Expr, a_Msg); \ 43 } while (0) 44 # else 45 # define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \ 41 # define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \ 46 42 do { \ 47 43 WHV_REGISTER_VALUE TmpVal; \ … … 49 45 AssertMsg(a_Expr, a_Msg); \ 50 46 } while (0) 51 # endif52 47 #else 53 48 # define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0) … … 74 69 75 70 76 #ifdef IN_RING3 77 # ifndef NTDDI_WIN10_19H1 78 # define NTDDI_WIN10_19H1 0x0a000007 79 # endif 71 #ifndef NTDDI_WIN10_19H1 72 # define NTDDI_WIN10_19H1 0x0a000007 73 #endif 80 74 81 75 /** WHvRegisterPendingEvent0 was renamed to WHvRegisterPendingEvent between 82 76 * SDK 17134 and 18362. 
*/ 83 # if WDK_NTDDI_VERSION < NTDDI_WIN10_19H1 84 # define WHvRegisterPendingEvent WHvRegisterPendingEvent0 85 # endif 77 #if WDK_NTDDI_VERSION < NTDDI_WIN10_19H1 78 # define WHvRegisterPendingEvent WHvRegisterPendingEvent0 86 79 #endif 87 80 … … 105 98 106 99 107 #ifndef IN_RING0108 109 100 NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu) 110 101 { … … 121 112 uintptr_t iReg = 0; 122 113 123 # 114 #define ADD_REG64(a_enmName, a_uValue) do { \ 124 115 aenmNames[iReg] = (a_enmName); \ 125 116 aValues[iReg].Reg128.High64 = 0; \ … … 127 118 iReg++; \ 128 119 } while (0) 129 # 120 #define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \ 130 121 aenmNames[iReg] = (a_enmName); \ 131 122 aValues[iReg].Reg128.Low64 = (a_uValueLo); \ … … 173 164 174 165 /* Segments */ 175 # 166 #define ADD_SEG(a_enmName, a_SReg) \ 176 167 do { \ 177 168 aenmNames[iReg] = a_enmName; \ … … 406 397 /// @todo WHvRegisterPendingEvent 407 398 399 #undef ADD_REG64 400 #undef ADD_REG128 401 #undef ADD_SEG 402 408 403 /* 409 404 * Set the registers. 
… … 411 406 Assert(iReg < RT_ELEMENTS(aValues)); 412 407 Assert(iReg < RT_ELEMENTS(aenmNames)); 413 # 408 #ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS 414 409 Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n", 415 410 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues)); 416 # 411 #endif 417 412 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues); 418 413 if (SUCCEEDED(hrc)) … … 425 420 hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 426 421 return VERR_INTERNAL_ERROR; 427 428 # undef ADD_REG64429 # undef ADD_REG128430 # undef ADD_SEG431 422 } 432 423 … … 649 640 Assert(RT_ELEMENTS(aValues) >= cRegs); 650 641 Assert(RT_ELEMENTS(aenmNames) >= cRegs); 651 # 642 #ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS 652 643 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n", 653 644 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues)); 654 # 645 #endif 655 646 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues); 656 647 AssertLogRelMsgReturn(SUCCEEDED(hrc), … … 660 651 661 652 iReg = 0; 662 # 653 #define GET_REG64(a_DstVar, a_enmName) do { \ 663 654 Assert(aenmNames[iReg] == (a_enmName)); \ 664 655 (a_DstVar) = aValues[iReg].Reg64; \ 665 656 iReg++; \ 666 657 } while (0) 667 # 658 #define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \ 668 659 Assert(aenmNames[iReg] == (a_enmName)); \ 669 660 if ((a_DstVar) != aValues[iReg].Reg64) \ … … 672 663 iReg++; \ 673 664 } while (0) 674 # 665 #define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \ 675 666 Assert(aenmNames[iReg] == a_enmName); \ 676 667 (a_DstVarLo) = aValues[iReg].Reg128.Low64; \ … … 678 669 iReg++; \ 679 670 } while (0) 680 # 671 #define GET_SEG(a_SReg, a_enmName) do { \ 681 672 Assert(aenmNames[iReg] == (a_enmName)); \ 682 673 NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \ … … 1056 1047 } 1057 1048 1058 #endif /* !IN_RING0 
*/1059 1060 1049 1061 1050 /** … … 1069 1058 { 1070 1059 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand); 1071 1072 #ifdef IN_RING01073 RT_NOREF(pVCpu, fWhat);1074 return VERR_NOT_IMPLEMENTED;1075 #else1076 1060 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat); 1077 #endif1078 1061 } 1079 1062 … … 1091 1074 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick); 1092 1075 1093 #ifdef IN_RING31094 1076 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1095 1077 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT); … … 1109 1091 *pcTicks = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[0].Reg64 : CPUMGetGuestTscAux(pVCpu); 1110 1092 return VINF_SUCCESS; 1111 #else /* IN_RING0 */1112 RT_NOREF(pVCpu, pcTicks, puAux);1113 return VERR_NOT_IMPLEMENTED;1114 #endif /* IN_RING0 */1115 1093 } 1116 1094 … … 1128 1106 VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue) 1129 1107 { 1130 #ifdef IN_RING01131 RT_NOREF(pVM, pVCpu, uPausedTscValue);1132 return VERR_NOT_IMPLEMENTED;1133 #else /* IN_RING3 */1134 1108 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT); 1135 1109 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9); … … 1167 1141 1168 1142 return VINF_SUCCESS; 1169 #endif /* IN_RING3 */1170 1143 } 1171 1144 … … 1177 1150 DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPUCC pVCpu) 1178 1151 { 1179 # ifdef IN_RING01180 NOREF(pVCpu);1181 return VidProcessorStatusUndefined;1182 # else1183 1152 RTERRVARS Saved; 1184 1153 RTErrVarsSave(&Saved); … … 1194 1163 RTErrVarsRestore(&Saved); 1195 1164 return enmCpuStatus; 1196 # endif1197 1165 } 1198 1166 #endif /* LOG_ENABLED */ 1199 1200 1201 #if defined(NEM_WIN_USE_OUR_OWN_RUN_API)1202 # ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KETHREADs and KeAlertThread. 
*/1203 /**1204 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.1205 *1206 * This is an experiment only.1207 *1208 * @returns VBox status code.1209 * @param pVM The cross context VM structure.1210 * @param pVCpu The cross context virtual CPU structure of the1211 * calling EMT.1212 */1213 NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVMCC pVM, PVMCPUCC pVCpu)1214 {1215 /*1216 * Work the state.1217 *1218 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.1219 * So, we just need to modify the state and kick the EMT if it's waiting on1220 * messages. For the latter we use QueueUserAPC / KeAlterThread.1221 */1222 for (;;)1223 {1224 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);1225 switch (enmState)1226 {1227 case VMCPUSTATE_STARTED_EXEC_NEM:1228 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))1229 {1230 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");1231 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));1232 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);1233 return VINF_SUCCESS;1234 }1235 break;1236 1237 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:1238 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))1239 {1240 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");1241 # ifdef IN_RING01242 NTSTATUS rcNt = KeAlertThread(??);1243 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);1244 # else1245 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);1246 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);1247 # endif1248 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));1249 Assert(rcNt == STATUS_SUCCESS);1250 if (NT_SUCCESS(rcNt))1251 {1252 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);1253 return VINF_SUCCESS;1254 }1255 AssertLogRelMsgFailedReturn(("NtAlertThread failed: 
%#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));1256 }1257 break;1258 1259 default:1260 return VINF_SUCCESS;1261 }1262 1263 ASMNopPause();1264 RT_NOREF(pVM);1265 }1266 }1267 # endif /* IN_RING3 */1268 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API */1269 1167 1270 1168 … … 1320 1218 1321 1219 1322 /** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */1323 #define SWITCH_IT(a_szPrefix) \1324 do \1325 switch (u)\1326 { \1327 case 0x00: return a_szPrefix ""; \1328 case 0x01: return a_szPrefix ",Pnd"; \1329 case 0x02: return a_szPrefix ",Dbg"; \1330 case 0x03: return a_szPrefix ",Pnd,Dbg"; \1331 case 0x04: return a_szPrefix ",Shw"; \1332 case 0x05: return a_szPrefix ",Pnd,Shw"; \1333 case 0x06: return a_szPrefix ",Shw,Dbg"; \1334 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \1335 default: AssertFailedReturn("WTF?"); \1336 } \1337 while (0)1338 1339 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API1340 /**1341 * Translates the execution stat bitfield into a short log string, VID version.1342 *1343 * @returns Read-only log string.1344 * @param pMsgHdr The header which state to summarize.1345 */1346 static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)1347 {1348 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending1349 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)1350 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);1351 if (pMsgHdr->ExecutionState.EferLma)1352 SWITCH_IT("LM");1353 else if (pMsgHdr->ExecutionState.Cr0Pe)1354 SWITCH_IT("PM");1355 else1356 SWITCH_IT("RM");1357 }1358 #elif defined(IN_RING3)1359 1220 /** 1360 1221 * Translates the execution stat bitfield into a short log string, WinHv version. 
… … 1368 1229 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1) 1369 1230 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2); 1231 #define SWITCH_IT(a_szPrefix) \ 1232 do \ 1233 switch (u)\ 1234 { \ 1235 case 0x00: return a_szPrefix ""; \ 1236 case 0x01: return a_szPrefix ",Pnd"; \ 1237 case 0x02: return a_szPrefix ",Dbg"; \ 1238 case 0x03: return a_szPrefix ",Pnd,Dbg"; \ 1239 case 0x04: return a_szPrefix ",Shw"; \ 1240 case 0x05: return a_szPrefix ",Pnd,Shw"; \ 1241 case 0x06: return a_szPrefix ",Shw,Dbg"; \ 1242 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \ 1243 default: AssertFailedReturn("WTF?"); \ 1244 } \ 1245 while (0) 1370 1246 if (pExitCtx->ExecutionState.EferLma) 1371 1247 SWITCH_IT("LM"); … … 1374 1250 else 1375 1251 SWITCH_IT("RM"); 1376 }1377 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */1378 1252 #undef SWITCH_IT 1379 1380 1381 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 1382 /** 1383 * Advances the guest RIP and clear EFLAGS.RF, VID version. 1384 * 1385 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS. 1386 * 1387 * @param pVCpu The cross context virtual CPU structure. 1388 * @param pExitCtx The exit context. 1389 * @param cbMinInstr The minimum instruction length, or 1 if not unknown. 1390 */ 1391 DECLINLINE(void) 1392 nemHCWinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr) 1393 { 1394 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))); 1395 1396 /* Advance the RIP. */ 1397 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr); 1398 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength; 1399 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0; 1400 1401 /* Update interrupt inhibition. 
*/ 1402 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1403 { /* likely */ } 1404 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu)) 1405 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1406 } 1407 #elif defined(IN_RING3) 1253 } 1254 1255 1408 1256 /** 1409 1257 * Advances the guest RIP and clear EFLAGS.RF, WinHv version. … … 1430 1278 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1431 1279 } 1432 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 1433 1434 #if defined(IN_RING3) 1280 1435 1281 1436 1282 NEM_TMPL_STATIC DECLCALLBACK(int) … … 1491 1337 uint8_t u2State = pInfo->u2NemState; 1492 1338 RTGCPHYS GCPhysSrc; 1493 # 1339 #ifdef NEM_WIN_WITH_A20 1494 1340 if ( pVM->nem.s.fA20Enabled 1495 1341 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)) 1496 # 1342 #endif 1497 1343 GCPhysSrc = GCPhys; 1498 # 1344 #ifdef NEM_WIN_WITH_A20 1499 1345 else 1500 1346 { … … 1507 1353 pInfo->u2NemState = u2State; 1508 1354 } 1509 # 1355 #endif 1510 1356 1511 1357 /* … … 1603 1449 } 1604 1450 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed); 1605 # 1451 #if defined(VBOX_WITH_PGM_NEM_MODE) 1606 1452 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n", 1607 1453 GCPhys, g_apszPageStates[u2State], hrc, hrc)); 1608 1454 return VERR_NEM_UNMAP_PAGES_FAILED; 1609 # 1455 #else 1610 1456 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n", 1611 1457 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(), … … 1620 1466 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED; 1621 1467 return VINF_SUCCESS; 1622 # endif 1623 } 1624 1625 #endif /* defined(IN_RING3) */ 1626 1627 1628 #if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) 1468 #endif 1469 } 1470 1471 1629 1472 /** 1630 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB 1631 * into informational status codes 
and logs+asserts statuses. 1632 * 1633 * @returns VBox strict status code. 1634 * @param pGVM The global (ring-0) VM structure. 1635 * @param pGVCpu The global (ring-0) per CPU structure. 1636 * @param fWhat What to import. 1637 * @param pszCaller Who is doing the importing. 1638 */ 1639 DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller) 1640 { 1641 int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/); 1642 if (RT_SUCCESS(rc)) 1643 { 1644 Assert(rc == VINF_SUCCESS); 1645 return VINF_SUCCESS; 1646 } 1647 1648 if (rc == VERR_NEM_FLUSH_TLB) 1649 { 1650 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc)); 1651 return -rc; 1652 } 1653 RT_NOREF(pszCaller); 1654 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc); 1655 } 1656 #endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/ 1657 1658 #if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) 1659 /** 1660 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV. 1473 * Wrapper around nemHCWinCopyStateFromHyperV. 1661 1474 * 1662 1475 * Unlike the wrapped APIs, this checks whether it's necessary. … … 1671 1484 if (pVCpu->cpum.GstCtx.fExtrn & fWhat) 1672 1485 { 1673 # ifdef IN_RING01674 return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);1675 # else1676 1486 RT_NOREF(pszCaller); 1677 1487 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat); 1678 1488 AssertRCReturn(rc, rc); 1679 # endif1680 1489 } 1681 1490 return VINF_SUCCESS; 1682 1491 } 1683 #endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */ 1684 1685 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 1686 /** 1687 * Copies register state from the X64 intercept message header. 1688 * 1689 * ASSUMES no state copied yet. 1690 * 1691 * @param pVCpu The cross context per CPU structure. 
1692 * @param pHdr The X64 intercept message header. 1693 * @sa nemR3WinCopyStateFromX64Header 1694 */ 1695 DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr) 1696 { 1697 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT)) 1698 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT)); 1699 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment); 1700 pVCpu->cpum.GstCtx.rip = pHdr->Rip; 1701 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags; 1702 1703 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow; 1704 if (!pHdr->ExecutionState.InterruptShadow) 1705 { 1706 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1707 { /* likely */ } 1708 else 1709 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1710 } 1711 else 1712 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip); 1713 1714 APICSetTpr(pVCpu, pHdr->Cr8 << 4); 1715 1716 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR); 1717 } 1718 #elif defined(IN_RING3) 1492 1493 1719 1494 /** 1720 1495 * Copies register state from the (common) exit context. … … 1749 1524 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR); 1750 1525 } 1751 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 1752 1753 1754 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 1755 /** 1756 * Deals with memory intercept message. 1757 * 1758 * @returns Strict VBox status code. 1759 * @param pVM The cross context VM structure. 1760 * @param pVCpu The cross context per CPU structure. 1761 * @param pMsg The message. 
1762 * @sa nemR3WinHandleExitMemory 1763 */ 1764 NEM_TMPL_STATIC VBOXSTRICTRC 1765 nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg) 1766 { 1767 uint64_t const uHostTsc = ASMReadTSC(); 1768 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ 1769 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 1770 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE); 1771 1772 /* 1773 * Whatever we do, we must clear pending event injection upon resume. 1774 */ 1775 if (pMsg->Header.ExecutionState.InterruptionPending) 1776 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; 1777 1778 # if 0 /* Experiment: 20K -> 34K exit/s. */ 1779 if ( pMsg->Header.ExecutionState.EferLma 1780 && pMsg->Header.CsSegment.Long 1781 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE) 1782 { 1783 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a) 1784 && pMsg->InstructionBytes[0] == 0x89 1785 && pMsg->InstructionBytes[1] == 0x03) 1786 { 1787 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2; 1788 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP; 1789 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength)); 1790 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg)); 1791 return VINF_SUCCESS; 1792 } 1793 } 1794 # endif 1795 1796 /* 1797 * Ask PGM for information about the given GCPhys. We need to check if we're 1798 * out of sync first. 1799 */ 1800 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false }; 1801 PGMPHYSNEMPAGEINFO Info; 1802 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info, 1803 nemHCWinHandleMemoryAccessPageCheckerCallback, &State); 1804 if (RT_SUCCESS(rc)) 1805 { 1806 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 1807 ? 
NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ)) 1808 { 1809 if (State.fCanResume) 1810 { 1811 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n", 1812 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1813 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 1814 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 1815 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType])); 1816 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS), 1817 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc); 1818 return VINF_SUCCESS; 1819 } 1820 } 1821 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n", 1822 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1823 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 1824 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 1825 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType])); 1826 } 1827 else 1828 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n", 1829 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1830 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "", 1831 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType])); 1832 1833 /* 1834 * Emulate the memory access, either access handler or special memory. 1835 */ 1836 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, 1837 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 1838 ? 
EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE) 1839 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ), 1840 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc); 1841 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 1842 VBOXSTRICTRC rcStrict; 1843 # ifdef IN_RING0 1844 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, 1845 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit"); 1846 if (rcStrict != VINF_SUCCESS) 1847 return rcStrict; 1848 # else 1849 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES); 1850 AssertRCReturn(rc, rc); 1851 # endif 1852 1853 if (pMsg->Reserved1) 1854 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1)); 1855 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1) 1856 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1)); 1857 1858 if (!pExitRec) 1859 { 1860 //if (pMsg->InstructionByteCount > 0) 1861 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes)); 1862 if (pMsg->InstructionByteCount > 0) 1863 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip, 1864 pMsg->InstructionBytes, pMsg->InstructionByteCount); 1865 else 1866 rcStrict = IEMExecOne(pVCpu); 1867 /** @todo do we need to do anything wrt debugging here? */ 1868 } 1869 else 1870 { 1871 /* Frequent access or probing. 
*/ 1872 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 1873 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n", 1874 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 1875 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1876 } 1877 return rcStrict; 1878 } 1879 #elif defined(IN_RING3) 1526 1527 1880 1528 /** 1881 1529 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess). … … 1972 1620 return rcStrict; 1973 1621 } 1974 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 1975 1976 1977 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 1978 /** 1979 * Deals with I/O port intercept message. 1980 * 1981 * @returns Strict VBox status code. 1982 * @param pVM The cross context VM structure. 1983 * @param pVCpu The cross context per CPU structure. 1984 * @param pMsg The message. 1985 */ 1986 NEM_TMPL_STATIC VBOXSTRICTRC 1987 nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg) 1988 { 1989 /* 1990 * Assert message sanity. 
1991 */ 1992 Assert( pMsg->AccessInfo.AccessSize == 1 1993 || pMsg->AccessInfo.AccessSize == 2 1994 || pMsg->AccessInfo.AccessSize == 4); 1995 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ 1996 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE); 1997 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment); 1998 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip); 1999 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags); 2000 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8); 2001 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax); 2002 if (pMsg->AccessInfo.StringOp) 2003 { 2004 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment); 2005 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterEs, pMsg->EsSegment); 2006 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx); 2007 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi); 2008 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi); 2009 } 2010 2011 /* 2012 * Whatever we do, we must clear pending event injection upon resume. 2013 */ 2014 if (pMsg->Header.ExecutionState.InterruptionPending) 2015 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; 2016 2017 /* 2018 * Add history first to avoid two paths doing EMHistoryExec calls. 2019 */ 2020 VBOXSTRICTRC rcStrict; 2021 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, 2022 !pMsg->AccessInfo.StringOp 2023 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 2024 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE) 2025 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ)) 2026 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 2027 ? 
EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE) 2028 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)), 2029 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); 2030 if (!pExitRec) 2031 { 2032 if (!pMsg->AccessInfo.StringOp) 2033 { 2034 /* 2035 * Simple port I/O. 2036 */ 2037 static uint32_t const s_fAndMask[8] = 2038 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX }; 2039 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize]; 2040 2041 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2042 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE) 2043 { 2044 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize); 2045 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n", 2046 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2047 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) )); 2048 if (IOM_SUCCESS(rcStrict)) 2049 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1); 2050 # ifdef IN_RING0 2051 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE 2052 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF 2053 /** @todo check for debug breakpoints */ ) 2054 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength, 2055 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask); 2056 # endif 2057 else 2058 { 2059 pVCpu->cpum.GstCtx.rax = pMsg->Rax; 2060 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX; 2061 } 2062 } 2063 else 2064 { 2065 uint32_t uValue = 0; 2066 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize); 2067 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n", 2068 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, 
nemHCWinExecStateToLogStr(&pMsg->Header), 2069 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2070 if (IOM_SUCCESS(rcStrict)) 2071 { 2072 if (pMsg->AccessInfo.AccessSize != 4) 2073 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask); 2074 else 2075 pVCpu->cpum.GstCtx.rax = uValue; 2076 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX; 2077 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax)); 2078 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1); 2079 } 2080 else 2081 { 2082 pVCpu->cpum.GstCtx.rax = pMsg->Rax; 2083 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX; 2084 # ifdef IN_RING0 2085 if ( rcStrict == VINF_IOM_R3_IOPORT_READ 2086 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF 2087 /** @todo check for debug breakpoints */ ) 2088 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength, 2089 pMsg->AccessInfo.AccessSize); 2090 # endif 2091 } 2092 } 2093 } 2094 else 2095 { 2096 /* 2097 * String port I/O. 2098 */ 2099 /** @todo Someone at Microsoft please explain how we can get the address mode 2100 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for 2101 * getting the default mode, it can always be overridden by a prefix. This 2102 * forces us to interpret the instruction from opcodes, which is suboptimal. 2103 * Both AMD-V and VT-x includes the address size in the exit info, at least on 2104 * CPUs that are reasonably new. 2105 * 2106 * Of course, it's possible this is an undocumented and we just need to do some 2107 * experiments to figure out how it's communicated. Alternatively, we can scan 2108 * the opcode bytes for possible evil prefixes. 
2109 */ 2110 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2111 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI 2112 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES); 2113 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment); 2114 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment); 2115 pVCpu->cpum.GstCtx.rax = pMsg->Rax; 2116 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx; 2117 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi; 2118 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi; 2119 # ifdef IN_RING0 2120 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit"); 2121 if (rcStrict != VINF_SUCCESS) 2122 return rcStrict; 2123 # else 2124 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 2125 AssertRCReturn(rc, rc); 2126 # endif 2127 2128 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n", 2129 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2130 pMsg->AccessInfo.RepPrefix ? "REP " : "", 2131 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS", 2132 pMsg->PortNumber, pMsg->AccessInfo.AccessSize )); 2133 rcStrict = IEMExecOne(pVCpu); 2134 } 2135 if (IOM_SUCCESS(rcStrict)) 2136 { 2137 /* 2138 * Do debug checks. 2139 */ 2140 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive this only reflect DR7? */ 2141 || (pMsg->Header.Rflags & X86_EFL_TF) 2142 || DBGFBpIsHwIoArmed(pVM) ) 2143 { 2144 /** @todo Debugging. */ 2145 } 2146 } 2147 return rcStrict; 2148 } 2149 2150 /* 2151 * Frequent exit or something needing probing. 2152 * Get state and call EMHistoryExec. 
2153 */ 2154 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2155 if (!pMsg->AccessInfo.StringOp) 2156 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX; 2157 else 2158 { 2159 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI 2160 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES); 2161 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment); 2162 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment); 2163 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx; 2164 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi; 2165 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi; 2166 } 2167 pVCpu->cpum.GstCtx.rax = pMsg->Rax; 2168 2169 # ifdef IN_RING0 2170 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit"); 2171 if (rcStrict != VINF_SUCCESS) 2172 return rcStrict; 2173 # else 2174 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 2175 AssertRCReturn(rc, rc); 2176 # endif 2177 2178 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n", 2179 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2180 pMsg->AccessInfo.RepPrefix ? "REP " : "", 2181 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN", 2182 pMsg->AccessInfo.StringOp ? "S" : "", 2183 pMsg->PortNumber, pMsg->AccessInfo.AccessSize)); 2184 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 2185 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n", 2186 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2187 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 2188 return rcStrict; 2189 } 2190 #elif defined(IN_RING3) 1622 1623 2191 1624 /** 2192 1625 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess). 
… … 2351 1784 return rcStrict; 2352 1785 } 2353 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 2354 2355 2356 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 2357 /** 2358 * Deals with interrupt window message. 2359 * 2360 * @returns Strict VBox status code. 2361 * @param pVM The cross context VM structure. 2362 * @param pVCpu The cross context per CPU structure. 2363 * @param pMsg The message. 2364 * @sa nemR3WinHandleExitInterruptWindow 2365 */ 2366 NEM_TMPL_STATIC VBOXSTRICTRC 2367 nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg) 2368 { 2369 /* 2370 * Assert message sanity. 2371 */ 2372 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE 2373 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here 2374 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE); 2375 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type)); 2376 2377 /* 2378 * Just copy the state we've got and handle it in the loop for now. 2379 */ 2380 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW), 2381 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); 2382 2383 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2384 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n", 2385 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2386 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow)); 2387 2388 /** @todo call nemHCWinHandleInterruptFF */ 2389 RT_NOREF(pVM); 2390 return VINF_SUCCESS; 2391 } 2392 #elif defined(IN_RING3) 1786 1787 2393 1788 /** 2394 1789 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow). 
… … 2425 1820 return VINF_SUCCESS; 2426 1821 } 2427 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 2428 2429 2430 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 2431 /** 2432 * Deals with CPUID intercept message. 2433 * 2434 * @returns Strict VBox status code. 2435 * @param pVM The cross context VM structure. 2436 * @param pVCpu The cross context per CPU structure. 2437 * @param pMsg The message. 2438 * @sa nemR3WinHandleExitCpuId 2439 */ 2440 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg) 2441 { 2442 /* Check message register value sanity. */ 2443 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment); 2444 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip); 2445 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags); 2446 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8); 2447 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax); 2448 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx); 2449 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx); 2450 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx); 2451 2452 /* Do exit history. */ 2453 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID), 2454 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); 2455 if (!pExitRec) 2456 { 2457 /* 2458 * Soak up state and execute the instruction. 2459 * 2460 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId 2461 * function and make everyone use it. 2462 */ 2463 /** @todo Combine implementations into IEMExecDecodedCpuId as this will 2464 * only get weirder with nested VT-x and AMD-V support. */ 2465 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2466 2467 /* Copy in the low register values (top is always cleared). 
*/ 2468 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax; 2469 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx; 2470 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx; 2471 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx; 2472 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX); 2473 2474 /* Get the correct values. */ 2475 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, 2476 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx); 2477 2478 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n", 2479 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2480 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx, 2481 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx, 2482 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx)); 2483 2484 /* Move RIP and we're done. */ 2485 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2); 2486 2487 return VINF_SUCCESS; 2488 } 2489 2490 /* 2491 * Frequent exit or something needing probing. 2492 * Get state and call EMHistoryExec. 
2493 */ 2494 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2495 pVCpu->cpum.GstCtx.rax = pMsg->Rax; 2496 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx; 2497 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx; 2498 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx; 2499 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX); 2500 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n", 2501 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2502 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx, 2503 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx)); 2504 # ifdef IN_RING0 2505 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit"); 2506 if (rcStrict != VINF_SUCCESS) 2507 return rcStrict; 2508 RT_NOREF(pVM); 2509 # else 2510 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 2511 AssertRCReturn(rc, rc); 2512 # endif 2513 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0); 2514 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n", 2515 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2516 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 2517 return rcStrictExec; 2518 } 2519 #elif defined(IN_RING3) 1822 1823 2520 1824 /** 2521 1825 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid). … … 2590 1894 return rcStrict; 2591 1895 } 2592 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 2593 2594 2595 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 2596 /** 2597 * Deals with MSR intercept message. 2598 * 2599 * @returns Strict VBox status code. 2600 * @param pVCpu The cross context per CPU structure. 2601 * @param pMsg The message. 
2602 * @sa nemR3WinHandleExitMsr 2603 */ 2604 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg) 2605 { 2606 /* 2607 * A wee bit of sanity first. 2608 */ 2609 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ 2610 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE); 2611 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment); 2612 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip); 2613 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags); 2614 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8); 2615 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax); 2616 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx); 2617 2618 /* 2619 * Check CPL as that's common to both RDMSR and WRMSR. 2620 */ 2621 VBOXSTRICTRC rcStrict; 2622 if (pMsg->Header.ExecutionState.Cpl == 0) 2623 { 2624 /* 2625 * Get all the MSR state. Since we're getting EFER, we also need to 2626 * get CR0, CR4 and CR3. 2627 */ 2628 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, 2629 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 2630 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE) 2631 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ), 2632 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); 2633 2634 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header); 2635 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, 2636 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK) 2637 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0 2638 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4, 2639 "MSRs"); 2640 if (rcStrict == VINF_SUCCESS) 2641 { 2642 if (!pExitRec) 2643 { 2644 /* 2645 * Handle writes. 
2646 */ 2647 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE) 2648 { 2649 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx)); 2650 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", 2651 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2652 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) )); 2653 if (rcStrict == VINF_SUCCESS) 2654 { 2655 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2); 2656 return VINF_SUCCESS; 2657 } 2658 # ifndef IN_RING3 2659 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */ 2660 if (rcStrict == VERR_CPUM_RAISE_GP_0) 2661 rcStrict = VINF_CPUM_R3_MSR_WRITE; 2662 return rcStrict; 2663 # else 2664 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", 2665 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2666 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) )); 2667 # endif 2668 } 2669 /* 2670 * Handle reads. 2671 */ 2672 else 2673 { 2674 uint64_t uValue = 0; 2675 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue); 2676 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", 2677 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2678 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2679 if (rcStrict == VINF_SUCCESS) 2680 { 2681 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue; 2682 pVCpu->cpum.GstCtx.rdx = uValue >> 32; 2683 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); 2684 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2); 2685 return VINF_SUCCESS; 2686 } 2687 # ifndef IN_RING3 2688 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. 
*/ 2689 if (rcStrict == VERR_CPUM_RAISE_GP_0) 2690 rcStrict = VINF_CPUM_R3_MSR_READ; 2691 return rcStrict; 2692 # else 2693 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", 2694 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2695 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 2696 # endif 2697 } 2698 } 2699 else 2700 { 2701 /* 2702 * Handle frequent exit or something needing probing. 2703 */ 2704 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n", 2705 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2706 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber)); 2707 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 2708 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n", 2709 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2710 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 2711 return rcStrict; 2712 } 2713 } 2714 else 2715 { 2716 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n", 2717 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2718 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? 
"WR" : "RD", 2719 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) )); 2720 return rcStrict; 2721 } 2722 } 2723 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE) 2724 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", 2725 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2726 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx )); 2727 else 2728 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", 2729 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), 2730 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber)); 2731 2732 /* 2733 * If we get down here, we're supposed to #GP(0). 2734 */ 2735 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR"); 2736 if (rcStrict == VINF_SUCCESS) 2737 { 2738 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0); 2739 if (rcStrict == VINF_IEM_RAISED_XCPT) 2740 rcStrict = VINF_SUCCESS; 2741 else if (rcStrict != VINF_SUCCESS) 2742 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) )); 2743 } 2744 return rcStrict; 2745 } 2746 #elif defined(IN_RING3) 1896 1897 2747 1898 /** 2748 1899 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess). 
… … 2872 2023 return rcStrict; 2873 2024 } 2874 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */2875 2025 2876 2026 … … 2940 2090 2941 2091 2942 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API2943 /**2944 * Copies state included in a exception intercept message.2945 *2946 * @param pVCpu The cross context per CPU structure.2947 * @param pMsg The message.2948 * @param fClearXcpt Clear pending exception.2949 */2950 DECLINLINE(void)2951 nemHCWinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)2952 {2953 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);2954 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS2955 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );2956 pVCpu->cpum.GstCtx.rax = pMsg->Rax;2957 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;2958 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;2959 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;2960 pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;2961 pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;2962 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;2963 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;2964 pVCpu->cpum.GstCtx.r8 = pMsg->R8;2965 pVCpu->cpum.GstCtx.r9 = pMsg->R9;2966 pVCpu->cpum.GstCtx.r10 = pMsg->R10;2967 pVCpu->cpum.GstCtx.r11 = pMsg->R11;2968 pVCpu->cpum.GstCtx.r12 = pMsg->R12;2969 pVCpu->cpum.GstCtx.r13 = pMsg->R13;2970 pVCpu->cpum.GstCtx.r14 = pMsg->R14;2971 pVCpu->cpum.GstCtx.r15 = pMsg->R15;2972 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);2973 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);2974 }2975 #elif defined(IN_RING3)2976 2092 /** 2977 2093 * Copies state included in a exception intercept exit. 
… … 2987 2103 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; 2988 2104 } 2989 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */2990 2105 2991 2106 … … 3073 2188 3074 2189 3075 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API3076 /**3077 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).3078 *3079 * @returns Strict VBox status code.3080 * @param pVCpu The cross context per CPU structure.3081 * @param pMsg The message.3082 * @sa nemR3WinHandleExitMsr3083 */3084 NEM_TMPL_STATIC VBOXSTRICTRC3085 nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)3086 {3087 /*3088 * Assert sanity.3089 */3090 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ3091 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE3092 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);3093 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);3094 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);3095 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);3096 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);3097 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);3098 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterSs, pMsg->SsSegment);3099 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);3100 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);3101 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);3102 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);3103 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);3104 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);3105 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);3106 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);3107 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8, pMsg->R8);3108 
NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9, pMsg->R9);3109 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);3110 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);3111 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);3112 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);3113 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);3114 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);3115 3116 /*3117 * Get most of the register state since we'll end up making IEM inject the3118 * event. The exception isn't normally flaged as a pending event, so duh.3119 *3120 * Note! We can optimize this later with event injection.3121 */3122 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",3123 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),3124 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));3125 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);3126 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;3127 if (pMsg->ExceptionVector == X86_XCPT_DB)3128 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;3129 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");3130 if (rcStrict != VINF_SUCCESS)3131 return rcStrict;3132 3133 /*3134 * Handle the intercept.3135 */3136 TRPMEVENT enmEvtType = TRPM_TRAP;3137 switch (pMsg->ExceptionVector)3138 {3139 /*3140 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions3141 * and need to turn them over to GIM.3142 *3143 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants3144 * #UD for handling non-native hypercall instructions. 
(IEM will3145 * decode both and let the GIM provider decide whether to accept it.)3146 */3147 case X86_XCPT_UD:3148 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);3149 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),3150 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());3151 3152 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,3153 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))3154 {3155 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,3156 pMsg->InstructionBytes, pMsg->InstructionByteCount);3157 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",3158 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,3159 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));3160 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);3161 return rcStrict;3162 }3163 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,3164 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));3165 break;3166 3167 /*3168 * Workaround the lovely mesa driver assuming that vmsvga means vmware3169 * hypervisor and tries to log stuff to the host.3170 */3171 case X86_XCPT_GP:3172 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);3173 /** @todo r=bird: Need workaround in IEM for this, right?3174 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),3175 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); */3176 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv3177 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pMsg->InstructionBytes, pMsg->InstructionByteCount))3178 {3179 # if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... 
*/3180 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,3181 pMsg->InstructionBytes, pMsg->InstructionByteCount);3182 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",3183 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,3184 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));3185 return rcStrict;3186 # else3187 break;3188 # endif3189 }3190 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);3191 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);3192 3193 /*3194 * Filter debug exceptions.3195 */3196 case X86_XCPT_DB:3197 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);3198 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),3199 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());3200 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",3201 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));3202 break;3203 3204 case X86_XCPT_BP:3205 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);3206 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),3207 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());3208 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,3209 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));3210 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */3211 break;3212 3213 /* This shouldn't happen. 
*/3214 default:3215 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);3216 }3217 3218 /*3219 * Inject it.3220 */3221 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,3222 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);3223 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",3224 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,3225 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));3226 return rcStrict;3227 }3228 #elif defined(IN_RING3)3229 2190 /** 3230 2191 * Deals with MSR access exits (WHvRunVpExitReasonException). … … 3304 2265 pExit->VpException.InstructionByteCount)) 3305 2266 { 3306 # 2267 #if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */ 3307 2268 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip, 3308 2269 pExit->VpException.InstructionBytes, … … 3313 2274 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled); 3314 2275 return rcStrict; 3315 # 2276 #else 3316 2277 break; 3317 # 2278 #endif 3318 2279 } 3319 2280 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa); … … 3357 2318 return rcStrict; 3358 2319 } 3359 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 3360 3361 3362 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 3363 /** 3364 * Deals with unrecoverable exception (triple fault). 3365 * 3366 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up 3367 * here too. So we'll leave it to IEM to decide. 3368 * 3369 * @returns Strict VBox status code. 3370 * @param pVCpu The cross context per CPU structure. 3371 * @param pMsgHdr The message header. 
3372 * @sa nemR3WinHandleExitUnrecoverableException 3373 */ 3374 NEM_TMPL_STATIC VBOXSTRICTRC 3375 nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr) 3376 { 3377 /* Check message register value sanity. */ 3378 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment); 3379 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip); 3380 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags); 3381 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8); 3382 3383 # if 0 3384 /* 3385 * Just copy the state we've got and handle it in the loop for now. 3386 */ 3387 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr); 3388 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", 3389 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsgHdr->Rflags)); 3390 return VINF_EM_TRIPLE_FAULT; 3391 # else 3392 /* 3393 * Let IEM decide whether this is really it. 3394 */ 3395 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION), 3396 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC()); 3397 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr); 3398 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit"); 3399 if (rcStrict == VINF_SUCCESS) 3400 { 3401 rcStrict = IEMExecOne(pVCpu); 3402 if (rcStrict == VINF_SUCCESS) 3403 { 3404 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector, 3405 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags )); 3406 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). 
*/ 3407 return VINF_SUCCESS; 3408 } 3409 if (rcStrict == VINF_EM_TRIPLE_FAULT) 3410 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector, 3411 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 3412 else 3413 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector, 3414 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 3415 } 3416 else 3417 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector, 3418 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 3419 return rcStrict; 3420 # endif 3421 } 3422 #elif defined(IN_RING3) 2320 2321 3423 2322 /** 3424 2323 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException). … … 3432 2331 NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit) 3433 2332 { 3434 # 2333 #if 0 3435 2334 /* 3436 2335 * Just copy the state we've got and handle it in the loop for now. … … 3441 2340 RT_NOREF_PV(pVM); 3442 2341 return VINF_EM_TRIPLE_FAULT; 3443 # 2342 #else 3444 2343 /* 3445 2344 * Let IEM decide whether this is really it. … … 3471 2370 RT_NOREF_PV(pVM); 3472 2371 return rcStrict; 3473 # endif 3474 3475 } 3476 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 3477 3478 3479 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 3480 /** 3481 * Handles messages (VM exits). 3482 * 3483 * @returns Strict VBox status code. 3484 * @param pVM The cross context VM structure. 3485 * @param pVCpu The cross context per CPU structure. 3486 * @param pMappingHeader The message slot mapping. 
3487 * @sa nemR3WinHandleExit 3488 */ 3489 NEM_TMPL_STATIC VBOXSTRICTRC 3490 nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader) 3491 { 3492 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage) 3493 { 3494 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage)); 3495 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1); 3496 switch (pMsg->Header.MessageType) 3497 { 3498 case HvMessageTypeUnmappedGpa: 3499 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment)); 3500 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped); 3501 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept); 3502 3503 case HvMessageTypeGpaIntercept: 3504 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment)); 3505 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept); 3506 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept); 3507 3508 case HvMessageTypeX64IoPortIntercept: 3509 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept)); 3510 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo); 3511 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept); 3512 3513 case HvMessageTypeX64Halt: 3514 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt); 3515 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT), 3516 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC()); 3517 Log4(("HaltExit\n")); 3518 return VINF_EM_HALT; 3519 3520 case HvMessageTypeX64InterruptWindow: 3521 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow)); 3522 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow); 3523 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow); 3524 3525 case HvMessageTypeX64CpuidIntercept: 3526 Assert(pMsg->Header.PayloadSize == 
sizeof(pMsg->X64CpuIdIntercept)); 3527 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId); 3528 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept); 3529 3530 case HvMessageTypeX64MsrIntercept: 3531 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept)); 3532 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr); 3533 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept); 3534 3535 case HvMessageTypeX64ExceptionIntercept: 3536 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept)); 3537 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException); 3538 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept); 3539 3540 case HvMessageTypeUnrecoverableException: 3541 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader)); 3542 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable); 3543 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader); 3544 3545 case HvMessageTypeInvalidVpRegisterValue: 3546 case HvMessageTypeUnsupportedFeature: 3547 case HvMessageTypeTlbPageSizeMismatch: 3548 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg)); 3549 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg), 3550 VERR_NEM_IPE_3); 3551 3552 case HvMessageTypeX64ApicEoi: 3553 case HvMessageTypeX64LegacyFpError: 3554 case HvMessageTypeX64RegisterIntercept: 3555 case HvMessageTypeApicEoi: 3556 case HvMessageTypeFerrAsserted: 3557 case HvMessageTypeEventLogBufferComplete: 3558 case HvMessageTimerExpired: 3559 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg)); 3560 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType), 3561 VERR_NEM_IPE_3); 3562 3563 default: 3564 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg)); 3565 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType), 3566 
VERR_NEM_IPE_3); 3567 } 3568 } 3569 else 3570 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n", 3571 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage), 3572 VERR_NEM_IPE_4); 3573 } 3574 #elif defined(IN_RING3) 2372 #endif 2373 } 2374 2375 3575 2376 /** 3576 2377 * Handles VM exits. … … 3638 2439 } 3639 2440 } 3640 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 3641 3642 3643 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 3644 /** 3645 * Worker for nemHCWinRunGC that stops the execution on the way out. 3646 * 3647 * The CPU was running the last time we checked, no there are no messages that 3648 * needs being marked handled/whatever. Caller checks this. 3649 * 3650 * @returns rcStrict on success, error status on failure. 3651 * @param pVM The cross context VM structure. 3652 * @param pVCpu The cross context per CPU structure. 3653 * @param rcStrict The nemHCWinRunGC return status. This is a little 3654 * bit unnecessary, except in internal error cases, 3655 * since we won't need to stop the CPU if we took an 3656 * exit. 3657 * @param pMappingHeader The message slot mapping. 3658 */ 3659 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict, 3660 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader) 3661 { 3662 # ifdef DBGFTRACE_ENABLED 3663 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1); 3664 # endif 3665 3666 /* 3667 * Try stopping the processor. If we're lucky we manage to do this before it 3668 * does another VM exit. 
3669 */ 3670 DBGFTRACE_CUSTOM(pVM, "nemStop#0"); 3671 # ifdef IN_RING0 3672 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu; 3673 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, 3674 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu), 3675 NULL, 0); 3676 if (NT_SUCCESS(rcNt)) 3677 { 3678 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt); 3679 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) )); 3680 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess); 3681 return rcStrict; 3682 } 3683 # else 3684 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu); 3685 if (fRet) 3686 { 3687 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay"); 3688 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) )); 3689 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess); 3690 return rcStrict; 3691 } 3692 # endif 3693 3694 /* 3695 * Dang. The CPU stopped by itself and we got a couple of message to deal with. 3696 */ 3697 # ifdef IN_RING0 3698 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt); 3699 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt), 3700 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3701 # else 3702 DWORD dwErr = RTNtLastErrorValue(); 3703 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr); 3704 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr), 3705 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3706 # endif 3707 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu)); 3708 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending); 3709 3710 /* 3711 * First message: Exit or similar, sometimes VidMessageStopRequestComplete. 3712 * Note! We can safely ASSUME that rcStrict isn't an important information one. 
3713 */ 3714 # ifdef IN_RING0 3715 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 3716 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage, 3717 pMsgForTrace->Header.MessageType); 3718 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS, 3719 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 3720 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3721 # else 3722 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 3723 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 3724 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType, 3725 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType); 3726 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 3727 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3728 # endif 3729 3730 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType; 3731 if (enmVidMsgType != VidMessageStopRequestComplete) 3732 { 3733 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader); 3734 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict)) 3735 rcStrict = rcStrict2; 3736 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict)); 3737 3738 /* 3739 * Mark it as handled and get the stop request completed message, then mark 3740 * that as handled too. CPU is back into fully stopped stated then. 
3741 */ 3742 # ifdef IN_RING0 3743 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, 3744 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 3745 30000 /*ms*/); 3746 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage, 3747 pMsgForTrace->Header.MessageType); 3748 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS, 3749 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 3750 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3751 # else 3752 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 3753 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 3754 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType, 3755 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType); 3756 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 3757 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3758 # endif 3759 3760 /* It should be a stop request completed message. */ 3761 enmVidMsgType = pMappingHeader->enmVidMsgType; 3762 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete, 3763 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n", 3764 enmVidMsgType, pMappingHeader->cbMessage), 3765 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3766 3767 /* 3768 * Mark the VidMessageStopRequestComplete message as handled. 
3769 */ 3770 # ifdef IN_RING0 3771 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/); 3772 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, 3773 pMsgForTrace->Header.MessageType, pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType); 3774 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS, 3775 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 3776 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3777 # else 3778 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/); 3779 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType, 3780 pMsgForTrace->Header.MessageType, pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType); 3781 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 3782 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict); 3783 # endif 3784 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) )); 3785 } 3786 else 3787 { 3788 /** @todo I'm not so sure about this now... 
*/ 3789 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType, 3790 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType); 3791 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd); 3792 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n", 3793 VBOXSTRICTRC_VAL(rcStrict) )); 3794 } 3795 return rcStrict; 3796 } 3797 #endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 3798 3799 #if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) 2441 3800 2442 3801 2443 /** … … 3932 2574 { 3933 2575 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags)); 3934 # 2576 #ifdef LOG_ENABLED 3935 2577 if (LogIs3Enabled()) 3936 2578 nemHCWinLogState(pVM, pVCpu); 3937 # 2579 #endif 3938 2580 3939 2581 /* … … 3955 2597 * everything every time. This will be optimized later. 3956 2598 */ 3957 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API3958 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;3959 # endif3960 2599 const bool fSingleStepping = DBGFIsStepping(pVCpu); 3961 2600 // const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK … … 3965 2604 for (unsigned iLoop = 0;; iLoop++) 3966 2605 { 3967 # 2606 #ifndef VBOX_WITH_PGM_NEM_MODE 3968 2607 /* 3969 2608 * Hack alert! … … 3978 2617 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages); 3979 2618 } 3980 # 2619 #endif 3981 2620 3982 2621 /* … … 3988 2627 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI)) 3989 2628 { 3990 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API3991 /* Make sure the CPU isn't executing. 
*/3992 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)3993 {3994 pVCpu->nem.s.fHandleAndGetFlags = 0;3995 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);3996 if (rcStrict == VINF_SUCCESS)3997 { /* likely */ }3998 else3999 {4000 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));4001 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);4002 break;4003 }4004 }4005 # endif4006 4007 2629 /* Try inject interrupt. */ 4008 2630 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows); … … 4017 2639 } 4018 2640 4019 # 2641 #ifndef NEM_WIN_WITH_A20 4020 2642 /* 4021 2643 * Do not execute in hyper-V if the A20 isn't enabled. … … 4029 2651 break; 4030 2652 } 4031 # 2653 #endif 4032 2654 4033 2655 /* … … 4039 2661 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK) 4040 2662 || ( ( pVCpu->nem.s.fDesiredInterruptWindows 4041 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows) 4042 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 4043 && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */ 4044 # endif 4045 ) 4046 ) 4047 { 4048 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 4049 AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */, 4050 ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n", 4051 pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK), 4052 pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows)); 4053 # endif 4054 # ifdef IN_RING0 4055 int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx); 4056 # else 2663 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows) ) ) 2664 { 4057 2665 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu); 4058 # 
endif4059 2666 AssertRCReturn(rc2, rc2); 4060 2667 } … … 4074 2681 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK)) 4075 2682 { 4076 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API4077 if (pVCpu->nem.s.fHandleAndGetFlags)4078 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }4079 else4080 {4081 # ifdef IN_RING04082 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;4083 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,4084 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),4085 NULL, 0);4086 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));4087 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),4088 VERR_NEM_IPE_5);4089 # else4090 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),4091 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",4092 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),4093 VERR_NEM_IPE_5);4094 # endif4095 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;4096 }4097 # endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */4098 4099 2683 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM)) 4100 2684 { 4101 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 4102 uint64_t const nsNow = RTTimeNanoTS(); 4103 int64_t const cNsNextTimerEvt = nsNow - nsNextTimerEvt; 4104 uint32_t cMsWait; 4105 if (cNsNextTimerEvt < 100000 /* ns */) 4106 cMsWait = 0; 4107 else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC) 4108 { 4109 if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS) 4110 cMsWait = 1; 4111 else 4112 cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS; 4113 } 4114 else 4115 cMsWait = RT_MS_1SEC; 4116 # ifdef IN_RING0 4117 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pVCpu->idCpu; 4118 
pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags; 4119 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait; 4120 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction, 4121 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext, 4122 pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput, 4123 NULL, 0); 4124 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 4125 if (rcNt == STATUS_SUCCESS) 4126 # else 4127 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 4128 pVCpu->nem.s.fHandleAndGetFlags, cMsWait); 4129 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 4130 if (fRet) 4131 # endif 4132 # else 4133 # ifdef LOG_ENABLED 2685 #ifdef LOG_ENABLED 4134 2686 if (LogIsFlowEnabled()) 4135 2687 { … … 4142 2694 aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64)); 4143 2695 } 4144 # 2696 #endif 4145 2697 WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0}; 4146 2698 TMNotifyStartOfExecution(pVM, pVCpu); … … 4150 2702 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 4151 2703 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC()); 4152 # 2704 #ifdef LOG_ENABLED 4153 2705 LogFlow(("NEM/%u: Exit @ %04X:%08RX64 IF=%d CR8=%#x Reason=%#x\n", pVCpu->idCpu, ExitReason.VpContext.Cs.Selector, 4154 2706 ExitReason.VpContext.Rip, RT_BOOL(ExitReason.VpContext.Rflags & X86_EFL_IF), ExitReason.VpContext.Cr8, 4155 2707 ExitReason.ExitReason)); 4156 # 2708 #endif 4157 2709 if (SUCCEEDED(hrc)) 4158 # endif4159 2710 { 4160 2711 /* 4161 2712 * Deal with the message. 
4162 2713 */ 4163 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API4164 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);4165 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;4166 # else4167 2714 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason); 4168 # endif4169 2715 if (rcStrict == VINF_SUCCESS) 4170 2716 { /* hopefully likely */ } … … 4177 2723 } 4178 2724 else 4179 {4180 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API4181 4182 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,4183 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,4184 the error code conversion is into WAIT_XXX, i.e. NT status codes. */4185 # ifndef IN_RING04186 DWORD rcNt = GetLastError();4187 # endif4188 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));4189 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT4190 || rcNt == STATUS_ALERTED /* just in case */4191 || rcNt == STATUS_USER_APC /* ditto */4192 || rcNt == STATUS_KERNEL_APC /* ditto */4193 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",4194 pVCpu->idCpu, rcNt, rcNt),4195 VERR_NEM_IPE_0);4196 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;4197 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);4198 # else4199 2725 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n", 4200 2726 pVCpu->idCpu, hrc, GetLastError()), 4201 2727 VERR_NEM_IPE_0); 4202 # endif4203 }4204 2728 4205 2729 /* … … 4235 2759 * state and return to EM. We don't sync back the whole state if we can help it. 
4236 2760 */ 4237 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API4238 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)4239 {4240 pVCpu->nem.s.fHandleAndGetFlags = 0;4241 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);4242 }4243 # endif4244 4245 2761 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM)) 4246 2762 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED); … … 4253 2769 || RT_FAILURE(rcStrict)) 4254 2770 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT); 4255 # 2771 #ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */ 4256 2772 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE 4257 2773 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE) … … 4259 2775 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ) 4260 2776 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT; 4261 # 2777 #endif 4262 2778 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC 4263 2779 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI)) … … 4266 2782 if (pVCpu->cpum.GstCtx.fExtrn & fImport) 4267 2783 { 4268 # 2784 #ifdef IN_RING0 4269 2785 int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT, 4270 2786 true /*fCanUpdateCr3*/); … … 4282 2798 } 4283 2799 } 4284 # 2800 #else 4285 2801 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT); 4286 2802 if (RT_SUCCESS(rc2)) 4287 2803 pVCpu->cpum.GstCtx.fExtrn &= ~fImport; 4288 # 2804 #endif 4289 2805 else if (RT_SUCCESS(rcStrict)) 4290 2806 rcStrict = rc2; … … 4310 2826 } 4311 2827 4312 #endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */4313 #ifdef IN_RING34314 2828 4315 2829 /** … … 4358 2872 } 4359 2873 4360 #endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) 
*/4361 2874 4362 2875 void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb) … … 4374 2887 4375 2888 *pu2State = UINT8_MAX; 4376 #if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)2889 #if defined(VBOX_WITH_PGM_NEM_MODE) 4377 2890 if (pvMemR3) 4378 2891 { … … 4403 2916 4404 2917 4405 #ifdef IN_RING34406 2918 /** 4407 2919 * Worker that maps pages into Hyper-V. … … 4525 3037 return VINF_SUCCESS; 4526 3038 } 4527 #endif /* IN_RING3 */4528 3039 4529 3040 … … 4537 3048 } 4538 3049 4539 #if defined(IN_RING3)4540 3050 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a); 4541 3051 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE); … … 4553 3063 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 4554 3064 return VERR_NEM_IPE_6; 4555 #else4556 RT_NOREF(pVM, GCPhysDst, pu2State);4557 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): Why are we here?!?\n", GCPhysDst));4558 return VERR_NEM_IPE_6;4559 #endif4560 3065 } 4561 3066 … … 4643 3148 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC; 4644 3149 } 3150
Note: See TracChangeset for help on using the changeset viewer.