- Timestamp: Jan 19, 2022 11:35:13 PM (3 years ago)
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r93305 → r93351 (lines prefixed with '-' were removed; unprefixed lines are unchanged context):

-#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
-
-/**
- * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure of the caller.
- * @param   GCPhysSrc   The source page. Does not need to be page aligned.
- * @param   GCPhysDst   The destination page. Same as @a GCPhysSrc except for
- *                      when A20 is disabled.
- * @param   fFlags      HV_MAP_GPA_XXX.
- */
-DECLINLINE(int) nemHCWinHypercallMapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
-{
-#ifdef IN_RING0
-    /** @todo optimize further, caller generally has the physical address. */
-    return nemR0WinMapPages(pVM, pVCpu,
-                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
-                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
-                            1, fFlags);
-#else
-    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
-    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
-    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
-    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
-    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
-#endif
-}
-
-
-/**
- * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure of the caller.
- * @param   GCPhys      The page to unmap. Does not need to be page aligned.
- */
-DECLINLINE(int) nemHCWinHypercallUnmapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
-{
-# ifdef IN_RING0
-    return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
-# else
-    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
-    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
-    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
-# endif
-}
-
-#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
-
 #ifndef IN_RING0

 NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
 {
-# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
-#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
-    if (pVM->nem.s.fUseRing0Runloop)
-#  endif
-    {
-        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
-        AssertLogRelRCReturn(rc, rc);
-        return rc;
-    }
-# endif
-# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
-
     /*
      * The following is very similar to what nemR0WinExportState() does.
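/* Illustration only, not part of this changeset: a minimal, hypothetical sketch of
   how a caller used the removed single-page wrappers above -- map a guest page
   readable+writable+executable and unmap it again.  The HV_MAP_GPA_* flags are the
   ones used elsewhere in this file; exampleRemapOnePage() itself is not in the sources. */
static int exampleRemapOnePage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
    /* The wrappers page-align the addresses themselves, so GCPhys need not be aligned. */
    int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
                                      HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
                                      | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
    if (RT_SUCCESS(rc))
        rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
    return rc;
}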
… … 492 429 # undef ADD_REG128 493 430 # undef ADD_SEG 494 495 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */496 431 } 497 432 … … 499 434 NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat) 500 435 { 501 # if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)502 # if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)503 if (pVM->nem.s.fUseRing0Runloop)504 # endif505 {506 /* See NEMR0ImportState */507 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);508 if (RT_SUCCESS(rc))509 return rc;510 if (rc == VERR_NEM_FLUSH_TLB)511 {512 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);513 return rc;514 }515 AssertLogRelRCReturn(rc, rc);516 return rc;517 }518 # endif519 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS520 436 WHV_REGISTER_NAME aenmNames[128]; 521 437 … … 1138 1054 1139 1055 return VINF_SUCCESS; 1140 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */1141 1056 } 1142 1057 … … 1156 1071 1157 1072 #ifdef IN_RING0 1158 # ifdef NEM_WIN_WITH_RING0_RUNLOOP1159 return nemR0WinImportState(pVCpu->pGVM, pVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);1160 # else1161 1073 RT_NOREF(pVCpu, fWhat); 1162 1074 return VERR_NOT_IMPLEMENTED; 1163 # endif1164 1075 #else 1165 1076 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat); … … 1185 1096 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9); 1186 1097 1187 # if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)1188 # if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)1189 if (pVM->nem.s.fUseRing0Runloop)1190 # endif1191 {1192 /* Call ring-0 and get the values. */1193 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);1194 AssertLogRelRCReturn(rc, rc);1195 *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;1196 if (puAux)1197 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX1198 ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);1199 return VINF_SUCCESS;1200 }1201 # endif1202 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS1203 1098 /* Call the offical API. */ 1204 1099 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux }; … … 1214 1109 *pcTicks = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? 
aValues[0].Reg64 : CPUMGetGuestTscAux(pVCpu); 1215 1110 return VINF_SUCCESS; 1216 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */1217 1111 #else /* IN_RING0 */ 1218 # ifdef NEM_WIN_WITH_RING0_RUNLOOP1219 int rc = nemR0WinQueryCpuTick(pVCpu->pGVM, pVCpu, pcTicks, puAux);1220 if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))1221 *puAux = CPUMGetGuestTscAux(pVCpu);1222 return rc;1223 # else1224 1112 RT_NOREF(pVCpu, pcTicks, puAux); 1225 1113 return VERR_NOT_IMPLEMENTED; 1226 # endif1227 1114 #endif /* IN_RING0 */ 1228 1115 } … … 1242 1129 { 1243 1130 #ifdef IN_RING0 1244 # ifdef NEM_WIN_WITH_RING0_RUNLOOP1245 return nemR0WinResumeCpuTickOnAll(pVM, pVCpu, uPausedTscValue);1246 # else1247 1131 RT_NOREF(pVM, pVCpu, uPausedTscValue); 1248 1132 return VERR_NOT_IMPLEMENTED; 1249 # endif1250 1133 #else /* IN_RING3 */ 1251 1134 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT); 1252 1135 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9); 1253 1136 1254 # if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)1255 # if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)1256 if (pVM->nem.s.fUseRing0Runloop)1257 # endif1258 {1259 /* Call ring-0 and do it all there. */1260 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);1261 }1262 # endif1263 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS1264 1137 /* 1265 1138 * Call the offical API to do the job. … … 1294 1167 1295 1168 return VINF_SUCCESS; 1296 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */1297 1169 #endif /* IN_RING3 */ 1298 1170 } 1299 1300 #ifdef NEMWIN_NEED_GET_REGISTER1301 # if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)1302 /** Worker for assertion macro. */1303 NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)1304 {1305 RT_ZERO(*pRetValue);1306 # ifdef IN_RING31307 RT_NOREF(pVCpu, pGVCpu, enmReg);1308 return VERR_NOT_IMPLEMENTED;1309 # else1310 NOREF(pVCpu);1311 1312 /*1313 * Hypercall parameters.1314 */1315 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;1316 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);1317 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);1318 1319 pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;1320 pInput->VpIndex = pVCpu->idCpu;1321 pInput->fFlags = 0;1322 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;1323 1324 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);1325 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);1326 RT_BZERO(paValues, sizeof(paValues[0]) * 1);1327 1328 /*1329 * Make the hypercall and copy out the value.1330 */1331 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),1332 pGVCpu->nem.s.HypercallData.HCPhysPage,1333 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);1334 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),1335 VERR_NEM_GET_REGISTERS_FAILED);1336 1337 *pRetValue = paValues[0];1338 return VINF_SUCCESS;1339 # endif1340 }1341 # else1342 /** Worker for assertion macro. 
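/* Illustration only, not part of this changeset: a standalone sketch of the official
   WinHvPlatform path kept above for NEMHCQueryCpuTick -- reading the guest TSC and
   TSC_AUX with WHvGetVirtualProcessorRegisters.  Assumes a valid partition handle and
   virtual processor index and linking against WinHvPlatform.dll. */
#include <WinHvPlatform.h>

static HRESULT exampleQueryCpuTick(WHV_PARTITION_HANDLE hPartition, UINT32 iVp, UINT64 *pcTicks, UINT64 *puAux)
{
    WHV_REGISTER_NAME const aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE      aValues[2]   = {};
    HRESULT hrc = WHvGetVirtualProcessorRegisters(hPartition, iVp, aenmNames, 2, aValues);
    if (SUCCEEDED(hrc))
    {
        *pcTicks = aValues[0].Reg64;    /* WHvX64RegisterTsc */
        *puAux   = aValues[1].Reg64;    /* WHvX64RegisterTscAux */
    }
    return hrc;
}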
*/1343 NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPUCC a_pVCpu, uint32_t a_enmReg, WHV_REGISTER_VALUE pValue)1344 {1345 RT_ZERO(*pRetValue);1346 RT_NOREF(pVCpu, pGVCpu, enmReg);1347 return VERR_NOT_IMPLEMENTED;1348 }1349 # endif1350 #endif1351 1171 1352 1172 … … 1379 1199 1380 1200 1381 #if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)1382 # ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need K THREADs and KeAlertThread. */1201 #if defined(NEM_WIN_USE_OUR_OWN_RUN_API) 1202 # ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KETHREADs and KeAlertThread. */ 1383 1203 /** 1384 1204 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0. … … 1446 1266 } 1447 1267 # endif /* IN_RING3 */ 1448 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP*/1268 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API */ 1449 1269 1450 1270 … … 1612 1432 #endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */ 1613 1433 1614 #if defined( NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)1434 #if defined(IN_RING3) 1615 1435 1616 1436 NEM_TMPL_STATIC DECLCALLBACK(int) 1617 1437 nemHCWinUnmapOnePageCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser) 1618 1438 { 1619 RT_NOREF_PV(pvUser); 1620 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1621 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys); 1622 AssertRC(rc); 1623 if (RT_SUCCESS(rc)) 1624 # else 1625 RT_NOREF_PV(pVCpu); 1439 RT_NOREF(pvUser, pVCpu); 1626 1440 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a); 1627 1441 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE); 1628 1442 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a); 1629 1443 if (SUCCEEDED(hrc)) 1630 # endif1631 1444 { 1632 1445 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1)); … … 1636 1449 else 1637 1450 { 1638 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1639 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));1640 # else1641 1451 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n", 1642 1452 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(), 1643 1453 RTNtLastErrorValue(), pVM->nem.s.cMappedPages)); 1644 # endif1645 1454 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET; 1646 1455 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed); … … 1704 1513 * We don't really consider downgrades here, as they shouldn't happen. 1705 1514 */ 1706 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1707 1515 /** @todo Someone at microsoft please explain: 1708 1516 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a … … 1713 1521 * with new protection or backing. 1714 1522 */ 1715 # endif1716 1523 int rc; 1717 1524 switch (u2State) … … 1756 1563 } 1757 1564 1758 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1759 /* Upgrade page to writable. 
*/1760 /** @todo test this*/1761 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)1762 && pState->fWriteAccess)1763 {1764 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,1765 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE1766 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);1767 AssertRC(rc);1768 if (RT_SUCCESS(rc))1769 {1770 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPage);1771 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;1772 pState->fDidSomething = true;1773 pState->fCanResume = true;1774 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",1775 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));1776 }1777 else1778 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPageFailed);1779 }1780 else1781 {1782 /* Need to emulate the acces. */1783 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */1784 rc = VINF_SUCCESS;1785 }1786 return rc;1787 # else1788 1565 break; 1789 # endif1790 1566 1791 1567 case NEM_WIN_PAGE_STATE_WRITABLE: … … 1802 1578 return VINF_SUCCESS; 1803 1579 } 1804 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1805 AssertFailed(); /* There should be no downgrades. */1806 # endif1807 1580 break; 1808 1581 … … 1815 1588 * If this fails, which it does every so often, just unmap everything for now. 1816 1589 */ 1817 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1818 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);1819 AssertRC(rc);1820 if (RT_SUCCESS(rc))1821 # else1822 1590 /** @todo figure out whether we mess up the state or if it's WHv. */ 1823 1591 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a); … … 1825 1593 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a); 1826 1594 if (SUCCEEDED(hrc)) 1827 # endif1828 1595 { 1829 1596 pState->fDidSomething = true; … … 1836 1603 } 1837 1604 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed); 1838 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1839 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc)); 1840 return rc; 1841 # elif defined(VBOX_WITH_PGM_NEM_MODE) 1605 # if defined(VBOX_WITH_PGM_NEM_MODE) 1842 1606 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n", 1843 1607 GCPhys, g_apszPageStates[u2State], hrc, hrc)); … … 1859 1623 } 1860 1624 1861 #endif /* defined( NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */1625 #endif /* defined(IN_RING3) */ 1862 1626 1863 1627 … … 3877 3641 3878 3642 3879 #if defined(IN_RING0) && defined(NEM_WIN_WITH_RING0_RUNLOOP)3880 /**3881 * Perform an I/O control operation on the partition handle (VID.SYS),3882 * restarting on alert-like behaviour.3883 *3884 * @returns NT status code.3885 * @param pGVM The ring-0 VM structure.3886 * @param pGVCpu The global (ring-0) per CPU structure.3887 * @param fFlags The wait flags.3888 * @param cMillies The timeout in milliseconds3889 */3890 static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)3891 {3892 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;3893 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;3894 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;3895 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,3896 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,3897 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,3898 NULL, 0);3899 if (rcNt == STATUS_SUCCESS)3900 { /* likely */ }3901 /*3902 
* Generally, if we get down here, we have been interrupted between ACK'ing3903 * a message and waiting for the next due to a NtAlertThread call. So, we3904 * should stop ACK'ing the previous message and get on waiting on the next.3905 * See similar stuff in nemHCWinRunGC().3906 */3907 else if ( rcNt == STATUS_TIMEOUT3908 || rcNt == STATUS_ALERTED /* just in case */3909 || rcNt == STATUS_KERNEL_APC /* just in case */3910 || rcNt == STATUS_USER_APC /* just in case */)3911 {3912 DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);3913 STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);3914 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);3915 3916 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;3917 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;3918 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;3919 rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,3920 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,3921 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,3922 NULL, 0);3923 DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);3924 }3925 return rcNt;3926 }3927 #endif /* IN_RING0 */3928 3929 3930 3643 #ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 3931 3644 /** … … 4252 3965 for (unsigned iLoop = 0;; iLoop++) 4253 3966 { 4254 # if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && !defined(VBOX_WITH_PGM_NEM_MODE)3967 # ifndef VBOX_WITH_PGM_NEM_MODE 4255 3968 /* 4256 3969 * Hack alert! … … 4598 4311 4599 4312 #endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */ 4600 #if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)4313 #ifdef IN_RING3 4601 4314 4602 4315 /** … … 4609 4322 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED) 4610 4323 { 4611 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES4612 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);4613 AssertRC(rc);4614 if (RT_SUCCESS(rc))4615 # else4616 4324 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE); 4617 4325 if (SUCCEEDED(hrc)) 4618 # endif4619 4326 { 4620 4327 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage); … … 4626 4333 { 4627 4334 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed); 4628 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES4629 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));4630 return rc;4631 # else4632 4335 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n", 4633 4336 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 4634 4337 return VERR_NEM_IPE_2; 4635 # endif4636 4338 } 4637 4339 } … … 4672 4374 4673 4375 *pu2State = UINT8_MAX; 4674 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) &&defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)4376 #if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3) 4675 4377 if (pvMemR3) 4676 4378 { … … 4701 4403 4702 4404 4703 #if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)4405 #ifdef IN_RING3 4704 4406 /** 4705 4407 * Worker that maps pages into Hyper-V. … … 4723 4425 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged) 4724 4426 { 4725 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES4726 /*4727 * When using the hypercalls instead of the ring-3 APIs, we don't need to4728 * unmap memory before modifying it. 
We still want to track the state though,4729 * since unmap will fail when called an unmapped page and we don't want to redo4730 * upgrades/downgrades.4731 */4732 uint8_t const u2OldState = *pu2State;4733 int rc;4734 if (fPageProt == NEM_PAGE_PROT_NONE)4735 {4736 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)4737 {4738 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);4739 if (RT_SUCCESS(rc))4740 {4741 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;4742 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);4743 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);4744 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));4745 }4746 else4747 {4748 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);4749 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));4750 }4751 }4752 else4753 rc = VINF_SUCCESS;4754 }4755 else if (fPageProt & NEM_PAGE_PROT_WRITE)4756 {4757 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)4758 {4759 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,4760 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE4761 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);4762 if (RT_SUCCESS(rc))4763 {4764 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;4765 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);4766 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED4767 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;4768 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));4769 NOREF(cMappedPages);4770 }4771 else4772 {4773 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);4774 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));4775 }4776 }4777 else4778 rc = VINF_SUCCESS;4779 }4780 else4781 {4782 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)4783 {4784 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,4785 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);4786 if (RT_SUCCESS(rc))4787 {4788 *pu2State = NEM_WIN_PAGE_STATE_READABLE;4789 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);4790 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED4791 ? 
ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;4792 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));4793 NOREF(cMappedPages);4794 }4795 else4796 {4797 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);4798 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));4799 }4800 }4801 else4802 rc = VINF_SUCCESS;4803 }4804 4805 return VINF_SUCCESS;4806 4807 # else /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */4808 4427 /* 4809 4428 * Looks like we need to unmap a page before we can change the backing … … 4820 4439 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED) 4821 4440 { 4822 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES4823 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);4824 AssertRC(rc);4825 if (RT_SUCCESS(rc))4826 {4827 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;4828 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);4829 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);4830 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)4831 {4832 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",4833 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));4834 return VINF_SUCCESS;4835 }4836 }4837 else4838 {4839 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);4840 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));4841 return rc;4842 }4843 # else4844 4441 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a); 4845 4442 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE); … … 4864 4461 return VERR_NEM_INIT_FAILED; 4865 4462 } 4866 # endif4867 4463 } 4868 4464 } … … 4873 4469 if (fPageProt & NEM_PAGE_PROT_WRITE) 4874 4470 { 4875 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES4876 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,4877 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE4878 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);4879 AssertRC(rc);4880 if (RT_SUCCESS(rc))4881 {4882 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;4883 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);4884 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);4885 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",4886 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));4887 return VINF_SUCCESS;4888 }4889 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);4890 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));4891 return rc;4892 # else4893 4471 void *pvPage; 4894 4472 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage); … … 4913 4491 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc)); 4914 4492 return rc; 4915 # endif4916 4493 } 4917 4494 4918 4495 if (fPageProt & NEM_PAGE_PROT_READ) 4919 4496 { 4920 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES4921 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,4922 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);4923 AssertRC(rc);4924 if (RT_SUCCESS(rc))4925 {4926 *pu2State = NEM_WIN_PAGE_STATE_READABLE;4927 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);4928 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);4929 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",4930 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));4931 return VINF_SUCCESS;4932 }4933 
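/* Illustration only, not part of this changeset: the unmap-then-remap sequence that
   the surviving ring-3 path relies on when a page's backing or protection changes
   (per the comments above, remapping an already-mapped GPA range in place caused
   looping behaviour, so the page is unmapped first).  hPartition, GCPhys and the host
   mapping pvPageR3 are assumed valid; X86_PAGE_SIZE is 4096 and WinHvPlatform.h is
   assumed to be included. */
static HRESULT exampleUpgradePageToWritable(WHV_PARTITION_HANDLE hPartition, UINT64 GCPhys, void *pvPageR3)
{
    HRESULT hrc = WHvUnmapGpaRange(hPartition, GCPhys, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
        hrc = WHvMapGpaRange(hPartition, pvPageR3, GCPhys, X86_PAGE_SIZE,
                             WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
    return hrc;
}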
STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);4934 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));4935 return rc;4936 # else4937 4497 const void *pvPage; 4938 4498 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage); … … 4959 4519 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc)); 4960 4520 return rc; 4961 # endif4962 4521 } 4963 4522 … … 4965 4524 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED; 4966 4525 return VINF_SUCCESS; 4967 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */ 4968 } 4969 #endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */ 4526 } 4527 #endif /* IN_RING3 */ 4970 4528 4971 4529 … … 4979 4537 } 4980 4538 4981 #if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) 4982 PVMCPUCC pVCpu = VMMGetCpu(pVM); 4983 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst); 4984 AssertRC(rc); 4985 if (RT_SUCCESS(rc)) 4986 { 4987 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage); 4988 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages); 4989 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages)); 4990 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED; 4991 return VINF_SUCCESS; 4992 } 4993 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed); 4994 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc)); 4995 return rc; 4996 4997 #elif defined(IN_RING3) 4539 #if defined(IN_RING3) 4998 4540 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a); 4999 4541 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE); … … 5027 4569 5028 4570 int rc; 5029 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 5030 PVMCPUCC pVCpu = VMMGetCpu(pVM); 5031 # ifdef NEM_WIN_WITH_A20 4571 RT_NOREF_PV(fPageProt); 4572 #ifdef NEM_WIN_WITH_A20 5032 4573 if ( pVM->nem.s.fA20Enabled 5033 4574 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys)) 5034 # endif 5035 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/); 5036 # ifdef NEM_WIN_WITH_A20 5037 else 5038 { 5039 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */ 5040 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20)); 5041 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc)) 5042 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/); 5043 5044 } 5045 # endif 5046 #else 5047 RT_NOREF_PV(fPageProt); 5048 # ifdef NEM_WIN_WITH_A20 5049 if ( pVM->nem.s.fA20Enabled 5050 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys)) 5051 # endif 4575 #endif 5052 4576 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State); 5053 # 4577 #ifdef NEM_WIN_WITH_A20 5054 4578 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)) 5055 4579 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State); 5056 4580 else 5057 4581 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. 
*/ 5058 # endif5059 4582 #endif 5060 4583 return rc; … … 5070 4593 RT_NOREF(HCPhys, enmType, pvR3); 5071 4594 5072 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 5073 PVMCPUCC pVCpu = VMMGetCpu(pVM); 5074 # ifdef NEM_WIN_WITH_A20 4595 RT_NOREF_PV(fPageProt); 4596 #ifdef NEM_WIN_WITH_A20 5075 4597 if ( pVM->nem.s.fA20Enabled 5076 4598 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys)) 5077 # endif 5078 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/); 5079 # ifdef NEM_WIN_WITH_A20 5080 else 5081 { 5082 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */ 5083 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20)); 5084 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)) 5085 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/); 5086 } 5087 # endif 5088 #else 5089 RT_NOREF_PV(fPageProt); 5090 # ifdef NEM_WIN_WITH_A20 5091 if ( pVM->nem.s.fA20Enabled 5092 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys)) 5093 # endif 4599 #endif 5094 4600 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State); 5095 # 4601 #ifdef NEM_WIN_WITH_A20 5096 4602 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)) 5097 4603 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State); 5098 4604 /* else: ignore since we've got the alias page at this address. */ 5099 # endif5100 4605 #endif 5101 4606 } … … 5110 4615 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType); 5111 4616 5112 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 5113 PVMCPUCC pVCpu = VMMGetCpu(pVM); 5114 # ifdef NEM_WIN_WITH_A20 4617 RT_NOREF_PV(fPageProt); 4618 #ifdef NEM_WIN_WITH_A20 5115 4619 if ( pVM->nem.s.fA20Enabled 5116 4620 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys)) 5117 # endif 5118 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/); 5119 # ifdef NEM_WIN_WITH_A20 5120 else 5121 { 5122 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */ 5123 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20)); 5124 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)) 5125 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/); 5126 } 5127 # endif 5128 #else 5129 RT_NOREF_PV(fPageProt); 5130 # ifdef NEM_WIN_WITH_A20 5131 if ( pVM->nem.s.fA20Enabled 5132 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys)) 5133 # endif 4621 #endif 5134 4622 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State); 5135 # 4623 #ifdef NEM_WIN_WITH_A20 5136 4624 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)) 5137 4625 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State); 5138 4626 /* else: ignore since we've got the alias page at this address. */ 5139 # endif5140 4627 #endif 5141 4628 } -
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r93115 r93351 66 66 67 67 68 /*********************************************************************************************************************************69 * Global Variables *70 *********************************************************************************************************************************/71 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES72 static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);73 74 /**75 * WinHvr.sys!WinHvDepositMemory76 *77 * This API will try allocates cPages on IdealNode and deposit it to the78 * hypervisor for use with the given partition. The memory will be freed when79 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleanedup.80 *81 * Apparently node numbers above 64 has a different meaning.82 */83 static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);84 #endif85 86 RT_C_DECLS_BEGIN87 /**88 * The WinHvGetPartitionProperty function we intercept in VID.SYS to get the89 * Hyper-V partition ID.90 *91 * This is used from assembly.92 */93 NTSTATUS WinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty, PHV_PARTITION_PROPERTY puValue);94 decltype(WinHvGetPartitionProperty) *g_pfnWinHvGetPartitionProperty;95 RT_C_DECLS_END96 97 /** @name VID.SYS image details.98 * @{ */99 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES100 static uint8_t *g_pbVidSys = NULL;101 static uintptr_t g_cbVidSys = 0;102 static PIMAGE_NT_HEADERS g_pVidSysHdrs = NULL;103 /** Pointer to the import thunk entry in VID.SYS for WinHvGetPartitionProperty if we found it. */104 static decltype(WinHvGetPartitionProperty) **g_ppfnVidSysWinHvGetPartitionProperty = NULL;105 106 /** Critical section protecting the WinHvGetPartitionProperty hacking. */107 static RTCRITSECT g_VidSysCritSect;108 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */109 RT_C_DECLS_BEGIN110 /** The partition ID passed to WinHvGetPartitionProperty by VID.SYS. */111 HV_PARTITION_ID g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;112 /** The thread which is currently looking for a partition ID. */113 RTNATIVETHREAD g_hVidSysMatchThread = NIL_RTNATIVETHREAD;114 /** The property code we expect in WinHvGetPartitionProperty. 
*/115 VID_PARTITION_PROPERTY_CODE g_enmVidSysMatchProperty = INT64_MAX;116 /* NEMR0NativeA-win.asm: */117 extern uint8_t g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog[64];118 RT_C_DECLS_END119 /** @} */120 121 122 123 /*********************************************************************************************************************************124 * Internal Functions *125 *********************************************************************************************************************************/126 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,127 uint32_t cPages, uint32_t fFlags);128 NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);129 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)130 NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);131 NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);132 NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);133 NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);134 #endif135 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,136 void *pvOutput, uint32_t cbOutput);137 138 /* NEMR0NativeA-win.asm: */139 DECLASM(NTSTATUS) nemR0VidSysWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,140 PHV_PARTITION_PROPERTY puValue);141 DECLASM(NTSTATUS) nemR0WinHvrWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,142 PHV_PARTITION_PROPERTY puValue);143 144 145 68 /* 146 69 * Instantate the code we share with ring-0. 147 70 */ 148 #ifdef NEM_WIN_WITH_RING0_RUNLOOP 149 # define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 150 #else 151 # undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 152 #endif 71 #undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API 153 72 #include "../VMMAll/NEMAllNativeTemplate-win.cpp.h" 154 73 … … 159 78 VMMR0_INT_DECL(int) NEMR0Init(void) 160 79 { 161 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES162 return RTCritSectInit(&g_VidSysCritSect);163 #else164 80 return VINF_SUCCESS; 165 #endif166 81 } 167 82 … … 172 87 VMMR0_INT_DECL(void) NEMR0Term(void) 173 88 { 174 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 175 RTCritSectDelete(&g_VidSysCritSect); 176 #endif 177 } 178 179 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 180 181 /** 182 * Worker for NEMR0InitVM that allocates a hypercall page. 183 * 184 * @returns VBox status code. 185 * @param pHypercallData The hypercall data page to initialize. 
186 */ 187 static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData) 188 { 189 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/); 190 if (RT_SUCCESS(rc)) 191 { 192 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/); 193 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3); 194 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj); 195 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3); 196 if (RT_SUCCESS(rc)) 197 return VINF_SUCCESS; 198 199 /* bail out */ 200 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/); 201 } 202 pHypercallData->hMemObj = NIL_RTR0MEMOBJ; 203 pHypercallData->HCPhysPage = NIL_RTHCPHYS; 204 pHypercallData->pbPage = NULL; 205 return rc; 206 } 207 208 209 /** 210 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page. 211 * 212 * @param pHypercallData The hypercall data page to uninitialize. 213 */ 214 static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData) 215 { 216 /* Check pbPage here since it's NULL, whereas the hMemObj can be either 217 NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */ 218 if (pHypercallData->pbPage != NULL) 219 { 220 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/); 221 pHypercallData->pbPage = NULL; 222 } 223 pHypercallData->hMemObj = NIL_RTR0MEMOBJ; 224 pHypercallData->HCPhysPage = NIL_RTHCPHYS; 225 } 226 227 228 static int nemR0StrICmp(const char *psz1, const char *psz2) 229 { 230 for (;;) 231 { 232 char ch1 = *psz1++; 233 char ch2 = *psz2++; 234 if ( ch1 != ch2 235 && RT_C_TO_LOWER(ch1) != RT_C_TO_LOWER(ch2)) 236 return ch1 - ch2; 237 if (!ch1) 238 return 0; 239 } 240 } 241 242 243 /** 244 * Worker for nemR0PrepareForVidSysIntercept(). 245 */ 246 static void nemR0PrepareForVidSysInterceptInner(void) 247 { 248 uint32_t const cbImage = g_cbVidSys; 249 uint8_t * const pbImage = g_pbVidSys; 250 PIMAGE_NT_HEADERS const pNtHdrs = g_pVidSysHdrs; 251 uintptr_t const offEndNtHdrs = (uintptr_t)(pNtHdrs + 1) - (uintptr_t)pbImage; 252 253 # define CHECK_LOG_RET(a_Expr, a_LogRel) do { \ 254 if (RT_LIKELY(a_Expr)) { /* likely */ } \ 255 else \ 256 { \ 257 LogRel(a_LogRel); \ 258 return; \ 259 } \ 260 } while (0) 261 262 //__try 263 { 264 /* 265 * Get and validate the import directory entry. 266 */ 267 CHECK_LOG_RET( pNtHdrs->OptionalHeader.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_IMPORT 268 || pNtHdrs->OptionalHeader.NumberOfRvaAndSizes <= IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 4, 269 ("NEMR0: vid.sys: NumberOfRvaAndSizes is out of range: %#x\n", pNtHdrs->OptionalHeader.NumberOfRvaAndSizes)); 270 271 IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]; 272 CHECK_LOG_RET( ImportDir.Size >= sizeof(IMAGE_IMPORT_DESCRIPTOR) 273 && ImportDir.VirtualAddress >= offEndNtHdrs /* ASSUMES NT headers before imports */ 274 && (uint64_t)ImportDir.VirtualAddress + ImportDir.Size <= cbImage, 275 ("NEMR0: vid.sys: Bad import directory entry: %#x LB %#x (cbImage=%#x, offEndNtHdrs=%#zx)\n", 276 ImportDir.VirtualAddress, ImportDir.Size, cbImage, offEndNtHdrs)); 277 278 /* 279 * Walk the import descriptor table looking for NTDLL.DLL. 
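/* Illustration only, not part of this changeset: how a page set up by
   nemR0InitHypercallData() above was used as a hypercall input/output buffer, modelled
   on the removed nemHCWinGetRegister() worker in the template file.  The input block
   sits at the start of the page, the output block follows at a 32-byte aligned offset,
   and the hypervisor gets the physical addresses of both. */
static int exampleGetOneVpRegister(PNEMR0HYPERCALLDATA pHypercallData, uint64_t idHvPartition,
                                   uint32_t idCpu, HV_REGISTER_NAME enmReg, HV_REGISTER_VALUE *pValue)
{
    HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pHypercallData->pbPage;
    pInput->PartitionId = idHvPartition;
    pInput->VpIndex     = idCpu;
    pInput->fFlags      = 0;
    pInput->Names[0]    = enmReg;

    size_t const       cbInput  = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
    HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
    RT_BZERO(paValues, sizeof(paValues[0]));

    uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
                                               pHypercallData->HCPhysPage,
                                               pHypercallData->HCPhysPage + cbInput);
    if (uResult != HV_MAKE_CALL_REP_RET(1))
        return VERR_NEM_GET_REGISTERS_FAILED;
    *pValue = paValues[0];
    return VINF_SUCCESS;
}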
280 */ 281 for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress]; 282 pImps->Name != 0 && pImps->FirstThunk != 0; 283 pImps++) 284 { 285 CHECK_LOG_RET(pImps->Name < cbImage, ("NEMR0: vid.sys: Bad import directory entry name: %#x", pImps->Name)); 286 const char *pszModName = (const char *)&pbImage[pImps->Name]; 287 if (nemR0StrICmp(pszModName, "winhvr.sys")) 288 continue; 289 CHECK_LOG_RET(pImps->FirstThunk < cbImage && pImps->FirstThunk >= offEndNtHdrs, 290 ("NEMR0: vid.sys: Bad FirstThunk: %#x", pImps->FirstThunk)); 291 CHECK_LOG_RET( pImps->u.OriginalFirstThunk == 0 292 || (pImps->u.OriginalFirstThunk >= offEndNtHdrs && pImps->u.OriginalFirstThunk < cbImage), 293 ("NEMR0: vid.sys: Bad OriginalFirstThunk: %#x", pImps->u.OriginalFirstThunk)); 294 295 /* 296 * Walk the thunks table(s) looking for WinHvGetPartitionProperty. 297 */ 298 uintptr_t *puFirstThunk = (uintptr_t *)&pbImage[pImps->FirstThunk]; /* update this. */ 299 if ( pImps->u.OriginalFirstThunk != 0 300 && pImps->u.OriginalFirstThunk != pImps->FirstThunk) 301 { 302 uintptr_t const *puOrgThunk = (uintptr_t const *)&pbImage[pImps->u.OriginalFirstThunk]; /* read from this. */ 303 uintptr_t cLeft = (cbImage - (RT_MAX(pImps->FirstThunk, pImps->u.OriginalFirstThunk))) 304 / sizeof(*puFirstThunk); 305 while (cLeft-- > 0 && *puOrgThunk != 0) 306 { 307 if (!(*puOrgThunk & IMAGE_ORDINAL_FLAG64)) 308 { 309 CHECK_LOG_RET(*puOrgThunk >= offEndNtHdrs && *puOrgThunk < cbImage, 310 ("NEMR0: vid.sys: Bad thunk entry: %#x", *puOrgThunk)); 311 312 const char *pszSymbol = (const char *)&pbImage[*puOrgThunk + 2]; 313 if (strcmp(pszSymbol, "WinHvGetPartitionProperty") == 0) 314 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk; 315 } 316 317 puOrgThunk++; 318 puFirstThunk++; 319 } 320 } 321 else 322 { 323 /* No original thunk table, so scan the resolved symbols for a match 324 with the WinHvGetPartitionProperty address. */ 325 uintptr_t const uNeedle = (uintptr_t)g_pfnWinHvGetPartitionProperty; 326 uintptr_t cLeft = (cbImage - pImps->FirstThunk) / sizeof(*puFirstThunk); 327 while (cLeft-- > 0 && *puFirstThunk != 0) 328 { 329 if (*puFirstThunk == uNeedle) 330 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk; 331 puFirstThunk++; 332 } 333 } 334 } 335 336 /* Report the findings: */ 337 if (g_ppfnVidSysWinHvGetPartitionProperty) 338 LogRel(("NEMR0: vid.sys: Found WinHvGetPartitionProperty import thunk at %p (value %p vs %p)\n", 339 g_ppfnVidSysWinHvGetPartitionProperty,*g_ppfnVidSysWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty)); 340 else 341 LogRel(("NEMR0: vid.sys: Did not find WinHvGetPartitionProperty!\n")); 342 } 343 //__except(EXCEPTION_EXECUTE_HANDLER) 344 //{ 345 // return; 346 //} 347 # undef CHECK_LOG_RET 348 } 349 350 351 /** 352 * Worker for NEMR0InitVM that prepares for intercepting stuff in VID.SYS. 353 */ 354 static void nemR0PrepareForVidSysIntercept(RTDBGKRNLINFO hKrnlInfo) 355 { 356 /* 357 * Resolve the symbols we need first. 
358 */ 359 int rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageBase", (void **)&g_pbVidSys); 360 if (RT_SUCCESS(rc)) 361 { 362 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageSize", (void **)&g_cbVidSys); 363 if (RT_SUCCESS(rc)) 364 { 365 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageNtHdrs", (void **)&g_pVidSysHdrs); 366 if (RT_SUCCESS(rc)) 367 { 368 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvGetPartitionProperty", 369 (void **)&g_pfnWinHvGetPartitionProperty); 370 if (RT_SUCCESS(rc)) 371 { 372 /* 373 * Now locate the import thunk entry for WinHvGetPartitionProperty in vid.sys. 374 */ 375 nemR0PrepareForVidSysInterceptInner(); 376 } 377 else 378 LogRel(("NEMR0: Failed to find winhvr.sys!WinHvGetPartitionProperty (%Rrc)\n", rc)); 379 } 380 else 381 LogRel(("NEMR0: Failed to find vid.sys!__ImageNtHdrs (%Rrc)\n", rc)); 382 } 383 else 384 LogRel(("NEMR0: Failed to find vid.sys!__ImageSize (%Rrc)\n", rc)); 385 } 386 else 387 LogRel(("NEMR0: Failed to find vid.sys!__ImageBase (%Rrc)\n", rc)); 388 } 389 390 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */ 89 } 391 90 392 91 … … 406 105 AssertRCReturn(rc, rc); 407 106 408 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES409 /*410 * We want to perform hypercalls here. The NT kernel started to expose a very low411 * level interface to do this thru somewhere between build 14271 and 16299. Since412 * we need build 17134 to get anywhere at all, the exact build is not relevant here.413 *414 * We also need to deposit memory to the hypervisor for use with partition (page415 * mapping structures, stuff).416 */417 RTDBGKRNLINFO hKrnlInfo;418 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);419 if (RT_SUCCESS(rc))420 {421 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);422 if (RT_FAILURE(rc))423 rc = VERR_NEM_MISSING_KERNEL_API_1;424 if (RT_SUCCESS(rc))425 {426 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);427 if (RT_FAILURE(rc))428 rc = rc == VERR_MODULE_NOT_FOUND ? VERR_NEM_MISSING_KERNEL_API_2 : VERR_NEM_MISSING_KERNEL_API_3;429 }430 431 /*432 * Since late 2021 we may also need to do some nasty trickery with vid.sys to get433 * the partition ID. 
So, ge the necessary info while we have a hKrnlInfo instance.434 */435 if (RT_SUCCESS(rc))436 nemR0PrepareForVidSysIntercept(hKrnlInfo);437 438 RTR0DbgKrnlInfoRelease(hKrnlInfo);439 if (RT_SUCCESS(rc))440 {441 /*442 * Allocate a page for non-EMT threads to use for hypercalls (update443 * statistics and such) and a critical section protecting it.444 */445 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);446 if (RT_SUCCESS(rc))447 {448 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);449 if (RT_SUCCESS(rc))450 {451 /*452 * Allocate a page for each VCPU to place hypercall data on.453 */454 for (VMCPUID i = 0; i < pGVM->cCpus; i++)455 {456 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);457 if (RT_FAILURE(rc))458 {459 while (i-- > 0)460 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);461 break;462 }463 }464 if (RT_SUCCESS(rc))465 {466 /*467 * So far, so good.468 */469 return rc;470 }471 472 /*473 * Bail out.474 */475 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);476 }477 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);478 }479 }480 }481 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */482 483 107 return rc; 484 108 } 485 109 486 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES487 488 /**489 * Perform an I/O control operation on the partition handle (VID.SYS).490 *491 * @returns NT status code.492 * @param pGVM The ring-0 VM structure.493 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.494 * @param uFunction The function to perform.495 * @param pvInput The input buffer. This must point within the VM496 * structure so we can easily convert to a ring-3497 * pointer if necessary.498 * @param cbInput The size of the input. @a pvInput must be NULL when499 * zero.500 * @param pvOutput The output buffer. This must also point within the501 * VM structure for ring-3 pointer magic.502 * @param cbOutput The size of the output. @a pvOutput must be NULL503 * when zero.504 * @thread EMT(pGVCpu)505 */506 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,507 void *pvOutput, uint32_t cbOutput)508 {509 # ifdef RT_STRICT510 /*511 * Input and output parameters are part of the VM CPU structure.512 */513 VMCPU_ASSERT_EMT(pGVCpu);514 if (pvInput)515 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);516 if (pvOutput)517 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);518 # endif519 520 int32_t rcNt = STATUS_UNSUCCESSFUL;521 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,522 pvInput,523 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,524 cbInput,525 pvOutput,526 pvOutput ? 
(uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,527 cbOutput,528 &rcNt);529 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))530 return (NTSTATUS)rcNt;531 return STATUS_UNSUCCESSFUL;532 }533 534 535 /**536 * Here is something that we really do not wish to do, but find us force do to537 * right now as we cannot rewrite the memory management of VBox 6.1 in time for538 * windows 11.539 *540 * @returns VBox status code.541 * @param pGVM The ring-0 VM structure.542 * @param pahMemObjs Array of 6 memory objects that the caller will release.543 * ASSUMES that they are initialized to NIL.544 */545 static int nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(PGVM pGVM, PRTR0MEMOBJ pahMemObjs)546 {547 /*548 * Check preconditions:549 */550 if ( !g_ppfnVidSysWinHvGetPartitionProperty551 || (uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & (sizeof(uintptr_t) - 1))552 {553 LogRel(("NEMR0: g_ppfnVidSysWinHvGetPartitionProperty is NULL or misaligned (%p), partition ID fallback not possible.\n",554 g_ppfnVidSysWinHvGetPartitionProperty));555 return VERR_NEM_INIT_FAILED;556 }557 if (!g_pfnWinHvGetPartitionProperty)558 {559 LogRel(("NEMR0: g_pfnWinHvGetPartitionProperty is NULL, partition ID fallback not possible.\n"));560 return VERR_NEM_INIT_FAILED;561 }562 if (!pGVM->nem.s.IoCtlGetPartitionProperty.uFunction)563 {564 LogRel(("NEMR0: IoCtlGetPartitionProperty.uFunction is 0, partition ID fallback not possible.\n"));565 return VERR_NEM_INIT_FAILED;566 }567 568 /*569 * Create an alias for the thunk table entry because its very likely to be read-only.570 */571 int rc = RTR0MemObjLockKernel(&pahMemObjs[0], g_ppfnVidSysWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);572 if (RT_FAILURE(rc))573 {574 LogRel(("NEMR0: RTR0MemObjLockKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));575 return rc;576 }577 578 rc = RTR0MemObjEnterPhys(&pahMemObjs[1], RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);579 if (RT_FAILURE(rc))580 {581 LogRel(("NEMR0: RTR0MemObjEnterPhys failed on VID.SYS thunk table entry: %Rrc\n", rc));582 return rc;583 }584 585 rc = RTR0MemObjMapKernel(&pahMemObjs[2], pahMemObjs[1], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);586 if (RT_FAILURE(rc))587 {588 LogRel(("NEMR0: RTR0MemObjMapKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));589 return rc;590 }591 592 decltype(WinHvGetPartitionProperty) **ppfnThunkAlias593 = (decltype(WinHvGetPartitionProperty) **)( (uintptr_t)RTR0MemObjAddress(pahMemObjs[2])594 | ((uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & PAGE_OFFSET_MASK));595 LogRel(("NEMR0: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p, phys %RHp\n", ppfnThunkAlias, *ppfnThunkAlias,596 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty,597 RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0) ));598 599 /*600 * Create an alias for the target code in WinHvr.sys as there is a very decent601 * chance we have to patch it.602 */603 rc = RTR0MemObjLockKernel(&pahMemObjs[3], g_pfnWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);604 if (RT_FAILURE(rc))605 {606 LogRel(("NEMR0: RTR0MemObjLockKernel failed on WinHvGetPartitionProperty (%p): %Rrc\n", g_pfnWinHvGetPartitionProperty, rc));607 return rc;608 }609 610 rc = RTR0MemObjEnterPhys(&pahMemObjs[4], RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);611 if (RT_FAILURE(rc))612 {613 LogRel(("NEMR0: RTR0MemObjEnterPhys failed on WinHvGetPartitionProperty: %Rrc\n", rc));614 
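/* Illustration only, not part of this changeset: the aliasing trick used above reduced
   to a helper.  To write through a kernel address that is normally mapped read-only
   (an import thunk entry, or code in WinHvr.sys), the page is locked, re-entered by
   its physical address and mapped again read+write.  The caller must free all three
   memory objects; ppvAlias is a hypothetical output parameter. */
static int exampleMakeWritableAlias(void *pvTarget, RTR0MEMOBJ ahMemObjs[3], void **ppvAlias)
{
    int rc = RTR0MemObjLockKernel(&ahMemObjs[0], pvTarget, sizeof(uintptr_t), RTMEM_PROT_READ);
    if (RT_SUCCESS(rc))
    {
        rc = RTR0MemObjEnterPhys(&ahMemObjs[1], RTR0MemObjGetPagePhysAddr(ahMemObjs[0], 0),
                                 PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
        if (RT_SUCCESS(rc))
        {
            rc = RTR0MemObjMapKernel(&ahMemObjs[2], ahMemObjs[1], (void *)-1, 0,
                                     RTMEM_PROT_READ | RTMEM_PROT_WRITE);
            if (RT_SUCCESS(rc))
                *ppvAlias = (void *)(  (uintptr_t)RTR0MemObjAddress(ahMemObjs[2])
                                     | ((uintptr_t)pvTarget & PAGE_OFFSET_MASK));
        }
    }
    return rc;
}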
return rc;615 }616 617 rc = RTR0MemObjMapKernel(&pahMemObjs[5], pahMemObjs[4], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);618 if (RT_FAILURE(rc))619 {620 LogRel(("NEMR0: RTR0MemObjMapKernel failed on WinHvGetPartitionProperty: %Rrc\n", rc));621 return rc;622 }623 624 uint8_t *pbTargetAlias = (uint8_t *)( (uintptr_t)RTR0MemObjAddress(pahMemObjs[5])625 | ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK));626 LogRel(("NEMR0: pbTargetAlias=%p %.16Rhxs; original: %p %.16Rhxs, phys %RHp\n", pbTargetAlias, pbTargetAlias,627 g_pfnWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty, RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0) ));628 629 /*630 * Analyse the target functions prologue to figure out how much we should copy631 * when patching it. We repeat this every time because we don't want to get632 * tripped up by someone else doing the same stuff as we're doing here.633 * We need at least 12 bytes for the patch sequence (MOV RAX, QWORD; JMP RAX)634 */635 union636 {637 uint8_t ab[48]; /**< Must be equal or smallar than g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog */638 int64_t ai64[6];639 } Org;640 memcpy(Org.ab, g_pfnWinHvGetPartitionProperty, sizeof(Org)); /** @todo ASSUMES 48 valid bytes start at function... */641 642 uint32_t offJmpBack = 0;643 uint32_t const cbMinJmpPatch = 12;644 DISSTATE Dis;645 while (offJmpBack < cbMinJmpPatch && offJmpBack < sizeof(Org) - 16)646 {647 uint32_t cbInstr = 1;648 rc = DISInstr(&Org.ab[offJmpBack], DISCPUMODE_64BIT, &Dis, &cbInstr);649 if (RT_FAILURE(rc))650 {651 LogRel(("NEMR0: DISInstr failed %#x bytes into WinHvGetPartitionProperty: %Rrc (%.48Rhxs)\n",652 offJmpBack, rc, Org.ab));653 break;654 }655 if (Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)656 {657 LogRel(("NEMR0: Control flow instruction %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",658 offJmpBack, Org.ab));659 break;660 }661 if (Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */)662 {663 LogRel(("NEMR0: RIP relative addressing %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",664 offJmpBack, Org.ab));665 break;666 }667 offJmpBack += cbInstr;668 }669 670 uintptr_t const cbLeftInPage = PAGE_SIZE - ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK);671 if (cbLeftInPage < 16 && offJmpBack >= cbMinJmpPatch)672 {673 LogRel(("NEMR0: WinHvGetPartitionProperty patching not possible do the page crossing: %p (%#zx)\n",674 g_pfnWinHvGetPartitionProperty, cbLeftInPage));675 offJmpBack = 0;676 }677 if (offJmpBack >= cbMinJmpPatch)678 LogRel(("NEMR0: offJmpBack=%#x for WinHvGetPartitionProperty (%p: %.48Rhxs)\n",679 offJmpBack, g_pfnWinHvGetPartitionProperty, Org.ab));680 else681 offJmpBack = 0;682 rc = VINF_SUCCESS;683 684 /*685 * Now enter serialization lock and get on with it...686 */687 PVMCPUCC const pVCpu0 = &pGVM->aCpus[0];688 NTSTATUS rcNt;689 RTCritSectEnter(&g_VidSysCritSect);690 691 /*692 * First attempt, patching the import table entry.693 */694 g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;695 g_hVidSysMatchThread = RTThreadNativeSelf();696 g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;697 pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;698 699 void *pvOld = NULL;700 if (ASMAtomicCmpXchgExPtr(ppfnThunkAlias, (void *)(uintptr_t)nemR0VidSysWinHvGetPartitionProperty,701 (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty, &pvOld))702 {703 LogRel(("NEMR0: after switch to %p: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p\n",704 
nemR0VidSysWinHvGetPartitionProperty, ppfnThunkAlias, *ppfnThunkAlias,705 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty));706 707 rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,708 &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,709 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),710 &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,711 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));712 ASMAtomicWritePtr(ppfnThunkAlias, (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty);713 HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;714 715 LogRel(("NEMR0: WinHvGetPartitionProperty trick #1 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",716 rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));717 pGVM->nemr0.s.idHvPartition = idHvPartition;718 }719 else720 {721 LogRel(("NEMR0: Unexpected WinHvGetPartitionProperty pointer in VID.SYS: %p, expected %p\n",722 pvOld, g_pfnWinHvGetPartitionProperty));723 rc = VERR_NEM_INIT_FAILED;724 }725 726 /*727 * If that didn't succeed, try patching the winhvr.sys code.728 */729 if ( pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID730 && offJmpBack >= cbMinJmpPatch)731 {732 g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;733 g_hVidSysMatchThread = RTThreadNativeSelf();734 g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;735 pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;736 737 /*738 * Prepare the hook area.739 */740 uint8_t *pbDst = g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog;741 memcpy(pbDst, (uint8_t const *)(uintptr_t)g_pfnWinHvGetPartitionProperty, offJmpBack);742 pbDst += offJmpBack;743 744 *pbDst++ = 0x48; /* mov rax, imm64 */745 *pbDst++ = 0xb8;746 *(uint64_t *)pbDst = (uintptr_t)g_pfnWinHvGetPartitionProperty + offJmpBack;747 pbDst += sizeof(uint64_t);748 *pbDst++ = 0xff; /* jmp rax */749 *pbDst++ = 0xe0;750 *pbDst++ = 0xcc; /* int3 */751 752 /*753 * Patch the original. 
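/* Illustration only, not part of this changeset: the 12-byte patch sequence the code
   above writes over the WinHvGetPartitionProperty prologue and into the trampoline --
   a 64-bit immediate load into RAX followed by an indirect jump (hence the
   cbMinJmpPatch = 12 requirement).  pbDst is assumed to point to at least 12 writable
   bytes. */
static void exampleEmitJmpViaRax(uint8_t *pbDst, uintptr_t uTarget)
{
    *pbDst++ = 0x48;                            /* REX.W prefix        */
    *pbDst++ = 0xb8;                            /* mov rax, imm64      */
    memcpy(pbDst, &uTarget, sizeof(uTarget));   /* the 8-byte target   */
    pbDst += sizeof(uTarget);
    *pbDst++ = 0xff;                            /* jmp rax             */
    *pbDst++ = 0xe0;
}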
We use cmpxchg16b here to avoid concurrency problems754 * (this also makes sure we don't trample over someone else doing similar755 * patching at the same time).756 */757 union758 {759 uint8_t ab[16];760 uint64_t au64[2];761 } Patch;762 memcpy(Patch.ab, Org.ab, sizeof(Patch));763 pbDst = Patch.ab;764 *pbDst++ = 0x48; /* mov rax, imm64 */765 *pbDst++ = 0xb8;766 *(uint64_t *)pbDst = (uintptr_t)nemR0WinHvrWinHvGetPartitionProperty;767 pbDst += sizeof(uint64_t);768 *pbDst++ = 0xff; /* jmp rax */769 *pbDst++ = 0xe0;770 771 int64_t ai64CmpCopy[2] = { Org.ai64[0], Org.ai64[1] }; /* paranoia */772 if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Patch.au64[1], Patch.au64[0], ai64CmpCopy) != 0)773 {774 rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,775 &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,776 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),777 &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,778 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));779 780 for (uint32_t cFailures = 0; cFailures < 10; cFailures++)781 {782 ai64CmpCopy[0] = Patch.au64[0]; /* paranoia */783 ai64CmpCopy[1] = Patch.au64[1];784 if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Org.ai64[1], Org.ai64[0], ai64CmpCopy) != 0)785 {786 if (cFailures > 0)787 LogRel(("NEMR0: Succeeded on try #%u.\n", cFailures));788 break;789 }790 LogRel(("NEMR0: Patch restore failure #%u: %.16Rhxs, expected %.16Rhxs\n",791 cFailures + 1, &ai64CmpCopy[0], &Patch.au64[0]));792 RTThreadSleep(1000);793 }794 795 HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;796 LogRel(("NEMR0: WinHvGetPartitionProperty trick #2 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",797 rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));798 pGVM->nemr0.s.idHvPartition = idHvPartition;799 800 }801 else802 {803 LogRel(("NEMR0: Failed to install WinHvGetPartitionProperty patch: %.16Rhxs, expected %.16Rhxs\n",804 &ai64CmpCopy[0], &Org.ai64[0]));805 rc = VERR_NEM_INIT_FAILED;806 }807 }808 809 RTCritSectLeave(&g_VidSysCritSect);810 811 return rc;812 }813 814 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */815 110 816 111 /** … … 825 120 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0); 826 121 AssertRCReturn(rc, rc); 827 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));828 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES829 # ifdef NEM_WIN_WITH_RING0_RUNLOOP830 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);831 # endif832 833 /*834 * Copy and validate the I/O control information from ring-3.835 */836 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;837 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);838 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);839 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);840 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;841 842 Copy = pGVM->nem.s.IoCtlGetPartitionProperty;843 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);844 AssertLogRelReturn(Copy.cbInput == sizeof(VID_PARTITION_PROPERTY_CODE), VERR_NEM_INIT_FAILED);845 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_PROPERTY), VERR_NEM_INIT_FAILED);846 pGVM->nemr0.s.IoCtlGetPartitionProperty = Copy;847 848 # ifdef NEM_WIN_WITH_RING0_RUNLOOP849 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;850 851 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;852 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);853 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = 
VERR_NEM_INIT_FAILED);854 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);855 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);856 if (RT_SUCCESS(rc))857 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;858 859 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;860 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);861 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);862 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);863 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);864 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);865 if (RT_SUCCESS(rc))866 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;867 868 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;869 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);870 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)871 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),872 rc = VERR_NEM_INIT_FAILED);873 AssertLogRelStmt(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);874 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);875 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);876 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);877 if (RT_SUCCESS(rc))878 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;879 # endif880 881 if ( RT_SUCCESS(rc)882 || !pGVM->nem.s.fUseRing0Runloop)883 {884 /*885 * Setup of an I/O control context for the partition handle for later use.886 */887 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);888 AssertLogRelRCReturn(rc, rc);889 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)890 {891 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];892 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;893 }894 895 /*896 * Get the partition ID.897 */898 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];899 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,900 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));901 # if 0902 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);903 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;904 # else905 /*906 * Since 2021 (Win11) the above I/O control doesn't work on exo-partitions907 * so we have to go to extremes to get at it. 
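The fallback below first hooks vid.sys' import thunk for WinHvGetPartitionProperty and, failing that, patches the first instructions of winhvr.sys' WinHvGetPartitionProperty itself, so the partition ID can be captured while the property is being queried.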
Sigh.908 */909 if ( !NT_SUCCESS(rcNt)910 || pVCpu0->nem.s.uIoCtlBuf.idPartition == HV_PARTITION_ID_INVALID)911 {912 LogRel(("IoCtlGetHvPartitionId failed: r0=%#RX64, r3=%#RX64, rcNt=%#x\n",913 pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition, rcNt));914 915 RTR0MEMOBJ ahMemObjs[6]916 = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ };917 rc = nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(pGVM, ahMemObjs);918 size_t i = RT_ELEMENTS(ahMemObjs);919 while (i-- > 0)920 RTR0MemObjFree(ahMemObjs[i], false /*fFreeMappings*/);921 }922 else923 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;924 925 if (pGVM->nem.s.idHvPartition == HV_PARTITION_ID_INVALID)926 pGVM->nem.s.idHvPartition = pGVM->nemr0.s.idHvPartition;927 # endif928 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,929 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),930 VERR_NEM_INIT_FAILED);931 if (RT_SUCCESS(rc) && pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID)932 rc = VERR_NEM_INIT_FAILED;933 }934 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */935 122 936 123 return rc; … … 948 135 VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM) 949 136 { 950 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES951 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;952 953 /* Clean up I/O control context. */954 if (pGVM->nemr0.s.pIoCtlCtx)955 {956 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);957 AssertRC(rc);958 pGVM->nemr0.s.pIoCtlCtx = NULL;959 }960 961 /* Free the hypercall pages. */962 VMCPUID i = pGVM->cCpus;963 while (i-- > 0)964 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);965 966 /* The non-EMT one too. */967 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))968 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);969 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);970 #else971 137 RT_NOREF(pGVM); 972 #endif 973 } 974 975 976 #if 0 /* for debugging GPA unmapping. */ 977 static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys) 978 { 979 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData; 980 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1); 981 pIn->PartitionId = pGVM->nemr0.s.idHvPartition; 982 pIn->VpIndex = pGVCpu->idCpu; 983 pIn->ByteCount = 0x10; 984 pIn->BaseGpa = GCPhys; 985 pIn->ControlFlags.AsUINT64 = 0; 986 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining; 987 memset(pOut, 0xfe, sizeof(*pOut)); 988 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData, 989 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn)); 990 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n", 991 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data)); 992 __debugbreak(); 993 994 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS; 995 } 996 #endif 997 998 999 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1000 /** 1001 * Worker for NEMR0MapPages and others. 1002 */ 1003 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, 1004 uint32_t cPages, uint32_t fFlags) 1005 { 1006 /* 1007 * Validate. 
1008 */ 1009 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 1010 1011 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE); 1012 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE); 1013 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS); 1014 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE); 1015 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE); 1016 if (GCPhysSrc != GCPhysDst) 1017 { 1018 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE); 1019 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE); 1020 } 1021 1022 /* 1023 * Compose and make the hypercall. 1024 * Ring-3 is not allowed to fill in the host physical addresses of the call. 1025 */ 1026 for (uint32_t iTries = 0;; iTries++) 1027 { 1028 RTGCPHYS GCPhysSrcTmp = GCPhysSrc; 1029 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage; 1030 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3); 1031 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition; 1032 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT; 1033 pMapPages->MapFlags = fFlags; 1034 pMapPages->u32ExplicitPadding = 0; 1035 1036 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrcTmp += X86_PAGE_SIZE) 1037 { 1038 RTHCPHYS HCPhys = NIL_RTGCPHYS; 1039 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrcTmp, &HCPhys); 1040 AssertRCReturn(rc, rc); 1041 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT; 1042 } 1043 1044 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32), 1045 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0); 1046 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n", 1047 GCPhysDst, GCPhysSrcTmp - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult)); 1048 if (uResult == ((uint64_t)cPages << 32)) 1049 return VINF_SUCCESS; 1050 1051 /* 1052 * If the partition is out of memory, try donate another 512 pages to 1053 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller. 
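*/

The success check a few lines up, uResult == ((uint64_t)cPages << 32), leans on the way Hyper-V packs rep-hypercall results. As a hedged illustration only, assuming the usual TLFS layout (status code in the low 16 bits, completed rep count in bits 32..43), it is equivalent to the decoding below:

#include <stdint.h>

typedef struct HVCALLRESULT
{
    uint16_t uStatus;    /* 0 means success */
    uint32_t cRepsDone;  /* number of elements the hypervisor processed */
} HVCALLRESULT;

/* Split a raw hypercall result value into status and completed-rep count. */
static HVCALLRESULT decodeHvCallResult(uint64_t uResult)
{
    HVCALLRESULT Res;
    Res.uStatus   = (uint16_t)(uResult & 0xffff);
    Res.cRepsDone = (uint32_t)((uResult >> 32) & 0xfff);
    return Res;
}

/* With that layout, "status == 0 && cRepsDone == cPages" is what the single
   64-bit compare above expresses. */

/* Deposit more memory into the partition and retry, unless the failure was
   something other than the partition running dry, we have already retried
   too often, or the deposit API is unavailable: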
1054 */ 1055 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY 1056 || iTries > 16 1057 || g_pfnWinHvDepositMemory == NULL) 1058 { 1059 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult)); 1060 return VERR_NEM_MAP_PAGES_FAILED; 1061 } 1062 1063 size_t cPagesAdded = 0; 1064 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded); 1065 if (!cPagesAdded) 1066 { 1067 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult)); 1068 return VERR_NEM_MAP_PAGES_FAILED; 1069 } 1070 } 1071 } 1072 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */ 138 } 1073 139 1074 140 … … 1087 153 VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu) 1088 154 { 1089 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1090 /*1091 * Unpack the call.1092 */1093 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);1094 if (RT_SUCCESS(rc))1095 {1096 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];1097 1098 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;1099 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;1100 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;1101 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;1102 1103 /*1104 * Do the work.1105 */1106 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);1107 }1108 return rc;1109 #else1110 155 RT_NOREF(pGVM, idCpu); 1111 156 return VERR_NOT_IMPLEMENTED; 1112 #endif 1113 } 1114 1115 1116 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1117 /** 1118 * Worker for NEMR0UnmapPages and others. 1119 */ 1120 NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages) 1121 { 1122 /* 1123 * Validate input. 1124 */ 1125 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 1126 1127 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE); 1128 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE); 1129 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE); 1130 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE); 1131 1132 /* 1133 * Compose and make the hypercall. 1134 */ 1135 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage; 1136 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3); 1137 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition; 1138 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT; 1139 pUnmapPages->fFlags = 0; 1140 1141 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32), 1142 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0); 1143 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult)); 1144 if (uResult == ((uint64_t)cPages << 32)) 1145 { 1146 # if 1 /* Do we need to do this? Hopefully not... 
*/ 1147 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32), 1148 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0); 1149 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR); 1150 # endif 1151 return VINF_SUCCESS; 1152 } 1153 1154 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult)); 1155 return VERR_NEM_UNMAP_PAGES_FAILED; 1156 } 1157 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */ 157 } 1158 158 1159 159 … … 1172 172 VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu) 1173 173 { 1174 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES1175 /*1176 * Unpack the call.1177 */1178 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);1179 if (RT_SUCCESS(rc))1180 {1181 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];1182 1183 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;1184 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;1185 1186 /*1187 * Do the work.1188 */1189 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);1190 }1191 return rc;1192 #else1193 174 RT_NOREF(pGVM, idCpu); 1194 175 return VERR_NOT_IMPLEMENTED; 1195 #endif 1196 } 1197 1198 1199 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 1200 /** 1201 * Worker for NEMR0ExportState. 1202 * 1203 * Intention is to use it internally later. 1204 * 1205 * @returns VBox status code. 1206 * @param pGVM The ring-0 VM handle. 1207 * @param pGVCpu The ring-0 VCPU handle. 1208 * @param pCtx The CPU context structure to import into. 1209 */ 1210 NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx) 1211 { 1212 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage; 1213 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); 1214 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 1215 1216 pInput->PartitionId = pGVM->nemr0.s.idHvPartition; 1217 pInput->VpIndex = pGVCpu->idCpu; 1218 pInput->RsvdZ = 0; 1219 1220 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK); 1221 if ( !fWhat 1222 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows) 1223 return VINF_SUCCESS; 1224 uintptr_t iReg = 0; 1225 1226 /* GPRs */ 1227 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK) 1228 { 1229 if (fWhat & CPUMCTX_EXTRN_RAX) 1230 { 1231 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1232 pInput->Elements[iReg].Name = HvX64RegisterRax; 1233 pInput->Elements[iReg].Value.Reg64 = pCtx->rax; 1234 iReg++; 1235 } 1236 if (fWhat & CPUMCTX_EXTRN_RCX) 1237 { 1238 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1239 pInput->Elements[iReg].Name = HvX64RegisterRcx; 1240 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx; 1241 iReg++; 1242 } 1243 if (fWhat & CPUMCTX_EXTRN_RDX) 1244 { 1245 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1246 pInput->Elements[iReg].Name = HvX64RegisterRdx; 1247 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx; 1248 iReg++; 1249 } 1250 if (fWhat & CPUMCTX_EXTRN_RBX) 1251 { 1252 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1253 pInput->Elements[iReg].Name = HvX64RegisterRbx; 1254 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx; 1255 iReg++; 1256 } 1257 if (fWhat & CPUMCTX_EXTRN_RSP) 1258 { 1259 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1260 pInput->Elements[iReg].Name = HvX64RegisterRsp; 1261 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp; 1262 iReg++; 1263 } 1264 if (fWhat & 
CPUMCTX_EXTRN_RBP) 1265 { 1266 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1267 pInput->Elements[iReg].Name = HvX64RegisterRbp; 1268 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp; 1269 iReg++; 1270 } 1271 if (fWhat & CPUMCTX_EXTRN_RSI) 1272 { 1273 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1274 pInput->Elements[iReg].Name = HvX64RegisterRsi; 1275 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi; 1276 iReg++; 1277 } 1278 if (fWhat & CPUMCTX_EXTRN_RDI) 1279 { 1280 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1281 pInput->Elements[iReg].Name = HvX64RegisterRdi; 1282 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi; 1283 iReg++; 1284 } 1285 if (fWhat & CPUMCTX_EXTRN_R8_R15) 1286 { 1287 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1288 pInput->Elements[iReg].Name = HvX64RegisterR8; 1289 pInput->Elements[iReg].Value.Reg64 = pCtx->r8; 1290 iReg++; 1291 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1292 pInput->Elements[iReg].Name = HvX64RegisterR9; 1293 pInput->Elements[iReg].Value.Reg64 = pCtx->r9; 1294 iReg++; 1295 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1296 pInput->Elements[iReg].Name = HvX64RegisterR10; 1297 pInput->Elements[iReg].Value.Reg64 = pCtx->r10; 1298 iReg++; 1299 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1300 pInput->Elements[iReg].Name = HvX64RegisterR11; 1301 pInput->Elements[iReg].Value.Reg64 = pCtx->r11; 1302 iReg++; 1303 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1304 pInput->Elements[iReg].Name = HvX64RegisterR12; 1305 pInput->Elements[iReg].Value.Reg64 = pCtx->r12; 1306 iReg++; 1307 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1308 pInput->Elements[iReg].Name = HvX64RegisterR13; 1309 pInput->Elements[iReg].Value.Reg64 = pCtx->r13; 1310 iReg++; 1311 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1312 pInput->Elements[iReg].Name = HvX64RegisterR14; 1313 pInput->Elements[iReg].Value.Reg64 = pCtx->r14; 1314 iReg++; 1315 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1316 pInput->Elements[iReg].Name = HvX64RegisterR15; 1317 pInput->Elements[iReg].Value.Reg64 = pCtx->r15; 1318 iReg++; 1319 } 1320 } 1321 1322 /* RIP & Flags */ 1323 if (fWhat & CPUMCTX_EXTRN_RIP) 1324 { 1325 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1326 pInput->Elements[iReg].Name = HvX64RegisterRip; 1327 pInput->Elements[iReg].Value.Reg64 = pCtx->rip; 1328 iReg++; 1329 } 1330 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 1331 { 1332 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1333 pInput->Elements[iReg].Name = HvX64RegisterRflags; 1334 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u; 1335 iReg++; 1336 } 1337 1338 /* Segments */ 1339 # define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \ 1340 do { \ 1341 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \ 1342 pInput->Elements[a_idx].Name = a_enmName; \ 1343 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \ 1344 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \ 1345 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \ 1346 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \ 1347 } while (0) 1348 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 1349 { 1350 if (fWhat & CPUMCTX_EXTRN_CS) 1351 { 1352 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs); 1353 iReg++; 1354 } 1355 if (fWhat & CPUMCTX_EXTRN_ES) 1356 { 1357 COPY_OUT_SEG(iReg, HvX64RegisterEs, 
pCtx->es); 1358 iReg++; 1359 } 1360 if (fWhat & CPUMCTX_EXTRN_SS) 1361 { 1362 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss); 1363 iReg++; 1364 } 1365 if (fWhat & CPUMCTX_EXTRN_DS) 1366 { 1367 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds); 1368 iReg++; 1369 } 1370 if (fWhat & CPUMCTX_EXTRN_FS) 1371 { 1372 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs); 1373 iReg++; 1374 } 1375 if (fWhat & CPUMCTX_EXTRN_GS) 1376 { 1377 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs); 1378 iReg++; 1379 } 1380 } 1381 1382 /* Descriptor tables & task segment. */ 1383 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 1384 { 1385 if (fWhat & CPUMCTX_EXTRN_LDTR) 1386 { 1387 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr); 1388 iReg++; 1389 } 1390 if (fWhat & CPUMCTX_EXTRN_TR) 1391 { 1392 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr); 1393 iReg++; 1394 } 1395 1396 if (fWhat & CPUMCTX_EXTRN_IDTR) 1397 { 1398 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1399 pInput->Elements[iReg].Value.Table.Pad[0] = 0; 1400 pInput->Elements[iReg].Value.Table.Pad[1] = 0; 1401 pInput->Elements[iReg].Value.Table.Pad[2] = 0; 1402 pInput->Elements[iReg].Name = HvX64RegisterIdtr; 1403 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt; 1404 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt; 1405 iReg++; 1406 } 1407 if (fWhat & CPUMCTX_EXTRN_GDTR) 1408 { 1409 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1410 pInput->Elements[iReg].Value.Table.Pad[0] = 0; 1411 pInput->Elements[iReg].Value.Table.Pad[1] = 0; 1412 pInput->Elements[iReg].Value.Table.Pad[2] = 0; 1413 pInput->Elements[iReg].Name = HvX64RegisterGdtr; 1414 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt; 1415 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt; 1416 iReg++; 1417 } 1418 } 1419 1420 /* Control registers. */ 1421 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 1422 { 1423 if (fWhat & CPUMCTX_EXTRN_CR0) 1424 { 1425 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1426 pInput->Elements[iReg].Name = HvX64RegisterCr0; 1427 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0; 1428 iReg++; 1429 } 1430 if (fWhat & CPUMCTX_EXTRN_CR2) 1431 { 1432 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1433 pInput->Elements[iReg].Name = HvX64RegisterCr2; 1434 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2; 1435 iReg++; 1436 } 1437 if (fWhat & CPUMCTX_EXTRN_CR3) 1438 { 1439 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1440 pInput->Elements[iReg].Name = HvX64RegisterCr3; 1441 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3; 1442 iReg++; 1443 } 1444 if (fWhat & CPUMCTX_EXTRN_CR4) 1445 { 1446 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1447 pInput->Elements[iReg].Name = HvX64RegisterCr4; 1448 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4; 1449 iReg++; 1450 } 1451 } 1452 if (fWhat & CPUMCTX_EXTRN_APIC_TPR) 1453 { 1454 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1455 pInput->Elements[iReg].Name = HvX64RegisterCr8; 1456 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu); 1457 iReg++; 1458 } 1459 1460 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */ 1461 1462 /* Debug registers. */ 1463 /** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. 
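Until that is sorted out, the guest's architectural DR values from the CPU context are exported as-is; the commented-out CPUMGetHyperDRx calls below mark where the hypervisor's own debug state would be substituted instead.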
*/ 1464 if (fWhat & CPUMCTX_EXTRN_DR0_DR3) 1465 { 1466 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1467 pInput->Elements[iReg].Name = HvX64RegisterDr0; 1468 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu); 1469 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0]; 1470 iReg++; 1471 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1472 pInput->Elements[iReg].Name = HvX64RegisterDr1; 1473 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu); 1474 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1]; 1475 iReg++; 1476 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1477 pInput->Elements[iReg].Name = HvX64RegisterDr2; 1478 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu); 1479 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2]; 1480 iReg++; 1481 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1482 pInput->Elements[iReg].Name = HvX64RegisterDr3; 1483 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu); 1484 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3]; 1485 iReg++; 1486 } 1487 if (fWhat & CPUMCTX_EXTRN_DR6) 1488 { 1489 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1490 pInput->Elements[iReg].Name = HvX64RegisterDr6; 1491 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu); 1492 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6]; 1493 iReg++; 1494 } 1495 if (fWhat & CPUMCTX_EXTRN_DR7) 1496 { 1497 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1498 pInput->Elements[iReg].Name = HvX64RegisterDr7; 1499 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu); 1500 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7]; 1501 iReg++; 1502 } 1503 1504 /* Floating point state. */ 1505 if (fWhat & CPUMCTX_EXTRN_X87) 1506 { 1507 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1508 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0; 1509 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[0].au64[0]; 1510 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[0].au64[1]; 1511 iReg++; 1512 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1513 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1; 1514 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[1].au64[0]; 1515 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[1].au64[1]; 1516 iReg++; 1517 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1518 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2; 1519 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[2].au64[0]; 1520 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[2].au64[1]; 1521 iReg++; 1522 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1523 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3; 1524 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[3].au64[0]; 1525 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[3].au64[1]; 1526 iReg++; 1527 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1528 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4; 1529 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[4].au64[0]; 1530 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[4].au64[1]; 1531 iReg++; 1532 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1533 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5; 1534 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = 
pCtx->XState.x87.aRegs[5].au64[0]; 1535 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[5].au64[1]; 1536 iReg++; 1537 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1538 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6; 1539 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[6].au64[0]; 1540 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[6].au64[1]; 1541 iReg++; 1542 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1543 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7; 1544 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[7].au64[0]; 1545 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[7].au64[1]; 1546 iReg++; 1547 1548 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1549 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus; 1550 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->XState.x87.FCW; 1551 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->XState.x87.FSW; 1552 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->XState.x87.FTW; 1553 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->XState.x87.FTW >> 8; 1554 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->XState.x87.FOP; 1555 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->XState.x87.FPUIP) 1556 | ((uint64_t)pCtx->XState.x87.CS << 32) 1557 | ((uint64_t)pCtx->XState.x87.Rsrvd1 << 48); 1558 iReg++; 1559 /** @todo we've got trouble if we try to write just SSE w/o X87. */ 1560 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1561 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus; 1562 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->XState.x87.FPUDP) 1563 | ((uint64_t)pCtx->XState.x87.DS << 32) 1564 | ((uint64_t)pCtx->XState.x87.Rsrvd2 << 48); 1565 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->XState.x87.MXCSR; 1566 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */ 1567 iReg++; 1568 } 1569 1570 /* Vector state. 
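(An equivalent loop form is sketched below.) */

/*
 * Sketch only: the unrolled XMM0..XMM15 block that follows can be collapsed
 * into a loop, assuming HvX64RegisterXmm0 .. HvX64RegisterXmm15 are
 * consecutive enumeration values (true for the headers this code uses, but
 * an assumption as far as this sketch is concerned):
 *
 *   for (unsigned i = 0; i < 16; i++)
 *   {
 *       HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
 *       pInput->Elements[iReg].Name = (HV_REGISTER_NAME)(HvX64RegisterXmm0 + i);
 *       pInput->Elements[iReg].Value.Reg128.Low64  = pCtx->XState.x87.aXMM[i].uXmm.s.Lo;
 *       pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[i].uXmm.s.Hi;
 *       iReg++;
 *   }
 */

/* Vector state, XMM0 thru XMM15.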
*/ 1571 if (fWhat & CPUMCTX_EXTRN_SSE_AVX) 1572 { 1573 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1574 pInput->Elements[iReg].Name = HvX64RegisterXmm0; 1575 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[0].uXmm.s.Lo; 1576 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[0].uXmm.s.Hi; 1577 iReg++; 1578 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1579 pInput->Elements[iReg].Name = HvX64RegisterXmm1; 1580 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[1].uXmm.s.Lo; 1581 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[1].uXmm.s.Hi; 1582 iReg++; 1583 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1584 pInput->Elements[iReg].Name = HvX64RegisterXmm2; 1585 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[2].uXmm.s.Lo; 1586 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[2].uXmm.s.Hi; 1587 iReg++; 1588 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1589 pInput->Elements[iReg].Name = HvX64RegisterXmm3; 1590 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[3].uXmm.s.Lo; 1591 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[3].uXmm.s.Hi; 1592 iReg++; 1593 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1594 pInput->Elements[iReg].Name = HvX64RegisterXmm4; 1595 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[4].uXmm.s.Lo; 1596 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[4].uXmm.s.Hi; 1597 iReg++; 1598 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1599 pInput->Elements[iReg].Name = HvX64RegisterXmm5; 1600 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[5].uXmm.s.Lo; 1601 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[5].uXmm.s.Hi; 1602 iReg++; 1603 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1604 pInput->Elements[iReg].Name = HvX64RegisterXmm6; 1605 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[6].uXmm.s.Lo; 1606 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[6].uXmm.s.Hi; 1607 iReg++; 1608 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1609 pInput->Elements[iReg].Name = HvX64RegisterXmm7; 1610 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[7].uXmm.s.Lo; 1611 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[7].uXmm.s.Hi; 1612 iReg++; 1613 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1614 pInput->Elements[iReg].Name = HvX64RegisterXmm8; 1615 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[8].uXmm.s.Lo; 1616 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[8].uXmm.s.Hi; 1617 iReg++; 1618 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1619 pInput->Elements[iReg].Name = HvX64RegisterXmm9; 1620 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[9].uXmm.s.Lo; 1621 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[9].uXmm.s.Hi; 1622 iReg++; 1623 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1624 pInput->Elements[iReg].Name = HvX64RegisterXmm10; 1625 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[10].uXmm.s.Lo; 1626 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[10].uXmm.s.Hi; 1627 iReg++; 1628 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1629 pInput->Elements[iReg].Name = HvX64RegisterXmm11; 1630 pInput->Elements[iReg].Value.Reg128.Low64 = 
pCtx->XState.x87.aXMM[11].uXmm.s.Lo; 1631 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[11].uXmm.s.Hi; 1632 iReg++; 1633 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1634 pInput->Elements[iReg].Name = HvX64RegisterXmm12; 1635 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[12].uXmm.s.Lo; 1636 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[12].uXmm.s.Hi; 1637 iReg++; 1638 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1639 pInput->Elements[iReg].Name = HvX64RegisterXmm13; 1640 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[13].uXmm.s.Lo; 1641 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[13].uXmm.s.Hi; 1642 iReg++; 1643 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1644 pInput->Elements[iReg].Name = HvX64RegisterXmm14; 1645 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[14].uXmm.s.Lo; 1646 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[14].uXmm.s.Hi; 1647 iReg++; 1648 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]); 1649 pInput->Elements[iReg].Name = HvX64RegisterXmm15; 1650 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[15].uXmm.s.Lo; 1651 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[15].uXmm.s.Hi; 1652 iReg++; 1653 } 1654 1655 /* MSRs */ 1656 // HvX64RegisterTsc - don't touch 1657 if (fWhat & CPUMCTX_EXTRN_EFER) 1658 { 1659 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1660 pInput->Elements[iReg].Name = HvX64RegisterEfer; 1661 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER; 1662 iReg++; 1663 } 1664 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 1665 { 1666 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1667 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase; 1668 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE; 1669 iReg++; 1670 } 1671 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 1672 { 1673 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1674 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs; 1675 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs; 1676 iReg++; 1677 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1678 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip; 1679 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip; 1680 iReg++; 1681 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1682 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp; 1683 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp; 1684 iReg++; 1685 } 1686 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 1687 { 1688 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1689 pInput->Elements[iReg].Name = HvX64RegisterStar; 1690 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR; 1691 iReg++; 1692 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1693 pInput->Elements[iReg].Name = HvX64RegisterLstar; 1694 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR; 1695 iReg++; 1696 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1697 pInput->Elements[iReg].Name = HvX64RegisterCstar; 1698 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR; 1699 iReg++; 1700 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1701 pInput->Elements[iReg].Name = HvX64RegisterSfmask; 1702 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK; 1703 iReg++; 1704 } 1705 if (fWhat & CPUMCTX_EXTRN_TSC_AUX) 1706 { 1707 
HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1708 pInput->Elements[iReg].Name = HvX64RegisterTscAux; 1709 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux; 1710 iReg++; 1711 } 1712 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 1713 { 1714 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1715 pInput->Elements[iReg].Name = HvX64RegisterApicBase; 1716 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu); 1717 iReg++; 1718 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1719 pInput->Elements[iReg].Name = HvX64RegisterPat; 1720 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT; 1721 iReg++; 1722 # if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */ 1723 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1724 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap; 1725 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu); 1726 iReg++; 1727 # endif 1728 1729 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu); 1730 1731 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1732 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType; 1733 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType; 1734 iReg++; 1735 1736 /** @todo we dont keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */ 1737 1738 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1739 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000; 1740 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000; 1741 iReg++; 1742 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1743 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000; 1744 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000; 1745 iReg++; 1746 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1747 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000; 1748 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000; 1749 iReg++; 1750 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1751 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000; 1752 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000; 1753 iReg++; 1754 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1755 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000; 1756 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000; 1757 iReg++; 1758 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1759 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000; 1760 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000; 1761 iReg++; 1762 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1763 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000; 1764 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000; 1765 iReg++; 1766 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1767 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000; 1768 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000; 1769 iReg++; 1770 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1771 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000; 1772 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000; 1773 iReg++; 1774 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1775 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000; 1776 pInput->Elements[iReg].Value.Reg64 = 
pCtxMsrs->msr.MtrrFix4K_F0000; 1777 iReg++; 1778 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1779 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000; 1780 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000; 1781 iReg++; 1782 1783 # if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */ 1784 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM); 1785 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 1786 { 1787 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1788 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable; 1789 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable; 1790 iReg++; 1791 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1792 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl; 1793 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu); 1794 iReg++; 1795 } 1796 # endif 1797 } 1798 1799 /* event injection (clear it). */ 1800 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT) 1801 { 1802 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1803 pInput->Elements[iReg].Name = HvRegisterPendingInterruption; 1804 pInput->Elements[iReg].Value.Reg64 = 0; 1805 iReg++; 1806 } 1807 1808 /* Interruptibility state. This can get a little complicated since we get 1809 half of the state via HV_X64_VP_EXECUTION_STATE. */ 1810 if ( (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)) 1811 == (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI) ) 1812 { 1813 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1814 pInput->Elements[iReg].Name = HvRegisterInterruptState; 1815 pInput->Elements[iReg].Value.Reg64 = 0; 1816 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1817 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip) 1818 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1; 1819 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS)) 1820 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1; 1821 iReg++; 1822 } 1823 else if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT) 1824 { 1825 if ( pGVCpu->nem.s.fLastInterruptShadow 1826 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1827 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)) 1828 { 1829 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1830 pInput->Elements[iReg].Name = HvRegisterInterruptState; 1831 pInput->Elements[iReg].Value.Reg64 = 0; 1832 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1833 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip) 1834 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1; 1835 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */ 1836 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS)) 1837 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1; 1838 iReg++; 1839 } 1840 } 1841 else 1842 Assert(!(fWhat & CPUMCTX_EXTRN_INHIBIT_NMI)); 1843 1844 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. 
*/ 1845 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows; 1846 if ( fDesiredIntWin 1847 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin) 1848 { 1849 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows; 1850 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1851 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications; 1852 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin; 1853 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI)); 1854 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR)); 1855 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT); 1856 iReg++; 1857 } 1858 1859 /// @todo HvRegisterPendingEvent0 1860 /// @todo HvRegisterPendingEvent1 1861 1862 /* 1863 * Set the registers. 1864 */ 1865 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */ 1866 1867 /* 1868 * Make the hypercall. 1869 */ 1870 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg), 1871 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/); 1872 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg), 1873 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg), 1874 VERR_NEM_SET_REGISTERS_FAILED); 1875 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn, 1876 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM )); 1877 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM; 1878 return VINF_SUCCESS; 1879 } 1880 #endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 176 } 1881 177 1882 178 … … 1891 187 VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu) 1892 188 { 1893 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 1894 /* 1895 * Validate the call. 1896 */ 1897 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 1898 if (RT_SUCCESS(rc)) 1899 { 1900 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 1901 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 1902 1903 /* 1904 * Call worker. 1905 */ 1906 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx); 1907 } 1908 return rc; 1909 #else 1910 RT_NOREF(pGVM, idCpu); 1911 return VERR_NOT_IMPLEMENTED; 1912 #endif 1913 } 1914 1915 1916 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 1917 /** 1918 * Worker for NEMR0ImportState. 1919 * 1920 * Intention is to use it internally later. 1921 * 1922 * @returns VBox status code. 1923 * @param pGVM The ring-0 VM handle. 1924 * @param pGVCpu The ring-0 VCPU handle. 1925 * @param pCtx The CPU context structure to import into. 1926 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 1927 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not. 
1928 */ 1929 NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3) 1930 { 1931 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage; 1932 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); 1933 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 1934 Assert(pCtx == &pGVCpu->cpum.GstCtx); 1935 1936 fWhat &= pCtx->fExtrn; 1937 1938 pInput->PartitionId = pGVM->nemr0.s.idHvPartition; 1939 pInput->VpIndex = pGVCpu->idCpu; 1940 pInput->fFlags = 0; 1941 1942 /* GPRs */ 1943 uintptr_t iReg = 0; 1944 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK) 1945 { 1946 if (fWhat & CPUMCTX_EXTRN_RAX) 1947 pInput->Names[iReg++] = HvX64RegisterRax; 1948 if (fWhat & CPUMCTX_EXTRN_RCX) 1949 pInput->Names[iReg++] = HvX64RegisterRcx; 1950 if (fWhat & CPUMCTX_EXTRN_RDX) 1951 pInput->Names[iReg++] = HvX64RegisterRdx; 1952 if (fWhat & CPUMCTX_EXTRN_RBX) 1953 pInput->Names[iReg++] = HvX64RegisterRbx; 1954 if (fWhat & CPUMCTX_EXTRN_RSP) 1955 pInput->Names[iReg++] = HvX64RegisterRsp; 1956 if (fWhat & CPUMCTX_EXTRN_RBP) 1957 pInput->Names[iReg++] = HvX64RegisterRbp; 1958 if (fWhat & CPUMCTX_EXTRN_RSI) 1959 pInput->Names[iReg++] = HvX64RegisterRsi; 1960 if (fWhat & CPUMCTX_EXTRN_RDI) 1961 pInput->Names[iReg++] = HvX64RegisterRdi; 1962 if (fWhat & CPUMCTX_EXTRN_R8_R15) 1963 { 1964 pInput->Names[iReg++] = HvX64RegisterR8; 1965 pInput->Names[iReg++] = HvX64RegisterR9; 1966 pInput->Names[iReg++] = HvX64RegisterR10; 1967 pInput->Names[iReg++] = HvX64RegisterR11; 1968 pInput->Names[iReg++] = HvX64RegisterR12; 1969 pInput->Names[iReg++] = HvX64RegisterR13; 1970 pInput->Names[iReg++] = HvX64RegisterR14; 1971 pInput->Names[iReg++] = HvX64RegisterR15; 1972 } 1973 } 1974 1975 /* RIP & Flags */ 1976 if (fWhat & CPUMCTX_EXTRN_RIP) 1977 pInput->Names[iReg++] = HvX64RegisterRip; 1978 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 1979 pInput->Names[iReg++] = HvX64RegisterRflags; 1980 1981 /* Segments */ 1982 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 1983 { 1984 if (fWhat & CPUMCTX_EXTRN_CS) 1985 pInput->Names[iReg++] = HvX64RegisterCs; 1986 if (fWhat & CPUMCTX_EXTRN_ES) 1987 pInput->Names[iReg++] = HvX64RegisterEs; 1988 if (fWhat & CPUMCTX_EXTRN_SS) 1989 pInput->Names[iReg++] = HvX64RegisterSs; 1990 if (fWhat & CPUMCTX_EXTRN_DS) 1991 pInput->Names[iReg++] = HvX64RegisterDs; 1992 if (fWhat & CPUMCTX_EXTRN_FS) 1993 pInput->Names[iReg++] = HvX64RegisterFs; 1994 if (fWhat & CPUMCTX_EXTRN_GS) 1995 pInput->Names[iReg++] = HvX64RegisterGs; 1996 } 1997 1998 /* Descriptor tables and the task segment. */ 1999 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 2000 { 2001 if (fWhat & CPUMCTX_EXTRN_LDTR) 2002 pInput->Names[iReg++] = HvX64RegisterLdtr; 2003 if (fWhat & CPUMCTX_EXTRN_TR) 2004 pInput->Names[iReg++] = HvX64RegisterTr; 2005 if (fWhat & CPUMCTX_EXTRN_IDTR) 2006 pInput->Names[iReg++] = HvX64RegisterIdtr; 2007 if (fWhat & CPUMCTX_EXTRN_GDTR) 2008 pInput->Names[iReg++] = HvX64RegisterGdtr; 2009 } 2010 2011 /* Control registers. */ 2012 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 2013 { 2014 if (fWhat & CPUMCTX_EXTRN_CR0) 2015 pInput->Names[iReg++] = HvX64RegisterCr0; 2016 if (fWhat & CPUMCTX_EXTRN_CR2) 2017 pInput->Names[iReg++] = HvX64RegisterCr2; 2018 if (fWhat & CPUMCTX_EXTRN_CR3) 2019 pInput->Names[iReg++] = HvX64RegisterCr3; 2020 if (fWhat & CPUMCTX_EXTRN_CR4) 2021 pInput->Names[iReg++] = HvX64RegisterCr4; 2022 } 2023 if (fWhat & CPUMCTX_EXTRN_APIC_TPR) 2024 pInput->Names[iReg++] = HvX64RegisterCr8; 2025 2026 /* Debug registers. 
*/ 2027 if (fWhat & CPUMCTX_EXTRN_DR7) 2028 pInput->Names[iReg++] = HvX64RegisterDr7; 2029 if (fWhat & CPUMCTX_EXTRN_DR0_DR3) 2030 { 2031 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7)) 2032 { 2033 fWhat |= CPUMCTX_EXTRN_DR7; 2034 pInput->Names[iReg++] = HvX64RegisterDr7; 2035 } 2036 pInput->Names[iReg++] = HvX64RegisterDr0; 2037 pInput->Names[iReg++] = HvX64RegisterDr1; 2038 pInput->Names[iReg++] = HvX64RegisterDr2; 2039 pInput->Names[iReg++] = HvX64RegisterDr3; 2040 } 2041 if (fWhat & CPUMCTX_EXTRN_DR6) 2042 pInput->Names[iReg++] = HvX64RegisterDr6; 2043 2044 /* Floating point state. */ 2045 if (fWhat & CPUMCTX_EXTRN_X87) 2046 { 2047 pInput->Names[iReg++] = HvX64RegisterFpMmx0; 2048 pInput->Names[iReg++] = HvX64RegisterFpMmx1; 2049 pInput->Names[iReg++] = HvX64RegisterFpMmx2; 2050 pInput->Names[iReg++] = HvX64RegisterFpMmx3; 2051 pInput->Names[iReg++] = HvX64RegisterFpMmx4; 2052 pInput->Names[iReg++] = HvX64RegisterFpMmx5; 2053 pInput->Names[iReg++] = HvX64RegisterFpMmx6; 2054 pInput->Names[iReg++] = HvX64RegisterFpMmx7; 2055 pInput->Names[iReg++] = HvX64RegisterFpControlStatus; 2056 } 2057 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX)) 2058 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus; 2059 2060 /* Vector state. */ 2061 if (fWhat & CPUMCTX_EXTRN_SSE_AVX) 2062 { 2063 pInput->Names[iReg++] = HvX64RegisterXmm0; 2064 pInput->Names[iReg++] = HvX64RegisterXmm1; 2065 pInput->Names[iReg++] = HvX64RegisterXmm2; 2066 pInput->Names[iReg++] = HvX64RegisterXmm3; 2067 pInput->Names[iReg++] = HvX64RegisterXmm4; 2068 pInput->Names[iReg++] = HvX64RegisterXmm5; 2069 pInput->Names[iReg++] = HvX64RegisterXmm6; 2070 pInput->Names[iReg++] = HvX64RegisterXmm7; 2071 pInput->Names[iReg++] = HvX64RegisterXmm8; 2072 pInput->Names[iReg++] = HvX64RegisterXmm9; 2073 pInput->Names[iReg++] = HvX64RegisterXmm10; 2074 pInput->Names[iReg++] = HvX64RegisterXmm11; 2075 pInput->Names[iReg++] = HvX64RegisterXmm12; 2076 pInput->Names[iReg++] = HvX64RegisterXmm13; 2077 pInput->Names[iReg++] = HvX64RegisterXmm14; 2078 pInput->Names[iReg++] = HvX64RegisterXmm15; 2079 } 2080 2081 /* MSRs */ 2082 // HvX64RegisterTsc - don't touch 2083 if (fWhat & CPUMCTX_EXTRN_EFER) 2084 pInput->Names[iReg++] = HvX64RegisterEfer; 2085 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 2086 pInput->Names[iReg++] = HvX64RegisterKernelGsBase; 2087 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 2088 { 2089 pInput->Names[iReg++] = HvX64RegisterSysenterCs; 2090 pInput->Names[iReg++] = HvX64RegisterSysenterEip; 2091 pInput->Names[iReg++] = HvX64RegisterSysenterEsp; 2092 } 2093 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 2094 { 2095 pInput->Names[iReg++] = HvX64RegisterStar; 2096 pInput->Names[iReg++] = HvX64RegisterLstar; 2097 pInput->Names[iReg++] = HvX64RegisterCstar; 2098 pInput->Names[iReg++] = HvX64RegisterSfmask; 2099 } 2100 2101 # ifdef LOG_ENABLED 2102 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM); 2103 # endif 2104 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 2105 { 2106 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE 2107 pInput->Names[iReg++] = HvX64RegisterPat; 2108 # if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? 
(AMD) */ 2109 pInput->Names[iReg++] = HvX64RegisterMtrrCap; 2110 # endif 2111 pInput->Names[iReg++] = HvX64RegisterMtrrDefType; 2112 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000; 2113 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000; 2114 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000; 2115 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000; 2116 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000; 2117 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000; 2118 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000; 2119 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000; 2120 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000; 2121 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000; 2122 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000; 2123 pInput->Names[iReg++] = HvX64RegisterTscAux; 2124 # if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */ 2125 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 2126 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable; 2127 # endif 2128 # ifdef LOG_ENABLED 2129 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON) 2130 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl; 2131 # endif 2132 } 2133 2134 /* Interruptibility. */ 2135 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)) 2136 { 2137 pInput->Names[iReg++] = HvRegisterInterruptState; 2138 pInput->Names[iReg++] = HvX64RegisterRip; 2139 } 2140 2141 /* event injection */ 2142 pInput->Names[iReg++] = HvRegisterPendingInterruption; 2143 pInput->Names[iReg++] = HvRegisterPendingEvent0; 2144 pInput->Names[iReg++] = HvRegisterPendingEvent1; 2145 size_t const cRegs = iReg; 2146 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32); 2147 2148 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput); 2149 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */ 2150 RT_BZERO(paValues, cRegs * sizeof(paValues[0])); 2151 2152 /* 2153 * Make the hypercall. 2154 */ 2155 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs), 2156 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 2157 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput); 2158 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs), 2159 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs), 2160 VERR_NEM_GET_REGISTERS_FAILED); 2161 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn)); 2162 2163 /* 2164 * Copy information to the CPUM context. 
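(The hypercall buffer layout this relies on is sketched below.) */

/*
 * Sketch of the single-page hypercall buffer used above, as the code itself
 * lays it out (input header plus Names[] first, output values following at
 * the next 32-byte boundary, everything within the one HypercallData page):
 *
 *   +------------------------------+  <- HypercallData.pbPage / .HCPhysPage
 *   | HV_INPUT_GET_VP_REGISTERS    |     PartitionId, VpIndex, fFlags
 *   | Names[0 .. cRegs-1]          |     register names being requested
 *   +------------------------------+  <- cbInput (Names[cRegs] rounded up to 32)
 *   | HV_REGISTER_VALUE[cRegs]     |     paValues, filled in by the hypervisor
 *   +------------------------------+     (asserted to stay below PAGE_SIZE)
 *
 * Hence the HvCallGetVpRegisters rep call is issued with HCPhysPage as the
 * input GPA and HCPhysPage + cbInput as the output GPA.
 */

/* Walk the output array in the same order the names were added above.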
2165 */ 2166 iReg = 0; 2167 2168 /* GPRs */ 2169 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK) 2170 { 2171 if (fWhat & CPUMCTX_EXTRN_RAX) 2172 { 2173 Assert(pInput->Names[iReg] == HvX64RegisterRax); 2174 pCtx->rax = paValues[iReg++].Reg64; 2175 } 2176 if (fWhat & CPUMCTX_EXTRN_RCX) 2177 { 2178 Assert(pInput->Names[iReg] == HvX64RegisterRcx); 2179 pCtx->rcx = paValues[iReg++].Reg64; 2180 } 2181 if (fWhat & CPUMCTX_EXTRN_RDX) 2182 { 2183 Assert(pInput->Names[iReg] == HvX64RegisterRdx); 2184 pCtx->rdx = paValues[iReg++].Reg64; 2185 } 2186 if (fWhat & CPUMCTX_EXTRN_RBX) 2187 { 2188 Assert(pInput->Names[iReg] == HvX64RegisterRbx); 2189 pCtx->rbx = paValues[iReg++].Reg64; 2190 } 2191 if (fWhat & CPUMCTX_EXTRN_RSP) 2192 { 2193 Assert(pInput->Names[iReg] == HvX64RegisterRsp); 2194 pCtx->rsp = paValues[iReg++].Reg64; 2195 } 2196 if (fWhat & CPUMCTX_EXTRN_RBP) 2197 { 2198 Assert(pInput->Names[iReg] == HvX64RegisterRbp); 2199 pCtx->rbp = paValues[iReg++].Reg64; 2200 } 2201 if (fWhat & CPUMCTX_EXTRN_RSI) 2202 { 2203 Assert(pInput->Names[iReg] == HvX64RegisterRsi); 2204 pCtx->rsi = paValues[iReg++].Reg64; 2205 } 2206 if (fWhat & CPUMCTX_EXTRN_RDI) 2207 { 2208 Assert(pInput->Names[iReg] == HvX64RegisterRdi); 2209 pCtx->rdi = paValues[iReg++].Reg64; 2210 } 2211 if (fWhat & CPUMCTX_EXTRN_R8_R15) 2212 { 2213 Assert(pInput->Names[iReg] == HvX64RegisterR8); 2214 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15); 2215 pCtx->r8 = paValues[iReg++].Reg64; 2216 pCtx->r9 = paValues[iReg++].Reg64; 2217 pCtx->r10 = paValues[iReg++].Reg64; 2218 pCtx->r11 = paValues[iReg++].Reg64; 2219 pCtx->r12 = paValues[iReg++].Reg64; 2220 pCtx->r13 = paValues[iReg++].Reg64; 2221 pCtx->r14 = paValues[iReg++].Reg64; 2222 pCtx->r15 = paValues[iReg++].Reg64; 2223 } 2224 } 2225 2226 /* RIP & Flags */ 2227 if (fWhat & CPUMCTX_EXTRN_RIP) 2228 { 2229 Assert(pInput->Names[iReg] == HvX64RegisterRip); 2230 pCtx->rip = paValues[iReg++].Reg64; 2231 } 2232 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 2233 { 2234 Assert(pInput->Names[iReg] == HvX64RegisterRflags); 2235 pCtx->rflags.u = paValues[iReg++].Reg64; 2236 } 2237 2238 /* Segments */ 2239 # define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \ 2240 do { \ 2241 Assert(pInput->Names[a_idx] == a_enmName); \ 2242 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \ 2243 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \ 2244 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \ 2245 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \ 2246 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \ 2247 } while (0) 2248 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 2249 { 2250 if (fWhat & CPUMCTX_EXTRN_CS) 2251 { 2252 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs); 2253 iReg++; 2254 } 2255 if (fWhat & CPUMCTX_EXTRN_ES) 2256 { 2257 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es); 2258 iReg++; 2259 } 2260 if (fWhat & CPUMCTX_EXTRN_SS) 2261 { 2262 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss); 2263 iReg++; 2264 } 2265 if (fWhat & CPUMCTX_EXTRN_DS) 2266 { 2267 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds); 2268 iReg++; 2269 } 2270 if (fWhat & CPUMCTX_EXTRN_FS) 2271 { 2272 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs); 2273 iReg++; 2274 } 2275 if (fWhat & CPUMCTX_EXTRN_GS) 2276 { 2277 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs); 2278 iReg++; 2279 } 2280 } 2281 /* Descriptor tables and the task segment. 
*/ 2282 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 2283 { 2284 if (fWhat & CPUMCTX_EXTRN_LDTR) 2285 { 2286 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr); 2287 iReg++; 2288 } 2289 if (fWhat & CPUMCTX_EXTRN_TR) 2290 { 2291 /* AMD-V likes loading TR with in AVAIL state, whereas intel insists on BUSY. So, 2292 avoid to trigger sanity assertions around the code, always fix this. */ 2293 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr); 2294 switch (pCtx->tr.Attr.n.u4Type) 2295 { 2296 case X86_SEL_TYPE_SYS_386_TSS_BUSY: 2297 case X86_SEL_TYPE_SYS_286_TSS_BUSY: 2298 break; 2299 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: 2300 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; 2301 break; 2302 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: 2303 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY; 2304 break; 2305 } 2306 iReg++; 2307 } 2308 if (fWhat & CPUMCTX_EXTRN_IDTR) 2309 { 2310 Assert(pInput->Names[iReg] == HvX64RegisterIdtr); 2311 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit; 2312 pCtx->idtr.pIdt = paValues[iReg].Table.Base; 2313 iReg++; 2314 } 2315 if (fWhat & CPUMCTX_EXTRN_GDTR) 2316 { 2317 Assert(pInput->Names[iReg] == HvX64RegisterGdtr); 2318 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit; 2319 pCtx->gdtr.pGdt = paValues[iReg].Table.Base; 2320 iReg++; 2321 } 2322 } 2323 2324 /* Control registers. */ 2325 bool fMaybeChangedMode = false; 2326 bool fUpdateCr3 = false; 2327 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 2328 { 2329 if (fWhat & CPUMCTX_EXTRN_CR0) 2330 { 2331 Assert(pInput->Names[iReg] == HvX64RegisterCr0); 2332 if (pCtx->cr0 != paValues[iReg].Reg64) 2333 { 2334 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64); 2335 fMaybeChangedMode = true; 2336 } 2337 iReg++; 2338 } 2339 if (fWhat & CPUMCTX_EXTRN_CR2) 2340 { 2341 Assert(pInput->Names[iReg] == HvX64RegisterCr2); 2342 pCtx->cr2 = paValues[iReg].Reg64; 2343 iReg++; 2344 } 2345 if (fWhat & CPUMCTX_EXTRN_CR3) 2346 { 2347 Assert(pInput->Names[iReg] == HvX64RegisterCr3); 2348 if (pCtx->cr3 != paValues[iReg].Reg64) 2349 { 2350 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64); 2351 fUpdateCr3 = true; 2352 } 2353 iReg++; 2354 } 2355 if (fWhat & CPUMCTX_EXTRN_CR4) 2356 { 2357 Assert(pInput->Names[iReg] == HvX64RegisterCr4); 2358 if (pCtx->cr4 != paValues[iReg].Reg64) 2359 { 2360 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64); 2361 fMaybeChangedMode = true; 2362 } 2363 iReg++; 2364 } 2365 } 2366 if (fWhat & CPUMCTX_EXTRN_APIC_TPR) 2367 { 2368 Assert(pInput->Names[iReg] == HvX64RegisterCr8); 2369 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4); 2370 iReg++; 2371 } 2372 2373 /* Debug registers. */ 2374 if (fWhat & CPUMCTX_EXTRN_DR7) 2375 { 2376 Assert(pInput->Names[iReg] == HvX64RegisterDr7); 2377 if (pCtx->dr[7] != paValues[iReg].Reg64) 2378 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64); 2379 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. 
*/ 2380 iReg++; 2381 } 2382 if (fWhat & CPUMCTX_EXTRN_DR0_DR3) 2383 { 2384 Assert(pInput->Names[iReg] == HvX64RegisterDr0); 2385 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3); 2386 if (pCtx->dr[0] != paValues[iReg].Reg64) 2387 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64); 2388 iReg++; 2389 if (pCtx->dr[1] != paValues[iReg].Reg64) 2390 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64); 2391 iReg++; 2392 if (pCtx->dr[2] != paValues[iReg].Reg64) 2393 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64); 2394 iReg++; 2395 if (pCtx->dr[3] != paValues[iReg].Reg64) 2396 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64); 2397 iReg++; 2398 } 2399 if (fWhat & CPUMCTX_EXTRN_DR6) 2400 { 2401 Assert(pInput->Names[iReg] == HvX64RegisterDr6); 2402 if (pCtx->dr[6] != paValues[iReg].Reg64) 2403 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64); 2404 iReg++; 2405 } 2406 2407 /* Floating point state. */ 2408 if (fWhat & CPUMCTX_EXTRN_X87) 2409 { 2410 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0); 2411 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7); 2412 pCtx->XState.x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2413 pCtx->XState.x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2414 iReg++; 2415 pCtx->XState.x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2416 pCtx->XState.x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2417 iReg++; 2418 pCtx->XState.x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2419 pCtx->XState.x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2420 iReg++; 2421 pCtx->XState.x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2422 pCtx->XState.x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2423 iReg++; 2424 pCtx->XState.x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2425 pCtx->XState.x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2426 iReg++; 2427 pCtx->XState.x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2428 pCtx->XState.x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2429 iReg++; 2430 pCtx->XState.x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2431 pCtx->XState.x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2432 iReg++; 2433 pCtx->XState.x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64; 2434 pCtx->XState.x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64; 2435 iReg++; 2436 2437 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus); 2438 pCtx->XState.x87.FCW = paValues[iReg].FpControlStatus.FpControl; 2439 pCtx->XState.x87.FSW = paValues[iReg].FpControlStatus.FpStatus; 2440 pCtx->XState.x87.FTW = paValues[iReg].FpControlStatus.FpTag 2441 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/; 2442 pCtx->XState.x87.FOP = paValues[iReg].FpControlStatus.LastFpOp; 2443 pCtx->XState.x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip; 2444 pCtx->XState.x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32); 2445 pCtx->XState.x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48); 2446 iReg++; 2447 } 2448 2449 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX)) 2450 { 2451 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus); 2452 if (fWhat & CPUMCTX_EXTRN_X87) 2453 { 2454 pCtx->XState.x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp; 2455 pCtx->XState.x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32); 2456 pCtx->XState.x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48); 2457 } 2458 pCtx->XState.x87.MXCSR = 
paValues[iReg].XmmControlStatus.XmmStatusControl; 2459 pCtx->XState.x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */ 2460 iReg++; 2461 } 2462 2463 /* Vector state. */ 2464 if (fWhat & CPUMCTX_EXTRN_SSE_AVX) 2465 { 2466 Assert(pInput->Names[iReg] == HvX64RegisterXmm0); 2467 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15); 2468 pCtx->XState.x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2469 pCtx->XState.x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2470 iReg++; 2471 pCtx->XState.x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2472 pCtx->XState.x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2473 iReg++; 2474 pCtx->XState.x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2475 pCtx->XState.x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2476 iReg++; 2477 pCtx->XState.x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2478 pCtx->XState.x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2479 iReg++; 2480 pCtx->XState.x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2481 pCtx->XState.x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2482 iReg++; 2483 pCtx->XState.x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2484 pCtx->XState.x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2485 iReg++; 2486 pCtx->XState.x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2487 pCtx->XState.x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2488 iReg++; 2489 pCtx->XState.x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2490 pCtx->XState.x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2491 iReg++; 2492 pCtx->XState.x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2493 pCtx->XState.x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2494 iReg++; 2495 pCtx->XState.x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2496 pCtx->XState.x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2497 iReg++; 2498 pCtx->XState.x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2499 pCtx->XState.x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2500 iReg++; 2501 pCtx->XState.x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2502 pCtx->XState.x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2503 iReg++; 2504 pCtx->XState.x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2505 pCtx->XState.x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2506 iReg++; 2507 pCtx->XState.x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2508 pCtx->XState.x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2509 iReg++; 2510 pCtx->XState.x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2511 pCtx->XState.x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2512 iReg++; 2513 pCtx->XState.x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64; 2514 pCtx->XState.x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64; 2515 iReg++; 2516 } 2517 2518 2519 /* MSRs */ 2520 // HvX64RegisterTsc - don't touch 2521 if (fWhat & CPUMCTX_EXTRN_EFER) 2522 { 2523 Assert(pInput->Names[iReg] == HvX64RegisterEfer); 2524 if (paValues[iReg].Reg64 != pCtx->msrEFER) 2525 { 2526 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64)); 2527 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE) 2528 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE)); 2529 pCtx->msrEFER = paValues[iReg].Reg64; 2530 fMaybeChangedMode = true; 2531 } 2532 iReg++; 2533 } 2534 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 2535 { 2536 
Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase); 2537 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64) 2538 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64)); 2539 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64; 2540 iReg++; 2541 } 2542 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 2543 { 2544 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs); 2545 if (pCtx->SysEnter.cs != paValues[iReg].Reg64) 2546 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64)); 2547 pCtx->SysEnter.cs = paValues[iReg].Reg64; 2548 iReg++; 2549 2550 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip); 2551 if (pCtx->SysEnter.eip != paValues[iReg].Reg64) 2552 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64)); 2553 pCtx->SysEnter.eip = paValues[iReg].Reg64; 2554 iReg++; 2555 2556 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp); 2557 if (pCtx->SysEnter.esp != paValues[iReg].Reg64) 2558 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64)); 2559 pCtx->SysEnter.esp = paValues[iReg].Reg64; 2560 iReg++; 2561 } 2562 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 2563 { 2564 Assert(pInput->Names[iReg] == HvX64RegisterStar); 2565 if (pCtx->msrSTAR != paValues[iReg].Reg64) 2566 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64)); 2567 pCtx->msrSTAR = paValues[iReg].Reg64; 2568 iReg++; 2569 2570 Assert(pInput->Names[iReg] == HvX64RegisterLstar); 2571 if (pCtx->msrLSTAR != paValues[iReg].Reg64) 2572 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64)); 2573 pCtx->msrLSTAR = paValues[iReg].Reg64; 2574 iReg++; 2575 2576 Assert(pInput->Names[iReg] == HvX64RegisterCstar); 2577 if (pCtx->msrCSTAR != paValues[iReg].Reg64) 2578 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64)); 2579 pCtx->msrCSTAR = paValues[iReg].Reg64; 2580 iReg++; 2581 2582 Assert(pInput->Names[iReg] == HvX64RegisterSfmask); 2583 if (pCtx->msrSFMASK != paValues[iReg].Reg64) 2584 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64)); 2585 pCtx->msrSFMASK = paValues[iReg].Reg64; 2586 iReg++; 2587 } 2588 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 2589 { 2590 Assert(pInput->Names[iReg] == HvX64RegisterApicBase); 2591 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu); 2592 if (paValues[iReg].Reg64 != uOldBase) 2593 { 2594 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n", 2595 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase)); 2596 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64); 2597 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64)); 2598 } 2599 iReg++; 2600 2601 Assert(pInput->Names[iReg] == HvX64RegisterPat); 2602 if (pCtx->msrPAT != paValues[iReg].Reg64) 2603 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64)); 2604 pCtx->msrPAT = paValues[iReg].Reg64; 2605 iReg++; 2606 2607 # if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? 
(AMD) */ 2608 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap); 2609 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu)) 2610 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64)); 2611 iReg++; 2612 # endif 2613 2614 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu); 2615 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType); 2616 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType ) 2617 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64)); 2618 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64; 2619 iReg++; 2620 2621 /** @todo we dont keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */ 2622 2623 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000); 2624 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 ) 2625 Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64)); 2626 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64; 2627 iReg++; 2628 2629 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000); 2630 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 ) 2631 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64)); 2632 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64; 2633 iReg++; 2634 2635 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000); 2636 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 ) 2637 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64)); 2638 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64; 2639 iReg++; 2640 2641 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000); 2642 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 ) 2643 Log7(("NEM/%u: MSR MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64)); 2644 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64; 2645 iReg++; 2646 2647 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000); 2648 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 ) 2649 Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64)); 2650 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64; 2651 iReg++; 2652 2653 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000); 2654 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 ) 2655 Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64)); 2656 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64; 2657 iReg++; 2658 2659 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000); 2660 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 ) 2661 Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64)); 2662 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64; 2663 iReg++; 2664 2665 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000); 2666 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 ) 2667 Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64)); 2668 pCtxMsrs->msr.MtrrFix4K_E0000 = 
paValues[iReg].Reg64; 2669 iReg++; 2670 2671 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000); 2672 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 ) 2673 Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64)); 2674 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64; 2675 iReg++; 2676 2677 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000); 2678 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 ) 2679 Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64)); 2680 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64; 2681 iReg++; 2682 2683 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000); 2684 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 ) 2685 Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64)); 2686 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64; 2687 iReg++; 2688 2689 Assert(pInput->Names[iReg] == HvX64RegisterTscAux); 2690 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux ) 2691 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64)); 2692 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64; 2693 iReg++; 2694 2695 # if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */ 2696 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 2697 { 2698 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable); 2699 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable) 2700 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64)); 2701 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64; 2702 iReg++; 2703 } 2704 # endif 2705 # ifdef LOG_ENABLED 2706 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON) 2707 { 2708 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl); 2709 uint64_t const uFeatCtrl = CPUMGetGuestIa32FeatCtrl(pVCpu); 2710 if (paValues[iReg].Reg64 != uFeatCtrl) 2711 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, uFeatCtrl, paValues[iReg].Reg64)); 2712 iReg++; 2713 } 2714 # endif 2715 } 2716 2717 /* Interruptibility. */ 2718 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)) 2719 { 2720 Assert(pInput->Names[iReg] == HvRegisterInterruptState); 2721 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip); 2722 2723 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_INHIBIT_INT)) 2724 { 2725 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow; 2726 if (paValues[iReg].InterruptState.InterruptShadow) 2727 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64); 2728 else 2729 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 2730 } 2731 2732 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI)) 2733 { 2734 if (paValues[iReg].InterruptState.NmiMasked) 2735 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS); 2736 else 2737 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS); 2738 } 2739 2740 fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI; 2741 iReg += 2; 2742 } 2743 2744 /* Event injection. 
*/ 2745 /// @todo HvRegisterPendingInterruption 2746 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption); 2747 if (paValues[iReg].PendingInterruption.InterruptionPending) 2748 { 2749 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n", 2750 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector, 2751 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode, 2752 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent)); 2753 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0, 2754 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64)); 2755 } 2756 2757 /// @todo HvRegisterPendingEvent0 2758 /// @todo HvRegisterPendingEvent1 2759 2760 /* Almost done, just update extrn flags and maybe change PGM mode. */ 2761 pCtx->fExtrn &= ~fWhat; 2762 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))) 2763 pCtx->fExtrn = 0; 2764 2765 /* Typical. */ 2766 if (!fMaybeChangedMode && !fUpdateCr3) 2767 return VINF_SUCCESS; 2768 2769 /* 2770 * Slow. 2771 */ 2772 int rc = VINF_SUCCESS; 2773 if (fMaybeChangedMode) 2774 { 2775 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER, false /* fForce */); 2776 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1); 2777 } 2778 2779 if (fUpdateCr3) 2780 { 2781 if (fCanUpdateCr3) 2782 { 2783 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n")); 2784 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3); 2785 if (rc == VINF_SUCCESS) 2786 { /* likely */ } 2787 else 2788 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2); 2789 } 2790 else 2791 { 2792 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n")); 2793 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */ 2794 } 2795 } 2796 2797 return rc; 2798 } 2799 #endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 189 RT_NOREF(pGVM, idCpu); 190 return VERR_NOT_IMPLEMENTED; 191 } 2800 192 2801 193 … … 2812 204 VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat) 2813 205 { 2814 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)2815 /*2816 * Validate the call.2817 */2818 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);2819 if (RT_SUCCESS(rc))2820 {2821 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];2822 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);2823 2824 /*2825 * Call worker.2826 */2827 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);2828 }2829 return rc;2830 #else2831 206 RT_NOREF(pGVM, idCpu, fWhat); 2832 207 return VERR_NOT_IMPLEMENTED; 2833 #endif 2834 } 2835 2836 2837 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 2838 /** 2839 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick. 2840 * 2841 * @returns VBox status code. 2842 * @param pGVM The ring-0 VM handle. 2843 * @param pGVCpu The ring-0 VCPU handle. 2844 * @param pcTicks Where to return the current CPU tick count. 2845 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional. 2846 */ 2847 NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux) 2848 { 2849 /* 2850 * Hypercall parameters. 
2851 */ 2852 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage; 2853 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); 2854 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 2855 2856 pInput->PartitionId = pGVM->nemr0.s.idHvPartition; 2857 pInput->VpIndex = pGVCpu->idCpu; 2858 pInput->fFlags = 0; 2859 pInput->Names[0] = HvX64RegisterTsc; 2860 pInput->Names[1] = HvX64RegisterTscAux; 2861 2862 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32); 2863 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput); 2864 RT_BZERO(paValues, sizeof(paValues[0]) * 2); 2865 2866 /* 2867 * Make the hypercall. 2868 */ 2869 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2), 2870 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 2871 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput); 2872 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2), 2873 VERR_NEM_GET_REGISTERS_FAILED); 2874 2875 /* 2876 * Get results. 2877 */ 2878 *pcTicks = paValues[0].Reg64; 2879 if (pcAux) 2880 *pcAux = paValues[0].Reg32; 2881 return VINF_SUCCESS; 2882 } 2883 #endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 208 } 2884 209 2885 210 … … 2894 219 VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu) 2895 220 { 2896 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 2897 /* 2898 * Validate the call. 2899 */ 2900 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 2901 if (RT_SUCCESS(rc)) 2902 { 2903 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2904 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 2905 2906 /* 2907 * Call worker. 2908 */ 2909 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0; 2910 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0; 2911 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks, 2912 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux); 2913 } 2914 return rc; 2915 #else 2916 RT_NOREF(pGVM, idCpu); 2917 return VERR_NOT_IMPLEMENTED; 2918 #endif 2919 } 2920 2921 2922 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 2923 /** 2924 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll. 2925 * 2926 * @returns VBox status code. 2927 * @param pGVM The ring-0 VM handle. 2928 * @param pGVCpu The ring-0 VCPU handle. 2929 * @param uPausedTscValue The TSC value at the time of pausing. 2930 */ 2931 NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue) 2932 { 2933 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1); 2934 2935 /* 2936 * Set up the hypercall parameters. 2937 */ 2938 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage; 2939 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); 2940 2941 pInput->PartitionId = pGVM->nemr0.s.idHvPartition; 2942 pInput->VpIndex = 0; 2943 pInput->RsvdZ = 0; 2944 pInput->Elements[0].Name = HvX64RegisterTsc; 2945 pInput->Elements[0].Pad0 = 0; 2946 pInput->Elements[0].Pad1 = 0; 2947 pInput->Elements[0].Value.Reg128.High64 = 0; 2948 pInput->Elements[0].Value.Reg64 = uPausedTscValue; 2949 2950 /* 2951 * Disable interrupts and do the first virtual CPU. 
2952 */ 2953 RTCCINTREG const fSavedFlags = ASMIntDisableFlags(); 2954 uint64_t const uFirstTsc = ASMReadTSC(); 2955 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1), 2956 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */); 2957 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue), 2958 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC); 2959 2960 /* 2961 * Do secondary processors, adjusting for elapsed TSC and keeping finger crossed 2962 * that we don't introduce too much drift here. 2963 */ 2964 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++) 2965 { 2966 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition); 2967 Assert(pInput->RsvdZ == 0); 2968 Assert(pInput->Elements[0].Name == HvX64RegisterTsc); 2969 Assert(pInput->Elements[0].Pad0 == 0); 2970 Assert(pInput->Elements[0].Pad1 == 0); 2971 Assert(pInput->Elements[0].Value.Reg128.High64 == 0); 2972 2973 pInput->VpIndex = iCpu; 2974 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc); 2975 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta; 2976 2977 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1), 2978 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */); 2979 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), 2980 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta), 2981 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC); 2982 } 2983 2984 /* 2985 * Done. 2986 */ 2987 ASMSetFlags(fSavedFlags); 2988 return VINF_SUCCESS; 2989 } 2990 #endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 221 RT_NOREF(pGVM, idCpu); 222 return VERR_NOT_IMPLEMENTED; 223 } 2991 224 2992 225 … … 3002 235 VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue) 3003 236 { 3004 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)3005 /*3006 * Validate the call.3007 */3008 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);3009 if (RT_SUCCESS(rc))3010 {3011 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];3012 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);3013 3014 /*3015 * Call worker.3016 */3017 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;3018 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;3019 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);3020 }3021 return rc;3022 #else3023 237 RT_NOREF(pGVM, idCpu, uPausedTscValue); 3024 238 return VERR_NOT_IMPLEMENTED; 3025 #endif3026 239 } 3027 240 … … 3029 242 VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu) 3030 243 { 3031 #ifdef NEM_WIN_WITH_RING0_RUNLOOP 3032 if (pGVM->nemr0.s.fMayUseRing0Runloop) 3033 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]); 3034 return VERR_NEM_RING3_ONLY; 3035 #else 3036 RT_NOREF(pGVM, idCpu); 3037 return VERR_NOT_IMPLEMENTED; 3038 #endif 244 RT_NOREF(pGVM, idCpu); 245 return VERR_NOT_IMPLEMENTED; 3039 246 } 3040 247 … … 3050 257 VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu) 3051 258 { 3052 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES3053 /*3054 * Validate the call.3055 */3056 int rc;3057 if (idCpu == NIL_VMCPUID)3058 rc = GVMMR0ValidateGVM(pGVM);3059 else3060 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);3061 if (RT_SUCCESS(rc))3062 {3063 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);3064 3065 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID3066 ? 
&pGVM->aCpus[idCpu].nemr0.s.HypercallData3067 : &pGVM->nemr0.s.HypercallData;3068 if ( RT_VALID_PTR(pHypercallData->pbPage)3069 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)3070 {3071 if (idCpu == NIL_VMCPUID)3072 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);3073 if (RT_SUCCESS(rc))3074 {3075 /*3076 * Query the memory statistics for the partition.3077 */3078 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;3079 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;3080 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;3081 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;3082 pInput->ProximityDomainInfo.Flags.Reserved = 0;3083 pInput->ProximityDomainInfo.Id = 0;3084 3085 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);3086 RT_ZERO(*pOutput);3087 3088 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,3089 pHypercallData->HCPhysPage,3090 pHypercallData->HCPhysPage + sizeof(*pInput));3091 if (uResult == HV_STATUS_SUCCESS)3092 {3093 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;3094 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;3095 rc = VINF_SUCCESS;3096 }3097 else3098 {3099 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",3100 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));3101 rc = VERR_NEM_IPE_0;3102 }3103 3104 if (idCpu == NIL_VMCPUID)3105 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);3106 }3107 }3108 else3109 rc = VERR_WRONG_ORDER;3110 }3111 return rc;3112 #else3113 259 RT_NOREF(pGVM, idCpu); 3114 260 return VINF_SUCCESS; 3115 #endif3116 261 } 3117 262 … … 3126 271 VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg) 3127 272 { 3128 #if defined(DEBUG_bird) && defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)3129 /*3130 * Resolve CPU structures.3131 */3132 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);3133 if (RT_SUCCESS(rc))3134 {3135 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);3136 3137 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];3138 if (u64Arg == 0)3139 {3140 /*3141 * Query register.3142 */3143 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;3144 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);3145 3146 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);3147 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);3148 RT_BZERO(paValues, sizeof(paValues[0]) * 1);3149 3150 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;3151 pInput->VpIndex = pGVCpu->idCpu;3152 pInput->fFlags = 0;3153 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;3154 3155 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),3156 pGVCpu->nemr0.s.HypercallData.HCPhysPage,3157 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);3158 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);3159 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;3160 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;3161 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;3162 rc = VINF_SUCCESS;3163 }3164 else if (u64Arg == 1)3165 {3166 /*3167 * Query partition property.3168 */3169 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;3170 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);3171 3172 size_t const cbInput = 
RT_ALIGN_Z(sizeof(*pInput), 32);3173 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);3174 pOutput->PropertyValue = 0;3175 3176 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;3177 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;3178 pInput->uPadding = 0;3179 3180 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,3181 pGVCpu->nemr0.s.HypercallData.HCPhysPage,3182 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);3183 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;3184 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;3185 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;3186 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;3187 rc = VINF_SUCCESS;3188 }3189 else if (u64Arg == 2)3190 {3191 /*3192 * Set register.3193 */3194 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;3195 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);3196 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));3197 3198 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;3199 pInput->VpIndex = pGVCpu->idCpu;3200 pInput->RsvdZ = 0;3201 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;3202 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;3203 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;3204 3205 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),3206 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);3207 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);3208 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;3209 rc = VINF_SUCCESS;3210 }3211 else3212 rc = VERR_INVALID_FUNCTION;3213 }3214 return rc;3215 #else /* !DEBUG_bird */3216 273 RT_NOREF(pGVM, idCpu, u64Arg); 3217 274 return VERR_NOT_SUPPORTED; 3218 #endif /* !DEBUG_bird */ 3219 } 3220 275 } 276 -
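The ring-0 worker that read the guest TSC via the HvCallGetVpRegisters hypercall is gone; the equivalent query now has to go through the official WinHvPlatform API from ring-3. Below is a minimal, hypothetical sketch of such a query (hPart, idCpu and the helper name are illustrative assumptions, not the actual VirtualBox helper):

    /* Read the guest TSC and TSC_AUX of one vCPU via the ring-3 API. */
    static HRESULT queryCpuTick(WHV_PARTITION_HANDLE hPart, UINT32 idCpu, UINT64 *pcTicks, UINT32 *puAux)
    {
        WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
        WHV_REGISTER_VALUE aValues[2];
        RT_ZERO(aValues);

        HRESULT hrc = WHvGetVirtualProcessorRegisters(hPart, idCpu, aenmNames, 2, aValues);
        if (SUCCEEDED(hrc))
        {
            *pcTicks = aValues[0].Reg64;                /* WHvX64RegisterTsc */
            if (puAux)
                *puAux = (UINT32)aValues[1].Reg64;      /* WHvX64RegisterTscAux */
        }
        return hrc;
    }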
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp
r93207 r93351 120 120 pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv = f; 121 121 } 122 123 #ifdef RT_OS_WINDOWS124 # ifndef VBOX_WITH_PGM_NEM_MODE125 126 /** @cfgm{/NEM/UseRing0Runloop, bool, true}127 * Whether to use the ring-0 runloop (if enabled in the build) or the ring-3 one.128 * The latter is generally slower. This option serves as a way out in case129 * something breaks in the ring-0 loop. */130 # ifdef NEM_WIN_USE_RING0_RUNLOOP_BY_DEFAULT131 bool fUseRing0Runloop = true;132 # else133 bool fUseRing0Runloop = false;134 # endif135 rc = CFGMR3QueryBoolDef(pCfgNem, "UseRing0Runloop", &fUseRing0Runloop, fUseRing0Runloop);136 AssertLogRelRCReturn(rc, rc);137 pVM->nem.s.fUseRing0Runloop = fUseRing0Runloop;138 # endif139 #endif140 122 141 123 return VINF_SUCCESS; -
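With the /NEM/UseRing0Runloop option dropped, guest execution always goes through the ring-3 WinHvPlatform run loop. As a rough, hypothetical illustration of what such a loop is built on (not the actual nemHCWinRunGC, and assuming hPart and idCpu are already set up):

    WHV_RUN_VP_EXIT_CONTEXT ExitCtx;
    for (;;)
    {
        HRESULT hrc = WHvRunVirtualProcessor(hPart, idCpu, &ExitCtx, sizeof(ExitCtx));
        if (FAILED(hrc))
            break;                                        /* fatal API error */
        if (ExitCtx.ExitReason == WHvRunVpExitReasonX64Halt)
            break;                                        /* guest halted, let the caller idle */
        /* ...dispatch I/O port, MMIO and MSR exits here, then continue... */
    }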
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r93207 r93351 181 181 /** Pointer to the NtDeviceIoControlFile import table entry. */ 182 182 static decltype(NtDeviceIoControlFile) **g_ppfnVidNtDeviceIoControlFile; 183 #if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(LOG_ENABLED)183 #ifdef LOG_ENABLED 184 184 /** Info about the VidGetHvPartitionId I/O control interface. */ 185 185 static NEMWINIOCTL g_IoCtlGetHvPartitionId; 186 186 /** Info about the VidGetPartitionProperty I/O control interface. */ 187 187 static NEMWINIOCTL g_IoCtlGetPartitionProperty; 188 #endif189 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(LOG_ENABLED)190 188 /** Info about the VidStartVirtualProcessor I/O control interface. */ 191 189 static NEMWINIOCTL g_IoCtlStartVirtualProcessor; … … 194 192 /** Info about the VidMessageSlotHandleAndGetNext I/O control interface. */ 195 193 static NEMWINIOCTL g_IoCtlMessageSlotHandleAndGetNext; 196 #endif197 #ifdef LOG_ENABLED198 194 /** Info about the VidMessageSlotMap I/O control interface - for logging. */ 199 195 static NEMWINIOCTL g_IoCtlMessageSlotMap; … … 809 805 } 810 806 811 #if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(LOG_ENABLED)807 #ifdef LOG_ENABLED 812 808 813 809 /** … … 864 860 } 865 861 866 #endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(LOG_ENABLED) */867 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(LOG_ENABLED)868 862 869 863 /** … … 967 961 } 968 962 969 #endif /* defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(LOG_ENABLED)*/963 #endif /* LOG_ENABLED */ 970 964 971 965 #ifdef LOG_ENABLED … … 1012 1006 * 1013 1007 */ 1014 #if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(LOG_ENABLED)1008 #ifdef LOG_ENABLED 1015 1009 decltype(NtDeviceIoControlFile) * const pfnOrg = *g_ppfnVidNtDeviceIoControlFile; 1016 1010 … … 1042 1036 g_IoCtlGetPartitionProperty.uFunction, g_IoCtlGetPartitionProperty.cbInput, g_IoCtlGetPartitionProperty.cbOutput)); 1043 1037 1044 #endif1045 int rcRet = VINF_SUCCESS;1046 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(LOG_ENABLED)1047 1048 1038 /* VidStartVirtualProcessor */ 1049 1039 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_StartVirtualProcessor; … … 1051 1041 *g_ppfnVidNtDeviceIoControlFile = pfnOrg; 1052 1042 AssertStmt(fRet && g_IoCtlStartVirtualProcessor.uFunction != 0, 1053 rcRet = RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY, 1054 "Problem figuring out VidStartVirtualProcessor: fRet=%u dwErr=%u", 1055 fRet, GetLastError()) ); 1043 RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY, 1044 "Problem figuring out VidStartVirtualProcessor: fRet=%u dwErr=%u", fRet, GetLastError()) ); 1056 1045 LogRel(("NEM: VidStartVirtualProcessor -> fun:%#x in:%#x out:%#x\n", g_IoCtlStartVirtualProcessor.uFunction, 1057 1046 g_IoCtlStartVirtualProcessor.cbInput, g_IoCtlStartVirtualProcessor.cbOutput)); … … 1062 1051 *g_ppfnVidNtDeviceIoControlFile = pfnOrg; 1063 1052 AssertStmt(fRet && g_IoCtlStopVirtualProcessor.uFunction != 0, 1064 rcRet = RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY, 1065 "Problem figuring out VidStopVirtualProcessor: fRet=%u dwErr=%u", 1066 fRet, GetLastError()) ); 1053 RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY, 1054 "Problem figuring out VidStopVirtualProcessor: fRet=%u dwErr=%u", fRet, GetLastError()) ); 1067 1055 LogRel(("NEM: VidStopVirtualProcessor -> fun:%#x in:%#x out:%#x\n", g_IoCtlStopVirtualProcessor.uFunction, 1068 1056 g_IoCtlStopVirtualProcessor.cbInput, g_IoCtlStopVirtualProcessor.cbOutput)); … … 1075 1063 *g_ppfnVidNtDeviceIoControlFile = pfnOrg; 1076 1064 
AssertStmt(fRet && g_IoCtlMessageSlotHandleAndGetNext.uFunction != 0, 1077 rcRet =RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,1078 1079 1065 RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY, 1066 "Problem figuring out VidMessageSlotHandleAndGetNext: fRet=%u dwErr=%u", 1067 fRet, GetLastError()) ); 1080 1068 LogRel(("NEM: VidMessageSlotHandleAndGetNext -> fun:%#x in:%#x out:%#x\n", 1081 1069 g_IoCtlMessageSlotHandleAndGetNext.uFunction, g_IoCtlMessageSlotHandleAndGetNext.cbInput, 1082 1070 g_IoCtlMessageSlotHandleAndGetNext.cbOutput)); 1083 1071 1084 #endif /* defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(LOG_ENABLED) */1085 #ifdef LOG_ENABLED1086 1072 /* The following are only for logging: */ 1087 1073 union … … 1124 1110 1125 1111 g_pIoCtlDetectForLogging = NULL; 1126 #endif 1127 1128 /* Done. */ 1129 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1130 pVM->nem.s.IoCtlGetHvPartitionId = g_IoCtlGetHvPartitionId; 1131 pVM->nem.s.IoCtlGetPartitionProperty = g_IoCtlGetPartitionProperty; 1132 #endif 1133 #ifdef NEM_WIN_WITH_RING0_RUNLOOP 1134 pVM->nem.s.IoCtlStartVirtualProcessor = g_IoCtlStartVirtualProcessor; 1135 pVM->nem.s.IoCtlStopVirtualProcessor = g_IoCtlStopVirtualProcessor; 1136 pVM->nem.s.IoCtlMessageSlotHandleAndGetNext = g_IoCtlMessageSlotHandleAndGetNext; 1137 #endif 1138 return rcRet; 1112 #endif /* LOG_ENABLED */ 1113 1114 return VINF_SUCCESS; 1139 1115 } 1140 1116 … … 1303 1279 #endif 1304 1280 1305 #ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1306 /* *Some guess working here. */1281 /** @todo Ditch for VBOX_WITH_PGM_NEM_MODE */ 1282 /* Some guess working here. */ 1307 1283 pVM->nem.s.cMaxMappedPages = 4000; 1308 1284 if (g_uBuildNo >= 22000) 1309 1285 pVM->nem.s.cMaxMappedPages = _64K; /* seems it can do lots more even */ 1310 #endif1311 1286 1312 1287 /* … … 1362 1337 Log(("NEM: Marked active!\n")); 1363 1338 nemR3WinDisableX2Apic(pVM); 1364 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE)1339 #ifdef VBOX_WITH_PGM_NEM_MODE 1365 1340 PGMR3EnableNemMode(pVM); 1366 1341 #endif … … 1379 1354 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 1380 1355 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed"); 1381 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1382 STAMR3Register(pVM, (void *)&pVM->nem.s.StatRemapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 1383 "/NEM/PagesRemapCalls", STAMUNIT_PAGES, "Calls to HvCallMapGpaPages for changing page protection"); 1384 STAMR3Register(pVM, (void *)&pVM->nem.s.StatRemapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 1385 "/NEM/PagesRemapFails", STAMUNIT_PAGES, "Calls to HvCallMapGpaPages for changing page protection failed"); 1386 #elif !defined(VBOX_WITH_PGM_NEM_MODE) 1356 #ifndef VBOX_WITH_PGM_NEM_MODE 1387 1357 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapAllPages, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 1388 1358 "/NEM/PagesUnmapAll", STAMUNIT_PAGES, "Times we had to unmap all the pages"); … … 1393 1363 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, 1394 1364 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff"); 1395 # endif 1396 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 1365 #endif 1397 1366 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, 1398 1367 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single 
pages"); 1399 1368 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, 1400 1369 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages"); 1401 # endif1402 1370 1403 1371 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) … … 1623 1591 1624 1592 #ifndef NEM_WIN_USE_OUR_OWN_RUN_API 1625 # ifdef NEM_WIN_WITH_RING0_RUNLOOP 1626 if (!pVM->nem.s.fUseRing0Runloop) 1627 # endif 1593 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/); 1594 if (FAILED(hrc)) 1628 1595 { 1629 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/); 1630 if (FAILED(hrc)) 1596 NTSTATUS const rcNtLast = RTNtLastStatusValue(); 1597 DWORD const dwErrLast = RTNtLastErrorValue(); 1598 while (idCpu-- > 0) 1631 1599 { 1632 NTSTATUS const rcNtLast = RTNtLastStatusValue(); 1633 DWORD const dwErrLast = RTNtLastErrorValue(); 1634 while (idCpu-- > 0) 1635 { 1636 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu); 1637 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n", 1638 hPartition, idCpu, hrc2, RTNtLastStatusValue(), 1639 RTNtLastErrorValue())); 1640 } 1641 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, 1642 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast); 1600 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu); 1601 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n", 1602 hPartition, idCpu, hrc2, RTNtLastStatusValue(), 1603 RTNtLastErrorValue())); 1643 1604 } 1605 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, 1606 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast); 1644 1607 } 1645 # ifdef NEM_WIN_WITH_RING0_RUNLOOP 1608 #else /* NEM_WIN_USE_OUR_OWN_RUN_API */ 1609 VID_MAPPED_MESSAGE_SLOT MappedMsgSlot = { NULL, UINT32_MAX, UINT32_MAX }; 1610 if (g_pfnVidMessageSlotMap(hPartitionDevice, &MappedMsgSlot, idCpu)) 1611 { 1612 AssertLogRelMsg(MappedMsgSlot.iCpu == idCpu && MappedMsgSlot.uParentAdvisory == UINT32_MAX, 1613 ("%#x %#x (iCpu=%#x)\n", MappedMsgSlot.iCpu, MappedMsgSlot.uParentAdvisory, idCpu)); 1614 pVCpu->nem.s.pvMsgSlotMapping = MappedMsgSlot.pMsgBlock; 1615 } 1646 1616 else 1647 # endif1648 #endif /* !NEM_WIN_USE_OUR_OWN_RUN_API */1649 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_OUR_OWN_RUN_API)1650 1617 { 1651 VID_MAPPED_MESSAGE_SLOT MappedMsgSlot = { NULL, UINT32_MAX, UINT32_MAX }; 1652 if (g_pfnVidMessageSlotMap(hPartitionDevice, &MappedMsgSlot, idCpu)) 1653 { 1654 AssertLogRelMsg(MappedMsgSlot.iCpu == idCpu && MappedMsgSlot.uParentAdvisory == UINT32_MAX, 1655 ("%#x %#x (iCpu=%#x)\n", MappedMsgSlot.iCpu, MappedMsgSlot.uParentAdvisory, idCpu)); 1656 pVCpu->nem.s.pvMsgSlotMapping = MappedMsgSlot.pMsgBlock; 1657 } 1658 else 1659 { 1660 NTSTATUS const rcNtLast = RTNtLastStatusValue(); 1661 DWORD const dwErrLast = RTNtLastErrorValue(); 1662 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, 1663 "Call to VidMessageSlotMap failed: Last=%#x/%u", rcNtLast, dwErrLast); 1664 } 1618 NTSTATUS const rcNtLast = RTNtLastStatusValue(); 1619 DWORD const dwErrLast = RTNtLastErrorValue(); 1620 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, 1621 "Call to VidMessageSlotMap failed: Last=%#x/%u", rcNtLast, dwErrLast); 1665 1622 } 1666 #endif 1623 #endif /* NEM_WIN_USE_OUR_OWN_RUN_API */ 1667 1624 } 1668 1625 pVM->nem.s.fCreatedEmts = true; … … 1836 
1793 pVCpu->nem.s.pvMsgSlotMapping = NULL; 1837 1794 #ifndef NEM_WIN_USE_OUR_OWN_RUN_API 1838 # ifdef NEM_WIN_WITH_RING0_RUNLOOP 1839 if (!pVM->nem.s.fUseRing0Runloop) 1840 # endif 1841 { 1842 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu); 1843 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n", 1844 hPartition, idCpu, hrc, RTNtLastStatusValue(), 1845 RTNtLastErrorValue())); 1846 } 1795 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu); 1796 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n", 1797 hPartition, idCpu, hrc, RTNtLastStatusValue(), 1798 RTNtLastErrorValue())); 1847 1799 #endif 1848 1800 } … … 1897 1849 VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu) 1898 1850 { 1899 #ifdef NEM_WIN_WITH_RING0_RUNLOOP1900 if (pVM->nem.s.fUseRing0Runloop)1901 {1902 for (;;)1903 {1904 VBOXSTRICTRC rcStrict = VMMR3CallR0EmtFast(pVM, pVCpu, VMMR0_DO_NEM_RUN);1905 if (RT_SUCCESS(rcStrict))1906 {1907 /*1908 * We deal with VINF_NEM_FLUSH_TLB here, since we're running the risk of1909 * getting these while we already got another RC (I/O ports).1910 */1911 /* Status codes: */1912 VBOXSTRICTRC rcPending = pVCpu->nem.s.rcPending;1913 pVCpu->nem.s.rcPending = VINF_SUCCESS;1914 if (rcStrict == VINF_NEM_FLUSH_TLB || rcPending == VINF_NEM_FLUSH_TLB)1915 {1916 LogFlow(("nemR3NativeRunGC: calling PGMFlushTLB...\n"));1917 int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/);1918 AssertRCReturn(rc, rc);1919 if (rcStrict == VINF_NEM_FLUSH_TLB)1920 {1921 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK | VM_FF_HP_R0_PRE_HM_MASK)1922 && !VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_HIGH_PRIORITY_POST_MASK | VMCPU_FF_HP_R0_PRE_HM_MASK)1923 & ~VMCPU_FF_RESUME_GUEST_MASK))1924 {1925 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);1926 continue;1927 }1928 rcStrict = VINF_SUCCESS;1929 }1930 }1931 else1932 AssertMsg(rcPending == VINF_SUCCESS, ("rcPending=%Rrc\n", VBOXSTRICTRC_VAL(rcPending) ));1933 }1934 LogFlow(("nemR3NativeRunGC: returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));1935 return rcStrict;1936 }1937 }1938 #endif1939 1851 return nemHCWinRunGC(pVM, pVCpu); 1940 1852 } … … 1981 1893 nemHCWinCancelRunVirtualProcessor(pVM, pVCpu); 1982 1894 #else 1983 # ifdef NEM_WIN_WITH_RING0_RUNLOOP 1984 if (pVM->nem.s.fUseRing0Runloop) 1985 nemHCWinCancelRunVirtualProcessor(pVM, pVCpu); 1986 else 1987 # endif 1988 { 1989 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu)); 1990 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0); 1991 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc)); 1992 RT_NOREF_PV(hrc); 1993 } 1895 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu)); 1896 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0); 1897 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc)); 1898 RT_NOREF_PV(hrc); 1994 1899 #endif 1995 1900 RT_NOREF_PV(fFlags); … … 2026 1931 RT_NOREF(puNemRange); 2027 1932 2028 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE)1933 #ifdef VBOX_WITH_PGM_NEM_MODE 2029 1934 if (pvR3) 2030 1935 { … … 2064 1969 RT_NOREF(puNemRange); 2065 1970 2066 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE)1971 #ifdef VBOX_WITH_PGM_NEM_MODE 2067 1972 /* 2068 1973 * Unmap the RAM we're replacing. 
… … 2139 2044 2140 2045 int rc = VINF_SUCCESS; 2141 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE)2046 #ifdef VBOX_WITH_PGM_NEM_MODE 2142 2047 /* 2143 2048 * Unmap the MMIO2 pages. … … 2198 2103 void *pvBitmap, size_t cbBitmap) 2199 2104 { 2200 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE)2105 #ifdef VBOX_WITH_PGM_NEM_MODE 2201 2106 Assert(VM_IS_NEM_ENABLED(pVM)); 2202 2107 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2); … … 2268 2173 *pu2State = UINT8_MAX; 2269 2174 2270 #if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE)2175 #ifdef VBOX_WITH_PGM_NEM_MODE 2271 2176 /* 2272 2177 * (Re-)map readonly. … … 2303 2208 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED) 2304 2209 { 2305 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES2306 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);2307 AssertRC(rc);2308 if (RT_SUCCESS(rc))2309 #else2310 2210 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE); 2311 2211 if (SUCCEEDED(hrc)) 2312 #endif2313 2212 { 2314 2213 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage); … … 2320 2219 { 2321 2220 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed); 2322 #ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES2323 LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));2324 return rc;2325 #else2326 2221 LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n", 2327 2222 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 2328 2223 return VERR_INTERNAL_ERROR_2; 2329 #endif2330 2224 } 2331 2225 } -
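All guest-physical mappings in this path now go through WHvMapGpaRange and WHvUnmapGpaRange instead of the removed HvCallMapGpaPages/HvCallUnmapGpaPages hypercalls. A hedged sketch of the basic pattern, assuming pvR3 points to page-aligned host memory backing guest physical address GCPhys and hPart is the partition handle:

    /* Map one page read/write/execute into the partition... */
    HRESULT hrc = WHvMapGpaRange(hPart, pvR3, GCPhys, X86_PAGE_SIZE,
                                 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
    /* ...and tear it down again when the range goes away. */
    if (SUCCEEDED(hrc))
        hrc = WHvUnmapGpaRange(hPart, GCPhys, X86_PAGE_SIZE);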
trunk/src/VBox/VMM/include/NEMInternal.h
r93207 r93351 53 53 * Windows: Code configuration. 54 54 */ 55 # ifndef VBOX_WITH_PGM_NEM_MODE56 # define NEM_WIN_USE_HYPERCALLS_FOR_PAGES57 #endif58 55 //# define NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS /**< Applies to ring-3 code only. Useful for testing VID API. */ 59 56 //# define NEM_WIN_USE_OUR_OWN_RUN_API /**< Applies to ring-3 code only. Useful for testing VID API. */ 60 //# define NEM_WIN_WITH_RING0_RUNLOOP /**< Enables the ring-0 runloop. */61 //# define NEM_WIN_USE_RING0_RUNLOOP_BY_DEFAULT /**< For quickly testing ring-3 API without messing with CFGM. */62 57 # if defined(NEM_WIN_USE_OUR_OWN_RUN_API) && !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) 63 58 # error "NEM_WIN_USE_OUR_OWN_RUN_API requires NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS" 64 # endif65 # if defined(NEM_WIN_USE_OUR_OWN_RUN_API) && !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)66 # error "NEM_WIN_USE_OUR_OWN_RUN_API requires NEM_WIN_USE_HYPERCALLS_FOR_PAGES"67 # endif68 # if defined(NEM_WIN_WITH_RING0_RUNLOOP) && !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)69 # error "NEM_WIN_WITH_RING0_RUNLOOP requires NEM_WIN_USE_HYPERCALLS_FOR_PAGES"70 # endif71 # if defined(VBOX_WITH_PGM_NEM_MODE) && defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)72 # error "VBOX_WITH_PGM_NEM_MODE cannot be used together with NEM_WIN_USE_HYPERCALLS_FOR_PAGES"73 59 # endif 74 60 … … 263 249 /** Number of currently mapped pages. */ 264 250 uint32_t volatile cMappedPages; 265 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES266 251 /** Max number of pages we dare map at once. */ 252 #ifdef VBOX_WITH_PGM_NEM_MODE 253 /** @todo consider removing this. */ 254 #endif 267 255 uint32_t cMaxMappedPages; 268 # endif269 256 STAMCOUNTER StatMapPage; 270 257 STAMCOUNTER StatUnmapPage; 271 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES 272 STAMCOUNTER StatRemapPage; 273 STAMCOUNTER StatRemapPageFailed; 274 # elif !defined(VBOX_WITH_PGM_NEM_MODE) 258 # if !defined(VBOX_WITH_PGM_NEM_MODE) 275 259 STAMCOUNTER StatUnmapAllPages; 276 260 # endif … … 281 265 STAMPROFILE StatProfUnmapGpaRange; 282 266 # endif 283 # ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES284 267 STAMPROFILE StatProfMapGpaRangePage; 285 268 STAMPROFILE StatProfUnmapGpaRangePage; 286 # endif287 288 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES289 /** Info about the VidGetHvPartitionId I/O control interface. */290 NEMWINIOCTL IoCtlGetHvPartitionId;291 /** Info about the VidGetPartitionProperty I/O control interface. */292 NEMWINIOCTL IoCtlGetPartitionProperty;293 # endif294 # ifdef NEM_WIN_WITH_RING0_RUNLOOP295 /** Info about the VidStartVirtualProcessor I/O control interface. */296 NEMWINIOCTL IoCtlStartVirtualProcessor;297 /** Info about the VidStopVirtualProcessor I/O control interface. */298 NEMWINIOCTL IoCtlStopVirtualProcessor;299 /** Info about the VidStopVirtualProcessor I/O control interface. */300 NEMWINIOCTL IoCtlMessageSlotHandleAndGetNext;301 # endif302 269 303 270 /** Statistics updated by NEMR0UpdateStatistics. */ … … 406 373 /** Last copy of HV_X64_VP_EXECUTION_STATE::InterruptShadow. */ 407 374 bool fLastInterruptShadow : 1; 408 # ifdef NEM_WIN_WITH_RING0_RUNLOOP409 /** Pending VINF_NEM_FLUSH_TLB. */410 int32_t rcPending;411 # else412 375 uint32_t uPadding; 413 # endif414 376 /** The VID_MSHAGN_F_XXX flags. 415 377 * Either VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE or zero. */ … … 635 597 typedef struct NEMR0PERVCPU 636 598 { 637 # if defined(RT_OS_WINDOWS) && defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)638 /** Hypercall input/ouput page. 
*/639 NEMR0HYPERCALLDATA HypercallData;640 /** Delta to add to convert a ring-0 pointer to a ring-3 one. */641 uintptr_t offRing3ConversionDelta;642 # else643 599 uint32_t uDummy; 644 # endif645 600 } NEMR0PERVCPU; 646 601 … … 650 605 typedef struct NEMR0PERVM 651 606 { 652 # ifdef RT_OS_WINDOWS653 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES654 /** The partition ID. */655 uint64_t idHvPartition;656 /** I/O control context. */657 PSUPR0IOCTLCTX pIoCtlCtx;658 /** Info about the VidGetHvPartitionId I/O control interface. */659 NEMWINIOCTL IoCtlGetHvPartitionId;660 /** Info about the VidGetPartitionProperty I/O control interface. */661 NEMWINIOCTL IoCtlGetPartitionProperty;662 # endif663 # ifdef NEM_WIN_WITH_RING0_RUNLOOP664 /** Info about the VidStartVirtualProcessor I/O control interface. */665 NEMWINIOCTL IoCtlStartVirtualProcessor;666 /** Info about the VidStopVirtualProcessor I/O control interface. */667 NEMWINIOCTL IoCtlStopVirtualProcessor;668 /** Info about the VidStopVirtualProcessor I/O control interface. */669 NEMWINIOCTL IoCtlMessageSlotHandleAndGetNext;670 /** Whether we may use the ring-0 runloop or not. */671 bool fMayUseRing0Runloop;672 # endif673 674 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES675 /** Hypercall input/ouput page for non-EMT. */676 NEMR0HYPERCALLDATA HypercallData;677 /** Critical section protecting use of HypercallData. */678 RTCRITSECT HypercallDataCritSect;679 # endif680 681 # else682 607 uint32_t uDummy; 683 # endif684 608 } NEMR0PERVM; 685 609
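The statistics members that remain in NEMInternal.h (StatMapPage, StatUnmapPage, StatProfUnmapGpaRangePage and friends) are bumped around the WinHvPlatform calls with the usual STAM macros. A hypothetical usage sketch, not the actual unmap worker:

    STAM_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    STAM_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
    else
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);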