Changeset 71222 in vbox for trunk/src/VBox
- Timestamp: Mar 5, 2018, 10:07:48 PM
- svn:sync-xref-src-repo-rev: 121132
- Location: trunk/src/VBox/VMM
- Files: 14 edited
Legend:
- Unmodified (no prefix)
- Added ("+")
- Removed ("-")
trunk/src/VBox/VMM/VBoxVMM.d
r69111 → r71222

      probe r0__vmm__return__to__ring3__rc(struct VMCPU *a_pVCpu, struct CPUMCTX *p_Ctx, int a_rc);
      probe r0__vmm__return__to__ring3__hm(struct VMCPU *a_pVCpu, struct CPUMCTX *p_Ctx, int a_rc);
+     probe r0__vmm__return__to__ring3__nem(struct VMCPU *a_pVCpu, struct CPUMCTX *p_Ctx, int a_rc);
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r71184 r71222 40 40 NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" }; 41 41 42 /** HV_INTERCEPT_ACCESS_TYPE names. */ 43 static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" }; 44 42 45 43 46 /********************************************************************************************************************************* … … 67 70 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM); 68 71 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE); 69 return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu], GCPhysSrc, GCPhysDst, 1, fFlags); 72 return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu], 73 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 74 GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 75 1, fFlags); 70 76 #else 71 77 pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK; … … 91 97 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM); 92 98 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE); 93 return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys , 1);99 return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1); 94 100 # else 95 101 pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK; … … 100 106 101 107 #endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */ 102 103 104 108 #ifndef IN_RING0 105 109 106 110 NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 107 111 { 108 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS112 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS 109 113 NOREF(pCtx); 110 114 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL); … … 112 116 return rc; 113 117 114 # else118 # else 115 119 WHV_REGISTER_NAME aenmNames[128]; 116 120 WHV_REGISTER_VALUE aValues[128]; … … 157 161 158 162 /* Segments */ 159 # define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \163 # define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \ 160 164 do { \ 161 165 aenmNames[a_idx] = a_enmName; \ … … 391 395 Assert(iReg < RT_ELEMENTS(aValues)); 392 396 Assert(iReg < RT_ELEMENTS(aenmNames)); 393 # ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS397 # ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS 394 398 Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n", 395 399 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues)); 396 # endif400 # endif 397 401 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues); 398 402 if (SUCCEEDED(hrc)) … … 402 406 hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 403 407 return VERR_INTERNAL_ERROR; 404 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */408 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 405 409 } 406 410 … … 408 412 NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 409 413 { 410 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS414 # ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS 411 415 /* See NEMR0ImportState */ 412 416 NOREF(pCtx); … … 421 425 return rc; 422 426 423 # else427 # else 424 428 WHV_REGISTER_NAME aenmNames[128]; 425 429 … … 534 538 Assert(RT_ELEMENTS(aValues) >= cRegs); 535 539 Assert(RT_ELEMENTS(aenmNames) >= cRegs); 536 # ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS540 # ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS 537 541 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n", 538 542 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues)); 539 # endif543 # endif 540 544 HRESULT hrc = 
WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues); 541 545 if (SUCCEEDED(hrc)) … … 567 571 568 572 /* Segments */ 569 # define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \573 # define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \ 570 574 do { \ 571 575 Assert(aenmNames[a_idx] == a_enmName); \ … … 774 778 hrc, RTNtLastStatusValue(), RTNtLastErrorValue())); 775 779 return VERR_INTERNAL_ERROR; 776 #endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 777 } 780 # endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */ 781 } 782 783 #endif /* !IN_RING0 */ 778 784 779 785 … … 1172 1178 } 1173 1179 1174 #ifdef IN_RING3 1180 #ifdef IN_RING0 1181 /** 1182 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE and 1183 * VERR_NEM_FLUSH_TBL into informational status codes and logs+asserts statuses. 1184 * 1185 * @returns VBox strict status code. 1186 * @param pGVM The global (ring-0) VM structure. 1187 * @param pGVCpu The global (ring-0) per CPU structure. 1188 * @param pCtx The CPU context to import into. 1189 * @param fWhat What to import. 1190 * @param pszCaller Whoe is doing the importing. 1191 */ 1192 DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller) 1193 { 1194 int rc = nemR0WinImportState(pGVM, pGVCpu, pCtx, fWhat); 1195 if (RT_SUCCESS(rc)) 1196 { 1197 Assert(rc == VINF_SUCCESS); 1198 return VINF_SUCCESS; 1199 } 1200 1201 if (rc == VERR_NEM_CHANGE_PGM_MODE || rc == VERR_NEM_FLUSH_TLB) 1202 { 1203 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc)); 1204 return -rc; 1205 } 1206 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc); 1207 } 1208 #endif /* IN_RING0 */ 1175 1209 1176 1210 /** … … 1201 1235 * @param pMsg The message. 1202 1236 * @param pCtx The register context. 1237 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 1203 1238 */ 1204 1239 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, 1205 PCPUMCTX pCtx) 1206 { 1240 PCPUMCTX pCtx, PGVMCPU pGVCpu) 1241 { 1242 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ 1243 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE 1244 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE); 1245 1207 1246 /* 1208 1247 * Whatever we do, we must clear pending event ejection upon resume. … … 1226 1265 if (State.fCanResume) 1227 1266 { 1228 Log4(("MemExit: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n", 1267 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n", 1268 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, 1229 1269 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 1230 1270 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 1231 State.fDidSomething ? "" : " no-change", g_apsz WHvMemAccesstypes[pMsg->Header.InterceptAccessType]));1271 State.fDidSomething ? 
"" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType])); 1232 1272 return VINF_SUCCESS; 1233 1273 } 1234 1274 } 1235 Log4(("MemExit: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n", 1275 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n", 1276 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, 1236 1277 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 1237 1278 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 1238 State.fDidSomething ? "" : " no-change", g_apsz WHvMemAccesstypes[pMsg->Header.InterceptAccessType]));1279 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType])); 1239 1280 } 1240 1281 else 1241 Log4(("MemExit: %RGp rc=%Rrc%s; emulating (%s)\n", pMsg->GuestPhysicalAddress, rc, 1242 State.fDidSomething ? " modified-backing" : "", g_apszWHvMemAccesstypes[pMsg->Header.InterceptAccessType])); 1282 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating (%s)\n", 1283 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, pMsg->GuestPhysicalAddress, rc, 1284 State.fDidSomething ? " modified-backing" : "", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType])); 1243 1285 1244 1286 /* … … 1246 1288 */ 1247 1289 nemHCWinCopyStateFromX64Header(pCtx, &pMsg->Header); 1290 VBOXSTRICTRC rcStrict; 1291 #ifdef IN_RING0 1292 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MemExit"); 1293 if (rcStrict != VINF_SUCCESS) 1294 return rcStrict; 1295 #else 1248 1296 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 1249 1297 AssertRCReturn(rc, rc); 1250 1251 VBOXSTRICTRC rcStrict; 1298 NOREF(pGVCpu); 1299 #endif 1300 1252 1301 if (pMsg->InstructionByteCount > 0) 1253 1302 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip, … … 1268 1317 * @param pVCpu The cross context per CPU structure. 1269 1318 * @param pMsg The message. 1319 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 
1270 1320 */ 1271 1321 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, 1272 PCPUMCTX pCtx )1322 PCPUMCTX pCtx, PGVMCPU pGVCpu) 1273 1323 { 1274 1324 Assert( pMsg->AccessInfo.AccessSize == 1 1275 1325 || pMsg->AccessInfo.AccessSize == 2 1276 1326 || pMsg->AccessInfo.AccessSize == 4); 1327 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ 1328 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE); 1277 1329 1278 1330 /* … … 1294 1346 { 1295 1347 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize); 1296 Log4(("IOExit: %04x:%08RX64: OUT %#x, %#x LB %u rcStrict=%Rrc\n", pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, 1297 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) )); 1348 Log4(("IOExit/%u: %04x:%08RX64: OUT %#x, %#x LB %u rcStrict=%Rrc\n", 1349 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, pMsg->PortNumber, 1350 (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) )); 1298 1351 if (IOM_SUCCESS(rcStrict)) 1299 1352 { … … 1306 1359 uint32_t uValue = 0; 1307 1360 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize); 1308 Log4(("IOExit : %04x:%08RX64: IN %#x LB %u -> %#x, rcStrict=%Rrc\n", pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,1309 pMsg-> PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));1361 Log4(("IOExit/%u: %04x:%08RX64: IN %#x LB %u -> %#x, rcStrict=%Rrc\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector, 1362 pMsg->Header.Rip, pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 1310 1363 if (IOM_SUCCESS(rcStrict)) 1311 1364 { … … 1315 1368 pCtx->rax = uValue; 1316 1369 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX; 1317 Log4(("IOExit : RAX %#RX64 -> %#RX64\n", pMsg->Rax, pCtx->rax));1370 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax)); 1318 1371 nemHCWinCopyStateFromX64Header(pCtx, &pMsg->Header); 1319 1372 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header); … … 1346 1399 pCtx->rdi = pMsg->Rdi; 1347 1400 pCtx->rsi = pMsg->Rsi; 1401 #ifdef IN_RING0 1402 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit"); 1403 if (rcStrict != VINF_SUCCESS) 1404 return rcStrict; 1405 #else 1348 1406 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM); 1349 1407 AssertRCReturn(rc, rc); 1350 1351 Log4(("IOExit: %04x:%08RX64: %s%s %#x LB %u (emulating)\n", pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, 1408 RT_NOREF(pGVCpu); 1409 #endif 1410 1411 Log4(("IOExit/%u: %04x:%08RX64: %s%s %#x LB %u (emulating)\n", 1412 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, 1352 1413 pMsg->AccessInfo.RepPrefix ? "REP " : "", 1353 1414 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS", … … 1379 1440 * @param pMappingHeader The message slot mapping. 1380 1441 * @param pCtx The register context. 1442 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 
1381 1443 */ 1382 1444 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, 1383 PCPUMCTX pCtx )1445 PCPUMCTX pCtx, PGVMCPU pGVCpu) 1384 1446 { 1385 1447 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage) … … 1391 1453 case HvMessageTypeUnmappedGpa: 1392 1454 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment)); 1393 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx );1455 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu); 1394 1456 1395 1457 case HvMessageTypeGpaIntercept: 1396 1458 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment)); 1397 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx );1459 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu); 1398 1460 1399 1461 case HvMessageTypeX64IoPortIntercept: 1400 1462 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept)); 1401 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx );1463 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx, pGVCpu); 1402 1464 1403 1465 case HvMessageTypeX64Halt: … … 1454 1516 * exit. 1455 1517 * @param pMappingHeader The message slot mapping. 1518 * @param pGVM The global (ring-0) VM structure (NULL in r3). 1519 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 1456 1520 */ 1457 1521 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict, 1458 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader) 1522 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, 1523 PGVM pGVM, PGVMCPU pGVCpu) 1459 1524 { 1460 1525 /* … … 1462 1527 * does another VM exit. 1463 1528 */ 1529 #ifdef IN_RING0 1530 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu; 1531 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, 1532 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu), 1533 NULL, 0); 1534 if (NT_SUCCESS(rcNt)) 1535 { 1536 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) )); 1537 return rcStrict; 1538 } 1539 #else 1464 1540 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu); 1465 1541 if (fRet) … … 1468 1544 return rcStrict; 1469 1545 } 1546 RT_NOREF(pGVM, pGVCpu); 1547 #endif 1470 1548 1471 1549 /* 1472 1550 * Dang. The CPU stopped by itself and we got a couple of message to deal with. 1473 1551 */ 1552 #ifdef IN_RING0 1553 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt), 1554 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1555 #else 1474 1556 DWORD dwErr = RTNtLastErrorValue(); 1475 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u \n", dwErr),1557 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr), 1476 1558 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1559 #endif 1477 1560 Log8(("nemHCWinStopCpu: Stopping CPU pending...\n")); 1478 1561 … … 1481 1564 * Note! We can safely ASSUME that rcStrict isn't an important information one. 
1482 1565 */ 1566 #ifdef IN_RING0 1567 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu; 1568 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; 1569 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/ 1570 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction, 1571 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext, 1572 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext), 1573 NULL, 0); 1574 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 1575 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1576 #else 1483 1577 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 1484 1578 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 1485 AssertLogRelMsgReturn(fWait, 1486 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 1579 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 1487 1580 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1581 #endif 1488 1582 1489 1583 /* It should be a hypervisor message and definitely not a stop request completed message. */ … … 1494 1588 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1495 1589 1496 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu) );1590 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu); 1497 1591 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict)) 1498 1592 rcStrict = rcStrict2; … … 1502 1596 * that as handled too. CPU is back into fully stopped stated then. 1503 1597 */ 1598 #ifdef IN_RING0 1599 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu; 1600 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE; 1601 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/ 1602 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction, 1603 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext, 1604 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext), 1605 NULL, 0); 1606 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("2st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 1607 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1608 #else 1504 1609 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 1505 1610 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/); 1506 AssertLogRelMsgReturn(fWait, 1507 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 1611 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 1508 1612 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1613 #endif 1509 1614 1510 1615 /* It should be a stop request completed message. */ … … 1516 1621 1517 1622 /* Mark this as handled. 
*/ 1518 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 1519 VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/); 1520 AssertLogRelMsgReturn(fWait, 1521 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 1623 #ifdef IN_RING0 1624 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu; 1625 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE; 1626 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/ 1627 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction, 1628 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext, 1629 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext), 1630 NULL, 0); 1631 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt), 1522 1632 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1633 #else 1634 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/); 1635 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()), 1636 RT_SUCCESS(rcStrict) ? VERR_INTERNAL_ERROR_3 : rcStrict); 1637 #endif 1523 1638 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) )); 1524 1639 return rcStrict; … … 1526 1641 1527 1642 1528 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu )1643 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu) 1529 1644 { 1530 1645 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1531 LogFlow((" nemHCWinRunGC: Entering #%u cs:rip=%04x:%08RX64 efl=%#08RX64\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags));1646 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags)); 1532 1647 #ifdef LOG_ENABLED 1533 1648 if (LogIs3Enabled()) … … 1552 1667 if ((pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)) != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)) 1553 1668 { 1669 #ifdef IN_RING0 1670 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx); 1671 #else 1554 1672 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx); 1673 RT_NOREF(pGVM, pGVCpu); 1674 #endif 1555 1675 AssertRCReturn(rc2, rc2); 1556 1676 } … … 1566 1686 else 1567 1687 { 1568 if (g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu)) 1569 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; 1570 else 1571 AssertLogRelMsgFailedReturn(("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n", 1572 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()), 1573 VERR_INTERNAL_ERROR_3); 1688 #ifdef IN_RING0 1689 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu; 1690 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, 1691 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu), 1692 NULL, 0); 1693 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt)); 1694 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt), 1695 VERR_INTERNAL_ERROR_3); 1696 #else 1697 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu), 1698 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n", 1699 pVCpu->idCpu, 
RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()), 1700 VERR_INTERNAL_ERROR_3); 1701 #endif 1702 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; 1574 1703 } 1575 1704 1576 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM , VMCPUSTATE_STARTED))1705 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED)) 1577 1706 { 1707 #ifdef IN_RING0 1708 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu; 1709 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags; 1710 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies; 1711 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction, 1712 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext, 1713 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext), 1714 NULL, 0); 1715 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 1716 if (rcNt == STATUS_SUCCESS) 1717 #else 1578 1718 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, 1579 1719 pVCpu->nem.s.fHandleAndGetFlags, cMillies); 1580 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM );1720 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT); 1581 1721 if (fRet) 1722 #endif 1582 1723 { 1583 1724 /* 1584 1725 * Deal with the message. 1585 1726 */ 1586 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx );1727 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu); 1587 1728 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE; 1729 if (rcStrict == VINF_SUCCESS) 1730 { /* hopefully likely */ } 1731 else 1732 { 1733 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) )); 1734 break; 1735 } 1588 1736 } 1589 1737 else … … 1592 1740 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah, 1593 1741 the error code conversion is into WAIT_XXX, i.e. NT status codes. */ 1594 DWORD dwErr = GetLastError(); 1595 if ( dwErr == STATUS_TIMEOUT 1596 || dwErr == STATUS_ALERTED /* just in case */ 1597 || dwErr == STATUS_USER_APC /* ditto */ ) 1598 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; /* exits are likely */ 1599 else 1600 AssertLogRelMsgFailedReturn(("VidMessageSlotHandleAndGetNext failed for CPU #%u: %u (%#x, rcNt=%#x)\n", 1601 pVCpu->idCpu, dwErr, dwErr, RTNtLastStatusValue()), 1602 VERR_INTERNAL_ERROR_3); 1742 #ifndef IN_RING0 1743 DWORD rcNt = GetLastError(); 1744 #endif 1745 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt)); 1746 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT 1747 || rcNt == STATUS_ALERTED /* just in case */ 1748 || rcNt == STATUS_USER_APC /* ditto */ 1749 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n", pVCpu->idCpu, rcNt, rcNt), 1750 VERR_INTERNAL_ERROR_3); 1751 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE; /* exits are likely */ 1603 1752 } 1604 1753 … … 1612 1761 /** @todo Try handle pending flags, not just return to EM loops. Take care 1613 1762 * not to set important RCs here unless we've handled a message. 
*/ 1614 LogFlow(("nemHCWinRunGC: returning: pending FF (%#x / %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions)); 1763 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n", 1764 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions)); 1615 1765 } 1616 1766 else 1617 LogFlow((" nemHCWinRunGC: returning: canceled %d (pre exec)\n", VMCPU_GET_STATE(pVCpu) ));1767 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) )); 1618 1768 } 1619 1769 else 1620 LogFlow((" nemHCWinRunGC: returning: pending FF (pre exec)\n"));1770 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu)); 1621 1771 break; 1622 1772 } /* the run loop */ … … 1630 1780 { 1631 1781 pVCpu->nem.s.fHandleAndGetFlags = 0; 1632 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader );1782 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu); 1633 1783 } 1634 1784 … … 1637 1787 if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))) 1638 1788 { 1789 #ifdef IN_RING0 1790 int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK); 1791 if (RT_SUCCESS(rc2)) 1792 pCtx->fExtrn = 0; 1793 else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB) 1794 { 1795 pCtx->fExtrn = 0; 1796 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2) 1797 rcStrict = -rc2; 1798 else 1799 { 1800 pVCpu->nem.s.rcPgmPending = -rc2; 1801 LogFlow(("NEM/%u: rcPgmPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) )); 1802 } 1803 } 1804 #else 1639 1805 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK); 1640 1806 if (RT_SUCCESS(rc2)) 1641 1807 pCtx->fExtrn = 0; 1808 #endif 1642 1809 else if (RT_SUCCESS(rcStrict)) 1643 1810 rcStrict = rc2; … … 1646 1813 pCtx->fExtrn = 0; 1647 1814 1648 LogFlow(("nemHCWinRunGC: Leaving #%u cs:rip=%04x:%08RX64 efl=%#08RX64\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags)); 1815 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n", 1816 pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags, VBOXSTRICTRC_VAL(rcStrict) )); 1649 1817 return rcStrict; 1650 1818 } 1651 #endif 1652 1653 #endif /* IN_RING0 */ 1819 1654 1820 1655 1821 -
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r71152 → r71222

      if (RT_SUCCESS(rc))
      {
-         pVM->pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
-         AssertPtr((void *)pVM->pVMR3);
+         PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
+         pVM->pVMR3 = pVMR3;
+         AssertPtr((void *)pVMR3);

          /* Initialize all the VM pointers. */
…
          {
              pVM->aCpus[i].pVMR0 = pVM;
-             pVM->aCpus[i].pVMR3 = pVM->pVMR3;
+             pVM->aCpus[i].pVMR3 = pVMR3;
              pVM->aCpus[i].idHostCpu = NIL_RTCPUID;
              pVM->aCpus[i].hNativeThreadR0 = NIL_RTNATIVETHREAD;
…
      pHandle->ProcId = ProcId;
      pGVM->pVM = pVM;
+     pGVM->pVMR3 = pVMR3;
      pGVM->aCpus[0].hEMT = hEMT0;
      pVM->aCpus[0].hNativeThreadR0 = hEMT0;
      pGVMM->cEMTs += cCpus;
+
+     for (uint32_t i = 0; i < cCpus; i++)
+     {
+         pGVM->aCpus[i].pVCpu = &pVM->aCpus[i];
+         pGVM->aCpus[i].pVM = pVM;
+     }

      /* Associate it with the session and create the context hook for EMT0. */
…
      *ppVM = pVM;
-     Log(("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVM->pVMR3, pGVM, iHandle));
+     Log(("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVMR3, pGVM, iHandle));
      return VINF_SUCCESS;
  }
…
          pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
          pGVM->aCpus[i].hEMT = NIL_RTNATIVETHREAD;
+         pGVM->aCpus[i].pGVM = pGVM;
+         pGVM->aCpus[i].pVCpu = NULL;
+         pGVM->aCpus[i].pVM = NULL;
      }
  }
…
      /* Be careful here because we might theoretically be racing someone else cleaning up. */
-     if (
-         &&
-
-
-         &&
-         &&
-         &&
-         &&
+     if (   pHandle->pVM == pVM
+         && (   (   pHandle->hEMT0 == hSelf
+                 && pHandle->ProcId == ProcId)
+             || pHandle->hEMT0 == NIL_RTNATIVETHREAD)
+         && VALID_PTR(pHandle->pvObj)
+         && VALID_PTR(pHandle->pSession)
+         && VALID_PTR(pHandle->pGVM)
+         && pHandle->pGVM->u32Magic == GVM_MAGIC)
      {
          /* Check that other EMTs have deregistered. */
…
       */
      PGVM pGVM = pHandle->pGVM;
-     if (
-         &&
+     if (   VALID_PTR(pGVM)
+         && pGVM->u32Magic == GVM_MAGIC)
      {
          pGVMM->cEMTs -= pGVM->cCpus;
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r71184 → r71222

  #include <iprt/nt/hyperv.h>
  #include <iprt/nt/vid.h>
+ #include <winerror.h>

  #include <VBox/vmm/nem.h>
…
  /*******************************************************************************************************
+ *   Internal Functions                                                                                 *
+ *******************************************************************************************************/
+ typedef uint32_t DWORD; /* for winerror.h constants */
+
+
+ /*******************************************************************************************************
  *   Global Variables                                                                                   *
  *******************************************************************************************************/
…
  *   Internal Functions                                                                                 *
  *******************************************************************************************************/
- NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
-                                      uint32_t cPages, uint32_t fFlags);
- NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
+ NEM_TMPL_STATIC int  nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
+                                       uint32_t cPages, uint32_t fFlags);
+ NEM_TMPL_STATIC int  nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
+ NEM_TMPL_STATIC int  nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
+ NEM_TMPL_STATIC int  nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
+ DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
+                                              void *pvOutput, uint32_t cbOutput);

…
  * @param   fWhat       What to export. To be defined, UINT64_MAX for now.
  */
- static int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
+ NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
  {
      PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
…
  * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
  */
- static int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
+ NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
  {
      HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.pbHypercallData;
…
      pCtx->fExtrn &= ~fWhat;

+     /* Typical. */
+     if (!fMaybeChangedMode && !fFlushTlb)
+         return VINF_SUCCESS;
+
+     /*
+      * Slow.
+      */
      int rc = VINF_SUCCESS;
      if (fMaybeChangedMode)
      {
          rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
          if (rc == VINF_PGM_CHANGE_MODE)
-             rc = VERR_NEM_CHANGE_PGM_MODE;
-         else
-             AssertRC(rc);
-     }
-     if (fFlushTlb && rc == VINF_SUCCESS)
+         {
+             LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
+             return VERR_NEM_CHANGE_PGM_MODE;
+         }
+         AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
+     }
+
+     if (fFlushTlb)
+     {
+         LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
          rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
+     }

      return rc;
…
  }

+
+ VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
+ {
+     PVM pVM = pGVM->pVM;
+     return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
+ }
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r71198 r71222 506 506 507 507 /** 508 * Does EMT specific VM initialization. 509 * 510 * @returns VBox status code. 511 * @param pGVM The ring-0 VM structure. 512 * @param pVM The cross context VM structure. 513 * @param idCpu The EMT that's calling. 514 */ 515 static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu) 516 { 517 /* Paranoia (caller checked these already). */ 518 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); 519 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID); 520 521 #ifdef LOG_ENABLED 522 /* 523 * Registration of ring 0 loggers. 524 */ 525 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 526 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0; 527 if ( pR0Logger 528 && !pR0Logger->fRegistered) 529 { 530 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession); 531 pR0Logger->fRegistered = true; 532 } 533 #endif 534 RT_NOREF(pVM); 535 536 return VINF_SUCCESS; 537 } 538 539 540 541 /** 508 542 * Terminates the R0 bits for a particular VM instance. 509 543 * … … 1167 1201 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1168 1202 1169 #ifdef LOG_ENABLED1170 /*1171 * Ugly: Lazy registration of ring 0 loggers.1172 */1173 if (pVCpu->idCpu > 0)1174 {1175 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;1176 if ( pR0Logger1177 && RT_UNLIKELY(!pR0Logger->fRegistered))1178 {1179 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);1180 pR0Logger->fRegistered = true;1181 }1182 }1183 #endif1184 1185 1203 #ifdef VMM_R0_TOUCH_FPU 1186 1204 /* … … 1321 1339 } 1322 1340 1341 case VMMR0_DO_NEM_RUN: 1342 { 1343 /* 1344 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode). 1345 */ 1346 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1347 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu); 1348 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1349 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC); 1350 1351 pVCpu->vmm.s.iLastGZRc = rc; 1352 1353 /* 1354 * Fire dtrace probe and collect statistics. 1355 */ 1356 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc); 1357 #ifdef VBOX_WITH_STATISTICS 1358 vmmR0RecordRC(pVM, pVCpu, rc); 1359 #endif 1360 break; 1361 } 1362 1363 1323 1364 /* 1324 1365 * For profiling. … … 1539 1580 case VMMR0_DO_VMMR0_INIT: 1540 1581 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg)); 1582 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1583 break; 1584 1585 /* 1586 * Does EMT specific ring-0 init. 1587 */ 1588 case VMMR0_DO_VMMR0_INIT_EMT: 1589 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu); 1541 1590 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1542 1591 break; -
trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm
r69221 → r71222

  ;
  BEGINPROC vmmR0CallRing3SetJmp
+ GLOBALNAME vmmR0CallRing3SetJmp2
  GLOBALNAME vmmR0CallRing3SetJmpEx
trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-x86.asm
r69221 → r71222

  ;
  BEGINPROC vmmR0CallRing3SetJmp
+ GLOBALNAME vmmR0CallRing3SetJmp2
  GLOBALNAME vmmR0CallRing3SetJmpEx
  ;
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r71184 → r71222

  #ifndef NEM_WIN_USE_OUR_OWN_RUN_API
      return nemR3WinWHvRunGC(pVM, pVCpu);
- #elif 1
-     return nemHCWinRunGC(pVM, pVCpu);
+ #elif 0
+     return nemHCWinRunGC(pVM, pVCpu, NULL /*pGVM*/, NULL /*pGVCpu*/);
  #else
-     int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RUN_GC, 0, NULL);
-     if (RT_SUCCESS(rc))
-         return pVCpu->nem.s.rcRing0;
-     return rc;
+     VBOXSTRICTRC rcStrict = VMMR3CallR0EmtFast(pVM, pVCpu, VMMR0_DO_NEM_RUN);
+     if (RT_SUCCESS(rcStrict))
+     {
+         /* We deal wtih VINF_NEM_CHANGE_PGM_MODE and VINF_NEM_FLUSH_TLB here, since we're running
+            the risk of getting these while we already got another RC (I/O ports). */
+         VBOXSTRICTRC rcPgmPending = pVCpu->nem.s.rcPgmPending;
+         pVCpu->nem.s.rcPgmPending = VINF_SUCCESS;
+         if (   rcStrict == VINF_NEM_CHANGE_PGM_MODE
+             || rcStrict == VINF_PGM_CHANGE_MODE
+             || rcPgmPending == VINF_NEM_CHANGE_PGM_MODE )
+         {
+             LogFlow(("nemR3NativeRunGC: calling PGMChangeMode...\n"));
+             int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
+             AssertRCReturn(rc, rc);
+             if (rcStrict == VINF_NEM_CHANGE_PGM_MODE || rcStrict == VINF_NEM_FLUSH_TLB)
+                 rcStrict = VINF_SUCCESS;
+         }
+         else if (rcStrict == VINF_NEM_FLUSH_TLB || rcPgmPending == VINF_NEM_FLUSH_TLB)
+         {
+             LogFlow(("nemR3NativeRunGC: calling PGMFlushTLB...\n"));
+             int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true);
+             AssertRCReturn(rc, rc);
+             if (rcStrict == VINF_NEM_FLUSH_TLB || rcStrict == VINF_NEM_CHANGE_PGM_MODE)
+                 rcStrict = VINF_SUCCESS;
+         }
+         else
+             AssertMsg(rcPgmPending == VINF_SUCCESS, ("rcPgmPending=%Rrc\n", VBOXSTRICTRC_VAL(rcPgmPending) ));
+     }
+     return rcStrict;
  #endif
  }
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r70948 → r71222

      }
      /* Override the shadow mode is nested paging is active. */
-     pVM->pgm.s.fNestedPaging = HMIsNestedPagingActive(pVM);
-     if (pVM->pgm.s.fNestedPaging)
-         enmShadowMode = HMGetShwPagingMode(pVM);
+     if (VM_IS_NEM_ENABLED(pVM))
+     {
+         pVM->pgm.s.fNestedPaging = true;
+         enmShadowMode = PGMMODE_NESTED;
+     }
+     else
+     {
+         pVM->pgm.s.fNestedPaging = HMIsNestedPagingActive(pVM);
+         if (pVM->pgm.s.fNestedPaging)
+             enmShadowMode = HMGetShwPagingMode(pVM);
+     }

      *penmSwitcher = enmSwitcher;
trunk/src/VBox/VMM/VMMR3/PGMBth.h
r70948 → r71222

      PVM pVM = pVCpu->pVMR3;

-     Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
+     Assert((HMIsNestedPagingActive(pVM) || VM_IS_NEM_ENABLED(pVM)) == pVM->pgm.s.fNestedPaging);
      Assert(!pVM->pgm.s.fNestedPaging);
trunk/src/VBox/VMM/VMMR3/PGMShw.h
r70948 → r71222

      PVM pVM = pVCpu->pVMR3;

-     Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
+     Assert((HMIsNestedPagingActive(pVM) || VM_IS_NEM_ENABLED(pVM)) == pVM->pgm.s.fNestedPaging);
      Assert(pVM->pgm.s.fNestedPaging);
      Assert(!pVCpu->pgm.s.pShwPageCR3R3);
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r71083 r71222 528 528 529 529 /** 530 * Worker for VMMR3InitR0 that calls ring-0 to do EMT specific initialization. 531 * 532 * @returns VBox status code. 533 * @param pVM The cross context VM structure. 534 * @param pVCpu The cross context per CPU structure. 535 * @thread EMT(pVCpu) 536 */ 537 static DECLCALLBACK(int) vmmR3InitR0Emt(PVM pVM, PVMCPU pVCpu) 538 { 539 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_INIT_EMT, 0, NULL); 540 } 541 542 543 /** 530 544 * Initializes the R0 VMM. 531 545 * … … 561 575 rc = VINF_SUCCESS; 562 576 #else 563 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, 564 RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL); 577 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL); 565 578 #endif 566 579 /* … … 592 605 else 593 606 LogRel(("VMM: Thread-context hooks unavailable\n")); 607 608 /* 609 * Send all EMTs to ring-0 to get their logger initialized. 610 */ 611 for (VMCPUID idCpu = 0; RT_SUCCESS(rc) && idCpu < pVM->cCpus; idCpu++) 612 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmmR3InitR0Emt, 2, pVM, &pVM->aCpus[idCpu]); 594 613 595 614 return rc; … … 1431 1450 } 1432 1451 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu); 1452 if (RT_FAILURE(rc)) 1453 return rc; 1454 /* Resume R0 */ 1455 } 1456 } 1457 1458 1459 /** 1460 * Perform one of the fast I/O control VMMR0 operation. 1461 * 1462 * @returns VBox strict status code. 1463 * @param pVM The cross context VM structure. 1464 * @param pVCpu The cross context virtual CPU structure. 1465 * @param enmOperation The operation to perform. 1466 */ 1467 VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation) 1468 { 1469 for (;;) 1470 { 1471 VBOXSTRICTRC rcStrict; 1472 do 1473 { 1474 #ifdef NO_SUPCALLR0VMM 1475 rcStrict = VERR_GENERAL_FAILURE; 1476 #else 1477 rcStrict = SUPR3CallVMMR0Fast(pVM->pVMR0, enmOperation, pVCpu->idCpu); 1478 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 1479 rcStrict = pVCpu->vmm.s.iLastGZRc; 1480 #endif 1481 } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER); 1482 1483 #ifdef LOG_ENABLED 1484 /* 1485 * Flush the log 1486 */ 1487 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3; 1488 if ( pR0LoggerR3 1489 && pR0LoggerR3->Logger.offScratch > 0) 1490 RTLogFlushR0(NULL, &pR0LoggerR3->Logger); 1491 #endif /* !LOG_ENABLED */ 1492 if (rcStrict != VINF_VMM_CALL_HOST) 1493 return rcStrict; 1494 int rc = vmmR3ServiceCallRing3Request(pVM, pVCpu); 1433 1495 if (RT_FAILURE(rc)) 1434 1496 return rc; -
trunk/src/VBox/VMM/include/NEMInternal.h
r71184 → r71222

  #ifdef RT_OS_WINDOWS
  # ifdef NEM_WIN_USE_OUR_OWN_RUN_API
+     /** Pending VERR_NEM_CHANGE_PGM_MODE or VERR_NEM_FLUSH_TLB. */
+     int32_t                     rcPgmPending;
      /** The VID_MSHAGN_F_XXX flags.
       * Either VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE or zero. */
…
          uint8_t                 ab[64];
          HV_PARTITION_ID         idPartition;
+         HV_VP_INDEX             idCpu;
+ # ifdef VID_MSHAGN_F_GET_NEXT_MESSAGE
+         VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT MsgSlotHandleAndGetNext;
+ # endif
      } uIoCtlBuf;
  #endif
trunk/src/VBox/VMM/include/VMMInternal.h
r69474 → r71222

  DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);

- /**
-  * Callback function for vmmR0CallRing3SetJmpEx.
+
+ /**
+  * Callback function for vmmR0CallRing3SetJmp2.
   *
   * @returns VBox status code.
   * @param   pvUser      The user argument.
   */
+ typedef DECLCALLBACK(int) FNVMMR0SETJMP2(PGVM pGVM, VMCPUID idCpu);
+ /** Pointer to FNVMMR0SETJMP2(). */
+ typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;
+
+ /**
+  * Same as vmmR0CallRing3SetJmp except for the function signature.
+  *
+  * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
+  * @param   pJmpBuf     The jmp_buf to set.
+  * @param   pfn         The function to be called when not resuming.
+  * @param   pGVM        The ring-0 VM structure.
+  * @param   idCpu       The ID of the calling EMT.
+  */
+ DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);
+
+
+ /**
+  * Callback function for vmmR0CallRing3SetJmpEx.
+  *
+  * @returns VBox status code.
+  * @param   pvUser      The user argument.
+  */
  typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
- /** Pointer to FNVMMR0SETJMP(). */
+ /** Pointer to FNVMMR0SETJMPEX(). */
  typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;