Changeset 23146 in vbox for trunk/src/VBox
Timestamp: Sep 18, 2009 8:58:57 PM (16 years ago)
svn:sync-xref-src-repo-rev: 52559
File: 1 edited
Legend:
  ' '  unmodified
  '-'  removed
  '+'  added
trunk/src/VBox/VMM/VM.cpp
--- trunk/src/VBox/VMM/VM.cpp   (r23042)
+++ trunk/src/VBox/VMM/VM.cpp   (r23146)

@@ -133 +133 @@
 static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
 static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
-static DECLCALLBACK(int) vmR3PowerOff(PVM pVM);
 static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
 static void vmR3AtDtor(PVM pVM);

@@ -1196 +1195 @@

 /**
- * EMT worker for vmR3SuspendCommon.
+ * Does the suspend notifications.
+ *
+ * @param pVM The VM handle.
+ * @thread EMT(0)
+ */
+static void vmR3SuspendDoWork(PVM pVM)
+{
+    PDMR3Suspend(pVM);
+}
+
+
+/**
+ * EMT worker for VMR3Suspend.
  *
  * @returns VBox strict status code.

@@ -1202 +1213 @@
  * @retval VERR_VM_INVALID_VM_STATE.
  *
- * @param pVM VM to suspend.
- * @param fFatal Whether it's a fatal error or normal suspend.
+ * @param pVM The VM to suspend.
+ * @param pvUser Our fFatal flag.
  *
  * @thread EMT
  */
-static DECLCALLBACK(int) vmR3Suspend(PVM pVM, bool fFatal)
-{
-    LogFlow(("vmR3Suspend: pVM=%p\n", pVM));
-
-    /*
-     * The first EMT switches the state to suspending.
-     */
-    PVMCPU pVCpu = VMMGetCpu(pVM);
+static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+#if 0
+    if (!pVCpu) pVCpu = VMMGetCpu(pVM);
+#endif
+    LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
+    Assert(!pvUser);
+    Assert(pVCpu);
+
+    /*
+     * The first EMT switches the state to suspending. If this fails because
+     * something was racing us in one way or the other, there will be no more
+     * calls and thus the state assertion below is not going to annoy anyone.
+     */
     if (pVCpu->idCpu == pVM->cCpus - 1)
     {
         int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
-                                 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
-                                 VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
+                                 VMSTATE_SUSPENDING,        VMSTATE_RUNNING,
+                                 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
         if (RT_FAILURE(rc))
             return rc;

@@ -1226 +1243 @@
     VMSTATE enmVMState = VMR3GetState(pVM);
     AssertMsgReturn(   enmVMState == VMSTATE_SUSPENDING
-                    || enmVMState == VMSTATE_SUSPENDING_LS,
+                    || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
                     ("%s\n", VMR3GetStateName(enmVMState)),
-                    VERR_INTERNAL_ERROR_5);
-
-    /*
-     * EMT(0) does the actually suspending *after* all the other CPUs has
+                    VERR_INTERNAL_ERROR_4);
+
+    /*
+     * EMT(0) does the actually suspending *after* all the other CPUs have
      * been thru here.
      */
     if (pVCpu->idCpu == 0)
     {
-        /* Perform suspend notification. */
-        PDMR3Suspend(pVM);
-
-        /*
-         * Change to the final state. Live saving makes this a wee bit more
-         * complicated than one would like.
-         */
-        PUVM pUVM = pVM->pUVM;
-        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
-        VMSTATE enmVMState = pVM->enmVMState;
-        if (enmVMState != VMSTATE_SUSPENDING_LS)
-            vmR3SetStateLocked(pVM, pUVM, fFatal ? VMSTATE_FATAL_ERROR : VMSTATE_SUSPENDED, VMSTATE_SUSPENDING);
-        else if (!fFatal)
-            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
-        else
-        {
-            vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR_LS, VMSTATE_SUSPENDING_LS);
-            SSMR3Cancel(pVM);
-        }
-        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+        vmR3SuspendDoWork(pVM);
+
+        int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
+                                 VMSTATE_SUSPENDED,        VMSTATE_SUSPENDING,
+                                 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
+        if (RT_FAILURE(rc))
+            return VERR_INTERNAL_ERROR_3;
     }
 }

@@ -1263 +1267 @@

 /**
- * Common worker for VMR3Suspend and vmR3SetRuntimeErrorCommon.
- *
- * They both suspends the VM, but the latter ends up in the VMSTATE_FATAL_ERROR
- * instead of VMSTATE_SUSPENDED.
- *
- * @returns VBox strict status code.
- * @param pVM The VM handle.
- * @param fFatal Whether it's a fatal error or not.
+ * Suspends a running VM.
+ *
+ * @returns VBox status code. When called on EMT, this will be a strict status
+ *          code that has to be propagated up the call stack.
+ *
+ * @param pVM The VM to suspend.
  *
  * @thread Any thread.
  * @vmstate Running or RunningLS
- * @vmstateto Suspending + Suspended/FatalError or SuspendingLS +
- *            SuspendedLS/FatalErrorLS
- */
-static int vmR3SuspendCommon(PVM pVM, bool fFatal)
-{
+ * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
+ */
+VMMR3DECL(int) VMR3Suspend(PVM pVM)
+{
+    LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
+    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+#if 0 /* if the code below screws up, this should still work */
     /*
      * Forward the operation to EMT in reverse order so EMT(0) can do the
      * actual suspending after the other ones have stopped running guest code.
      */
-    return VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Suspend, 2, pVM, fFatal);
-}
-
-
-/**
- * Suspends a running VM.
- *
- * @returns VBox status code. When called on EMT, this will be a strict status
- *          code that has to be propagated up the call stack.
- *
- * @param pVM The VM to suspend.
- *
- * @thread Any thread.
- * @vmstate Running or RunningLS
- * @vmstateto Suspending + Suspended or SuspendingLS + SuspendedLS
- */
-VMMR3DECL(int) VMR3Suspend(PVM pVM)
-{
-    LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
-    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
-    int rc = vmR3SuspendCommon(pVM, false /*fFatal*/);
+    int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE,
+                              (PFNRT)vmR3Suspend, 3, pVM, NULL, NULL);
+#else
+    /*
+     * Gather all the EMTs to make sure there are no races before
+     * changing the VM state.
+     */
+    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+                                vmR3Suspend, NULL);
+#endif
+
     LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
     return rc;

@@ -1454 +1449 @@
         int rc2 = vmR3TrySetState(pVM, "vmR3SaveLiveStep1Cleanup", 8,
                                   VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
-                                  VMSTATE_RUNNING, VMSTATE_RESET_LS,
+//                                VMSTATE_RUNNING, VMSTATE_RESET_LS,
                                   VMSTATE_SUSPENDING, VMSTATE_SUSPENDING_LS, /* external*/
                                   VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS,

@@ -1468 +1463 @@
             rc = rc2;
     }
-    /** @todo VMR3Reset during live save (ResetLS, ResettingLS) needs to be
-     *        redone. We should suspend the VM after resetting the state, not
-     *        cancelling the save operation. In the live migration scenario we
-     *        would already have transfered most of the state and the little that
-     *        remains after a reset isn't going to be very big and it's not worth
-     *        making special paths for this. In the live snapshot case, there
-     *        would be a gain in that we wouldn't require a potentially large saved
-     *        state file. But that could be handled on VMR3Save return and size
-     *        shouldn't matter much as already mentioned..
-     *
-     *        Will address this tomorrow. */
     return rc;
 }

@@ -1563 +1547 @@
          * Live snapshot.
          *
-         * The state handling here is kind of tricky, doing it on EMT(0)
-         * helps abit. See the VMSTATE diagram for details. The EMT(0) calls
+         * The state handling here is kind of tricky, doing it on EMT(0) helps
+         * a bit. See the VMSTATE diagram for details. The EMT(0) calls
          * consumes the pSSM handle and calls SSMR3LiveDone.
          */
         rc = SSMR3LiveDoStep1(pSSM);
         if (RT_SUCCESS(rc))
-            rc = vmR3SuspendCommon(pVM, false /*fFatal*/); /** @todo this races external VMR3Suspend calls and may cause trouble (goes for any VMCPUID_ALL* calls messing with the state in the handler). */
+        {
+#if 0
+            /** @todo LS/SMP: Changes out of Running and RunningLS should be done using
+             *        VMMR3EmtRendezvous to avoid races and odd states while we're
+             *        still executing. */
+            for (;;)
+            {
+                /* quietly try switch to SuspendingLS and suspend the VM. */
+                PUVM pUVM = pVM->pUVM;
+                RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+                VMSTATE enmVMState = pVM->enmVMState;
+                if (enmVMState == VMSTATE_RUNNING_LS)
+                {
+                    vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
+                    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+//                  rc = vmR3SuspendCommon(pVM, false /*fFatal*/, true /*fSuspendingAlready*/);
+                    break;
+                }
+                RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+                if (enmVMState != VMSTATE_SUSPENDED_LS)
+                {
+                    rc = VINF_SUCCESS;
+                    break;
+                }
+                if (    enmVMState != VMSTATE_SUSPENDING_LS
+                    &&  enmVMState != VMSTATE_DEBUGGING_LS)
+                {
+                    switch (enmVMState)
+                    {
+                        case VMSTATE_SUSPENDED
+                    }
+                    rc = VERR_SSM_LIVE_CANCELLED;
+                    break;
+                }
+
+                /*
+                 * Wait for the state to change.
+                 */
+                /** @todo LS: fix this mess by some smart use of multiple release event
+                 *        semaphores.. */
+                RTThreadSleep(250);
+            }
+#endif
+        }
         if (RT_SUCCESS(rc))
             rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3SaveLiveStep2, 2, pVM, pSSM);

@@ -1681 +1708 @@
  *
  * @param pVM The VM handle.
+ * @param pVCpu The VMCPU of the EMT.
+ * @param pvUser Unused user argument.
  *
  * @thread EMT.
  */
-static DECLCALLBACK(int) vmR3PowerOff(PVM pVM)
-{
-    LogFlow(("vmR3PowerOff: pVM=%p\n", pVM));
+static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+#if 0
+    pVCpu = VMMGetCpu(pVM);
+#endif
+    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
+    NOREF(pvUser);

     /*
      * The first EMT thru here will change the state to PoweringOff.
      */
-    PVMCPU pVCpu = VMMGetCpu(pVM);
     if (pVCpu->idCpu == pVM->cCpus - 1)
     {

@@ -1821 +1853 @@
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

+#if 0
     /*
      * Forward the request to the EMTs in reverse order, making all the other
      * EMTs stop working before EMT(0) comes and does the actual powering off.
      */
-    int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3PowerOff, 1, pVM);
+    int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3PowerOff, 3, pVM, NULL, NULL);
+#else
+    /*
+     * Gather all the EMTs to make sure there are no races before
+     * changing the VM state.
+     */
+    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+                                vmR3PowerOff, NULL);
+
+#endif
     LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
     return rc;

@@ -2219 +2261 @@
  *          Use NULL to start the enumeration.
  */
-VMMR3DECL(PVM)
+VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
 {
     /*

@@ -2241 +2283 @@
  * @param pvUser User argument.
  */
-VMMR3DECL(int)
+VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
 {
     /*

@@ -2286 +2328 @@
  * @param pfnAtDtor Pointer to callback.
  */
-VMMR3DECL(int)
+VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
 {
     /*

@@ -2354 +2396 @@
  * reset request issued by VMR3Reset().
  *
- * @returns VBox status code.
- * @param pVM VM to reset.
- */
-static DECLCALLBACK(int) vmR3Reset(PVM pVM)
-{
-    int rcRet = VINF_EM_RESET;
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-
-    /*
-     * The first EMT will try change the state to resetting.
-     * We do the live save cancellation inside the state critsect because it
-     * is cleaner and safer.
+ * @returns VBox strict status code.
+ * @param pVM The VM to reset.
+ * @param pVCpu The VMCPU of the EMT.
+ * @param pvUser Unused argument.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+#if 0
+    pVCpu = VMMGetCpu(pVM);
+#endif
+    Assert(!pvUser);
+    NOREF(pvUser);
+
+    /*
+     * The first EMT will try change the state to resetting. If this fails,
+     * we won't get called for the other EMTs.
      */
     if (pVCpu->idCpu == pVM->cCpus - 1)
     {
-        PUVM pUVM = pVM->pUVM;
-        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
         int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
                                  VMSTATE_RESETTING, VMSTATE_RUNNING,
                                  VMSTATE_RESETTING, VMSTATE_SUSPENDED,
                                  VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
-        if (rc == 3)
-            SSMR3Cancel(pVM);
-        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
         if (RT_FAILURE(rc))
             return rc;

@@ -2389 +2430 @@
                     || enmVMState == VMSTATE_RESETTING_LS,
                     ("%s\n", VMR3GetStateName(enmVMState)),
-                    VERR_VM_INVALID_VM_STATE);
+                    VERR_INTERNAL_ERROR_4);

     /*

@@ -2441 +2482 @@
         /*
          * Since EMT(0) is the last to go thru here, it will advance the state.
+         * When a live save is active, we will move on to SuspendingLS but
+         * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
          */
         PUVM pUVM = pVM->pUVM;

@@ -2453 +2496 @@
         }
         else
+            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
+        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+
+        vmR3CheckIntegrity(pVM);
+
+        /*
+         * Do the suspend bit as well.
+         * It only requires some EMT(0) work at present.
+         */
+        if (enmVMState != VMSTATE_RESETTING)
         {
-            /** @todo EMT(0) should not execute code if the state is
-             *        VMSTATE_RESETTING_LS... This requires adding
-             *        VINF_EM_RESET_AND_SUSPEND. Can be done later. */
-            vmR3SetStateLocked(pVM, pUVM, VMSTATE_RESET_LS, VMSTATE_RESETTING_LS);
-            rcRet = VINF_EM_RESET/*_AND_SUSPEND*/;
+            vmR3SuspendDoWork(pVM);
+            vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
         }
-        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
-
-        vmR3CheckIntegrity(pVM);
-    }
-
-    return rcRet;
+    }
+
+    return enmVMState == VMSTATE_RESETTING
+         ? VINF_EM_RESET
+         : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
 }

@@ -2480 +2529 @@
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

-    /*
-     * Forward the query on
-     * Queue reset request to the emulation thread
-     * and wait for it to be processed. (in reverse order as VCPU 0 does the real cleanup)
-     */
-    int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Reset, 1, pVM);
-    AssertLogRelRC(rc);
+#if 0
+    /*
+     * Forward the query to the EMTs in reverse order, so all the other EMT are
+     * dosile when EMT(0) does the actual resetting.
+     */
+    int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Reset, 3, pVM, NULL, NULL);
+#else
+    /*
+     * Gather all the EMTs to make sure there are no races before
+     * changing the VM state.
+     */
+    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+                                vmR3Reset, NULL);
+#endif
+    LogFlow(("VMR3Reset: returns %Rrc\n", rc));
     return rc;
 }

@@ -2523 +2580 @@
         case VMSTATE_RESETTING:         return "RESETTING";
         case VMSTATE_RESETTING_LS:      return "RESETTING_LS";
-        case VMSTATE_RESET_LS:          return "RESET_LS";
         case VMSTATE_SUSPENDED:         return "SUSPENDED";
         case VMSTATE_SUSPENDED_LS:      return "SUSPENDED_LS";
+        case VMSTATE_SUSPENDED_EXT_LS:  return "SUSPENDED_EXT_LS";
         case VMSTATE_SUSPENDING:        return "SUSPENDING";
         case VMSTATE_SUSPENDING_LS:     return "SUSPENDING_LS";
+        case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
         case VMSTATE_SAVING:            return "SAVING";
         case VMSTATE_DEBUGGING:         return "DEBUGGING";

@@ -2608 +2666 @@
             AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF_LS
                             || enmStateNew == VMSTATE_SUSPENDING_LS
+                            || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
                             || enmStateNew == VMSTATE_RESETTING_LS
                             || enmStateNew == VMSTATE_RUNNING

@@ -2621 +2680 @@

         case VMSTATE_RESETTING_LS:
-            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
-                            || enmStateNew == VMSTATE_RESET_LS
+            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING_LS
                             , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
-            break;
-
-        case VMSTATE_RESET_LS:
-            AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
             break;


@@ -2637 +2691 @@
             AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                             || enmStateNew == VMSTATE_SUSPENDED_LS
+                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+            break;
+
+        case VMSTATE_SUSPENDING_EXT_LS:
+            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
+                            || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
                             , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
             break;

@@ -2650 +2710 @@

         case VMSTATE_SUSPENDED_LS:
-            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED_LS
-                            || enmStateNew == VMSTATE_SUSPENDED
+            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
+                            || enmStateNew == VMSTATE_SAVING
+                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+            break;
+
+        case VMSTATE_SUSPENDED_EXT_LS:
+            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
+                            || enmStateNew == VMSTATE_SAVING
                             , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
             break;

@@ -2708 +2774 @@
         case VMSTATE_GURU_MEDITATION_LS:
             AssertMsgReturn(   enmStateNew == VMSTATE_GURU_MEDITATION
+                            || enmStateNew == VMSTATE_DEBUGGING_LS
                             || enmStateNew == VMSTATE_POWERING_OFF_LS
                             , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);

@@ -3398 +3465 @@

 /**
+ * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
+ * the state to FatalError(LS).
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
+ *          code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The VM handle.
+ * @param pVCpu The VMCPU handle of the EMT.
+ * @param pvUser Unused user argument.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+    NOREF(pVCpu);
+    NOREF(pvUser);
+    int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
+                             VMSTATE_FATAL_ERROR,    VMSTATE_RUNNING,
+                             VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
+    if (rc == 2)
+        SSMR3Cancel(pVM);
+    return RT_SUCCESS(rc) ? VINF_SUCCESS : rc;
+}
+
+
+/**
  * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
  *

@@ -3417 +3508 @@
      * Take actions before the call.
      */
-    int rc = VINF_SUCCESS;
+    int rc;
     if (fFlags & VMSETRTERR_FLAGS_FATAL)
-        rc = vmR3SuspendCommon(pVM, true /*fFatal*/);
+        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3SetRuntimeErrorChangeState, NULL);
     else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
-        rc = vmR3SuspendCommon(pVM, false /*fFatal*/);
+        rc = VMR3Suspend(pVM);
+    else
+        rc = VINF_SUCCESS;

     /*
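The recurring change in this diff is replacing per-VCPU VMR3ReqCallWaitU calls with an EMT rendezvous before touching the VM state. As a rough illustration of that pattern only (the worker name vmR3MyOperation is hypothetical; the callback signature, flags, and idCpu checks are the ones visible in the diff above), a VM-wide operation now looks roughly like this:

    /* Worker executed once on every EMT; with DESCENDING order the highest
     * VCPU id goes first and EMT(0) comes last, so EMT(0) can do the real
     * work after the other EMTs have stopped running guest code. */
    static DECLCALLBACK(VBOXSTRICTRC) vmR3MyOperation(PVM pVM, PVMCPU pVCpu, void *pvUser)
    {
        NOREF(pvUser);
        if (pVCpu->idCpu == pVM->cCpus - 1)
        {
            /* first EMT: try switch the VM state, bail out on races */
        }
        if (pVCpu->idCpu == 0)
        {
            /* last EMT: perform the actual notification / state change */
        }
        return VINF_SUCCESS;
    }

    /* Entry point callable from any thread; gathers all EMTs to avoid races. */
    int rc = VMMR3EmtRendezvous(pVM,
                                VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
                                vmR3MyOperation, NULL /*pvUser*/);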