Changeset 92392 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Nov 12, 2021, 10:39:56 AM (3 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r92391 r92392 174 174 static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller, 175 175 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser); 176 static int vmmR3 ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);176 static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu); 177 177 static FNRTTHREAD vmmR3LogFlusher; 178 178 static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf, … … 429 429 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns."); 430 430 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns."); 431 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");432 431 433 432 STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes", STAMUNIT_OCCURENCES, "Total number of buffer flushes"); … … 525 524 * Call Ring-0 entry with init code. 526 525 */ 527 for (;;)528 {529 526 #ifdef NO_SUPCALLR0VMM 530 531 527 //rc = VERR_GENERAL_FAILURE; 528 rc = VINF_SUCCESS; 532 529 #else 533 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL); 534 #endif 535 /* 536 * Flush the logs. 537 */ 530 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL); 531 #endif 532 533 /* 534 * Flush the logs & deal with assertions. 
535 */ 538 536 #ifdef LOG_ENABLED 539 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 540 #endif 541 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 542 if (rc != VINF_VMM_CALL_HOST) 543 break; 544 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu); 545 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) 546 break; 547 /* Resume R0 */ 548 } 549 537 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 538 #endif 539 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 540 if (rc == VERR_VMM_RING0_ASSERTION) 541 rc = vmmR3HandleRing0Assert(pVM, pVCpu); 550 542 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) 551 543 { … … 555 547 } 556 548 549 /* 550 * Log stuff we learned in ring-0. 551 */ 557 552 /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */ 558 553 if (pVM->vmm.s.fIsUsingContextHooks) … … 657 652 * Call Ring-0 entry with termination code. 658 653 */ 659 int rc;660 for (;;)661 {662 654 #ifdef NO_SUPCALLR0VMM 663 664 655 //rc = VERR_GENERAL_FAILURE; 656 int rc = VINF_SUCCESS; 665 657 #else 666 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL); 667 #endif 668 /* 669 * Flush the logs. 670 */ 658 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL); 659 #endif 660 661 /* 662 * Flush the logs & deal with assertions. 
663 */ 671 664 #ifdef LOG_ENABLED 672 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 673 #endif 674 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 675 if (rc != VINF_VMM_CALL_HOST) 676 break; 677 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu); 678 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) 679 break; 680 /* Resume R0 */ 681 } 665 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 666 #endif 667 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 668 if (rc == VERR_VMM_RING0_ASSERTION) 669 rc = vmmR3HandleRing0Assert(pVM, pVCpu); 682 670 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) 683 671 { … … 687 675 } 688 676 677 /* 678 * Do clean ups. 679 */ 689 680 for (VMCPUID i = 0; i < pVM->cCpus; i++) 690 681 { … … 1238 1229 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu))); 1239 1230 1240 for (;;) 1241 { 1242 int rc; 1243 do 1244 { 1231 int rc; 1232 do 1233 { 1245 1234 #ifdef NO_SUPCALLR0VMM 1246 1235 rc = VERR_GENERAL_FAILURE; 1247 1236 #else 1248 1249 1250 1251 #endif 1252 1237 rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu); 1238 if (RT_LIKELY(rc == VINF_SUCCESS)) 1239 rc = pVCpu->vmm.s.iLastGZRc; 1240 #endif 1241 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER); 1253 1242 1254 1243 #if 0 /** @todo triggers too often */ 1255 1256 #endif 1257 1258 1259 1260 1244 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3)); 1245 #endif 1246 1247 /* 1248 * Flush the logs 1249 */ 1261 1250 #ifdef LOG_ENABLED 1262 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 1263 #endif 1264 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 1265 if (rc != VINF_VMM_CALL_HOST) 1266 { 1267 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu))); 1268 return rc; 1269 } 1270 rc = 
vmmR3ServiceCallRing3Request(pVM, pVCpu); 1271 if (RT_FAILURE(rc)) 1272 return rc; 1273 /* Resume R0 */ 1274 } 1251 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 1252 #endif 1253 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 1254 if (rc != VERR_VMM_RING0_ASSERTION) 1255 { 1256 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu))); 1257 return rc; 1258 } 1259 return vmmR3HandleRing0Assert(pVM, pVCpu); 1275 1260 } 1276 1261 … … 1286 1271 VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation) 1287 1272 { 1288 for (;;) 1289 { 1290 VBOXSTRICTRC rcStrict; 1291 do 1292 { 1273 VBOXSTRICTRC rcStrict; 1274 do 1275 { 1293 1276 #ifdef NO_SUPCALLR0VMM 1294 1277 rcStrict = VERR_GENERAL_FAILURE; 1295 1278 #else 1296 1297 1298 1299 #endif 1300 1301 1302 1303 1304 1279 rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu); 1280 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 1281 rcStrict = pVCpu->vmm.s.iLastGZRc; 1282 #endif 1283 } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER); 1284 1285 /* 1286 * Flush the logs 1287 */ 1305 1288 #ifdef LOG_ENABLED 1306 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 1307 #endif 1308 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 1309 if (rcStrict != VINF_VMM_CALL_HOST) 1310 return rcStrict; 1311 int rc = vmmR3ServiceCallRing3Request(pVM, pVCpu); 1312 if (RT_FAILURE(rc)) 1313 return rc; 1314 /* Resume R0 */ 1315 } 1289 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 1290 #endif 1291 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 1292 if (rcStrict != VERR_VMM_RING0_ASSERTION) 1293 return rcStrict; 1294 return vmmR3HandleRing0Assert(pVM, pVCpu); 1316 1295 } 1317 1296 … … 2449 2428 VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION 
enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr) 2450 2429 { 2451 int rc;2452 for (;;)2453 {2430 /* 2431 * Call ring-0. 2432 */ 2454 2433 #ifdef NO_SUPCALLR0VMM 2455 2434 int rc = VERR_GENERAL_FAILURE; 2456 2435 #else 2457 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr); 2458 #endif 2459 /* 2460 * Flush the logs. 2461 */ 2436 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr); 2437 #endif 2438 2439 /* 2440 * Flush the logs and deal with ring-0 assertions. 2441 */ 2462 2442 #ifdef LOG_ENABLED 2463 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 2464 #endif 2465 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 2466 if (rc != VINF_VMM_CALL_HOST) 2467 break; 2468 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu); 2469 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) 2470 break; 2471 /* Resume R0 */ 2472 } 2473 2474 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc), 2475 ("enmOperation=%u rc=%Rrc\n", enmOperation, rc), 2476 VERR_IPE_UNEXPECTED_INFO_STATUS); 2477 return rc; 2478 } 2479 2480 2481 /** 2482 * Service a call to the ring-3 host code. 2443 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL); 2444 #endif 2445 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance()); 2446 if (rc != VERR_VMM_RING0_ASSERTION) 2447 { 2448 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc), 2449 ("enmOperation=%u rc=%Rrc\n", enmOperation, rc), 2450 VERR_IPE_UNEXPECTED_INFO_STATUS); 2451 return rc; 2452 } 2453 return vmmR3HandleRing0Assert(pVM, pVCpu); 2454 } 2455 2456 2457 /** 2458 * Logs a ring-0 assertion ASAP after returning to ring-3. 2483 2459 * 2484 2460 * @returns VBox status code. 2485 * @param pVM The cross context VM structure. 2486 * @param pVCpu The cross context virtual CPU structure. 2487 * @remarks Careful with critsects. 
2488 */ 2489 static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu) 2490 { 2491 /* 2492 * We must also check for pending critsect exits or else we can deadlock 2493 * when entering other critsects here. 2494 */ 2495 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)) 2496 PDMCritSectBothFF(pVM, pVCpu); 2497 2498 switch (pVCpu->vmm.s.enmCallRing3Operation) 2499 { 2500 /* 2501 * Signal a ring 0 hypervisor assertion. 2502 * Cancel the longjmp operation that's in progress. 2503 */ 2504 case VMMCALLRING3_VM_R0_ASSERTION: 2505 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID; 2506 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false; 2461 * @param pVM The cross context VM structure. 2462 * @param pVCpu The cross context virtual CPU structure. 2463 */ 2464 static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu) 2465 { 2466 /* 2467 * Signal a ring 0 hypervisor assertion. 2468 * Cancel the longjmp operation that's in progress. 2469 */ 2470 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false; 2507 2471 #ifdef RT_ARCH_X86 2508 2472 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0; 2509 2473 #else 2510 2474 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0; 2511 2475 #endif 2512 2476 #ifdef VMM_R0_SWITCH_STACK 2513 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */ 2514 #endif 2515 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1)); 2516 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2)); 2517 return VERR_VMM_RING0_ASSERTION; 2518 2519 default: 2520 AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation)); 2521 return VERR_VMM_UNKNOWN_RING3_CALL; 2522 } 2477 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */ 2478 #endif 2479 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1)); 2480 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2)); 2481 return VERR_VMM_RING0_ASSERTION; 2523 2482 } 2524 2483
Note: See TracChangeset for help on using the changeset viewer.