Changeset 39917 in vbox for trunk/src/VBox
- Timestamp: Jan 31, 2012, 2:04:52 PM
- Location: trunk/src/VBox
- Files: 6 edited
trunk/src/VBox/Debugger/VBoxDbgStatsQt4.cpp
Changes since r33540:
- Adds a bool f member to the per-node sample data union for the new STAMTYPE_BOOL and STAMTYPE_BOOL_RESET types.
- Handles the two boolean types when a sample is first read (stores *(bool *)pvSample) and on refresh (computes the delta against the previous value and flags the node for refresh).
- Formats boolean samples as "true"/"false" in the value column, as a signed number in the delta column, and as "true "/"false " followed by the unit when node text is copied out.
- Fixes the child-name binary search: the comparison now runs over RT_MIN(pNode->papChildren[i]->cchName, cchSubName) bytes and breaks ties by length, instead of always comparing cchSubName bytes and then reading pszName[cchSubName] (see the sketch below).
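
The last point is the only behavioral fix in this file and is easiest to see in isolation. A minimal stand-alone sketch of the corrected ordering, using hypothetical names (the real code works on the probed pszSubName/cchSubName and the child node's cached pszName/cchName):

    #include <string.h>

    /* Order two counted strings the way the fixed lookup does: compare the
     * common prefix first, then break ties by length.  Returns <0, 0 or >0. */
    static int cmpCountedNames(const char *pszA, size_t cchA,
                               const char *pszB, size_t cchB)
    {
        size_t const cchCompare = cchA < cchB ? cchA : cchB;   /* RT_MIN */
        int iDiff = memcmp(pszA, pszB, cchCompare);
        if (!iDiff)
            iDiff = cchA == cchB ? 0 : cchA > cchB ? 1 : -1;
        return iDiff;
    }

The old code always compared cchSubName bytes and then examined pszName[cchSubName], which is only valid when the child's name is at least that long; the length tie-break removes that assumption.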
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
Changes since r39909:
- Adds a cShareableModules counter next to pGlobalSharedModuleTree; it is incremented when a global shared module is inserted into the tree and decremented in both places where a module record is removed and freed.
- Moves the per-VM accounting into a new Stats sub-structure: every pGVM->gmm.s.<field> access (Reserved, Allocated, cPrivatePages, cSharedPages, cBalloonedPages, cReqBalloonedPages, cReqActuallyBalloonedPages, cReqDeflatePages, enmPolicy, enmPriority, fMayAllocate) becomes pGVM->gmm.s.Stats.<field> throughout the init, cleanup, reservation, allocation, free, large-page, ballooning and memory-statistics paths.
- Adds GMMR0QueryStatistics and its request wrapper GMMR0QueryStatisticsReq: after validating the pointers and optionally resolving the VM handle, they take the GMM mutex, copy the global counters (cMaxPages, cReservedPages, cOverCommittedPages, cAllocatedPages, cSharedPages, cDuplicatePages, cLeftBehindSharedPages, cBalloonedPages, cChunks, cFreedChunks, cShareableModules) into a GMMSTATS structure and, when a VM was given, the per-VM Stats as well (the ring-3 call pattern is sketched after this list).
- Adds GMMR0ResetStatistics and GMMR0ResetStatisticsReq; resetting is currently a no-op that returns VINF_SUCCESS.
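
For context, the consumer side of the new query request appears in the STAM.cpp hunk of this same changeset and follows the usual SUPVMMR0REQHDR pattern. The fragment below mirrors that hunk rather than being a buildable unit on its own; pVM and pUVM come from the surrounding ring-3 code:

    /* Ring-3 side: ask ring-0 for a GMM statistics snapshot. */
    GMMQUERYSTATISTICSSREQ Req;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.pSession     = pVM->pSession;
    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_QUERY_STATISTICS, 0, &Req.Hdr);
    if (RT_SUCCESS(rc))
        pUVM->stam.s.GMMStats = Req.Stats;  /* keep the ring-3 copy the samples point at */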
trunk/src/VBox/VMM/VMMR0/GMMR0Internal.h
Changes since r37248:
- Drops the local GMMVMSIZES definition (the cBasePages/cShadowPages/cFixedPages triple); the per-VM sizes are now carried inside the new GMMVMSTATS type.
- Replaces the individual per-VM fields of GMMPERVM (Reserved, Allocated, cPrivatePages, cSharedPages, the balloon and balloon-request counters, enmPolicy, enmPriority, fBallooningEnabled, fSharedPagingEnabled, fMayAllocate) with a single GMMVMSTATS Stats member; pSharedModuleTree and idLastChunkHint are kept, as shown in the sketch below.
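
For orientation, this is roughly how the trimmed-down per-VM structure reads after the change (comments shortened, members preceding the bound-mode free set elided):

    typedef struct GMMPERVM
    {
        /* ... earlier members unchanged ... */
        /** Free set for use in bound mode. */
        GMMCHUNKFREESET     Private;
        /** The VM statistics (replaces the former individual fields). */
        GMMVMSTATS          Stats;
        /** Shared module tree (per-vm). */
        PAVLGCPTRNODECORE   pSharedModuleTree;
        /** Hints at the last chunk we allocated some memory from. */
        uint32_t            idLastChunkHint;
    } GMMPERVM;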
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
Changes since r39303:
- Adds VMMR0_DO_GMM_QUERY_STATISTICS and VMMR0_DO_GMM_RESET_STATISTICS to the ring-0 request dispatcher; both reject a non-zero u64Arg and forward the request packet to GMMR0QueryStatisticsReq / GMMR0ResetStatisticsReq.
- Drops the redundant braces around the strict-build VMMR0_DO_GMM_FIND_DUPLICATE_PAGE case and moves the closing #endif accordingly.
trunk/src/VBox/VMM/VMMR3/STAM.cpp
Changes since r39078:
- Adds a g_aGMMStats[] mapping table that describes the GMMSTATS fields as STAM samples: the global counters under /GMM/ and the per-VM values under /GMM/VM/ (reservations, allocations, private/shared/ballooned page counts, balloon request counters, policy, priority and the ballooning/shared-paging/may-allocate flags). The offset-table mechanism behind these rows is illustrated after this list.
- Accepts STAMTYPE_BOOL and STAMTYPE_BOOL_RESET in the registration type check, the reset path (ASMAtomicXchgBool to false for the _RESET variant only), the XML snapshot writer (a BOOL element with an %RTbool value) and the plain-text printer ("true "/"false " plus the unit).
- Re-enables GMM statistics reset: the previously commented-out GMMRESETSTATISTICSSREQ handling now builds the reset mask from the matching /GMM/ names and issues VMMR0_DO_GMM_RESET_STATISTICS alongside the existing GVMM reset call.
- Registers the g_aGMMStats samples against the GMMStats copy kept in the per-UVM STAM data and, whenever an update request matches a /GMM/ name, refreshes that copy through VMMR0_DO_GMM_QUERY_STATISTICS.
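
The table-driven registration used here relies on nothing VirtualBox-specific: each row pairs a structure offset with a sample name, and the update code reads whatever sits at that offset in the ring-3 copy. A small self-contained illustration with hypothetical names (DEMOSTATS, DEMOROW, g_aRows), not the actual STAMR0SAMPLE definition:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DEMOSTATS { uint64_t cMaxPages, cAllocatedPages; } DEMOSTATS;
    typedef struct DEMOROW   { size_t off; const char *pszName; } DEMOROW;

    /* One row per exported statistic, tying a struct offset to a name. */
    static const DEMOROW g_aRows[] =
    {
        { offsetof(DEMOSTATS, cMaxPages),       "/GMM/cMaxPages"       },
        { offsetof(DEMOSTATS, cAllocatedPages), "/GMM/cAllocatedPages" },
    };

    int main(void)
    {
        DEMOSTATS Stats = { 1024, 256 };    /* pretend this was copied from ring-0 */
        for (size_t i = 0; i < sizeof(g_aRows) / sizeof(g_aRows[0]); i++)
            printf("%-24s %llu\n", g_aRows[i].pszName,
                   (unsigned long long)*(const uint64_t *)((const uint8_t *)&Stats + g_aRows[i].off));
        return 0;
    }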
trunk/src/VBox/VMM/include/STAMInternal.h
Changes since r35346:
- Includes VBox/vmm/gmm.h.
- Adds a bool pointer (pf) to the sample data union for the new boolean sample types.
- Adds a GMMSTATS GMMStats member to STAMUSERPERVM, holding the most recent statistics snapshot queried from ring-0.