Changeset 72300 in vbox for trunk/src/VBox/VMM

- Timestamp: May 23, 2018 3:13:06 PM (7 years ago)
- Location: trunk/src/VBox/VMM
- Files: 7 edited
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r72291 r72300 90 90 */ 91 91 #include "../VMMAll/NEMAllNativeTemplate-win.cpp.h" 92 93 /** 94 * Worker for NEMR0InitVM that allocates a hypercall page. 95 * 96 * @returns VBox status code. 97 * @param pHypercallData The hypercall data page to initialize. 98 */ 99 static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData) 100 { 101 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/); 102 if (RT_SUCCESS(rc)) 103 { 104 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/); 105 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3); 106 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj); 107 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3); 108 if (RT_SUCCESS(rc)) 109 return VINF_SUCCESS; 110 111 /* bail out */ 112 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/); 113 } 114 pHypercallData->hMemObj = NIL_RTR0MEMOBJ; 115 pHypercallData->HCPhysPage = NIL_RTHCPHYS; 116 pHypercallData->pbPage = NULL; 117 return rc; 118 } 119 120 /** 121 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page. 122 * 123 * @param pHypercallData The hypercall data page to uninitialize. 124 */ 125 static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData) 126 { 127 /* Check pbPage here since it's NULL, whereas the hMemObj can be either 128 NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */ 129 if (pHypercallData->pbPage != NULL) 130 { 131 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/); 132 pHypercallData->pbPage = NULL; 133 } 134 pHypercallData->hMemObj = NIL_RTR0MEMOBJ; 135 pHypercallData->HCPhysPage = NIL_RTHCPHYS; 136 } 92 137 93 138 … … 127 172 { 128 173 /* 129 * Allocate a page for each VCPU to place hypercall data on. 174 * Allocate a page for non-EMT threads to use for hypercalls (update 175 * statistics and such) and a critical section protecting it. 130 176 */ 131 for (VMCPUID i = 0; i < pGVM->cCpus; i++) 177 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect); 178 if (RT_SUCCESS(rc)) 132 179 { 133 PGVMCPU pGVCpu = &pGVM->aCpus[i]; 134 rc = RTR0MemObjAllocPage(&pGVCpu->nem.s.hHypercallDataMemObj, PAGE_SIZE, false /*fExecutable*/); 180 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData); 135 181 if (RT_SUCCESS(rc)) 136 182 { 137 pGVCpu->nem.s.HCPhysHypercallData = RTR0MemObjGetPagePhysAddr(pGVCpu->nem.s.hHypercallDataMemObj, 0 /*iPage*/); 138 pGVCpu->nem.s.pbHypercallData = (uint8_t *)RTR0MemObjAddress(pGVCpu->nem.s.hHypercallDataMemObj); 139 AssertStmt(pGVCpu->nem.s.HCPhysHypercallData != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3); 140 AssertStmt(pGVCpu->nem.s.pbHypercallData, rc = VERR_INTERNAL_ERROR_3); 183 /* 184 * Allocate a page for each VCPU to place hypercall data on. 185 */ 186 for (VMCPUID i = 0; i < pGVM->cCpus; i++) 187 { 188 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData); 189 if (RT_FAILURE(rc)) 190 { 191 while (i-- > 0) 192 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData); 193 break; 194 } 195 } 196 if (RT_SUCCESS(rc)) 197 { 198 /* 199 * So far, so good. 200 */ 201 return rc; 202 } 203 204 /* 205 * Bail out. 206 */ 207 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData); 141 208 } 142 else 143 pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ; 144 if (RT_FAILURE(rc)) 145 { 146 /* bail. 
*/ 147 do 148 { 149 RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/); 150 pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ; 151 pGVCpu->nem.s.HCPhysHypercallData = NIL_RTHCPHYS; 152 pGVCpu->nem.s.pbHypercallData = NULL; 153 } while (i-- > 0); 154 return rc; 155 } 209 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect); 156 210 } 157 /* 158 * So far, so good. 159 */ 160 return rc; 161 } 162 163 rc = VERR_NEM_MISSING_KERNEL_API; 164 } 165 166 RT_NOREF(pGVM, pVM); 211 } 212 else 213 rc = VERR_NEM_MISSING_KERNEL_API; 214 } 215 216 RT_NOREF(pVM); 167 217 return rc; 168 218 } … … 309 359 VMCPUID i = pGVM->cCpus; 310 360 while (i-- > 0) 311 { 312 PGVMCPU pGVCpu = &pGVM->aCpus[i]; 313 if (pGVCpu->nem.s.pbHypercallData) 314 { 315 pGVCpu->nem.s.pbHypercallData = NULL; 316 int rc = RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/); 317 AssertRC(rc); 318 } 319 pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ; 320 pGVCpu->nem.s.HCPhysHypercallData = NIL_RTHCPHYS; 321 } 361 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData); 362 363 /* The non-EMT one too. */ 364 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect)) 365 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect); 366 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData); 322 367 } 323 368 … … 372 417 * Ring-3 is not allowed to fill in the host physical addresses of the call. 373 418 */ 374 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s. pbHypercallData;419 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage; 375 420 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3); 376 421 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition; … … 387 432 388 433 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32), 389 pGVCpu->nem.s.H CPhysHypercallData, 0);434 pGVCpu->nem.s.HypercallData.HCPhysPage, 0); 390 435 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n", 391 436 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult)); … … 454 499 * Compose and make the hypercall. 455 500 */ 456 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s. pbHypercallData;501 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage; 457 502 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3); 458 503 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition; … … 461 506 462 507 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32), 463 pGVCpu->nem.s.H CPhysHypercallData, 0);508 pGVCpu->nem.s.HypercallData.HCPhysPage, 0); 464 509 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult)); 465 510 if (uResult == ((uint64_t)cPages << 32)) … … 467 512 #if 1 /* Do we need to do this? Hopefully not... */ 468 513 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32), 469 pGVCpu->nem.s.H CPhysHypercallData, 0);514 pGVCpu->nem.s.HypercallData.HCPhysPage, 0); 470 515 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR); 471 516 #endif … … 527 572 { 528 573 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu]; 529 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s. 
pbHypercallData;574 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage; 530 575 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); 531 576 … … 1096 1141 * Set the registers. 1097 1142 */ 1098 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s. pbHypercallData< PAGE_SIZE); /* max is 127 */1143 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */ 1099 1144 1100 1145 /* … … 1102 1147 */ 1103 1148 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg), 1104 pGVCpu->nem.s.H CPhysHypercallData, 0 /*GCPhysOutput*/);1149 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/); 1105 1150 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg), 1106 1151 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg), … … 1156 1201 NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat) 1157 1202 { 1158 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s. pbHypercallData;1203 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage; 1159 1204 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); 1160 1205 … … 1339 1384 1340 1385 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput); 1341 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s. pbHypercallData< PAGE_SIZE); /* (max is around 168 registers) */1386 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */ 1342 1387 RT_BZERO(paValues, cRegs * sizeof(paValues[0])); 1343 1388 … … 1346 1391 */ 1347 1392 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs), 1348 pGVCpu->nem.s.H CPhysHypercallData,1349 pGVCpu->nem.s.H CPhysHypercallData+ cbInput);1393 pGVCpu->nem.s.HypercallData.HCPhysPage, 1394 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput); 1350 1395 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs), 1351 1396 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs), … … 1893 1938 } 1894 1939 1940 1941 /** 1942 * Updates statistics in the VM structure. 1943 * 1944 * @returns VBox status code. 1945 * @param pGVM The ring-0 VM handle. 1946 * @param pVM The cross context VM handle. 1947 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall 1948 * page and arguments. 1949 */ 1950 VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu) 1951 { 1952 /* 1953 * Validate the call. 1954 */ 1955 int rc; 1956 if (idCpu == NIL_VMCPUID) 1957 rc = GVMMR0ValidateGVMandVM(pGVM, pVM); 1958 else 1959 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu); 1960 if (RT_SUCCESS(rc)) 1961 { 1962 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API); 1963 1964 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID 1965 ? &pGVM->aCpus[idCpu].nem.s.HypercallData 1966 : &pGVM->nem.s.HypercallData; 1967 if ( RT_VALID_PTR(pHypercallData->pbPage) 1968 && pHypercallData->HCPhysPage != NIL_RTHCPHYS) 1969 { 1970 if (idCpu == NIL_VMCPUID) 1971 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect); 1972 if (RT_SUCCESS(rc)) 1973 { 1974 /* 1975 * Query the memory statistics for the partition. 
1976 */ 1977 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage; 1978 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition; 1979 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0; 1980 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0; 1981 pInput->ProximityDomainInfo.Flags.Reserved = 0; 1982 pInput->ProximityDomainInfo.Id = 0; 1983 1984 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1); 1985 RT_ZERO(*pOutput); 1986 1987 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance, 1988 pHypercallData->HCPhysPage, 1989 pHypercallData->HCPhysPage + sizeof(*pInput)); 1990 if (uResult == HV_STATUS_SUCCESS) 1991 { 1992 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable; 1993 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse; 1994 rc = VINF_SUCCESS; 1995 } 1996 else 1997 { 1998 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n", 1999 uResult, pOutput->PagesAvailable, pOutput->PagesInUse)); 2000 rc = VINF_NEM_IPE_0; 2001 } 2002 2003 if (idCpu == NIL_VMCPUID) 2004 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect); 2005 } 2006 } 2007 else 2008 rc = VERR_WRONG_ORDER; 2009 } 2010 return rc; 2011 } 2012 -
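The per-VM hypercall page and its critical section give ring-0 code without an EMT context a safe way to issue hypercalls; NEMR0UpdateStatistics above is the first user. The following sketch is not part of the changeset: it only illustrates the calling pattern a similar ring-0 helper could follow. nemR0SampleHypercall and HvCallSomething are made-up names; the types, fields and helpers are the ones introduced or used by this changeset.

static int nemR0SampleHypercall(PGVM pGVM, VMCPUID idCpu)
{
    /* EMTs use their private per-VCPU page; all other threads share the
       per-VM page and must hold the critical section protecting it. */
    PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
                                       ? &pGVM->aCpus[idCpu].nem.s.HypercallData
                                       : &pGVM->nem.s.HypercallData;
    int rc = VINF_SUCCESS;
    if (idCpu == NIL_VMCPUID)
        rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Build the hypercall input in the page and hand its host physical
           address to the hypervisor (HvCallSomething is a placeholder). */
        /* ... fill in *(HV_INPUT_XXX *)pHypercallData->pbPage ... */
        uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallSomething, pHypercallData->HCPhysPage, 0 /*GCPhysOutput*/);
        rc = uResult == HV_STATUS_SUCCESS ? VINF_SUCCESS : VINF_NEM_IPE_0;

        if (idCpu == NIL_VMCPUID)
            RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
    }
    return rc;
}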
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r71223 r72300

 2062 2062             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
 2063 2063             break;
      2064
      2065         case VMMR0_DO_NEM_UPDATE_STATISTICS:
      2066             if (u64Arg || pReqHdr)
      2067                 return VERR_INVALID_PARAMETER;
      2068             rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
      2069             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
      2070             break;
 2064 2071 # endif
 2065 2072 #endif
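For cross-reference, these are the two ring-3 call sites added elsewhere in this changeset that end up in the new dispatcher case above: partition setup on the EMT goes through VMMR3CallR0Emt (NEMR3Native-win.cpp), while the statistics refresh path calls in from a non-EMT thread with NIL_VMCPUID (STAM.cpp).

/* EMT path, NEMR3Native-win.cpp (partition setup): */
VMMR3CallR0Emt(pVM, &pVM->aCpus[0], VMMR0_DO_NEM_UPDATE_STATISTICS, 0, NULL);

/* Non-EMT path, STAM.cpp (STAM_REFRESH_GRP_NEM refresh): */
SUPR3CallVMMR0(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_NEM_UPDATE_STATISTICS, NULL);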
trunk/src/VBox/VMM/VMMR3/EM.cpp
r72208 r72300

 2108 2108                 /** @todo this really isn't nice, should properly handle this */
 2109 2109                 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
      2110                 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
 2110 2111                 if (pVM->em.s.fIemExecutesAll && (   rc2 == VINF_EM_RESCHEDULE_REM
 2111 2112                                                   || rc2 == VINF_EM_RESCHEDULE_HM
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r72290 r72300 725 725 uint32_t iMin, iMax; } s_aUnknowns[] = 726 726 { 727 { 0x000 3, 0x000f },727 { 0x0004, 0x000f }, 728 728 { 0x1003, 0x100f }, 729 729 { 0x2000, 0x200f }, … … 1008 1008 1009 1009 /** 1010 * Wrapper for different WHvSetPartitionProperty signatures.1011 */1012 DECLINLINE(HRESULT) WHvSetPartitionPropertyWrapper(WHV_PARTITION_HANDLE hPartition, WHV_PARTITION_PROPERTY_CODE enmProp,1013 WHV_PARTITION_PROPERTY *pInput, uint32_t cbInput)1014 {1015 return g_pfnWHvSetPartitionProperty(hPartition, enmProp, pInput, cbInput - RT_UOFFSETOF(WHV_PARTITION_PROPERTY, ExtendedVmExits));1016 }1017 1018 1019 /**1020 1010 * Creates and sets up a Hyper-V (exo) partition. 1021 1011 * … … 1054 1044 RT_ZERO(Property); 1055 1045 Property.ProcessorCount = pVM->cCpus; 1056 hrc = WHvSetPartitionProperty Wrapper(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));1046 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property)); 1057 1047 if (SUCCEEDED(hrc)) 1058 1048 { … … 1063 1053 Property.ExtendedVmExits.ExceptionExit = pVM->nem.s.fExtendedXcptExit; 1064 1054 #endif 1065 hrc = WHvSetPartitionProperty Wrapper(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));1055 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property)); 1066 1056 if (SUCCEEDED(hrc)) 1067 1057 { … … 1163 1153 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", iCpu); 1164 1154 } 1155 1156 PUVM pUVM = pVM->pUVM; 1157 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, 1158 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor", 1159 "/NEM/R0Stats/cPagesAvailable"); 1160 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, 1161 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor", 1162 "/NEM/R0Stats/cPagesInUse"); 1165 1163 } 1166 1164 } … … 1212 1210 Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? 
WHvProcessorVendorAmd 1213 1211 : WHvProcessorVendorIntel; 1214 hrc = WHvSetPartitionProperty Wrapper(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property));1212 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property)); 1215 1213 if (FAILED(hrc)) 1216 1214 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, … … 1222 1220 RT_ZERO(Property); 1223 1221 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift; 1224 hrc = WHvSetPartitionProperty Wrapper(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));1222 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property)); 1225 1223 if (FAILED(hrc)) 1226 1224 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, … … 1236 1234 RT_ZERO(Property); 1237 1235 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64; 1238 hrc = WHvSetPartitionProperty Wrapper(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));1236 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property)); 1239 1237 if (FAILED(hrc)) 1240 1238 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, … … 1331 1329 { 1332 1330 LogRel(("NEM: Successfully set up partition (device handle %p, partition ID %#llx)\n", hPartitionDevice, idHvPartition)); 1331 1332 #if 1 1333 VMMR3CallR0Emt(pVM, &pVM->aCpus[0], VMMR0_DO_NEM_UPDATE_STATISTICS, 0, NULL); 1334 LogRel(("NEM: Memory balance: %#RX64 out of %#RX64 pages in use\n", 1335 pVM->nem.s.R0Stats.cPagesInUse, pVM->nem.s.R0Stats.cPagesAvailable)); 1336 #endif 1337 1338 /* 1339 * Register statistics on shared pages. 1340 */ 1341 /** @todo HvCallMapStatsPage */ 1333 1342 return VINF_SUCCESS; 1334 1343 } -
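The two STAMR3RegisterRefresh calls above are the first consumers of the refresh-group API this changeset adds to STAM. A further ring-0 backed counter would be registered the same way; the sketch below is illustrative only, and cPagesTotal is not a field introduced by this changeset.

/* Hypothetical additional counter in the NEM refresh group (sketch only). */
STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesTotal /* made-up field */, STAMTYPE_U64,
                      STAMVISIBILITY_ALWAYS, STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM,
                      "Total pages known to the hypervisor", "/NEM/R0Stats/cPagesTotal");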
trunk/src/VBox/VMM/VMMR3/STAM.cpp
r69111 r72300 144 144 static void stamR3LookupDestroyTree(PSTAMLOOKUP pRoot); 145 145 #endif 146 static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint, 147 STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, STAMUNIT enmUnit, const char *pszDesc); 146 static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset, 147 PFNSTAMR3CALLBACKPRINT pfnPrint, STAMTYPE enmType, STAMVISIBILITY enmVisibility, 148 const char *pszName, STAMUNIT enmUnit, const char *pszDesc, uint8_t iRefreshGrp); 148 149 static int stamR3ResetOne(PSTAMDESC pDesc, void *pvArg); 149 150 static DECLCALLBACK(void) stamR3EnumLogPrintf(PSTAMR3PRINTONEARGS pvArg, const char *pszFormat, ...); … … 158 159 static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0, int (pfnCallback)(PSTAMDESC pDesc, void *pvArg), void *pvArg); 159 160 static void stamR3Ring0StatsRegisterU(PUVM pUVM); 160 static void stamR3Ring0StatsUpdateU(PUVM pUVM, const char *pszPat);161 static void stamR3Ring0StatsUpdateMultiU(PUVM pUVM, const char * const *papszExpressions, unsigned cExpressions);162 161 163 162 #ifdef VBOX_WITH_DEBUGGER … … 389 388 * @param pszDesc Sample description. 390 389 */ 391 VMMR3DECL(int) STAMR3RegisterU(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, STAMUNIT enmUnit, const char *pszDesc) 390 VMMR3DECL(int) STAMR3RegisterU(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, 391 STAMUNIT enmUnit, const char *pszDesc) 392 392 { 393 393 AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER); 394 394 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE); 395 return stamR3RegisterU(pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc );395 return stamR3RegisterU(pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc, STAM_REFRESH_GRP_NONE); 396 396 } 397 397 … … 419 419 * @param pszDesc Sample description. 420 420 */ 421 VMMR3DECL(int) STAMR3Register(PVM pVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, STAMUNIT enmUnit, const char *pszDesc) 421 VMMR3DECL(int) STAMR3Register(PVM pVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, 422 STAMUNIT enmUnit, const char *pszDesc) 422 423 { 423 424 AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER); 424 return stamR3RegisterU(pVM->pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc); 425 return stamR3RegisterU(pVM->pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc, 426 STAM_REFRESH_GRP_NONE); 425 427 } 426 428 … … 576 578 return VERR_NO_MEMORY; 577 579 578 int rc = stamR3RegisterU(pVM->pUVM, pvSample, pfnReset, pfnPrint, STAMTYPE_CALLBACK, enmVisibility, pszFormattedName, enmUnit, pszDesc); 580 int rc = stamR3RegisterU(pVM->pUVM, pvSample, pfnReset, pfnPrint, STAMTYPE_CALLBACK, enmVisibility, pszFormattedName, 581 enmUnit, pszDesc, STAM_REFRESH_GRP_NONE); 579 582 RTStrFree(pszFormattedName); 580 583 return rc; 584 } 585 586 587 /** 588 * Same as STAMR3RegisterFU, except there is an extra refresh group parameter. 589 * 590 * @returns VBox status code. 591 * @param pUVM Pointer to the user mode VM structure. 592 * @param pvSample Pointer to the sample. 593 * @param enmType Sample type. This indicates what pvSample is pointing at. 
594 * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not. 595 * @param enmUnit Sample unit. 596 * @param iRefreshGrp The refresh group, STAM_REFRESH_GRP_XXX. 597 * @param pszDesc Sample description. 598 * @param pszName The sample name format string. 599 * @param ... Arguments to the format string. 600 */ 601 VMMR3DECL(int) STAMR3RegisterRefresh(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit, 602 uint8_t iRefreshGrp, const char *pszDesc, const char *pszName, ...) 603 { 604 va_list args; 605 va_start(args, pszName); 606 int rc = STAMR3RegisterRefreshV(pUVM, pvSample, enmType, enmVisibility, enmUnit, iRefreshGrp, pszDesc, pszName, args); 607 va_end(args); 608 return rc; 609 } 610 611 612 /** 613 * Same as STAMR3RegisterVU, except there is an extra refresh group parameter. 614 * 615 * @returns VBox status code. 616 * @param pUVM The user mode VM structure. 617 * @param pvSample Pointer to the sample. 618 * @param enmType Sample type. This indicates what pvSample is pointing at. 619 * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not. 620 * @param enmUnit Sample unit. 621 * @param iRefreshGrp The refresh group, STAM_REFRESH_GRP_XXX. 622 * @param pszDesc Sample description. 623 * @param pszName The sample name format string. 624 * @param va Arguments to the format string. 625 */ 626 VMMR3DECL(int) STAMR3RegisterRefreshV(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit, 627 uint8_t iRefreshGrp, const char *pszDesc, const char *pszName, va_list va) 628 { 629 AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER); 630 631 char szFormattedName[STAM_MAX_NAME_LEN + 8]; 632 size_t cch = RTStrPrintfV(szFormattedName, sizeof(szFormattedName), pszName, va); 633 AssertReturn(cch <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE); 634 635 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE); 636 return stamR3RegisterU(pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc, iRefreshGrp); 581 637 } 582 638 … … 1251 1307 * @param enmUnit Sample unit. 1252 1308 * @param pszDesc Sample description. 1309 * @param iRefreshGrp The refresh group, STAM_REFRESH_GRP_XXX. 1253 1310 * @remark There is currently no device or driver variant of this API. Add one if it should become necessary! 
1254 1311 */ 1255 1312 static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint, 1256 1313 STAMTYPE enmType, STAMVISIBILITY enmVisibility, 1257 const char *pszName, STAMUNIT enmUnit, const char *pszDesc )1314 const char *pszName, STAMUNIT enmUnit, const char *pszDesc, uint8_t iRefreshGrp) 1258 1315 { 1259 1316 AssertReturn(pszName[0] == '/', VERR_INVALID_NAME); … … 1263 1320 AssertReturn(pszName[cchName - 1] != '/', VERR_INVALID_NAME); 1264 1321 AssertReturn(memchr(pszName, '\\', cchName) == NULL, VERR_INVALID_NAME); 1322 AssertReturn(iRefreshGrp == STAM_REFRESH_GRP_NONE || iRefreshGrp < 64, VERR_INVALID_PARAMETER); 1265 1323 1266 1324 STAM_LOCK_WR(pUVM); … … 1416 1474 } 1417 1475 pNew->enmUnit = enmUnit; 1476 pNew->iRefreshGroup = iRefreshGrp; 1418 1477 pNew->pszDesc = NULL; 1419 1478 if (pszDesc) … … 2379 2438 } 2380 2439 2440 static void stamR3RefreshGroup(PUVM pUVM, uint8_t iRefreshGroup, uint64_t *pbmRefreshedGroups) 2441 { 2442 *pbmRefreshedGroups |= RT_BIT_64(iRefreshGroup); 2443 2444 PVM pVM = pUVM->pVM; 2445 if (pVM && pVM->pSession) 2446 { 2447 switch (iRefreshGroup) 2448 { 2449 /* 2450 * GVMM 2451 */ 2452 case STAM_REFRESH_GRP_GVMM: 2453 { 2454 GVMMQUERYSTATISTICSSREQ Req; 2455 Req.Hdr.cbReq = sizeof(Req); 2456 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 2457 Req.pSession = pVM->pSession; 2458 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GVMM_QUERY_STATISTICS, 0, &Req.Hdr); 2459 if (RT_SUCCESS(rc)) 2460 { 2461 pUVM->stam.s.GVMMStats = Req.Stats; 2462 2463 /* 2464 * Check if the number of host CPUs has changed (it will the first 2465 * time around and normally never again). 2466 */ 2467 if (RT_UNLIKELY(pUVM->stam.s.GVMMStats.cHostCpus > pUVM->stam.s.cRegisteredHostCpus)) 2468 { 2469 if (RT_UNLIKELY(pUVM->stam.s.GVMMStats.cHostCpus > pUVM->stam.s.cRegisteredHostCpus)) 2470 { 2471 STAM_UNLOCK_RD(pUVM); 2472 STAM_LOCK_WR(pUVM); 2473 uint32_t cCpus = pUVM->stam.s.GVMMStats.cHostCpus; 2474 for (uint32_t iCpu = pUVM->stam.s.cRegisteredHostCpus; iCpu < cCpus; iCpu++) 2475 { 2476 char szName[120]; 2477 size_t cchBase = RTStrPrintf(szName, sizeof(szName), "/GVMM/HostCpus/%u", iCpu); 2478 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].idCpu, NULL, NULL, 2479 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, 2480 "Host CPU ID", STAM_REFRESH_GRP_GVMM); 2481 strcpy(&szName[cchBase], "/idxCpuSet"); 2482 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].idxCpuSet, NULL, NULL, 2483 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, 2484 "CPU Set index", STAM_REFRESH_GRP_GVMM); 2485 strcpy(&szName[cchBase], "/DesiredHz"); 2486 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].uDesiredHz, NULL, NULL, 2487 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_HZ, 2488 "The desired frequency", STAM_REFRESH_GRP_GVMM); 2489 strcpy(&szName[cchBase], "/CurTimerHz"); 2490 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].uTimerHz, NULL, NULL, 2491 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_HZ, 2492 "The current timer frequency", STAM_REFRESH_GRP_GVMM); 2493 strcpy(&szName[cchBase], "/PPTChanges"); 2494 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].cChanges, NULL, NULL, 2495 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, 2496 "RTTimerChangeInterval calls", STAM_REFRESH_GRP_GVMM); 2497 strcpy(&szName[cchBase], "/PPTStarts"); 2498 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].cStarts, NULL, NULL, 2499 
STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, 2500 "RTTimerStart calls", STAM_REFRESH_GRP_GVMM); 2501 } 2502 pUVM->stam.s.cRegisteredHostCpus = cCpus; 2503 STAM_UNLOCK_WR(pUVM); 2504 STAM_LOCK_RD(pUVM); 2505 } 2506 } 2507 } 2508 break; 2509 } 2510 2511 /* 2512 * GMM 2513 */ 2514 case STAM_REFRESH_GRP_GMM: 2515 { 2516 GMMQUERYSTATISTICSSREQ Req; 2517 Req.Hdr.cbReq = sizeof(Req); 2518 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 2519 Req.pSession = pVM->pSession; 2520 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_QUERY_STATISTICS, 0, &Req.Hdr); 2521 if (RT_SUCCESS(rc)) 2522 pUVM->stam.s.GMMStats = Req.Stats; 2523 break; 2524 } 2525 2526 /* 2527 * NEM. 2528 */ 2529 case STAM_REFRESH_GRP_NEM: 2530 SUPR3CallVMMR0(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_NEM_UPDATE_STATISTICS, NULL); 2531 break; 2532 2533 default: 2534 AssertMsgFailed(("iRefreshGroup=%d\n", iRefreshGroup)); 2535 } 2536 } 2537 } 2538 2539 2540 /** 2541 * Refreshes the statistics behind the given entry, if necessary. 2542 * 2543 * This helps implement fetching global ring-0 stats into ring-3 accessible 2544 * storage. GVMM, GMM and NEM makes use of this. 2545 * 2546 * @param pUVM The user mode VM handle. 2547 * @param pCur The statistics descriptor which group to check 2548 * and maybe update. 2549 * @param pbmRefreshedGroups Bitmap tracking what has already been updated. 2550 */ 2551 DECLINLINE(void) stamR3Refresh(PUVM pUVM, PSTAMDESC pCur, uint64_t *pbmRefreshedGroups) 2552 { 2553 uint8_t const iRefreshGroup = pCur->iRefreshGroup; 2554 if (RT_LIKELY(iRefreshGroup == STAM_REFRESH_GRP_NONE)) 2555 { /* likely */ } 2556 else if (!(*pbmRefreshedGroups & RT_BIT_64(iRefreshGroup))) 2557 stamR3RefreshGroup(pUVM, iRefreshGroup, pbmRefreshedGroups); 2558 } 2559 2381 2560 2382 2561 /** … … 2413 2592 if (piExpression && i > *piExpression) 2414 2593 { 2415 check if we can skip some expressions 2594 Check if we can skip some expressions. 2595 Requires the expressions to be sorted. 
2416 2596 }*/ 2417 2597 return true; … … 2490 2670 int (*pfnCallback)(PSTAMDESC pDesc, void *pvArg), void *pvArg) 2491 2671 { 2492 int rc = VINF_SUCCESS; 2493 PSTAMDESC pCur; 2672 int rc = VINF_SUCCESS; 2673 uint64_t bmRefreshedGroups = 0; 2674 PSTAMDESC pCur; 2494 2675 2495 2676 /* … … 2498 2679 if (!pszPat || !*pszPat || !strcmp(pszPat, "*")) 2499 2680 { 2500 if (fUpdateRing0)2501 stamR3Ring0StatsUpdateU(pUVM, "*");2502 2503 2681 STAM_LOCK_RD(pUVM); 2504 2682 RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry) 2505 2683 { 2684 if (fUpdateRing0) 2685 stamR3Refresh(pUVM, pCur, &bmRefreshedGroups); 2506 2686 rc = pfnCallback(pCur, pvArg); 2507 2687 if (rc) … … 2516 2696 else if (!strchr(pszPat, '|')) 2517 2697 { 2518 if (fUpdateRing0)2519 stamR3Ring0StatsUpdateU(pUVM, pszPat);2520 2521 2698 STAM_LOCK_RD(pUVM); 2522 2699 #ifdef STAM_WITH_LOOKUP_TREE … … 2525 2702 pCur = stamR3LookupFindDesc(pUVM->stam.s.pRoot, pszPat); 2526 2703 if (pCur) 2704 { 2705 if (fUpdateRing0) 2706 stamR3Refresh(pUVM, pCur, &bmRefreshedGroups); 2527 2707 rc = pfnCallback(pCur, pvArg); 2708 } 2528 2709 } 2529 2710 else … … 2537 2718 if (RTStrSimplePatternMatch(pszPat, pCur->pszName)) 2538 2719 { 2720 if (fUpdateRing0) 2721 stamR3Refresh(pUVM, pCur, &bmRefreshedGroups); 2539 2722 rc = pfnCallback(pCur, pvArg); 2540 2723 if (rc) … … 2556 2739 if (RTStrSimplePatternMatch(pszPat, pCur->pszName)) 2557 2740 { 2741 if (fUpdateRing0) 2742 stamR3Refresh(pUVM, pCur, &bmRefreshedGroups); 2558 2743 rc = pfnCallback(pCur, pvArg); 2559 2744 if (rc) … … 2582 2767 * Perform the enumeration. 2583 2768 */ 2584 if (fUpdateRing0)2585 stamR3Ring0StatsUpdateMultiU(pUVM, papszExpressions, cExpressions);2586 2587 2769 STAM_LOCK_RD(pUVM); 2588 2770 unsigned iExpression = 0; … … 2591 2773 if (stamR3MultiMatch(papszExpressions, cExpressions, &iExpression, pCur->pszName)) 2592 2774 { 2775 if (fUpdateRing0) 2776 stamR3Refresh(pUVM, pCur, &bmRefreshedGroups); 2593 2777 rc = pfnCallback(pCur, pvArg); 2594 2778 if (rc) … … 2617 2801 stamR3RegisterU(pUVM, (uint8_t *)&pUVM->stam.s.GVMMStats + g_aGVMMStats[i].offVar, NULL, NULL, 2618 2802 g_aGVMMStats[i].enmType, STAMVISIBILITY_ALWAYS, g_aGVMMStats[i].pszName, 2619 g_aGVMMStats[i].enmUnit, g_aGVMMStats[i].pszDesc );2803 g_aGVMMStats[i].enmUnit, g_aGVMMStats[i].pszDesc, STAM_REFRESH_GRP_GVMM); 2620 2804 pUVM->stam.s.cRegisteredHostCpus = 0; 2621 2805 … … 2624 2808 stamR3RegisterU(pUVM, (uint8_t *)&pUVM->stam.s.GMMStats + g_aGMMStats[i].offVar, NULL, NULL, 2625 2809 g_aGMMStats[i].enmType, STAMVISIBILITY_ALWAYS, g_aGMMStats[i].pszName, 2626 g_aGMMStats[i].enmUnit, g_aGMMStats[i].pszDesc); 2627 } 2628 2629 2630 /** 2631 * Updates the ring-0 statistics (the copy). 2632 * 2633 * @param pUVM Pointer to the user mode VM structure. 2634 * @param pszPat The pattern. 2635 */ 2636 static void stamR3Ring0StatsUpdateU(PUVM pUVM, const char *pszPat) 2637 { 2638 stamR3Ring0StatsUpdateMultiU(pUVM, &pszPat, 1); 2639 } 2640 2641 2642 /** 2643 * Updates the ring-0 statistics. 2644 * 2645 * The ring-0 statistics aren't directly addressable from ring-3 and must be 2646 * copied when needed. 2647 * 2648 * @param pUVM Pointer to the user mode VM structure. 2649 * @param papszExpressions The patterns (for knowing when to skip). 2650 * @param cExpressions Number of patterns. 
2651 */ 2652 static void stamR3Ring0StatsUpdateMultiU(PUVM pUVM, const char * const *papszExpressions, unsigned cExpressions) 2653 { 2654 PVM pVM = pUVM->pVM; 2655 if (!pVM || !pVM->pSession) 2656 return; 2657 2658 /* 2659 * GVMM 2660 */ 2661 bool fUpdate = false; 2662 for (unsigned i = 0; i < RT_ELEMENTS(g_aGVMMStats); i++) 2663 if (stamR3MultiMatch(papszExpressions, cExpressions, NULL, g_aGVMMStats[i].pszName)) 2664 { 2665 fUpdate = true; 2666 break; 2667 } 2668 if (!fUpdate) 2669 { 2670 /** @todo check the cpu leaves - rainy day. */ 2671 } 2672 if (fUpdate) 2673 { 2674 GVMMQUERYSTATISTICSSREQ Req; 2675 Req.Hdr.cbReq = sizeof(Req); 2676 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 2677 Req.pSession = pVM->pSession; 2678 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GVMM_QUERY_STATISTICS, 0, &Req.Hdr); 2679 if (RT_SUCCESS(rc)) 2680 { 2681 pUVM->stam.s.GVMMStats = Req.Stats; 2682 2683 /* 2684 * Check if the number of host CPUs has changed (it will the first 2685 * time around and normally never again). 2686 */ 2687 if (RT_UNLIKELY(pUVM->stam.s.GVMMStats.cHostCpus > pUVM->stam.s.cRegisteredHostCpus)) 2688 { 2689 STAM_LOCK_WR(pUVM); 2690 if (RT_UNLIKELY(pUVM->stam.s.GVMMStats.cHostCpus > pUVM->stam.s.cRegisteredHostCpus)) 2691 { 2692 uint32_t cCpus = pUVM->stam.s.GVMMStats.cHostCpus; 2693 for (uint32_t iCpu = pUVM->stam.s.cRegisteredHostCpus; iCpu < cCpus; iCpu++) 2694 { 2695 char szName[120]; 2696 size_t cchBase = RTStrPrintf(szName, sizeof(szName), "/GVMM/HostCpus/%u", iCpu); 2697 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].idCpu, NULL, NULL, 2698 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, "Host CPU ID"); 2699 strcpy(&szName[cchBase], "/idxCpuSet"); 2700 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].idxCpuSet, NULL, NULL, 2701 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, "CPU Set index"); 2702 strcpy(&szName[cchBase], "/DesiredHz"); 2703 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].uDesiredHz, NULL, NULL, 2704 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_HZ, "The desired frequency"); 2705 strcpy(&szName[cchBase], "/CurTimerHz"); 2706 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].uTimerHz, NULL, NULL, 2707 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_HZ, "The current timer frequency"); 2708 strcpy(&szName[cchBase], "/PPTChanges"); 2709 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].cChanges, NULL, NULL, 2710 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RTTimerChangeInterval calls"); 2711 strcpy(&szName[cchBase], "/PPTStarts"); 2712 stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].cStarts, NULL, NULL, 2713 STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RTTimerStart calls"); 2714 } 2715 pUVM->stam.s.cRegisteredHostCpus = cCpus; 2716 } 2717 STAM_UNLOCK_WR(pUVM); 2718 } 2719 } 2720 } 2721 2722 /* 2723 * GMM 2724 */ 2725 fUpdate = false; 2726 for (unsigned i = 0; i < RT_ELEMENTS(g_aGMMStats); i++) 2727 if (stamR3MultiMatch(papszExpressions, cExpressions, NULL, g_aGMMStats[i].pszName)) 2728 { 2729 fUpdate = true; 2730 break; 2731 } 2732 if (fUpdate) 2733 { 2734 GMMQUERYSTATISTICSSREQ Req; 2735 Req.Hdr.cbReq = sizeof(Req); 2736 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 2737 Req.pSession = pVM->pSession; 2738 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_QUERY_STATISTICS, 0, &Req.Hdr); 2739 if (RT_SUCCESS(rc)) 2740 pUVM->stam.s.GMMStats = Req.Stats; 2741 } 2810 g_aGMMStats[i].enmUnit, 
g_aGMMStats[i].pszDesc, STAM_REFRESH_GRP_GMM); 2742 2811 } 2743 2812 -
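With the refresh groups wired into stamR3EnumU, a ring-3 enumeration that asks for ring-0 updates now refreshes each group lazily, at most once per pass, instead of eagerly matching patterns up front. Assuming the usual STAM ring-3 query API, a plain pattern query is enough to pull fresh NEM data through the new VMMR0_DO_NEM_UPDATE_STATISTICS path:

/* Prints the NEM ring-0 counters; STAM_REFRESH_GRP_NEM is refreshed once
   before the first matching statistic is visited. */
STAMR3Print(pUVM, "/NEM/R0Stats/*");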
trunk/src/VBox/VMM/include/NEMInternal.h
r72221 r72300

   27   27  #ifdef RT_OS_WINDOWS
   28   28  #include <iprt/nt/hyperv.h>
        29  #include <iprt/critsect.h>
   29   30  #endif
   30   31
  … …
  160  161      NEMWINIOCTL IoCtlMessageSlotHandleAndGetNext;
  161  162
       163      /** Statistics updated by NEMR0UpdateStatistics. */
       164      struct
       165      {
       166          uint64_t cPagesAvailable;
       167          uint64_t cPagesInUse;
       168      } R0Stats;
  162  169  #endif /* RT_OS_WINDOWS */
  163  170  } NEM;
  … …
  256  263
  257  264  #ifdef IN_RING0
       265  # ifdef RT_OS_WINDOWS
       266  /**
       267   * Windows: Hypercall input/ouput page info.
       268   */
       269  typedef struct NEMR0HYPERCALLDATA
       270  {
       271      /** Host physical address of the hypercall input/output page. */
       272      RTHCPHYS HCPhysPage;
       273      /** Pointer to the hypercall input/output page. */
       274      uint8_t *pbPage;
       275      /** Handle to the memory object of the hypercall input/output page. */
       276      RTR0MEMOBJ hMemObj;
       277  } NEMR0HYPERCALLDATA;
       278  /** Pointer to a Windows hypercall input/output page info. */
       279  typedef NEMR0HYPERCALLDATA *PNEMR0HYPERCALLDATA;
       280  # endif /* RT_OS_WINDOWS */
  258  281
  259  282  /**
  … …
  263  286  {
  264  287  # ifdef RT_OS_WINDOWS
  265             /** @name Hypercall input/ouput page.
  266              * @{ */
  267             /** Host physical address of the hypercall input/output page. */
  268             RTHCPHYS HCPhysHypercallData;
  269             /** Pointer to the hypercall input/output page. */
  270             uint8_t *pbHypercallData;
  271             /** Handle to the memory object of the hypercall input/output page. */
  272             RTR0MEMOBJ hHypercallDataMemObj;
  273             /** @} */
       288      /** Hypercall input/ouput page. */
       289      NEMR0HYPERCALLDATA HypercallData;
  274  290  # else
  275  291      uint32_t uDummy;
  … …
  298  314      NEMWINIOCTL IoCtlMessageSlotHandleAndGetNext;
  299  315
       316      /** Hypercall input/ouput page for non-EMT. */
       317      NEMR0HYPERCALLDATA HypercallData;
       318      /** Critical section protecting use of HypercallData. */
       319      RTCRITSECT HypercallDataCritSect;
       320
  300  321  # else
  301  322      uint32_t uDummy;
trunk/src/VBox/VMM/include/STAMInternal.h
r69474 r72300

  124  124      /** Unit. */
  125  125      STAMUNIT enmUnit;
       126      /** The refresh group number (STAM_REFRESH_GRP_XXX). */
       127      uint8_t iRefreshGroup;
  126  128      /** Description. */
  127  129      const char *pszDesc;