VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxService/VBoxServiceStats.cpp@ 77938

Last change on this file since 77938 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 29.5 KB
Line 
1/* $Id: VBoxServiceStats.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * VBoxStats - Guest statistics notification
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vgsvc_vmstats VBoxService - VM Statistics
19 *
20 * The VM statistics subservice helps out the performance collector API on the
21 * host side by providing metrics from inside the guest.
22 *
23 * See IPerformanceCollector, CollectorGuest and the "Guest/" submetrics that
24 * gets registered by Machine::i_registerMetrics in Main.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#if defined(RT_OS_WINDOWS)
32# include <iprt/win/windows.h>
33# include <psapi.h>
34# include <winternl.h>
35
36#elif defined(RT_OS_LINUX)
37# include <iprt/ctype.h>
38# include <iprt/stream.h>
39# include <unistd.h>
40
41#elif defined(RT_OS_SOLARIS)
42# include <kstat.h>
43# include <sys/sysinfo.h>
44# include <unistd.h>
45#else
46/** @todo port me. */
47
48#endif
49
50#include <iprt/assert.h>
51#include <iprt/mem.h>
52#include <iprt/ldr.h>
53#include <VBox/param.h>
54#include <iprt/semaphore.h>
55#include <iprt/string.h>
56#include <iprt/system.h>
57#include <iprt/time.h>
58#include <iprt/thread.h>
59#include <VBox/err.h>
60#include <VBox/VMMDev.h> /* For VMMDevReportGuestStats and indirectly VbglR3StatReport. */
61#include <VBox/VBoxGuestLib.h>
62
63#include "VBoxServiceInternal.h"
64#include "VBoxServiceUtils.h"
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
/**
 * VM statistics subservice state.
 */
typedef struct _VBOXSTATSCONTEXT
{
    /** Interval between statistics reports in milliseconds; 0 disables reporting
     * (see vgsvcVMStatsWorker). */
    RTMSINTERVAL cMsStatInterval;

    /** Per-CPU tick counters from the previous reporting round, used to compute
     * the load deltas for the current round.  Indexed by CPU id. */
    uint64_t au64LastCpuLoad_Idle[VMM_MAX_CPU_COUNT];
    uint64_t au64LastCpuLoad_Kernel[VMM_MAX_CPU_COUNT];
    uint64_t au64LastCpuLoad_User[VMM_MAX_CPU_COUNT];
    /** Only used by the Linux /proc/stat parser. */
    uint64_t au64LastCpuLoad_Nice[VMM_MAX_CPU_COUNT];

#ifdef RT_OS_WINDOWS
    /* Windows APIs resolved dynamically in vgsvcVMStatsInit (may be NULL). */
    NTSTATUS (WINAPI *pfnNtQuerySystemInformation)(SYSTEM_INFORMATION_CLASS SystemInformationClass, PVOID SystemInformation,
                                                   ULONG SystemInformationLength, PULONG ReturnLength);
    void (WINAPI *pfnGlobalMemoryStatusEx)(LPMEMORYSTATUSEX lpBuffer);
    BOOL (WINAPI *pfnGetPerformanceInfo)(PPERFORMANCE_INFORMATION pPerformanceInformation, DWORD cb);
#endif
} VBOXSTATSCONTEXT;
86
87
88/*********************************************************************************************************************************
89* Global Variables *
90*********************************************************************************************************************************/
/** Global subservice state (intervals, last CPU samples, resolved APIs). */
static VBOXSTATSCONTEXT g_VMStat = {0};

/** The semaphore the worker blocks on between reports; signalled by pfnStop
 * to wake the worker for shutdown. */
static RTSEMEVENTMULTI g_VMStatEvent = NIL_RTSEMEVENTMULTI;
96
97
98/**
99 * @interface_method_impl{VBOXSERVICE,pfnInit}
100 */
101static DECLCALLBACK(int) vgsvcVMStatsInit(void)
102{
103 VGSvcVerbose(3, "vgsvcVMStatsInit\n");
104
105 int rc = RTSemEventMultiCreate(&g_VMStatEvent);
106 AssertRCReturn(rc, rc);
107
108 g_VMStat.cMsStatInterval = 0; /* default; update disabled */
109 RT_ZERO(g_VMStat.au64LastCpuLoad_Idle);
110 RT_ZERO(g_VMStat.au64LastCpuLoad_Kernel);
111 RT_ZERO(g_VMStat.au64LastCpuLoad_User);
112 RT_ZERO(g_VMStat.au64LastCpuLoad_Nice);
113
114 rc = VbglR3StatQueryInterval(&g_VMStat.cMsStatInterval);
115 if (RT_SUCCESS(rc))
116 VGSvcVerbose(3, "vgsvcVMStatsInit: New statistics interval %u seconds\n", g_VMStat.cMsStatInterval);
117 else
118 VGSvcVerbose(3, "vgsvcVMStatsInit: DeviceIoControl failed with %d\n", rc);
119
120#ifdef RT_OS_WINDOWS
121 /* NtQuerySystemInformation might be dropped in future releases, so load
122 it dynamically as per Microsoft's recommendation. */
123 *(void **)&g_VMStat.pfnNtQuerySystemInformation = RTLdrGetSystemSymbol("ntdll.dll", "NtQuerySystemInformation");
124 if (g_VMStat.pfnNtQuerySystemInformation)
125 VGSvcVerbose(3, "vgsvcVMStatsInit: g_VMStat.pfnNtQuerySystemInformation = %x\n", g_VMStat.pfnNtQuerySystemInformation);
126 else
127 {
128 VGSvcVerbose(3, "vgsvcVMStatsInit: ntdll.NtQuerySystemInformation not found!\n");
129 return VERR_SERVICE_DISABLED;
130 }
131
132 /* GlobalMemoryStatus is win2k and up, so load it dynamically */
133 *(void **)&g_VMStat.pfnGlobalMemoryStatusEx = RTLdrGetSystemSymbol("kernel32.dll", "GlobalMemoryStatusEx");
134 if (g_VMStat.pfnGlobalMemoryStatusEx)
135 VGSvcVerbose(3, "vgsvcVMStatsInit: g_VMStat.GlobalMemoryStatusEx = %x\n", g_VMStat.pfnGlobalMemoryStatusEx);
136 else
137 {
138 /** @todo Now fails in NT4; do we care? */
139 VGSvcVerbose(3, "vgsvcVMStatsInit: kernel32.GlobalMemoryStatusEx not found!\n");
140 return VERR_SERVICE_DISABLED;
141 }
142
143 /* GetPerformanceInfo is xp and up, so load it dynamically */
144 *(void **)&g_VMStat.pfnGetPerformanceInfo = RTLdrGetSystemSymbol("psapi.dll", "GetPerformanceInfo");
145 if (g_VMStat.pfnGetPerformanceInfo)
146 VGSvcVerbose(3, "vgsvcVMStatsInit: g_VMStat.pfnGetPerformanceInfo= %x\n", g_VMStat.pfnGetPerformanceInfo);
147#endif /* RT_OS_WINDOWS */
148
149 return VINF_SUCCESS;
150}
151
152
/**
 * Gathers VM statistics and reports them to the host.
 *
 * Collects memory figures and per-CPU load percentages using platform
 * specific facilities (NtQuerySystemInformation & friends on Windows,
 * /proc on Linux, kstat on Solaris) and sends them to the host via
 * VbglR3StatReport - one report per CPU, or a single CPU-less report when
 * no CPU information could be obtained.
 *
 * CPU load is computed from the delta between the current tick counters and
 * the ones saved in g_VMStat by the previous round.
 */
static void vgsvcVMStatsReport(void)
{
#if defined(RT_OS_WINDOWS)
    /* Both APIs are resolved in vgsvcVMStatsInit; without them we can't report. */
    Assert(g_VMStat.pfnGlobalMemoryStatusEx && g_VMStat.pfnNtQuerySystemInformation);
    if (   !g_VMStat.pfnGlobalMemoryStatusEx
        || !g_VMStat.pfnNtQuerySystemInformation)
        return;

    /* Clear the report so we don't report garbage should NtQuerySystemInformation
       behave in an unexpected manner. */
    VMMDevReportGuestStats req;
    RT_ZERO(req);

    /* Query and report guest statistics */
    SYSTEM_INFO systemInfo;
    GetSystemInfo(&systemInfo);

    MEMORYSTATUSEX memStatus;
    memStatus.dwLength = sizeof(memStatus);
    g_VMStat.pfnGlobalMemoryStatusEx(&memStatus);

    /* Memory figures are converted from bytes to 4KB pages. */
    req.guestStats.u32PageSize = systemInfo.dwPageSize;
    req.guestStats.u32PhysMemTotal = (uint32_t)(memStatus.ullTotalPhys / _4K);
    req.guestStats.u32PhysMemAvail = (uint32_t)(memStatus.ullAvailPhys / _4K);
    /* The current size of the committed memory limit, in bytes. This is physical
       memory plus the size of the page file, minus a small overhead. */
    req.guestStats.u32PageFileSize = (uint32_t)(memStatus.ullTotalPageFile / _4K) - req.guestStats.u32PhysMemTotal;
    req.guestStats.u32MemoryLoad = memStatus.dwMemoryLoad;
    req.guestStats.u32StatCaps = VBOX_GUEST_STAT_PHYS_MEM_TOTAL
                               | VBOX_GUEST_STAT_PHYS_MEM_AVAIL
                               | VBOX_GUEST_STAT_PAGE_FILE_SIZE
                               | VBOX_GUEST_STAT_MEMORY_LOAD;
# ifdef VBOX_WITH_MEMBALLOON
    req.guestStats.u32PhysMemBalloon = VGSvcBalloonQueryPages(_4K);
    req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_PHYS_MEM_BALLOON;
# else
    req.guestStats.u32PhysMemBalloon = 0;
# endif

    /* Optional extras when psapi's GetPerformanceInfo is available (XP+). */
    if (g_VMStat.pfnGetPerformanceInfo)
    {
        PERFORMANCE_INFORMATION perfInfo;

        if (g_VMStat.pfnGetPerformanceInfo(&perfInfo, sizeof(perfInfo)))
        {
            req.guestStats.u32Processes = perfInfo.ProcessCount;
            req.guestStats.u32Threads = perfInfo.ThreadCount;
            req.guestStats.u32Handles = perfInfo.HandleCount;
            req.guestStats.u32MemCommitTotal = perfInfo.CommitTotal; /* already in pages */
            req.guestStats.u32MemKernelTotal = perfInfo.KernelTotal; /* already in pages */
            req.guestStats.u32MemKernelPaged = perfInfo.KernelPaged; /* already in pages */
            req.guestStats.u32MemKernelNonPaged = perfInfo.KernelNonpaged; /* already in pages */
            req.guestStats.u32MemSystemCache = perfInfo.SystemCache; /* already in pages */
            req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_PROCESSES | VBOX_GUEST_STAT_THREADS | VBOX_GUEST_STAT_HANDLES
                                        | VBOX_GUEST_STAT_MEM_COMMIT_TOTAL | VBOX_GUEST_STAT_MEM_KERNEL_TOTAL
                                        | VBOX_GUEST_STAT_MEM_KERNEL_PAGED | VBOX_GUEST_STAT_MEM_KERNEL_NONPAGED
                                        | VBOX_GUEST_STAT_MEM_SYSTEM_CACHE;
        }
        else
            VGSvcVerbose(3, "vgsvcVMStatsReport: GetPerformanceInfo failed with %d\n", GetLastError());
    }

    /* Query CPU load information */
    uint32_t cbStruct = systemInfo.dwNumberOfProcessors * sizeof(SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION);
    PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION pProcInfo;
    pProcInfo = (PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION)RTMemAlloc(cbStruct);
    if (!pProcInfo)
        return;

    /* Unfortunately GetSystemTimes is XP SP1 and up only, so we need to use the semi-undocumented NtQuerySystemInformation */
    bool fCpuInfoAvail = false;
    DWORD cbReturned;
    NTSTATUS rcNt = g_VMStat.pfnNtQuerySystemInformation(SystemProcessorPerformanceInformation, pProcInfo, cbStruct, &cbReturned);
    if (   !rcNt
        && cbReturned == cbStruct)
    {
        for (uint32_t i = 0; i < systemInfo.dwNumberOfProcessors; i++)
        {
            /* g_VMStat only has room for VMM_MAX_CPU_COUNT CPUs. */
            if (i >= VMM_MAX_CPU_COUNT)
            {
                VGSvcVerbose(3, "vgsvcVMStatsReport: skipping information for CPUs %u..%u\n", i, systemInfo.dwNumberOfProcessors);
                break;
            }

            if (g_VMStat.au64LastCpuLoad_Kernel[i] == 0)
            {
                /* First round for this CPU: seed the baseline, wait a bit and
                   resample so the deltas below are not taken against zero. */
                g_VMStat.au64LastCpuLoad_Idle[i] = pProcInfo[i].IdleTime.QuadPart;
                g_VMStat.au64LastCpuLoad_Kernel[i] = pProcInfo[i].KernelTime.QuadPart;
                g_VMStat.au64LastCpuLoad_User[i] = pProcInfo[i].UserTime.QuadPart;

                Sleep(250);

                rcNt = g_VMStat.pfnNtQuerySystemInformation(SystemProcessorPerformanceInformation, pProcInfo, cbStruct, &cbReturned);
                Assert(!rcNt);
            }

            uint64_t deltaIdle = (pProcInfo[i].IdleTime.QuadPart - g_VMStat.au64LastCpuLoad_Idle[i]);
            uint64_t deltaKernel = (pProcInfo[i].KernelTime.QuadPart - g_VMStat.au64LastCpuLoad_Kernel[i]);
            uint64_t deltaUser = (pProcInfo[i].UserTime.QuadPart - g_VMStat.au64LastCpuLoad_User[i]);
            deltaKernel -= deltaIdle; /* idle time is added to kernel time */
            uint64_t ullTotalTime = deltaIdle + deltaKernel + deltaUser;
            if (ullTotalTime == 0) /* Prevent division through zero. */
                ullTotalTime = 1;

            /* Loads as integer percentages of the total elapsed ticks. */
            req.guestStats.u32CpuLoad_Idle = (uint32_t)(deltaIdle * 100 / ullTotalTime);
            req.guestStats.u32CpuLoad_Kernel = (uint32_t)(deltaKernel* 100 / ullTotalTime);
            req.guestStats.u32CpuLoad_User = (uint32_t)(deltaUser * 100 / ullTotalTime);

            req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_CPU_LOAD_IDLE
                                        | VBOX_GUEST_STAT_CPU_LOAD_KERNEL
                                        | VBOX_GUEST_STAT_CPU_LOAD_USER;
            req.guestStats.u32CpuId = i;
            fCpuInfoAvail = true;
            int rc = VbglR3StatReport(&req);
            if (RT_SUCCESS(rc))
                VGSvcVerbose(3, "vgsvcVMStatsReport: new statistics (CPU %u) reported successfully!\n", i);
            else
                VGSvcVerbose(3, "vgsvcVMStatsReport: VbglR3StatReport failed with rc=%Rrc\n", rc);

            /* Save the current samples as the baseline for the next round. */
            g_VMStat.au64LastCpuLoad_Idle[i] = pProcInfo[i].IdleTime.QuadPart;
            g_VMStat.au64LastCpuLoad_Kernel[i] = pProcInfo[i].KernelTime.QuadPart;
            g_VMStat.au64LastCpuLoad_User[i] = pProcInfo[i].UserTime.QuadPart;
        }
    }
    RTMemFree(pProcInfo);

    /* No per-CPU reports were sent; send one report with the memory stats only. */
    if (!fCpuInfoAvail)
    {
        VGSvcVerbose(3, "vgsvcVMStatsReport: CPU info not available!\n");
        int rc = VbglR3StatReport(&req);
        if (RT_SUCCESS(rc))
            VGSvcVerbose(3, "vgsvcVMStatsReport: new statistics reported successfully!\n");
        else
            VGSvcVerbose(3, "vgsvcVMStatsReport: stats report failed with rc=%Rrc\n", rc);
    }

#elif defined(RT_OS_LINUX)
    VMMDevReportGuestStats req;
    RT_ZERO(req);
    PRTSTREAM pStrm;
    char szLine[256];
    char *psz;

    /* Memory statistics from /proc/meminfo; values there are in KiB, converted
       to bytes here and to 4KB pages when filling in the request. */
    int rc = RTStrmOpen("/proc/meminfo", "r", &pStrm);
    if (RT_SUCCESS(rc))
    {
        uint64_t u64Kb;
        uint64_t u64Total = 0, u64Free = 0, u64Buffers = 0, u64Cached = 0, u64PagedTotal = 0;
        for (;;)
        {
            rc = RTStrmGetLine(pStrm, szLine, sizeof(szLine));
            if (RT_FAILURE(rc))
                break;
            /* strstr(szLine, X) == szLine is a prefix test. */
            if (strstr(szLine, "MemTotal:") == szLine)
            {
                rc = RTStrToUInt64Ex(RTStrStripL(&szLine[9]), &psz, 0, &u64Kb);
                if (RT_SUCCESS(rc))
                    u64Total = u64Kb * _1K;
            }
            else if (strstr(szLine, "MemFree:") == szLine)
            {
                rc = RTStrToUInt64Ex(RTStrStripL(&szLine[8]), &psz, 0, &u64Kb);
                if (RT_SUCCESS(rc))
                    u64Free = u64Kb * _1K;
            }
            else if (strstr(szLine, "Buffers:") == szLine)
            {
                rc = RTStrToUInt64Ex(RTStrStripL(&szLine[8]), &psz, 0, &u64Kb);
                if (RT_SUCCESS(rc))
                    u64Buffers = u64Kb * _1K;
            }
            else if (strstr(szLine, "Cached:") == szLine)
            {
                rc = RTStrToUInt64Ex(RTStrStripL(&szLine[7]), &psz, 0, &u64Kb);
                if (RT_SUCCESS(rc))
                    u64Cached = u64Kb * _1K;
            }
            else if (strstr(szLine, "SwapTotal:") == szLine)
            {
                rc = RTStrToUInt64Ex(RTStrStripL(&szLine[10]), &psz, 0, &u64Kb);
                if (RT_SUCCESS(rc))
                    u64PagedTotal = u64Kb * _1K;
            }
        }
        /* Buffers and page cache count as available/reclaimable memory. */
        req.guestStats.u32PhysMemTotal = u64Total / _4K;
        req.guestStats.u32PhysMemAvail = (u64Free + u64Buffers + u64Cached) / _4K;
        req.guestStats.u32MemSystemCache = (u64Buffers + u64Cached) / _4K;
        req.guestStats.u32PageFileSize = u64PagedTotal / _4K;
        RTStrmClose(pStrm);
    }
    else
        VGSvcVerbose(3, "vgsvcVMStatsReport: memory info not available!\n");

    req.guestStats.u32PageSize = getpagesize();
    req.guestStats.u32StatCaps = VBOX_GUEST_STAT_PHYS_MEM_TOTAL
                               | VBOX_GUEST_STAT_PHYS_MEM_AVAIL
                               | VBOX_GUEST_STAT_MEM_SYSTEM_CACHE
                               | VBOX_GUEST_STAT_PAGE_FILE_SIZE;
# ifdef VBOX_WITH_MEMBALLOON
    req.guestStats.u32PhysMemBalloon = VGSvcBalloonQueryPages(_4K);
    req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_PHYS_MEM_BALLOON;
# else
    req.guestStats.u32PhysMemBalloon = 0;
# endif


    /** @todo req.guestStats.u32Threads */
    /** @todo req.guestStats.u32Processes */
    /* req.guestStats.u32Handles doesn't make sense here. */
    /** @todo req.guestStats.u32MemoryLoad */
    /** @todo req.guestStats.u32MemCommitTotal */
    /** @todo req.guestStats.u32MemKernelTotal */
    /** @todo req.guestStats.u32MemKernelPaged, make any sense? = u32MemKernelTotal? */
    /** @todo req.guestStats.u32MemKernelNonPaged, make any sense? = 0? */

    /* Per-CPU load from the "cpuN" lines of /proc/stat
       (fields: user, nice, system, idle, ...). */
    bool fCpuInfoAvail = false;
    rc = RTStrmOpen("/proc/stat", "r", &pStrm);
    if (RT_SUCCESS(rc))
    {
        for (;;)
        {
            rc = RTStrmGetLine(pStrm, szLine, sizeof(szLine));
            if (RT_FAILURE(rc))
                break;
            if (   strstr(szLine, "cpu") == szLine
                && strlen(szLine) > 3
                && RT_C_IS_DIGIT(szLine[3]))
            {
                uint32_t u32CpuId;
                rc = RTStrToUInt32Ex(&szLine[3], &psz, 0, &u32CpuId);
                if (u32CpuId < VMM_MAX_CPU_COUNT)
                {
                    uint64_t u64User = 0;
                    if (RT_SUCCESS(rc))
                        rc = RTStrToUInt64Ex(RTStrStripL(psz), &psz, 0, &u64User);

                    uint64_t u64Nice = 0;
                    if (RT_SUCCESS(rc))
                        rc = RTStrToUInt64Ex(RTStrStripL(psz), &psz, 0, &u64Nice);

                    uint64_t u64System = 0;
                    if (RT_SUCCESS(rc))
                        rc = RTStrToUInt64Ex(RTStrStripL(psz), &psz, 0, &u64System);

                    uint64_t u64Idle = 0;
                    if (RT_SUCCESS(rc))
                        rc = RTStrToUInt64Ex(RTStrStripL(psz), &psz, 0, &u64Idle);

                    /* Deltas against the previous round's samples. */
                    uint64_t u64DeltaIdle = u64Idle - g_VMStat.au64LastCpuLoad_Idle[u32CpuId];
                    uint64_t u64DeltaSystem = u64System - g_VMStat.au64LastCpuLoad_Kernel[u32CpuId];
                    uint64_t u64DeltaUser = u64User - g_VMStat.au64LastCpuLoad_User[u32CpuId];
                    uint64_t u64DeltaNice = u64Nice - g_VMStat.au64LastCpuLoad_Nice[u32CpuId];

                    uint64_t u64DeltaAll = u64DeltaIdle
                                         + u64DeltaSystem
                                         + u64DeltaUser
                                         + u64DeltaNice;
                    if (u64DeltaAll == 0) /* Prevent division through zero. */
                        u64DeltaAll = 1;

                    g_VMStat.au64LastCpuLoad_Idle[u32CpuId] = u64Idle;
                    g_VMStat.au64LastCpuLoad_Kernel[u32CpuId] = u64System;
                    g_VMStat.au64LastCpuLoad_User[u32CpuId] = u64User;
                    g_VMStat.au64LastCpuLoad_Nice[u32CpuId] = u64Nice;

                    /* "nice" time counts towards user load. */
                    req.guestStats.u32CpuLoad_Idle = (uint32_t)(u64DeltaIdle * 100 / u64DeltaAll);
                    req.guestStats.u32CpuLoad_Kernel = (uint32_t)(u64DeltaSystem * 100 / u64DeltaAll);
                    req.guestStats.u32CpuLoad_User = (uint32_t)((u64DeltaUser
                                                              + u64DeltaNice) * 100 / u64DeltaAll);
                    req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_CPU_LOAD_IDLE
                                                | VBOX_GUEST_STAT_CPU_LOAD_KERNEL
                                                | VBOX_GUEST_STAT_CPU_LOAD_USER;
                    req.guestStats.u32CpuId = u32CpuId;
                    fCpuInfoAvail = true;
                    rc = VbglR3StatReport(&req);
                    if (RT_SUCCESS(rc))
                        VGSvcVerbose(3, "vgsvcVMStatsReport: new statistics (CPU %u) reported successfully!\n", u32CpuId);
                    else
                        VGSvcVerbose(3, "vgsvcVMStatsReport: stats report failed with rc=%Rrc\n", rc);
                }
                else
                    VGSvcVerbose(3, "vgsvcVMStatsReport: skipping information for CPU%u\n", u32CpuId);
            }
        }
        RTStrmClose(pStrm);
    }
    /* No per-CPU reports were sent; send one report with the memory stats only. */
    if (!fCpuInfoAvail)
    {
        VGSvcVerbose(3, "vgsvcVMStatsReport: CPU info not available!\n");
        rc = VbglR3StatReport(&req);
        if (RT_SUCCESS(rc))
            VGSvcVerbose(3, "vgsvcVMStatsReport: new statistics reported successfully!\n");
        else
            VGSvcVerbose(3, "vgsvcVMStatsReport: stats report failed with rc=%Rrc\n", rc);
    }

#elif defined(RT_OS_SOLARIS)
    VMMDevReportGuestStats req;
    RT_ZERO(req);
    kstat_ctl_t *pStatKern = kstat_open();
    if (pStatKern)
    {
        /*
         * Memory statistics.
         * NOTE(review): u64Buffers is declared for symmetry with the Linux
         * path but never assigned on Solaris.
         */
        uint64_t u64Total = 0, u64Free = 0, u64Buffers = 0, u64Cached = 0, u64PagedTotal = 0;
        int rc = -1;
        kstat_t *pStatPages = kstat_lookup(pStatKern, (char *)"unix", 0 /* instance */, (char *)"system_pages");
        if (pStatPages)
        {
            rc = kstat_read(pStatKern, pStatPages, NULL /* optional-copy-buf */);
            if (rc != -1)
            {
                kstat_named_t *pStat = NULL;
                pStat = (kstat_named_t *)kstat_data_lookup(pStatPages, (char *)"pagestotal");
                if (pStat)
                    u64Total = pStat->value.ul;

                pStat = (kstat_named_t *)kstat_data_lookup(pStatPages, (char *)"freemem");
                if (pStat)
                    u64Free = pStat->value.ul;
            }
        }

        /* The ZFS ARC size (bytes) is the closest thing to a system cache figure. */
        kstat_t *pStatZFS = kstat_lookup(pStatKern, (char *)"zfs", 0 /* instance */, (char *)"arcstats");
        if (pStatZFS)
        {
            rc = kstat_read(pStatKern, pStatZFS, NULL /* optional-copy-buf */);
            if (rc != -1)
            {
                kstat_named_t *pStat = (kstat_named_t *)kstat_data_lookup(pStatZFS, (char *)"size");
                if (pStat)
                    u64Cached = pStat->value.ul;
            }
        }

        /*
         * The vminfo are accumulative counters updated every "N" ticks. Let's get the
         * number of stat updates so far and use that to divide the swap counter.
         */
        kstat_t *pStatInfo = kstat_lookup(pStatKern, (char *)"unix", 0 /* instance */, (char *)"sysinfo");
        if (pStatInfo)
        {
            sysinfo_t SysInfo;
            rc = kstat_read(pStatKern, pStatInfo, &SysInfo);
            if (rc != -1)
            {
                kstat_t *pStatVMInfo = kstat_lookup(pStatKern, (char *)"unix", 0 /* instance */, (char *)"vminfo");
                if (pStatVMInfo)
                {
                    vminfo_t VMInfo;
                    rc = kstat_read(pStatKern, pStatVMInfo, &VMInfo);
                    if (rc != -1)
                    {
                        Assert(SysInfo.updates != 0);
                        u64PagedTotal = VMInfo.swap_avail / SysInfo.updates;
                    }
                }
            }
        }

        req.guestStats.u32PhysMemTotal = u64Total; /* already in pages */
        req.guestStats.u32PhysMemAvail = u64Free; /* already in pages */
        req.guestStats.u32MemSystemCache = u64Cached / _4K;
        req.guestStats.u32PageFileSize = u64PagedTotal; /* already in pages */
        /** @todo req.guestStats.u32Threads */
        /** @todo req.guestStats.u32Processes */
        /** @todo req.guestStats.u32Handles -- ??? */
        /** @todo req.guestStats.u32MemoryLoad */
        /** @todo req.guestStats.u32MemCommitTotal */
        /** @todo req.guestStats.u32MemKernelTotal */
        /** @todo req.guestStats.u32MemKernelPaged */
        /** @todo req.guestStats.u32MemKernelNonPaged */
        req.guestStats.u32PageSize = getpagesize();

        req.guestStats.u32StatCaps = VBOX_GUEST_STAT_PHYS_MEM_TOTAL
                                   | VBOX_GUEST_STAT_PHYS_MEM_AVAIL
                                   | VBOX_GUEST_STAT_MEM_SYSTEM_CACHE
                                   | VBOX_GUEST_STAT_PAGE_FILE_SIZE;
#ifdef VBOX_WITH_MEMBALLOON
        req.guestStats.u32PhysMemBalloon = VGSvcBalloonQueryPages(_4K);
        req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_PHYS_MEM_BALLOON;
#else
        req.guestStats.u32PhysMemBalloon = 0;
#endif

        /*
         * CPU statistics.
         * Walk the kstat chain and pick out one "cpu_stat" node per CPU.
         */
        cpu_stat_t StatCPU;
        RT_ZERO(StatCPU);
        kstat_t *pStatNode = NULL;
        uint32_t cCPUs = 0;
        bool fCpuInfoAvail = false;
        for (pStatNode = pStatKern->kc_chain; pStatNode != NULL; pStatNode = pStatNode->ks_next)
        {
            if (!strcmp(pStatNode->ks_module, "cpu_stat"))
            {
                rc = kstat_read(pStatKern, pStatNode, &StatCPU);
                if (rc == -1)
                    break;

                if (cCPUs < VMM_MAX_CPU_COUNT)
                {
                    uint64_t u64Idle = StatCPU.cpu_sysinfo.cpu[CPU_IDLE];
                    uint64_t u64User = StatCPU.cpu_sysinfo.cpu[CPU_USER];
                    uint64_t u64System = StatCPU.cpu_sysinfo.cpu[CPU_KERNEL];

                    /* Deltas against the previous round's samples. */
                    uint64_t u64DeltaIdle = u64Idle - g_VMStat.au64LastCpuLoad_Idle[cCPUs];
                    uint64_t u64DeltaSystem = u64System - g_VMStat.au64LastCpuLoad_Kernel[cCPUs];
                    uint64_t u64DeltaUser = u64User - g_VMStat.au64LastCpuLoad_User[cCPUs];

                    uint64_t u64DeltaAll = u64DeltaIdle + u64DeltaSystem + u64DeltaUser;
                    if (u64DeltaAll == 0) /* Prevent division through zero. */
                        u64DeltaAll = 1;

                    g_VMStat.au64LastCpuLoad_Idle[cCPUs] = u64Idle;
                    g_VMStat.au64LastCpuLoad_Kernel[cCPUs] = u64System;
                    g_VMStat.au64LastCpuLoad_User[cCPUs] = u64User;

                    req.guestStats.u32CpuId = cCPUs;
                    req.guestStats.u32CpuLoad_Idle = (uint32_t)(u64DeltaIdle * 100 / u64DeltaAll);
                    req.guestStats.u32CpuLoad_Kernel = (uint32_t)(u64DeltaSystem * 100 / u64DeltaAll);
                    req.guestStats.u32CpuLoad_User = (uint32_t)(u64DeltaUser * 100 / u64DeltaAll);

                    req.guestStats.u32StatCaps |= VBOX_GUEST_STAT_CPU_LOAD_IDLE
                                                | VBOX_GUEST_STAT_CPU_LOAD_KERNEL
                                                | VBOX_GUEST_STAT_CPU_LOAD_USER;
                    fCpuInfoAvail = true;
                    rc = VbglR3StatReport(&req);
                    if (RT_SUCCESS(rc))
                        VGSvcVerbose(3, "vgsvcVMStatsReport: new statistics (CPU %u) reported successfully!\n", cCPUs);
                    else
                        VGSvcVerbose(3, "vgsvcVMStatsReport: stats report failed with rc=%Rrc\n", rc);
                    cCPUs++;
                }
                else
                    VGSvcVerbose(3, "vgsvcVMStatsReport: skipping information for CPU%u\n", cCPUs);
            }
        }

        /*
         * Report whatever statistics were collected.
         */
        if (!fCpuInfoAvail)
        {
            VGSvcVerbose(3, "vgsvcVMStatsReport: CPU info not available!\n");
            rc = VbglR3StatReport(&req);
            if (RT_SUCCESS(rc))
                VGSvcVerbose(3, "vgsvcVMStatsReport: new statistics reported successfully!\n");
            else
                VGSvcVerbose(3, "vgsvcVMStatsReport: stats report failed with rc=%Rrc\n", rc);
        }

        kstat_close(pStatKern);
    }

#else
    /** @todo implement for other platforms. */

#endif
}
619
620
621/**
622 * @interface_method_impl{VBOXSERVICE,pfnWorker}
623 */
624DECLCALLBACK(int) vgsvcVMStatsWorker(bool volatile *pfShutdown)
625{
626 int rc = VINF_SUCCESS;
627
628 /* Start monitoring of the stat event change event. */
629 rc = VbglR3CtlFilterMask(VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST, 0);
630 if (RT_FAILURE(rc))
631 {
632 VGSvcVerbose(3, "vgsvcVMStatsWorker: VbglR3CtlFilterMask failed with %d\n", rc);
633 return rc;
634 }
635
636 /*
637 * Tell the control thread that it can continue
638 * spawning services.
639 */
640 RTThreadUserSignal(RTThreadSelf());
641
642 /*
643 * Now enter the loop retrieving runtime data continuously.
644 */
645 for (;;)
646 {
647 uint32_t fEvents = 0;
648 RTMSINTERVAL cWaitMillies;
649
650 /* Check if an update interval change is pending. */
651 rc = VbglR3WaitEvent(VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST, 0 /* no wait */, &fEvents);
652 if ( RT_SUCCESS(rc)
653 && (fEvents & VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST))
654 VbglR3StatQueryInterval(&g_VMStat.cMsStatInterval);
655
656 if (g_VMStat.cMsStatInterval)
657 {
658 vgsvcVMStatsReport();
659 cWaitMillies = g_VMStat.cMsStatInterval;
660 }
661 else
662 cWaitMillies = 3000;
663
664 /*
665 * Block for a while.
666 *
667 * The event semaphore takes care of ignoring interruptions and it
668 * allows us to implement service wakeup later.
669 */
670 if (*pfShutdown)
671 break;
672 int rc2 = RTSemEventMultiWait(g_VMStatEvent, cWaitMillies);
673 if (*pfShutdown)
674 break;
675 if (rc2 != VERR_TIMEOUT && RT_FAILURE(rc2))
676 {
677 VGSvcError("vgsvcVMStatsWorker: RTSemEventMultiWait failed; rc2=%Rrc\n", rc2);
678 rc = rc2;
679 break;
680 }
681 }
682
683 /* Cancel monitoring of the stat event change event. */
684 rc = VbglR3CtlFilterMask(0, VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST);
685 if (RT_FAILURE(rc))
686 VGSvcVerbose(3, "vgsvcVMStatsWorker: VbglR3CtlFilterMask failed with %d\n", rc);
687
688 VGSvcVerbose(3, "VBoxStatsThread: finished statistics change request thread\n");
689 return 0;
690}
691
692
/**
 * @interface_method_impl{VBOXSERVICE,pfnStop}
 */
static DECLCALLBACK(void) vgsvcVMStatsStop(void)
{
    /* Wake the worker thread out of RTSemEventMultiWait so it notices the
       shutdown flag promptly. */
    RTSemEventMultiSignal(g_VMStatEvent);
}
700
701
702/**
703 * @interface_method_impl{VBOXSERVICE,pfnTerm}
704 */
705static DECLCALLBACK(void) vgsvcVMStatsTerm(void)
706{
707 if (g_VMStatEvent != NIL_RTSEMEVENTMULTI)
708 {
709 RTSemEventMultiDestroy(g_VMStatEvent);
710 g_VMStatEvent = NIL_RTSEMEVENTMULTI;
711 }
712}
713
714
/**
 * The 'vmstats' service description.
 */
VBOXSERVICE g_VMStatistics =
{
    /* pszName. */
    "vmstats",
    /* pszDescription. */
    "Virtual Machine Statistics",
    /* pszUsage. */
    NULL,
    /* pszOptions. */
    NULL,
    /* methods */
    VGSvcDefaultPreInit,
    VGSvcDefaultOption,
    vgsvcVMStatsInit,
    vgsvcVMStatsWorker,
    vgsvcVMStatsStop,
    vgsvcVMStatsTerm
};
736
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette