VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@17522

Last change on this file since 17522 was 16575, checked in by vboxsync, 16 years ago

VBOX_WITH_PGMPOOL_PAGING_ONLY: update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 57.0 KB
1/* $Id: GVMMR0.cpp 16575 2009-02-09 10:28:03Z vboxsync $ */
2/** @file
3 * GVMM - Global VM Manager.
4 */
5
6/*
7 * Copyright (C) 2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/** @page pg_gvmm GVMM - The Global VM Manager
24 *
25 * The Global VM Manager lives in ring-0. Its main function at the moment
26 * is to manage a list of all running VMs, keep a ring-0 only structure (GVM)
27 * for each of them, and assign them unique identifiers (so GMM can track
28 * page owners). The idea for the future is to add an idle priority kernel
29 * thread that can take care of tasks like page sharing.
30 *
31 * The GVMM will create a ring-0 object for each VM when it's registered;
32 * this is both for session cleanup purposes and for having a point where
33 * it's possible to implement usage policies later (in SUPR0ObjRegister).
34 */
35
36
37/*******************************************************************************
38* Header Files *
39*******************************************************************************/
40#define LOG_GROUP LOG_GROUP_GVMM
41#include <VBox/gvmm.h>
42#include "GVMMR0Internal.h"
43#include <VBox/gvm.h>
44#include <VBox/vm.h>
45#include <VBox/vmm.h>
46#include <VBox/err.h>
47#include <iprt/alloc.h>
48#include <iprt/semaphore.h>
49#include <iprt/time.h>
50#include <VBox/log.h>
51#include <iprt/thread.h>
52#include <iprt/param.h>
53#include <iprt/string.h>
54#include <iprt/assert.h>
55#include <iprt/mem.h>
56#include <iprt/memobj.h>
57
58
59/*******************************************************************************
60* Structures and Typedefs *
61*******************************************************************************/
62
63/**
64 * Global VM handle.
65 */
66typedef struct GVMHANDLE
67{
68 /** The index of the next handle in the list (free or used). (0 is nil.) */
69 uint16_t volatile iNext;
70 /** Our own index / handle value. */
71 uint16_t iSelf;
72 /** The pointer to the ring-0 only (aka global) VM structure. */
73 PGVM pGVM;
74 /** The ring-0 mapping of the shared VM instance data. */
75 PVM pVM;
76 /** The virtual machine object. */
77 void *pvObj;
78 /** The session this VM is associated with. */
79 PSUPDRVSESSION pSession;
80 /** The ring-0 handle of the EMT thread.
81 * This is used for assertions and similar cases where we need to find the VM handle. */
82 RTNATIVETHREAD hEMT;
83} GVMHANDLE;
84/** Pointer to a global VM handle. */
85typedef GVMHANDLE *PGVMHANDLE;
86
87/** Number of GVM handles (including the NIL handle). */
88#if HC_ARCH_BITS == 64
89# define GVMM_MAX_HANDLES 1024
90#else
91# define GVMM_MAX_HANDLES 128
92#endif
93
94/**
95 * The GVMM instance data.
96 */
97typedef struct GVMM
98{
99 /** Eyecatcher / magic. */
100 uint32_t u32Magic;
101 /** The index of the head of the free handle chain. (0 is nil.) */
102 uint16_t volatile iFreeHead;
103 /** The index of the head of the active handle chain. (0 is nil.) */
104 uint16_t volatile iUsedHead;
105 /** The number of VMs. */
106 uint16_t volatile cVMs;
107// /** The number of halted EMT threads. */
108// uint16_t volatile cHaltedEMTs;
109 /** The lock used to serialize VM creation, destruction and associated events that
110 * aren't performance critical. Owners may acquire the list lock. */
111 RTSEMFASTMUTEX CreateDestroyLock;
112 /** The lock used to serialize used list updates and accesses.
113 * This indirectly includes scheduling since the scheduler will have to walk the
114 * used list to examine running VMs. Owners may not acquire any other locks. */
115 RTSEMFASTMUTEX UsedLock;
116 /** The handle array.
117 * The size of this array defines the maximum number of currently running VMs.
118 * The first entry is unused as it represents the NIL handle. */
119 GVMHANDLE aHandles[GVMM_MAX_HANDLES];
120
121 /** @gcfgm{/GVMM/cVMsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
122 * The number of VMs that means we no longer consider ourselves alone on a CPU/Core.
123 */
124 uint32_t cVMsMeansCompany;
125 /** @gcfgm{/GVMM/MinSleepAlone, 32-bit, 0, 100000000, 750000, ns}
126 * The minimum sleep time for when we're alone, in nanoseconds.
127 */
128 uint32_t nsMinSleepAlone;
129 /** @gcfgm{/GVMM/MinSleepCompany, 32-bit, 0, 100000000, 15000, ns}
130 * The minimum sleep time for when we've got company, in nanoseconds.
131 */
132 uint32_t nsMinSleepCompany;
133 /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
134 * The limit for the first round of early wakeups, given in nanoseconds.
135 */
136 uint32_t nsEarlyWakeUp1;
137 /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
138 * The limit for the second round of early wakeups, given in nanoseconds.
139 */
140 uint32_t nsEarlyWakeUp2;
141} GVMM;
142/** Pointer to the GVMM instance data. */
143typedef GVMM *PGVMM;
144
145/** The GVMM::u32Magic value (Charlie Haden). */
146#define GVMM_MAGIC 0x19370806
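
/* A minimal sketch (not part of the original file) of how the index-linked
 * handle chains above are meant to be traversed. Both the free and the used
 * list are threaded through aHandles[] via iNext, with 0 acting as the nil
 * index; the used list must only be walked while holding UsedLock. */
#if 0 /* illustration only */
static void exampleWalkUsedList(PGVMM pGVMM)
{
    /* The caller is assumed to own pGVMM->UsedLock (see gvmmR0UsedLock below). */
    for (uint16_t i = pGVMM->iUsedHead; i != 0; i = pGVMM->aHandles[i].iNext)
        SUPR0Printf("GVM: handle %u pGVM=%p\n", i, pGVMM->aHandles[i].pGVM);
}
#endif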
147
148
149
150/*******************************************************************************
151* Global Variables *
152*******************************************************************************/
153/** Pointer to the GVMM instance data.
154 * (Just my general dislike for global variables.) */
155static PGVMM g_pGVMM = NULL;
156
157/** Macro for obtaining and validating the g_pGVMM pointer.
158 * On failure it will return from the invoking function with the specified return value.
159 *
160 * @param pGVMM The name of the pGVMM variable.
161 * @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
162 * VBox status codes.
163 */
164#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
165 do { \
166 (pGVMM) = g_pGVMM;\
167 AssertPtrReturn((pGVMM), (rc)); \
168 AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
169 } while (0)
170
171/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
172 * On failure it will return from the invoking function.
173 *
174 * @param pGVMM The name of the pGVMM variable.
175 */
176#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
177 do { \
178 (pGVMM) = g_pGVMM;\
179 AssertPtrReturnVoid((pGVMM)); \
180 AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
181 } while (0)
182
183
184/*******************************************************************************
185* Internal Functions *
186*******************************************************************************/
187static void gvmmR0InitPerVMData(PGVM pGVM);
188static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
189static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
190static int gvmmR0ByVMAndEMT(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM);
191
192
193/**
194 * Initializes the GVMM.
195 *
196 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
197 *
198 * @returns VBox status code.
199 */
200GVMMR0DECL(int) GVMMR0Init(void)
201{
202 LogFlow(("GVMMR0Init:\n"));
203
204 /*
205 * Allocate and initialize the instance data.
206 */
207 PGVMM pGVMM = (PGVMM)RTMemAllocZ(sizeof(*pGVMM));
208 if (!pGVMM)
209 return VERR_NO_MEMORY;
210 int rc = RTSemFastMutexCreate(&pGVMM->CreateDestroyLock);
211 if (RT_SUCCESS(rc))
212 {
213 rc = RTSemFastMutexCreate(&pGVMM->UsedLock);
214 if (RT_SUCCESS(rc))
215 {
216 pGVMM->u32Magic = GVMM_MAGIC;
217 pGVMM->iUsedHead = 0;
218 pGVMM->iFreeHead = 1;
219
220 /* the nil handle */
221 pGVMM->aHandles[0].iSelf = 0;
222 pGVMM->aHandles[0].iNext = 0;
223
224 /* the tail */
225 unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
226 pGVMM->aHandles[i].iSelf = i;
227 pGVMM->aHandles[i].iNext = 0; /* nil */
228
229 /* the rest */
230 while (i-- > 1)
231 {
232 pGVMM->aHandles[i].iSelf = i;
233 pGVMM->aHandles[i].iNext = i + 1;
234 }
235
236 /* The default configuration values. */
237 pGVMM->cVMsMeansCompany = 1; /** @todo should be adjusted relative to the cpu count or something... */
238 pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
239 pGVMM->nsMinSleepCompany = 15000 /* ns (0.015 ms) */;
240 pGVMM->nsEarlyWakeUp1 = 25000 /* ns (0.025 ms) */;
241 pGVMM->nsEarlyWakeUp2 = 50000 /* ns (0.050 ms) */;
242
243 g_pGVMM = pGVMM;
244 LogFlow(("GVMMR0Init: pGVMM=%p\n", pGVMM));
245 return VINF_SUCCESS;
246 }
247
248 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
249 }
250
251 RTMemFree(pGVMM);
252 return rc;
253}
254
255
256/**
257 * Terminates the GVM.
258 *
259 * This is called while owning the loader semaphore (see supdrvLdrFree()).
260 * And unless something is wrong, there should be absolutely no VMs
261 * registered at this point.
262 */
263GVMMR0DECL(void) GVMMR0Term(void)
264{
265 LogFlow(("GVMMR0Term:\n"));
266
267 PGVMM pGVMM = g_pGVMM;
268 g_pGVMM = NULL;
269 if (RT_UNLIKELY(!VALID_PTR(pGVMM)))
270 {
271 SUPR0Printf("GVMMR0Term: pGVMM=%p\n", pGVMM);
272 return;
273 }
274
275 pGVMM->u32Magic++;
276
277 RTSemFastMutexDestroy(pGVMM->UsedLock);
278 pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
279 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
280 pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
281
282 pGVMM->iFreeHead = 0;
283 if (pGVMM->iUsedHead)
284 {
285 SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs);
286 pGVMM->iUsedHead = 0;
287 }
288
289 RTMemFree(pGVMM);
290}
291
292
293/**
294 * A quick hack for setting global config values.
295 *
296 * @returns VBox status code.
297 *
298 * @param pSession The session handle. Used for authentication.
299 * @param pszName The variable name.
300 * @param u64Value The new value.
301 */
302GVMMR0DECL(int) GVMMR0SetConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t u64Value)
303{
304 /*
305 * Validate input.
306 */
307 PGVMM pGVMM;
308 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
309 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
310 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
311
312 /*
313 * String switch time!
314 */
315 if (strncmp(pszName, "/GVMM/", sizeof("/GVMM/") - 1))
316 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
317 int rc = VINF_SUCCESS;
318 pszName += sizeof("/GVMM/") - 1;
319 if (!strcmp(pszName, "cVMsMeansCompany"))
320 {
321 if (u64Value <= UINT32_MAX)
322 pGVMM->cVMsMeansCompany = u64Value;
323 else
324 rc = VERR_OUT_OF_RANGE;
325 }
326 else if (!strcmp(pszName, "MinSleepAlone"))
327 {
328 if (u64Value <= 100000000)
329 pGVMM->nsMinSleepAlone = u64Value;
330 else
331 rc = VERR_OUT_OF_RANGE;
332 }
333 else if (!strcmp(pszName, "MinSleepCompany"))
334 {
335 if (u64Value <= 100000000)
336 pGVMM->nsMinSleepCompany = u64Value;
337 else
338 rc = VERR_OUT_OF_RANGE;
339 }
340 else if (!strcmp(pszName, "EarlyWakeUp1"))
341 {
342 if (u64Value <= 100000000)
343 pGVMM->nsEarlyWakeUp1 = u64Value;
344 else
345 rc = VERR_OUT_OF_RANGE;
346 }
347 else if (!strcmp(pszName, "EarlyWakeUp2"))
348 {
349 if (u64Value <= 100000000)
350 pGVMM->nsEarlyWakeUp2 = u64Value;
351 else
352 rc = VERR_OUT_OF_RANGE;
353 }
354 else
355 rc = VERR_CFGM_VALUE_NOT_FOUND;
356 return rc;
357}
358
359
360/**
361 * A quick hack for getting global config values.
362 *
363 * @returns VBox status code.
364 *
365 * @param pSession The session handle. Used for authentication.
366 * @param pszName The variable name.
367 * @param pu64Value Where to store the value.
368 */
369GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
370{
371 /*
372 * Validate input.
373 */
374 PGVMM pGVMM;
375 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
376 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
377 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
378 AssertPtrReturn(pu64Value, VERR_INVALID_POINTER);
379
380 /*
381 * String switch time!
382 */
383 if (strncmp(pszName, "/GVMM/", sizeof("/GVMM/") - 1))
384 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
385 int rc = VINF_SUCCESS;
386 pszName += sizeof("/GVMM/") - 1;
387 if (!strcmp(pszName, "cVMsMeansCompany"))
388 *pu64Value = pGVMM->cVMsMeansCompany;
389 else if (!strcmp(pszName, "MinSleepAlone"))
390 *pu64Value = pGVMM->nsMinSleepAlone;
391 else if (!strcmp(pszName, "MinSleepCompany"))
392 *pu64Value = pGVMM->nsMinSleepCompany;
393 else if (!strcmp(pszName, "EarlyWakeUp1"))
394 *pu64Value = pGVMM->nsEarlyWakeUp1;
395 else if (!strcmp(pszName, "EarlyWakeUp2"))
396 *pu64Value = pGVMM->nsEarlyWakeUp2;
397 else
398 rc = VERR_CFGM_VALUE_NOT_FOUND;
399 return rc;
400}
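
/* A minimal caller sketch, assuming a valid support driver session; the helper
 * name is hypothetical and not part of this file. It shows the /GVMM/ name
 * convention that GVMMR0SetConfig and GVMMR0QueryConfig above expect, with
 * values given in nanoseconds. */
#if 0 /* illustration only */
static int exampleTuneScheduler(PSUPDRVSESSION pSession)
{
    /* Lower the solo minimum sleep from the 750000 ns default to 500000 ns. */
    int rc = GVMMR0SetConfig(pSession, "/GVMM/MinSleepAlone", 500000);
    if (RT_SUCCESS(rc))
    {
        uint64_t u64Value = 0;
        rc = GVMMR0QueryConfig(pSession, "/GVMM/MinSleepAlone", &u64Value);
        Assert(RT_FAILURE(rc) || u64Value == 500000);
    }
    return rc;
}
#endif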
401
402
403/**
404 * Try to acquire the 'used' lock.
405 *
406 * @returns IPRT status code, see RTSemFastMutexRequest.
407 * @param pGVMM The GVMM instance data.
408 */
409DECLINLINE(int) gvmmR0UsedLock(PGVMM pGVMM)
410{
411 LogFlow(("++gvmmR0UsedLock(%p)\n", pGVMM));
412 int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
413 LogFlow(("gvmmR0UsedLock(%p)->%Rrc\n", pGVMM, rc));
414 return rc;
415}
416
417
418/**
419 * Release the 'used' lock.
420 *
421 * @returns IPRT status code, see RTSemFastMutexRelease.
422 * @param pGVMM The GVMM instance data.
423 */
424DECLINLINE(int) gvmmR0UsedUnlock(PGVMM pGVMM)
425{
426 LogFlow(("--gvmmR0UsedUnlock(%p)\n", pGVMM));
427 int rc = RTSemFastMutexRelease(pGVMM->UsedLock);
428 AssertRC(rc);
429 return rc;
430}
431
432
433/**
434 * Try to acquire the 'create & destroy' lock.
435 *
436 * @returns IPRT status code, see RTSemFastMutexRequest.
437 * @param pGVMM The GVMM instance data.
438 */
439DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
440{
441 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
442 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
443 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
444 return rc;
445}
446
447
448/**
449 * Release the 'create & destroy' lock.
450 *
451 * @returns IPRT status code, see RTSemFastMutexRelease.
452 * @param pGVMM The GVMM instance data.
453 */
454DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
455{
456 LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
457 int rc = RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
458 AssertRC(rc);
459 return rc;
460}
461
462
463/**
464 * Request wrapper for the GVMMR0CreateVM API.
465 *
466 * @returns VBox status code.
467 * @param pReq The request buffer.
468 */
469GVMMR0DECL(int) GVMMR0CreateVMReq(PGVMMCREATEVMREQ pReq)
470{
471 /*
472 * Validate the request.
473 */
474 if (!VALID_PTR(pReq))
475 return VERR_INVALID_POINTER;
476 if (pReq->Hdr.cbReq != sizeof(*pReq))
477 return VERR_INVALID_PARAMETER;
478 if (!VALID_PTR(pReq->pSession))
479 return VERR_INVALID_POINTER;
480
481 /*
482 * Execute it.
483 */
484 PVM pVM;
485 pReq->pVMR0 = NULL;
486 pReq->pVMR3 = NIL_RTR3PTR;
487 int rc = GVMMR0CreateVM(pReq->pSession, pReq->cCPUs, &pVM);
488 if (RT_SUCCESS(rc))
489 {
490 pReq->pVMR0 = pVM;
491 pReq->pVMR3 = pVM->pVMR3;
492 }
493 return rc;
494}
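
/* A hypothetical sketch (not in this file) of how a caller might fill in
 * GVMMCREATEVMREQ before handing it to GVMMR0CreateVMReq. Only the fields
 * validated or written above (Hdr.cbReq, pSession, cCPUs, pVMR0, pVMR3) are
 * assumed; the header may carry further fields set up by the ioctl path. */
#if 0 /* illustration only */
static int exampleCreateVM(PSUPDRVSESSION pSession, uint32_t cCPUs, PVM *ppVM)
{
    GVMMCREATEVMREQ Req;
    Req.Hdr.cbReq = sizeof(Req);     /* checked against sizeof(*pReq) above */
    Req.pSession  = pSession;
    Req.cCPUs     = cCPUs;
    Req.pVMR0     = NULL;            /* output: ring-0 VM pointer */
    Req.pVMR3     = NIL_RTR3PTR;     /* output: ring-3 mapping */
    int rc = GVMMR0CreateVMReq(&Req);   /* normally reached via the VMMR0 ioctl path */
    if (RT_SUCCESS(rc))
        *ppVM = Req.pVMR0;
    return rc;
}
#endif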
495
496
497/**
498 * Allocates the VM structure and registers it with GVM.
499 *
500 * The caller will become the VM owner and thereby the EMT.
501 *
502 * @returns VBox status code.
503 * @param pSession The support driver session.
504 * @param cCPUs Number of virtual CPUs for the new VM.
505 * @param ppVM Where to store the pointer to the VM structure.
506 *
507 * @thread EMT.
508 */
509GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCPUs, PVM *ppVM)
510{
511 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
512 PGVMM pGVMM;
513 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
514
515 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
516 *ppVM = NULL;
517
518 if ( cCPUs == 0
519 || cCPUs > VMCPU_MAX_CPU_COUNT)
520 return VERR_INVALID_PARAMETER;
521
522 RTNATIVETHREAD hEMT = RTThreadNativeSelf();
523 AssertReturn(hEMT != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR);
524
525 /*
526 * The whole allocation process is protected by the lock.
527 */
528 int rc = gvmmR0CreateDestroyLock(pGVMM);
529 AssertRCReturn(rc, rc);
530
531 /*
532 * Allocate a handle first so we don't waste resources unnecessarily.
533 */
534 uint16_t iHandle = pGVMM->iFreeHead;
535 if (iHandle)
536 {
537 PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
538
539 /* consistency checks, a bit paranoid as always. */
540 if ( !pHandle->pVM
541 && !pHandle->pGVM
542 && !pHandle->pvObj
543 && pHandle->iSelf == iHandle)
544 {
545 pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
546 if (pHandle->pvObj)
547 {
548 /*
549 * Move the handle from the free to used list and perform permission checks.
550 */
551 rc = gvmmR0UsedLock(pGVMM);
552 AssertRC(rc);
553
554 pGVMM->iFreeHead = pHandle->iNext;
555 pHandle->iNext = pGVMM->iUsedHead;
556 pGVMM->iUsedHead = iHandle;
557 pGVMM->cVMs++;
558
559 pHandle->pVM = NULL;
560 pHandle->pGVM = NULL;
561 pHandle->pSession = pSession;
562 pHandle->hEMT = NIL_RTNATIVETHREAD;
563
564 gvmmR0UsedUnlock(pGVMM);
565
566 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
567 if (RT_SUCCESS(rc))
568 {
569 /*
570 * Allocate the global VM structure (GVM) and initialize it.
571 */
572 PGVM pGVM = (PGVM)RTMemAllocZ(sizeof(*pGVM));
573 if (pGVM)
574 {
575 pGVM->u32Magic = GVM_MAGIC;
576 pGVM->hSelf = iHandle;
577 pGVM->hEMT = NIL_RTNATIVETHREAD;
578 pGVM->pVM = NULL;
579
580 gvmmR0InitPerVMData(pGVM);
581 /* GMMR0InitPerVMData(pGVM); - later */
582
583 /*
584 * Allocate the shared VM structure and associated page array.
585 */
586 const size_t cbVM = RT_UOFFSETOF(VM, aCpus[cCPUs]);
587 const size_t cPages = RT_ALIGN(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
588 rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
589 if (RT_SUCCESS(rc))
590 {
591 PVM pVM = (PVM)RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj); AssertPtr(pVM);
592 memset(pVM, 0, cPages << PAGE_SHIFT);
593 pVM->enmVMState = VMSTATE_CREATING;
594 pVM->pVMR0 = pVM;
595 pVM->pSession = pSession;
596 pVM->hSelf = iHandle;
597 pVM->cbSelf = cbVM;
598 pVM->cCPUs = cCPUs;
599 pVM->offVMCPU = RT_UOFFSETOF(VM, aCpus);
600
601 rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
602 if (RT_SUCCESS(rc))
603 {
604 PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
605 for (size_t iPage = 0; iPage < cPages; iPage++)
606 {
607 paPages[iPage].uReserved = 0;
608 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
609 Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
610 }
611
612 /*
613 * Map them into ring-3.
614 */
615 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
616 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
617 if (RT_SUCCESS(rc))
618 {
619 pVM->pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
620 AssertPtr((void *)pVM->pVMR3);
621
622 /* Initialize all the VM pointers. */
623 for (uint32_t i = 0; i < cCPUs; i++)
624 {
625 pVM->aCpus[i].pVMR0 = pVM;
626 pVM->aCpus[i].pVMR3 = pVM->pVMR3;
627 }
628
629 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1, 0,
630 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
631 if (RT_SUCCESS(rc))
632 {
633 pVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
634 AssertPtr((void *)pVM->paVMPagesR3);
635
636 /* complete the handle - take the UsedLock sem just to be careful. */
637 rc = gvmmR0UsedLock(pGVMM);
638 AssertRC(rc);
639
640 pHandle->pVM = pVM;
641 pHandle->pGVM = pGVM;
642 pHandle->hEMT = hEMT;
643 pGVM->pVM = pVM;
644 pGVM->hEMT = hEMT;
645
646 gvmmR0UsedUnlock(pGVMM);
647 gvmmR0CreateDestroyUnlock(pGVMM);
648
649 *ppVM = pVM;
650 Log(("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVM->pVMR3, pGVM, iHandle));
651 return VINF_SUCCESS;
652 }
653
654 RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
655 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
656 }
657 RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */);
658 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
659 }
660 RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */);
661 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
662 }
663 }
664 }
665 /* else: The user wasn't permitted to create this VM. */
666
667 /*
668 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
669 * object reference here. A little extra mess because of non-recursive lock.
670 */
671 void *pvObj = pHandle->pvObj;
672 pHandle->pvObj = NULL;
673 gvmmR0CreateDestroyUnlock(pGVMM);
674
675 SUPR0ObjRelease(pvObj, pSession);
676
677 SUPR0Printf("GVMMR0CreateVM: failed, rc=%d\n", rc);
678 return rc;
679 }
680
681 rc = VERR_NO_MEMORY;
682 }
683 else
684 rc = VERR_INTERNAL_ERROR;
685 }
686 else
687 rc = VERR_GVM_TOO_MANY_VMS;
688
689 gvmmR0CreateDestroyUnlock(pGVMM);
690 return rc;
691}
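
/* A worked example of the allocation size math in GVMMR0CreateVM above; the
 * concrete sizes are illustrative assumptions, not taken from the headers.
 * With PAGE_SIZE = 4096, a VM core of 8192 bytes and sizeof(VMCPU) = 4096:
 *   cbVM   = RT_UOFFSETOF(VM, aCpus[2]) = 8192 + 2 * 4096 = 16384
 *   cPages = RT_ALIGN(16384, PAGE_SIZE) >> PAGE_SHIFT     = 4
 * so four low physical pages back the shared VM structure, and the SUPPAGE
 * array gets one physical-address entry per page. */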
692
693
694/**
695 * Initializes the per VM data belonging to GVMM.
696 *
697 * @param pGVM Pointer to the global VM structure.
698 */
699static void gvmmR0InitPerVMData(PGVM pGVM)
700{
701 AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
702 Assert(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
703 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
704 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
705 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
706 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
707 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
708 pGVM->gvmm.s.fDoneVMMR0Init = false;
709 pGVM->gvmm.s.fDoneVMMR0Term = false;
710}
711
712
713/**
714 * Does the VM initialization.
715 *
716 * @returns VBox status code.
717 * @param pVM Pointer to the shared VM structure.
718 */
719GVMMR0DECL(int) GVMMR0InitVM(PVM pVM)
720{
721 LogFlow(("GVMMR0InitVM: pVM=%p\n", pVM));
722
723 /*
724 * Validate the VM structure, state and handle.
725 */
726 PGVM pGVM;
727 PGVMM pGVMM;
728 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
729 if (RT_SUCCESS(rc))
730 {
731 if ( !pGVM->gvmm.s.fDoneVMMR0Init
732 && pGVM->gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
733 {
734 rc = RTSemEventMultiCreate(&pGVM->gvmm.s.HaltEventMulti);
735 if (RT_FAILURE(rc))
736 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
737 }
738 else
739 rc = VERR_WRONG_ORDER;
740 }
741
742 LogFlow(("GVMMR0InitVM: returns %Rrc\n", rc));
743 return rc;
744}
745
746
747/**
748 * Indicates that we're done with the ring-0 initialization
749 * of the VM.
750 *
751 * @param pVM Pointer to the shared VM structure.
752 */
753GVMMR0DECL(void) GVMMR0DoneInitVM(PVM pVM)
754{
755 /* Validate the VM structure, state and handle. */
756 PGVM pGVM;
757 PGVMM pGVMM;
758 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
759 AssertRCReturnVoid(rc);
760
761 /* Set the indicator. */
762 pGVM->gvmm.s.fDoneVMMR0Init = true;
763}
764
765
766/**
767 * Indicates that we're doing the ring-0 termination of the VM.
768 *
769 * @returns true if termination hasn't been done already, false if it has.
770 * @param pVM Pointer to the shared VM structure.
771 * @param pGVM Pointer to the global VM structure. Optional.
772 */
773GVMMR0DECL(bool) GVMMR0DoingTermVM(PVM pVM, PGVM pGVM)
774{
775 /* Validate the VM structure, state and handle. */
776 AssertPtrNullReturn(pGVM, false);
777 AssertReturn(!pGVM || pGVM->u32Magic == GVM_MAGIC, false);
778 if (!pGVM)
779 {
780 PGVMM pGVMM;
781 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
782 AssertRCReturn(rc, false);
783 }
784
785 /* Set the indicator. */
786 if (pGVM->gvmm.s.fDoneVMMR0Term)
787 return false;
788 pGVM->gvmm.s.fDoneVMMR0Term = true;
789 return true;
790}
791
792
793/**
794 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
795 *
796 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
797 * and the caller is not the EMT thread, unfortunately. For security reasons, it
798 * would've been nice if the caller were actually the EMT thread or if we somehow
799 * could've associated the calling thread with the VM up front.
800 *
801 * @returns VBox status code.
802 * @param pVM Pointer to the shared VM structure.
803 *
804 * @thread EMT if it's associated with the VM, otherwise any thread.
805 */
806GVMMR0DECL(int) GVMMR0DestroyVM(PVM pVM)
807{
808 LogFlow(("GVMMR0DestroyVM: pVM=%p\n", pVM));
809 PGVMM pGVMM;
810 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
811
812
813 /*
814 * Validate the VM structure, state and caller.
815 */
816 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
817 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
818 AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
819
820 uint32_t hGVM = pVM->hSelf;
821 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
822 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
823
824 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
825 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
826
827 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
828 AssertReturn(pHandle->hEMT == hSelf || pHandle->hEMT == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);
829
830 /*
831 * Lookup the handle and destroy the object.
832 * Since the lock isn't recursive and we'll have to leave it before dereferencing the
833 * object, we take some precautions against racing callers just in case...
834 */
835 int rc = gvmmR0CreateDestroyLock(pGVMM);
836 AssertRC(rc);
837
838 /* be careful here because we might theoretically be racing someone else cleaning up. */
839 if ( pHandle->pVM == pVM
840 && ( pHandle->hEMT == hSelf
841 || pHandle->hEMT == NIL_RTNATIVETHREAD)
842 && VALID_PTR(pHandle->pvObj)
843 && VALID_PTR(pHandle->pSession)
844 && VALID_PTR(pHandle->pGVM)
845 && pHandle->pGVM->u32Magic == GVM_MAGIC)
846 {
847 void *pvObj = pHandle->pvObj;
848 pHandle->pvObj = NULL;
849 gvmmR0CreateDestroyUnlock(pGVMM);
850
851 SUPR0ObjRelease(pvObj, pHandle->pSession);
852 }
853 else
854 {
855 SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, hEMT=%p, .pvObj=%p} pVM=%p hSelf=%p\n",
856 pHandle, pHandle->pVM, pHandle->hEMT, pHandle->pvObj, pVM, hSelf);
857 gvmmR0CreateDestroyUnlock(pGVMM);
858 rc = VERR_INTERNAL_ERROR;
859 }
860
861 return rc;
862}
863
864
865/**
866 * Performs VM cleanup tasks as part of object destruction.
867 *
868 * @param pGVM The GVM pointer.
869 */
870static void gmmR0CleanupVM(PGVM pGVM)
871{
872 if ( pGVM->gvmm.s.fDoneVMMR0Init
873 && !pGVM->gvmm.s.fDoneVMMR0Term)
874 {
875 if ( pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ
876 && RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM->pVM)
877 {
878 LogFlow(("gmmR0CleanupVM: Calling VMMR0TermVM\n"));
879 VMMR0TermVM(pGVM->pVM, pGVM);
880 }
881 else
882 AssertMsgFailed(("gmmR0CleanupVM: VMMemObj=%p pVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM->pVM));
883 }
884}
885
886
887/**
888 * Handle destructor.
889 *
 * @param pvObj The object pointer.
890 * @param pvGVMM The GVM instance pointer.
891 * @param pvHandle The handle pointer.
892 */
893static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle)
894{
895 LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvGVMM, pvHandle));
896
897 /*
898 * Some quick, paranoid, input validation.
899 */
900 PGVMHANDLE pHandle = (PGVMHANDLE)pvHandle;
901 AssertPtr(pHandle);
902 PGVMM pGVMM = (PGVMM)pvGVMM;
903 Assert(pGVMM == g_pGVMM);
904 const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
905 if ( !iHandle
906 || iHandle >= RT_ELEMENTS(pGVMM->aHandles)
907 || iHandle != pHandle->iSelf)
908 {
909 SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
910 return;
911 }
912
913 int rc = gvmmR0CreateDestroyLock(pGVMM);
914 AssertRC(rc);
915 rc = gvmmR0UsedLock(pGVMM);
916 AssertRC(rc);
917
918 /*
919 * This is a tad slow but a doubly linked list is too much hassle.
920 */
921 if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
922 {
923 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
924 gvmmR0UsedUnlock(pGVMM);
925 gvmmR0CreateDestroyUnlock(pGVMM);
926 return;
927 }
928
929 if (pGVMM->iUsedHead == iHandle)
930 pGVMM->iUsedHead = pHandle->iNext;
931 else
932 {
933 uint16_t iPrev = pGVMM->iUsedHead;
934 int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
935 while (iPrev)
936 {
937 if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
938 {
939 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
940 gvmmR0UsedUnlock(pGVMM);
941 gvmmR0CreateDestroyUnlock(pGVMM);
942 return;
943 }
944 if (RT_UNLIKELY(c-- <= 0))
945 {
946 iPrev = 0;
947 break;
948 }
949
950 if (pGVMM->aHandles[iPrev].iNext == iHandle)
951 break;
952 iPrev = pGVMM->aHandles[iPrev].iNext;
953 }
954 if (!iPrev)
955 {
956 SUPR0Printf("GVM: can't find the previous handle of %d!\n", pHandle->iSelf);
957 gvmmR0UsedUnlock(pGVMM);
958 gvmmR0CreateDestroyUnlock(pGVMM);
959 return;
960 }
961
962 Assert(pGVMM->aHandles[iPrev].iNext == iHandle);
963 pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
964 }
965 pHandle->iNext = 0;
966 pGVMM->cVMs--;
967
968 gvmmR0UsedUnlock(pGVMM);
969
970 /*
971 * Do the global cleanup round.
972 */
973 PGVM pGVM = pHandle->pGVM;
974 if ( VALID_PTR(pGVM)
975 && pGVM->u32Magic == GVM_MAGIC)
976 {
977 gmmR0CleanupVM(pGVM);
978
979 /*
980 * Do the GVMM cleanup - must be done last.
981 */
982 /* The VM and VM pages mappings/allocations. */
983 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
984 {
985 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
986 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
987 }
988
989 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
990 {
991 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
992 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
993 }
994
995 if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
996 {
997 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
998 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
999 }
1000
1001 if (pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ)
1002 {
1003 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */); AssertRC(rc);
1004 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
1005 }
1006
1007 if (pGVM->gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI)
1008 {
1009 rc = RTSemEventMultiDestroy(pGVM->gvmm.s.HaltEventMulti); AssertRC(rc);
1010 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1011 }
1012
1013 /* the GVM structure itself. */
1014 pGVM->u32Magic |= UINT32_C(0x80000000);
1015 RTMemFree(pGVM);
1016 }
1017 /* else: GVMMR0CreateVM cleanup. */
1018
1019 /*
1020 * Free the handle.
1021 * Reacquire the UsedLock here since we're updating handle fields.
1022 */
1023 rc = gvmmR0UsedLock(pGVMM);
1024 AssertRC(rc);
1025
1026 pHandle->iNext = pGVMM->iFreeHead;
1027 pGVMM->iFreeHead = iHandle;
1028 ASMAtomicXchgPtr((void * volatile *)&pHandle->pGVM, NULL);
1029 ASMAtomicXchgPtr((void * volatile *)&pHandle->pVM, NULL);
1030 ASMAtomicXchgPtr((void * volatile *)&pHandle->pvObj, NULL);
1031 ASMAtomicXchgPtr((void * volatile *)&pHandle->pSession, NULL);
1032 ASMAtomicXchgSize(&pHandle->hEMT, NIL_RTNATIVETHREAD);
1033
1034 gvmmR0UsedUnlock(pGVMM);
1035 gvmmR0CreateDestroyUnlock(pGVMM);
1036 LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
1037}
1038
1039
1040/**
1041 * Lookup a GVM structure by its handle.
1042 *
1043 * @returns The GVM pointer on success, NULL on failure.
1044 * @param hGVM The global VM handle. Asserts on bad handle.
1045 */
1046GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
1047{
1048 PGVMM pGVMM;
1049 GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
1050
1051 /*
1052 * Validate.
1053 */
1054 AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
1055 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
1056
1057 /*
1058 * Look it up.
1059 */
1060 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1061 AssertPtrReturn(pHandle->pVM, NULL);
1062 AssertPtrReturn(pHandle->pvObj, NULL);
1063 PGVM pGVM = pHandle->pGVM;
1064 AssertPtrReturn(pGVM, NULL);
1065 AssertReturn(pGVM->pVM == pHandle->pVM, NULL);
1066
1067 return pHandle->pGVM;
1068}
1069
1070
1071/**
1072 * Lookup a GVM structure by the shared VM structure.
1073 *
1074 * @returns VBox status code.
1075 * @param pVM The shared VM structure (the ring-0 mapping).
1076 * @param ppGVM Where to store the GVM pointer.
1077 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1078 * @param fTakeUsedLock Whether to take the used lock or not.
1079 * Be very careful if not taking the lock as it's possible that
1080 * the VM will disappear then.
1081 *
1082 * @remark This will not assert on an invalid pVM but try to return silently.
1083 */
1084static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
1085{
1086 PGVMM pGVMM;
1087 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1088
1089 /*
1090 * Validate.
1091 */
1092 if (RT_UNLIKELY( !VALID_PTR(pVM)
1093 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1094 return VERR_INVALID_POINTER;
1095 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1096 || pVM->enmVMState >= VMSTATE_TERMINATED))
1097 return VERR_INVALID_POINTER;
1098
1099 uint16_t hGVM = pVM->hSelf;
1100 if (RT_UNLIKELY( hGVM == NIL_GVM_HANDLE
1101 || hGVM >= RT_ELEMENTS(pGVMM->aHandles)))
1102 return VERR_INVALID_HANDLE;
1103
1104 /*
1105 * Look it up.
1106 */
1107 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1108 PGVM pGVM;
1109 if (fTakeUsedLock)
1110 {
1111 int rc = gvmmR0UsedLock(pGVMM);
1112 AssertRCReturn(rc, rc);
1113
1114 pGVM = pHandle->pGVM;
1115 if (RT_UNLIKELY( pHandle->pVM != pVM
1116 || !VALID_PTR(pHandle->pvObj)
1117 || !VALID_PTR(pGVM)
1118 || pGVM->pVM != pVM))
1119 {
1120 gvmmR0UsedUnlock(pGVMM);
1121 return VERR_INVALID_HANDLE;
1122 }
1123 }
1124 else
1125 {
1126 if (RT_UNLIKELY(pHandle->pVM != pVM))
1127 return VERR_INVALID_HANDLE;
1128 if (RT_UNLIKELY(!VALID_PTR(pHandle->pvObj)))
1129 return VERR_INVALID_HANDLE;
1130
1131 pGVM = pHandle->pGVM;
1132 if (RT_UNLIKELY(!VALID_PTR(pGVM)))
1133 return VERR_INVALID_HANDLE;
1134 if (RT_UNLIKELY(pGVM->pVM != pVM))
1135 return VERR_INVALID_HANDLE;
1136 }
1137
1138 *ppGVM = pGVM;
1139 *ppGVMM = pGVMM;
1140 return VINF_SUCCESS;
1141}
1142
1143
1144/**
1145 * Lookup a GVM structure by the shared VM structure.
1146 *
1147 * @returns The GVM pointer on success, NULL on failure.
1148 * @param pVM The shared VM structure (the ring-0 mapping).
1149 *
1150 * @remark This will not take the 'used' lock because it doesn't nest
1151 * and this function may be called while the lock is held.
1152 */
1153GVMMR0DECL(PGVM) GVMMR0ByVM(PVM pVM)
1154{
1155 PGVMM pGVMM;
1156 PGVM pGVM;
1157 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */);
1158 if (RT_SUCCESS(rc))
1159 return pGVM;
1160 AssertRC(rc);
1161 return NULL;
1162}
1163
1164
1165/**
1166 * Lookup a GVM structure by the shared VM structure
1167 * and ensure that the caller is the EMT thread.
1168 *
1169 * @returns VBox status code.
1170 * @param pVM The shared VM structure (the ring-0 mapping).
1171 * @param ppGVM Where to store the GVM pointer.
1172 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1173 * @thread EMT
1174 *
1175 * @remark This will assert in failure paths.
1176 */
1177static int gvmmR0ByVMAndEMT(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM)
1178{
1179 PGVMM pGVMM;
1180 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1181
1182 /*
1183 * Validate.
1184 */
1185 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1186 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1187
1188 uint16_t hGVM = pVM->hSelf;
1189 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
1190 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
1191
1192 /*
1193 * Look it up.
1194 */
1195 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1196 RTNATIVETHREAD hAllegedEMT = RTThreadNativeSelf();
1197 AssertMsgReturn(pHandle->hEMT == hAllegedEMT, ("hEMT %x hAllegedEMT %x\n", pHandle->hEMT, hAllegedEMT), VERR_NOT_OWNER);
1198 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
1199 AssertPtrReturn(pHandle->pvObj, VERR_INTERNAL_ERROR);
1200
1201 PGVM pGVM = pHandle->pGVM;
1202 AssertPtrReturn(pGVM, VERR_INTERNAL_ERROR);
1203 AssertReturn(pGVM->pVM == pVM, VERR_INTERNAL_ERROR);
1204 AssertReturn(pGVM->hEMT == hAllegedEMT, VERR_INTERNAL_ERROR);
1205
1206 *ppGVM = pGVM;
1207 *ppGVMM = pGVMM;
1208 return VINF_SUCCESS;
1209}
1210
1211
1212/**
1213 * Lookup a GVM structure by the shared VM structure
1214 * and ensure that the caller is the EMT thread.
1215 *
1216 * @returns VBox status code.
1217 * @param pVM The shared VM structure (the ring-0 mapping).
1218 * @param ppGVM Where to store the GVM pointer.
1219 * @thread EMT
1220 */
1221GVMMR0DECL(int) GVMMR0ByVMAndEMT(PVM pVM, PGVM *ppGVM)
1222{
1223 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
1224 PGVMM pGVMM;
1225 return gvmmR0ByVMAndEMT(pVM, ppGVM, &pGVMM);
1226}
1227
1228
1229/**
1230 * Lookup a VM by its global handle.
1231 *
1232 * @returns The VM handle on success, NULL on failure.
1233 * @param hGVM The global VM handle. Asserts on bad handle.
1234 */
1235GVMMR0DECL(PVM) GVMMR0GetVMByHandle(uint32_t hGVM)
1236{
1237 PGVM pGVM = GVMMR0ByHandle(hGVM);
1238 return pGVM ? pGVM->pVM : NULL;
1239}
1240
1241
1242/**
1243 * Looks up the VM belonging to the specified EMT thread.
1244 *
1245 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1246 * unnecessary kernel panics when the EMT thread hits an assertion. The
1247 * caller may or may not be an EMT thread.
1248 *
1249 * @returns The VM handle on success, NULL on failure.
1250 * @param hEMT The native thread handle of the EMT.
1251 * NIL_RTNATIVETHREAD means the current thread.
1252 */
1253GVMMR0DECL(PVM) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
1254{
1255 /*
1256 * No assertions here as we're usually called in an AssertMsgN or
1257 * RTAssert* context.
1258 */
1259 PGVMM pGVMM = g_pGVMM;
1260 if ( !VALID_PTR(pGVMM)
1261 || pGVMM->u32Magic != GVMM_MAGIC)
1262 return NULL;
1263
1264 if (hEMT == NIL_RTNATIVETHREAD)
1265 hEMT = RTThreadNativeSelf();
1266
1267 /*
1268 * Search the handles in a linear fashion as we don't dare take the lock (assert).
1269 */
1270 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1271 if ( pGVMM->aHandles[i].hEMT == hEMT
1272 && pGVMM->aHandles[i].iSelf == i
1273 && VALID_PTR(pGVMM->aHandles[i].pvObj)
1274 && VALID_PTR(pGVMM->aHandles[i].pVM))
1275 return pGVMM->aHandles[i].pVM;
1276
1277 return NULL;
1278}
1279
1280
1281/**
1282 * This will wake up expired and soon-to-be-expired VMs.
1283 *
1284 * @returns Number of VMs that have been woken up.
1285 * @param pGVMM Pointer to the GVMM instance data.
1286 * @param u64Now The current time.
1287 */
1288static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
1289{
1290 /*
1291 * The first pass will wake up VMs which have actually expired
1292 * and look for VMs that should be woken up in the 2nd and 3rd passes.
1293 */
1294 unsigned cWoken = 0;
1295 unsigned cHalted = 0;
1296 unsigned cTodo2nd = 0;
1297 unsigned cTodo3rd = 0;
1298 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1299 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1300 i = pGVMM->aHandles[i].iNext)
1301 {
1302 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1303 if ( VALID_PTR(pCurGVM)
1304 && pCurGVM->u32Magic == GVM_MAGIC)
1305 {
1306 uint64_t u64 = pCurGVM->gvmm.s.u64HaltExpire;
1307 if (u64)
1308 {
1309 if (u64 <= u64Now)
1310 {
1311 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1312 {
1313 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1314 AssertRC(rc);
1315 cWoken++;
1316 }
1317 }
1318 else
1319 {
1320 cHalted++;
1321 if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)
1322 cTodo2nd++;
1323 else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)
1324 cTodo3rd++;
1325 }
1326 }
1327 }
1328 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1329 }
1330
1331 if (cTodo2nd)
1332 {
1333 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1334 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1335 i = pGVMM->aHandles[i].iNext)
1336 {
1337 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1338 if ( VALID_PTR(pCurGVM)
1339 && pCurGVM->u32Magic == GVM_MAGIC
1340 && pCurGVM->gvmm.s.u64HaltExpire
1341 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)
1342 {
1343 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1344 {
1345 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1346 AssertRC(rc);
1347 cWoken++;
1348 }
1349 }
1350 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1351 }
1352 }
1353
1354 if (cTodo3rd)
1355 {
1356 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1357 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1358 i = pGVMM->aHandles[i].iNext)
1359 {
1360 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1361 if ( VALID_PTR(pCurGVM)
1362 && pCurGVM->u32Magic == GVM_MAGIC
1363 && pCurGVM->gvmm.s.u64HaltExpire
1364 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)
1365 {
1366 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1367 {
1368 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1369 AssertRC(rc);
1370 cWoken++;
1371 }
1372 }
1373 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1374 }
1375 }
1376
1377 return cWoken;
1378}
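
/* A worked example of the three passes above, using the default configuration
 * (nsEarlyWakeUp1 = 25000, nsEarlyWakeUp2 = 50000); the halt times are made
 * up for illustration. With u64Now = T, a VM whose u64HaltExpire is:
 *   T - 1      has expired and is signalled in the 1st pass;
 *   T + 20000  is within nsEarlyWakeUp1 and is signalled in the 2nd pass;
 *   T + 40000  is within nsEarlyWakeUp2 and is signalled in the 3rd pass;
 *   T + 90000  is left sleeping until a later call comes closer to its expiry.
 * The ASMAtomicXchgU64 of u64HaltExpire to zero keeps the later passes from
 * signalling the same VM twice. */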
1379
1380
1381/**
1382 * Halt the EMT thread.
1383 *
1384 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by another thread).
1385 * VERR_INTERRUPTED if a signal was scheduled for the thread.
1386 * @param pVM Pointer to the shared VM structure.
1387 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
1388 * @thread EMT.
1389 */
1390GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, uint64_t u64ExpireGipTime)
1391{
1392 LogFlow(("GVMMR0SchedHalt: pVM=%p\n", pVM));
1393
1394 /*
1395 * Validate the VM structure, state and handle.
1396 */
1397 PGVMM pGVMM;
1398 PGVM pGVM;
1399 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
1400 if (RT_FAILURE(rc))
1401 return rc;
1402 pGVM->gvmm.s.StatsSched.cHaltCalls++;
1403
1404 Assert(!pGVM->gvmm.s.u64HaltExpire);
1405
1406 /*
1407 * Take the UsedList semaphore, get the current time
1408 * and check if anyone needs waking up.
1409 * Interrupts must NOT be disabled at this point because we ask for GIP time!
1410 */
1411 rc = gvmmR0UsedLock(pGVMM);
1412 AssertRC(rc);
1413
1414 pGVM->gvmm.s.iCpuEmt = ASMGetApicId();
1415
1416 Assert(ASMGetFlags() & X86_EFL_IF);
1417 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1418 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1419
1420 /*
1421 * Go to sleep if we must...
1422 */
1423 if ( u64Now < u64ExpireGipTime
1424 && u64ExpireGipTime - u64Now > (pGVMM->cVMs > pGVMM->cVMsMeansCompany
1425 ? pGVMM->nsMinSleepCompany
1426 : pGVMM->nsMinSleepAlone))
1427 {
1428 pGVM->gvmm.s.StatsSched.cHaltBlocking++;
1429 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, u64ExpireGipTime);
1430 gvmmR0UsedUnlock(pGVMM);
1431
1432 uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000;
1433 rc = RTSemEventMultiWaitNoResume(pGVM->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);
1434 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, 0);
1435 if (rc == VERR_TIMEOUT)
1436 {
1437 pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
1438 rc = VINF_SUCCESS;
1439 }
1440 }
1441 else
1442 {
1443 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
1444 gvmmR0UsedUnlock(pGVMM);
1445 }
1446
1447 /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
1448 RTSemEventMultiReset(pGVM->gvmm.s.HaltEventMulti);
1449
1450 return rc;
1451}
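
/* A worked example of the blocking decision and timeout conversion above,
 * assuming the default configuration. When alone on the host (cVMs <=
 * cVMsMeansCompany) the EMT only blocks if more than nsMinSleepAlone =
 * 750000 ns (0.75 ms) remain. A remaining 1500000 ns becomes
 * cMillies = 1500000 / 1000000 = 1 ms, while anything under 1000000 ns
 * truncates to 0 and is clamped to 1 ms by the "cMillies ? cMillies : 1"
 * expression, so the wait never degenerates into a busy poll. */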
1452
1453
1454/**
1455 * Wakes up the halted EMT thread so it can service a pending request.
1456 *
1457 * @returns VINF_SUCCESS if not yielded.
1458 * VINF_GVM_NOT_BLOCKED if the EMT thread wasn't blocked.
1459 * @param pVM Pointer to the shared VM structure.
1460 * @thread Any but EMT.
1461 */
1462GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM)
1463{
1464 /*
1465 * Validate input and take the UsedLock.
1466 */
1467 PGVM pGVM;
1468 PGVMM pGVMM;
1469 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /* fTakeUsedLock */);
1470 if (RT_SUCCESS(rc))
1471 {
1472 pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
1473
1474 /*
1475 * Signal the semaphore regardless of whether the EMT is currently blocked on it.
1476 *
1477 * The reason for this is that there is absolutely no way we can be 100%
1478 * certain that it isn't *about* to go to sleep on it and just got
1479 * delayed a bit en route. So, we will always signal the semaphore when
1480 * the VM is flagged as halted in the GVMM.
1481 */
1482 if (pGVM->gvmm.s.u64HaltExpire)
1483 {
1484 rc = VINF_SUCCESS;
1485 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, 0);
1486 }
1487 else
1488 {
1489 rc = VINF_GVM_NOT_BLOCKED;
1490 pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
1491 }
1492
1493 int rc2 = RTSemEventMultiSignal(pGVM->gvmm.s.HaltEventMulti);
1494 AssertRC(rc2);
1495
1496 /*
1497 * While we're here, do a round of scheduling.
1498 */
1499 Assert(ASMGetFlags() & X86_EFL_IF);
1500 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1501 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1502
1503
1504 rc2 = gvmmR0UsedUnlock(pGVMM);
1505 AssertRC(rc2);
1506 }
1507
1508 LogFlow(("GVMMR0SchedWakeUp: returns %Rrc\n", rc));
1509 return rc;
1510}
1511
1512
1513/**
1514 * Poll the scheduler to see if someone else should get a chance to run.
1515 *
1516 * This is a bit hackish and will not work too well if the machine is
1517 * under heavy load from non-VM processes.
1518 *
1519 * @returns VINF_SUCCESS if not yielded.
1520 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
1521 * @param pVM Pointer to the shared VM structure.
1523 * @param fYield Whether to yield or not.
1524 * This is for when we're spinning in the halt loop.
1525 * @thread EMT.
1526 */
1527GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, bool fYield)
1528{
1529 /*
1530 * Validate input.
1531 */
1532 PGVM pGVM;
1533 PGVMM pGVMM;
1534 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
1535 if (RT_SUCCESS(rc))
1536 {
1537 rc = gvmmR0UsedLock(pGVMM);
1538 AssertRC(rc);
1539 pGVM->gvmm.s.StatsSched.cPollCalls++;
1540
1541 Assert(ASMGetFlags() & X86_EFL_IF);
1542 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1543
1544 if (!fYield)
1545 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1546 else
1547 {
1548 /** @todo implement this... */
1549 rc = VERR_NOT_IMPLEMENTED;
1550 }
1551
1552 gvmmR0UsedUnlock(pGVMM);
1553 }
1554
1555 LogFlow(("GVMMR0SchedPoll: returns %Rrc\n", rc));
1556 return rc;
1557}
1558
1559
1560
1561/**
1562 * Retrieves the GVMM statistics visible to the caller.
1563 *
1564 * @returns VBox status code.
1565 *
1566 * @param pStats Where to put the statistics.
1567 * @param pSession The current session.
1568 * @param pVM The VM to obtain statistics for. Optional.
1569 */
1570GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM)
1571{
1572 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pVM=%p\n", pStats, pSession, pVM));
1573
1574 /*
1575 * Validate input.
1576 */
1577 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
1578 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
1579 pStats->cVMs = 0; /* (crash before taking the sem...) */
1580
1581 /*
1582 * Take the lock and get the VM statistics.
1583 */
1584 PGVMM pGVMM;
1585 if (pVM)
1586 {
1587 PGVM pGVM;
1588 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /*fTakeUsedLock*/);
1589 if (RT_FAILURE(rc))
1590 return rc;
1591 pStats->SchedVM = pGVM->gvmm.s.StatsSched;
1592 }
1593 else
1594 {
1595 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1596 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
1597
1598 int rc = gvmmR0UsedLock(pGVMM);
1599 AssertRCReturn(rc, rc);
1600 }
1601
1602 /*
1603 * Enumerate the VMs and add the visible ones to the statistics.
1604 */
1605 pStats->cVMs = 0;
1606 memset(&pStats->SchedSum, 0, sizeof(pStats->SchedSum));
1607
1608 for (unsigned i = pGVMM->iUsedHead;
1609 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1610 i = pGVMM->aHandles[i].iNext)
1611 {
1612 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1613 void *pvObj = pGVMM->aHandles[i].pvObj;
1614 if ( VALID_PTR(pvObj)
1615 && VALID_PTR(pGVM)
1616 && pGVM->u32Magic == GVM_MAGIC
1617 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
1618 {
1619 pStats->cVMs++;
1620
1621 pStats->SchedSum.cHaltCalls += pGVM->gvmm.s.StatsSched.cHaltCalls;
1622 pStats->SchedSum.cHaltBlocking += pGVM->gvmm.s.StatsSched.cHaltBlocking;
1623 pStats->SchedSum.cHaltTimeouts += pGVM->gvmm.s.StatsSched.cHaltTimeouts;
1624 pStats->SchedSum.cHaltNotBlocking += pGVM->gvmm.s.StatsSched.cHaltNotBlocking;
1625 pStats->SchedSum.cHaltWakeUps += pGVM->gvmm.s.StatsSched.cHaltWakeUps;
1626
1627 pStats->SchedSum.cWakeUpCalls += pGVM->gvmm.s.StatsSched.cWakeUpCalls;
1628 pStats->SchedSum.cWakeUpNotHalted += pGVM->gvmm.s.StatsSched.cWakeUpNotHalted;
1629 pStats->SchedSum.cWakeUpWakeUps += pGVM->gvmm.s.StatsSched.cWakeUpWakeUps;
1630
1631 pStats->SchedSum.cPollCalls += pGVM->gvmm.s.StatsSched.cPollCalls;
1632 pStats->SchedSum.cPollHalts += pGVM->gvmm.s.StatsSched.cPollHalts;
1633 pStats->SchedSum.cPollWakeUps += pGVM->gvmm.s.StatsSched.cPollWakeUps;
1634 }
1635 }
1636
1637 gvmmR0UsedUnlock(pGVMM);
1638
1639 return VINF_SUCCESS;
1640}
1641
1642
1643/**
1644 * VMMR0 request wrapper for GVMMR0QueryStatistics.
1645 *
1646 * @returns see GVMMR0QueryStatistics.
1647 * @param pVM Pointer to the shared VM structure. Optional.
1648 * @param pReq The request packet.
1649 */
1650GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PVM pVM, PGVMMQUERYSTATISTICSSREQ pReq)
1651{
1652 /*
1653 * Validate input and pass it on.
1654 */
1655 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1656 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1657
1658 return GVMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pVM);
1659}
1660
1661
1662/**
1663 * Resets the specified GVMM statistics.
1664 *
1665 * @returns VBox status code.
1666 *
1667 * @param pStats Which statistics to reset; non-zero fields indicate which ones.
1668 * @param pSession The current session.
1669 * @param pVM The VM to reset statistics for. Optional.
1670 */
1671GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM)
1672{
1673 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pVM=%p\n", pStats, pSession, pVM));
1674
1675 /*
1676 * Validate input.
1677 */
1678 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
1679 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
1680
1681 /*
1682 * Take the lock and get the VM statistics.
1683 */
1684 PGVMM pGVMM;
1685 if (pVM)
1686 {
1687 PGVM pGVM;
1688 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /*fTakeUsedLock*/);
1689 if (RT_FAILURE(rc))
1690 return rc;
1691# define MAYBE_RESET_FIELD(field) \
1692 do { if (pStats->SchedVM. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
1693 MAYBE_RESET_FIELD(cHaltCalls);
1694 MAYBE_RESET_FIELD(cHaltBlocking);
1695 MAYBE_RESET_FIELD(cHaltTimeouts);
1696 MAYBE_RESET_FIELD(cHaltNotBlocking);
1697 MAYBE_RESET_FIELD(cHaltWakeUps);
1698 MAYBE_RESET_FIELD(cWakeUpCalls);
1699 MAYBE_RESET_FIELD(cWakeUpNotHalted);
1700 MAYBE_RESET_FIELD(cWakeUpWakeUps);
1701 MAYBE_RESET_FIELD(cPollCalls);
1702 MAYBE_RESET_FIELD(cPollHalts);
1703 MAYBE_RESET_FIELD(cPollWakeUps);
1704# undef MAYBE_RESET_FIELD
1705 }
1706 else
1707 {
1708 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1709
1710 int rc = gvmmR0UsedLock(pGVMM);
1711 AssertRCReturn(rc, rc);
1712 }
1713
1714 /*
1715 * Enumerate the VMs and reset the statistics of the visible ones.
1716 */
1717 if (ASMMemIsAll8(&pStats->SchedSum, sizeof(pStats->SchedSum), 0))
1718 {
1719 for (unsigned i = pGVMM->iUsedHead;
1720 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1721 i = pGVMM->aHandles[i].iNext)
1722 {
1723 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1724 void *pvObj = pGVMM->aHandles[i].pvObj;
1725 if ( VALID_PTR(pvObj)
1726 && VALID_PTR(pGVM)
1727 && pGVM->u32Magic == GVM_MAGIC
1728 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
1729 {
1730# define MAYBE_RESET_FIELD(field) \
1731 do { if (pStats->SchedSum. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
1732 MAYBE_RESET_FIELD(cHaltCalls);
1733 MAYBE_RESET_FIELD(cHaltBlocking);
1734 MAYBE_RESET_FIELD(cHaltTimeouts);
1735 MAYBE_RESET_FIELD(cHaltNotBlocking);
1736 MAYBE_RESET_FIELD(cHaltWakeUps);
1737 MAYBE_RESET_FIELD(cWakeUpCalls);
1738 MAYBE_RESET_FIELD(cWakeUpNotHalted);
1739 MAYBE_RESET_FIELD(cWakeUpWakeUps);
1740 MAYBE_RESET_FIELD(cPollCalls);
1741 MAYBE_RESET_FIELD(cPollHalts);
1742 MAYBE_RESET_FIELD(cPollWakeUps);
1743# undef MAYBE_RESET_FIELD
1744 }
1745 }
1746 }
1747
1748 gvmmR0UsedUnlock(pGVMM);
1749
1750 return VINF_SUCCESS;
1751}
1752
1753
1754/**
1755 * VMMR0 request wrapper for GVMMR0ResetStatistics.
1756 *
1757 * @returns see GVMMR0ResetStatistics.
1758 * @param pVM Pointer to the shared VM structure. Optional.
1759 * @param pReq The request packet.
1760 */
1761GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PVM pVM, PGVMMRESETSTATISTICSSREQ pReq)
1762{
1763 /*
1764 * Validate input and pass it on.
1765 */
1766 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1767 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1768
1769 return GVMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pVM);
1770}
1771