VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@5941

Last change on this file since 5941 was 5941, checked in by vboxsync, 17 years ago

Fixed handle init bug.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 56.3 KB
1/* $Id: GVMMR0.cpp 5941 2007-12-03 12:18:45Z vboxsync $ */
2/** @file
3 * GVMM - Global VM Manager.
4 */
5
6/*
7 * Copyright (C) 2007 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 */
18
19
20/** @page pg_GVMM GVMM - The Global VM Manager
21 *
22 * The Global VM Manager lives in ring-0. Its main function at the moment
23 * is to manage a list of all running VMs, keep a ring-0 only structure (GVM)
24 * for each of them, and assign them unique identifiers (so GMM can track
25 * page owners). The idea for the future is to add an idle priority kernel
26 * thread that can take care of tasks like page sharing.
27 *
28 * The GVMM will create a ring-0 object for each VM when it's registered,
29 * this is both for session cleanup purposes and for having a point where
30 * it's possible to implement usage policies later (in SUPR0ObjRegister).
31 */
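/** @remarks Illustrative sketch (editor's addition) of the expected ring-0
 * call sequence for a VM as managed by the GVMM; inferred from the doc
 * comments of the APIs below, not a verified trace:
 * @code
 *      PVM pVM;
 *      int rc = GVMMR0CreateVM(pSession, &pVM);    // any thread
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = GVMMR0AssociateEMTWithVM(pVM);     // on the EMT
 *          rc = GVMMR0InitVM(pVM);                 // on the EMT
 *          ... run the VM ...
 *          rc = GVMMR0DisassociateEMTFromVM(pVM);  // on the EMT
 *          rc = GVMMR0DestroyVM(pVM);              // EMT or any thread
 *      }
 * @endcode
 */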
32
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_GVMM
38#include <VBox/gvmm.h>
39#include "GVMMR0Internal.h"
40#include <VBox/gvm.h>
41#include <VBox/vm.h>
42#include <VBox/err.h>
43#include <iprt/alloc.h>
44#include <iprt/semaphore.h>
45#include <iprt/time.h>
46#include <VBox/log.h>
47#include <iprt/thread.h>
48#include <iprt/param.h>
49#include <iprt/string.h>
50#include <iprt/assert.h>
51#include <iprt/mem.h>
52#include <iprt/memobj.h>
53
54
55/*******************************************************************************
56* Structures and Typedefs *
57*******************************************************************************/
58
59/**
60 * Global VM handle.
61 */
62typedef struct GVMHANDLE
63{
64 /** The index of the next handle in the list (free or used). (0 is nil.) */
65 uint16_t volatile iNext;
66 /** Our own index / handle value. */
67 uint16_t iSelf;
68 /** The pointer to the ring-0 only (aka global) VM structure. */
69 PGVM pGVM;
70 /** The ring-0 mapping of the shared VM instance data. */
71 PVM pVM;
72 /** The virtual machine object. */
73 void *pvObj;
74 /** The session this VM is associated with. */
75 PSUPDRVSESSION pSession;
76 /** The ring-0 handle of the EMT thread.
77 * This is used for assertions and similar cases where we need to find the VM handle. */
78 RTNATIVETHREAD hEMT;
79} GVMHANDLE;
80/** Pointer to a global VM handle. */
81typedef GVMHANDLE *PGVMHANDLE;
82
83/**
84 * The GVMM instance data.
85 */
86typedef struct GVMM
87{
88 /** Eyecatcher / magic. */
89 uint32_t u32Magic;
90 /** The index of the head of the free handle chain. (0 is nil.) */
91 uint16_t volatile iFreeHead;
92 /** The index of the head of the active handle chain. (0 is nil.) */
93 uint16_t volatile iUsedHead;
94 /** The number of VMs. */
95 uint16_t volatile cVMs;
96// /** The number of halted EMT threads. */
97// uint16_t volatile cHaltedEMTs;
98 /** The lock used to serialize VM creation, destruction and associated events that
99 * aren't performance critical. Owners may acquire the list lock. */
100 RTSEMFASTMUTEX CreateDestroyLock;
101 /** The lock used to serialize used list updates and accesses.
102 * This indirectly includes scheduling since the scheduler will have to walk the
103 * used list to examine running VMs. Owners may not acquire any other locks. */
104 RTSEMFASTMUTEX UsedLock;
105 /** The handle array.
106 * The size of this array defines the maximum number of currently running VMs.
107 * The first entry is unused as it represents the NIL handle. */
108 GVMHANDLE aHandles[128];
109
110 /** The number of VMs that means we no longer consider ourselves alone on a CPU/Core.
111 * @gcfgm /GVMM/cVMsMeansCompany 32-bit 0..UINT32_MAX
112 */
113 uint32_t cVMsMeansCompany;
114 /** The minimum sleep time for when we're alone, in nanoseconds.
115 * @gcfgm /GVMM/MinSleepAlone 32-bit 0..100000000
116 */
117 uint32_t nsMinSleepAlone;
118 /** The minimum sleep time for when we've got company, in nanoseconds.
119 * @gcfgm /GVMM/MinSleepCompany 32-bit 0..100000000
120 */
121 uint32_t nsMinSleepCompany;
122 /** The limit for the first round of early wakeups, given in nanoseconds.
123 * @gcfgm /GVMM/EarlyWakeUp1 32-bit 0..100000000
124 */
125 uint32_t nsEarlyWakeUp1;
126 /** The limit for the second round of early wakeups, given in nanoseconds.
127 * @gcfgm /GVMM/EarlyWakeUp2 32-bit 0..100000000
128 */
129 uint32_t nsEarlyWakeUp2;
130} GVMM;
131/** Pointer to the GVMM instance data. */
132typedef GVMM *PGVMM;
133
134/** The GVMM::u32Magic value (Charlie Haden). */
135#define GVMM_MAGIC 0x19370806
136
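/** @remarks Illustrative note on the handle lists (editor's sketch): aHandles
 * forms two singly linked lists threaded through the iNext fields, with index
 * 0 doubling as the nil terminator. Right after GVMMR0Init:
 * @code
 *      iFreeHead = 1:  [1] -> [2] -> ... -> [127] -> 0 (nil)
 *      iUsedHead = 0:  (empty)
 * @endcode
 * GVMMR0CreateVM pops the free head and pushes it onto the used list; the
 * handle destructor unlinks it again and returns it to the free list.
 */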
137
138
139/*******************************************************************************
140* Global Variables *
141*******************************************************************************/
142/** Pointer to the GVMM instance data.
143 * (Just my general dislike for global variables.) */
144static PGVMM g_pGVMM = NULL;
145
146/** Macro for obtaining and validating the g_pGVMM pointer.
147 * On failure it will return from the invoking function with the specified return value.
148 *
149 * @param pGVMM The name of the pGVMM variable.
150 * @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
151 * VBox status codes.
152 */
153#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
154 do { \
155 (pGVMM) = g_pGVMM;\
156 AssertPtrReturn((pGVMM), (rc)); \
157 AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
158 } while (0)
159
160/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
161 * On failure it will return from the invoking function.
162 *
163 * @param pGVMM The name of the pGVMM variable.
164 */
165#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
166 do { \
167 (pGVMM) = g_pGVMM;\
168 AssertPtrReturnVoid((pGVMM)); \
169 AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
170 } while (0)
171
172
173/*******************************************************************************
174* Internal Functions *
175*******************************************************************************/
176static void gvmmR0InitPerVMData(PGVM pGVM);
177static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
178static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
179static int gvmmR0ByVMAndEMT(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM);
180
181
182/**
183 * Initializes the GVMM.
184 *
185 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
186 *
187 * @returns VBox status code.
188 */
189GVMMR0DECL(int) GVMMR0Init(void)
190{
191 LogFlow(("GVMMR0Init:\n"));
192
193 /*
194 * Allocate and initialize the instance data.
195 */
196 PGVMM pGVMM = (PGVMM)RTMemAllocZ(sizeof(*pGVMM));
197 if (!pGVMM)
198 return VERR_NO_MEMORY;
199 int rc = RTSemFastMutexCreate(&pGVMM->CreateDestroyLock);
200 if (RT_SUCCESS(rc))
201 {
202 rc = RTSemFastMutexCreate(&pGVMM->UsedLock);
203 if (RT_SUCCESS(rc))
204 {
205 pGVMM->u32Magic = GVMM_MAGIC;
206 pGVMM->iUsedHead = 0;
207 pGVMM->iFreeHead = 1;
208
209 /* the nil handle */
210 pGVMM->aHandles[0].iSelf = 0;
211 pGVMM->aHandles[0].iNext = 0;
212
213 /* the tail */
214 unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
215 pGVMM->aHandles[i].iSelf = i;
216 pGVMM->aHandles[i].iNext = 0; /* nil */
217
218 /* the rest */
219 while (i-- > 1)
220 {
221 pGVMM->aHandles[i].iSelf = i;
222 pGVMM->aHandles[i].iNext = i + 1;
223 }
224
225 /* The default configuration values. */
226 pGVMM->cVMsMeansCompany = 1; /** @todo should be adjusted relative to the cpu count or something... */
227 pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
228 pGVMM->nsMinSleepCompany = 15000 /* ns (0.015 ms) */;
229 pGVMM->nsEarlyWakeUp1 = 25000 /* ns (0.025 ms) */;
230 pGVMM->nsEarlyWakeUp2 = 50000 /* ns (0.050 ms) */;
231
232 g_pGVMM = pGVMM;
233 LogFlow(("GVMMR0Init: pGVMM=%p\n", pGVMM));
234 return VINF_SUCCESS;
235 }
236
237 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
238 }
239
240 RTMemFree(pGVMM);
241 return rc;
242}
243
244
245/**
246 * Terminates the GVMM.
247 *
248 * This is called while owning the loader semaphore (see supdrvLdrFree()).
249 * And unless something is wrong, there should be absolutely no VMs
250 * registered at this point.
251 */
252GVMMR0DECL(void) GVMMR0Term(void)
253{
254 LogFlow(("GVMMR0Term:\n"));
255
256 PGVMM pGVMM = g_pGVMM;
257 g_pGVMM = NULL;
258 if (RT_UNLIKELY(!VALID_PTR(pGVMM)))
259 {
260 SUPR0Printf("GVMMR0Term: pGVMM=%p\n", pGVMM);
261 return;
262 }
263
264 pGVMM->u32Magic++;
265
266 RTSemFastMutexDestroy(pGVMM->UsedLock);
267 pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
268 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
269 pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
270
271 pGVMM->iFreeHead = 0;
272 if (pGVMM->iUsedHead)
273 {
274 SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs);
275 pGVMM->iUsedHead = 0;
276 }
277
278 RTMemFree(pGVMM);
279}
280
281
282/**
283 * A quick hack for setting global config values.
284 *
285 * @returns VBox status code.
286 *
287 * @param pSession The session handle. Used for authentication.
288 * @param pszName The variable name.
289 * @param u64Value The new value.
290 */
291GVMMR0DECL(int) GVMMR0SetConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t u64Value)
292{
293 /*
294 * Validate input.
295 */
296 PGVMM pGVMM;
297 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
298 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
299 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
300
301 /*
302 * String switch time!
303 */
304 if (strncmp(pszName, "/GVMM/", sizeof("/GVMM/") - 1))
305 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
306 int rc = VINF_SUCCESS;
307 pszName += sizeof("/GVMM/") - 1;
308 if (!strcmp(pszName, "cVMsMeansCompany"))
309 {
310 if (u64Value <= UINT32_MAX)
311 pGVMM->cVMsMeansCompany = u64Value;
312 else
313 rc = VERR_OUT_OF_RANGE;
314 }
315 else if (!strcmp(pszName, "MinSleepAlone"))
316 {
317 if (u64Value <= 100000000)
318 pGVMM->nsMinSleepAlone = u64Value;
319 else
320 rc = VERR_OUT_OF_RANGE;
321 }
322 else if (!strcmp(pszName, "MinSleepCompany"))
323 {
324 if (u64Value <= 100000000)
325 pGVMM->nsMinSleepCompany = u64Value;
326 else
327 rc = VERR_OUT_OF_RANGE;
328 }
329 else if (!strcmp(pszName, "EarlyWakeUp1"))
330 {
331 if (u64Value <= 100000000)
332 pGVMM->nsEarlyWakeUp1 = u64Value;
333 else
334 rc = VERR_OUT_OF_RANGE;
335 }
336 else if (!strcmp(pszName, "EarlyWakeUp2"))
337 {
338 if (u64Value <= 100000000)
339 pGVMM->nsEarlyWakeUp2 = u64Value;
340 else
341 rc = VERR_OUT_OF_RANGE;
342 }
343 else
344 rc = VERR_CFGM_VALUE_NOT_FOUND;
345 return rc;
346}
347
348
349/**
350 * A quick hack for getting global config values.
351 *
352 * @returns VBox status code.
353 *
354 * @param pSession The session handle. Used for authentication.
355 * @param pszName The variable name.
356 * @param pu64Value Where to store the value.
357 */
358GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
359{
360 /*
361 * Validate input.
362 */
363 PGVMM pGVMM;
364 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
365 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
366 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
367 AssertPtrReturn(pu64Value, VERR_INVALID_POINTER);
368
369 /*
370 * String switch time!
371 */
372 if (strncmp(pszName, "/GVMM/", sizeof("/GVMM/") - 1))
373 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
374 int rc = VINF_SUCCESS;
375 pszName += sizeof("/GVMM/") - 1;
376 if (!strcmp(pszName, "cVMsMeansCompany"))
377 *pu64Value = pGVMM->cVMsMeansCompany;
378 else if (!strcmp(pszName, "MinSleepAlone"))
379 *pu64Value = pGVMM->nsMinSleepAlone;
380 else if (!strcmp(pszName, "MinSleepCompany"))
381 *pu64Value = pGVMM->nsMinSleepCompany;
382 else if (!strcmp(pszName, "EarlyWakeUp1"))
383 *pu64Value = pGVMM->nsEarlyWakeUp1;
384 else if (!strcmp(pszName, "EarlyWakeUp2"))
385 *pu64Value = pGVMM->nsEarlyWakeUp2;
386 else
387 rc = VERR_CFGM_VALUE_NOT_FOUND;
388 return rc;
389}
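/** @remarks Illustrative usage sketch (editor's addition, assuming a valid
 * ring-0 session): tweaking and reading back one of the scheduler knobs
 * handled by the string switches above. The 50000 ns value is arbitrary.
 * @code
 *      uint64_t u64;
 *      int rc = GVMMR0SetConfig(pSession, "/GVMM/MinSleepAlone", 50000);
 *      if (RT_SUCCESS(rc))
 *          rc = GVMMR0QueryConfig(pSession, "/GVMM/MinSleepAlone", &u64);
 *      Assert(RT_FAILURE(rc) || u64 == 50000);
 * @endcode
 */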
390
391
392/**
393 * Request wrapper for the GVMMR0CreateVM API.
394 *
395 * @returns VBox status code.
396 * @param pReq The request buffer.
397 */
398GVMMR0DECL(int) GVMMR0CreateVMReq(PGVMMCREATEVMREQ pReq)
399{
400 /*
401 * Validate the request.
402 */
403 if (!VALID_PTR(pReq))
404 return VERR_INVALID_POINTER;
405 if (pReq->Hdr.cbReq != sizeof(*pReq))
406 return VERR_INVALID_PARAMETER;
407 if (!VALID_PTR(pReq->pSession))
408 return VERR_INVALID_POINTER;
409
410 /*
411 * Execute it.
412 */
413 PVM pVM;
414 pReq->pVMR0 = NULL;
415 pReq->pVMR3 = NIL_RTR3PTR;
416 int rc = GVMMR0CreateVM(pReq->pSession, &pVM);
417 if (RT_SUCCESS(rc))
418 {
419 pReq->pVMR0 = pVM;
420 pReq->pVMR3 = pVM->pVMR3;
421 }
422 return rc;
423}
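/** @remarks Illustrative sketch (editor's addition) of how the ring-3 side
 * might fill in the request before passing it down. Only the GVMMCREATEVMREQ
 * fields are taken from this file; the header magic, operation code and
 * dispatch entry point named below are assumptions, not verified here:
 * @code
 *      GVMMCREATEVMREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    // assumed header magic
 *      Req.Hdr.cbReq    = sizeof(Req);             // checked above
 *      Req.pSession     = pSession;
 *      Req.pVMR0        = NULL;
 *      Req.pVMR3        = NIL_RTR3PTR;
 *      rc = SUPCallVMMR0Ex(NULL, VMMR0_DO_GVMM_CREATE_VM, 0, &Req.Hdr); // assumed entry point
 * @endcode
 */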
424
425
426/**
427 * Allocates the VM structure and registers it with GVM.
428 *
429 * @returns VBox status code.
430 * @param pSession The support driver session.
431 * @param ppVM Where to store the pointer to the VM structure.
432 *
433 * @thread Any thread.
434 */
435GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, PVM *ppVM)
436{
437 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
438 PGVMM pGVMM;
439 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
440
441 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
442 *ppVM = NULL;
443
444 AssertReturn(RTThreadNativeSelf() != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR);
445
446 /*
447 * The whole allocation process is protected by the lock.
448 */
449 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
450 AssertRCReturn(rc, rc);
451
452 /*
453 * Allocate a handle first so we don't waste resources unnecessarily.
454 */
455 uint16_t iHandle = pGVMM->iFreeHead;
456 if (iHandle)
457 {
458 PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
459
460 /* consistency checks, a bit paranoid as always. */
461 if ( !pHandle->pVM
462 && !pHandle->pGVM
463 && !pHandle->pvObj
464 && pHandle->iSelf == iHandle)
465 {
466 pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
467 if (pHandle->pvObj)
468 {
469 /*
470 * Move the handle from the free to used list and perform permission checks.
471 */
472 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
473 AssertRC(rc);
474
475 pGVMM->iFreeHead = pHandle->iNext;
476 pHandle->iNext = pGVMM->iUsedHead;
477 pGVMM->iUsedHead = iHandle;
478 pGVMM->cVMs++;
479
480 pHandle->pVM = NULL;
481 pHandle->pGVM = NULL;
482 pHandle->pSession = pSession;
483 pHandle->hEMT = NIL_RTNATIVETHREAD;
484
485 RTSemFastMutexRelease(pGVMM->UsedLock);
486
487 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
488 if (RT_SUCCESS(rc))
489 {
490 /*
491 * Allocate the global VM structure (GVM) and initialize it.
492 */
493 PGVM pGVM = (PGVM)RTMemAllocZ(sizeof(*pGVM));
494 if (pGVM)
495 {
496 pGVM->u32Magic = GVM_MAGIC;
497 pGVM->hSelf = iHandle;
498 pGVM->hEMT = NIL_RTNATIVETHREAD;
499 pGVM->pVM = NULL;
500
501 gvmmR0InitPerVMData(pGVM);
502 /* GMMR0InitPerVMData(pGVM); - later */
503
504 /*
505 * Allocate the shared VM structure and associated page array.
506 */
507 const size_t cPages = RT_ALIGN(sizeof(VM), PAGE_SIZE) >> PAGE_SHIFT;
508 rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
509 if (RT_SUCCESS(rc))
510 {
511 PVM pVM = (PVM)RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj); AssertPtr(pVM);
512 memset(pVM, 0, cPages << PAGE_SHIFT);
513 pVM->enmVMState = VMSTATE_CREATING;
514 pVM->pVMR0 = pVM;
515 pVM->pSession = pSession;
516 pVM->hSelf = iHandle;
517
518 rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
519 if (RT_SUCCESS(rc))
520 {
521 PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
522 for (size_t iPage = 0; iPage < cPages; iPage++)
523 {
524 paPages[iPage].uReserved = 0;
525 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
526 Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
527 }
528
529 /*
530 * Map them into ring-3.
531 */
532 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
533 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
534 if (RT_SUCCESS(rc))
535 {
536 pVM->pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
537 AssertPtr((void *)pVM->pVMR3);
538
539 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1, 0,
540 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
541 if (RT_SUCCESS(rc))
542 {
543 pVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
544 AssertPtr((void *)pVM->paVMPagesR3);
545
546 /* complete the handle - take the UsedLock sem just to be careful. */
547 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
548 AssertRC(rc);
549
550 pHandle->pVM = pVM;
551 pHandle->pGVM = pGVM;
552 pGVM->pVM = pVM;
553
554
555 RTSemFastMutexRelease(pGVMM->UsedLock);
556 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
557
558 *ppVM = pVM;
559 Log(("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVM->pVMR3, pGVM, iHandle));
560 return VINF_SUCCESS;
561 }
562
563 RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
564 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
565 }
566 RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */);
567 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
568 }
569 RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */);
570 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
571 }
572 }
573 }
574 /* else: The user wasn't permitted to create this VM. */
575
576 /*
577 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
578 * object reference here. A little extra mess because of the non-recursive lock.
579 */
580 void *pvObj = pHandle->pvObj;
581 pHandle->pvObj = NULL;
582 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
583
584 SUPR0ObjRelease(pvObj, pSession);
585
586 SUPR0Printf("GVMMR0CreateVM: failed, rc=%d\n", rc);
587 return rc;
588 }
589
590 rc = VERR_NO_MEMORY;
591 }
592 else
593 rc = VERR_INTERNAL_ERROR;
594 }
595 else
596 rc = VERR_GVM_TOO_MANY_VMS;
597
598 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
599 return rc;
600}
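/** @remarks Worked example (editor's addition) of the shared VM structure
 * sizing in GVMMR0CreateVM, assuming a hypothetical sizeof(VM) of 9000 bytes
 * and 4 KiB pages:
 * @code
 *      cPages = RT_ALIGN(9000, PAGE_SIZE) >> PAGE_SHIFT
 *             = 12288 >> 12
 *             = 3
 * @endcode
 * So three low physical pages back the VM structure, a 3-entry SUPPAGE array
 * records their physical addresses, and both objects are mapped into ring-3.
 */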
601
602
603/**
604 * Initializes the per VM data belonging to GVMM.
605 *
606 * @param pGVM Pointer to the global VM structure.
607 */
608static void gvmmR0InitPerVMData(PGVM pGVM)
609{
610 AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
611 Assert(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
612 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
613 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
614 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
615 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
616 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
617}
618
619
620/**
621 * Associates an EMT thread with a VM.
622 *
623 * This is called early during the ring-0 VM initialization so assertions later in
624 * the process can be handled gracefully.
625 *
626 * @returns VBox status code.
627 *
628 * @param pVM The VM instance data (aka handle), ring-0 mapping of course.
629 * @thread EMT.
630 */
631GVMMR0DECL(int) GVMMR0AssociateEMTWithVM(PVM pVM)
632{
633 LogFlow(("GVMMR0AssociateEMTWithVM: pVM=%p\n", pVM));
634 PGVMM pGVMM;
635 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
636
637 /*
638 * Validate the VM structure, state and handle.
639 */
640 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
641 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
642 AssertMsgReturn(pVM->enmVMState == VMSTATE_CREATING, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
643
644 RTNATIVETHREAD hEMT = RTThreadNativeSelf();
645 AssertReturn(hEMT != NIL_RTNATIVETHREAD, VERR_NOT_SUPPORTED);
646
647 const uint16_t hGVM = pVM->hSelf;
648 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
649 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
650
651 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
652 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
653
654 /*
655 * Take the lock, validate the handle and update the structure members.
656 */
657 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
658 AssertRCReturn(rc, rc);
659 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
660 AssertRC(rc);
661
662 if ( pHandle->pVM == pVM
663 && VALID_PTR(pHandle->pvObj)
664 && VALID_PTR(pHandle->pSession)
665 && VALID_PTR(pHandle->pGVM)
666 && pHandle->pGVM->u32Magic == GVM_MAGIC)
667 {
668 pHandle->hEMT = hEMT;
669 pHandle->pGVM->hEMT = hEMT;
670 }
671 else
672 rc = VERR_INTERNAL_ERROR;
673
674 RTSemFastMutexRelease(pGVMM->UsedLock);
675 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
676 LogFlow(("GVMMR0AssociateEMTWithVM: returns %Vrc (hEMT=%RTnthrd)\n", rc, hEMT));
677 return rc;
678}
679
680
681/**
682 * Does the VM initialization.
683 *
684 * @returns VBox status code.
685 * @param pVM Pointer to the shared VM structure.
686 */
687GVMMR0DECL(int) GVMMR0InitVM(PVM pVM)
688{
689 LogFlow(("GVMMR0InitVM: pVM=%p\n", pVM));
690
691 /*
692 * Validate the VM structure, state and handle.
693 */
694 PGVM pGVM;
695 PGVMM pGVMM;
696 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
697 if (RT_SUCCESS(rc))
698 {
699 if (pGVM->gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
700 {
701 rc = RTSemEventMultiCreate(&pGVM->gvmm.s.HaltEventMulti);
702 if (RT_FAILURE(rc))
703 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
704 }
705 else
706 rc = VERR_WRONG_ORDER;
707 }
708
709 LogFlow(("GVMMR0InitVM: returns %Rrc\n", rc));
710 return rc;
711}
712
713
714/**
715 * Disassociates the EMT thread from a VM.
716 *
717 * This is called last in the ring-0 VM termination. After this point anyone is
718 * allowed to destroy the VM. Ideally, we should associate the VM with the thread
719 * that's going to call GVMMR0DestroyVM for optimal security, but that's impractical
720 * at present.
721 *
722 * @returns VBox status code.
723 *
724 * @param pVM The VM instance data (aka handle), ring-0 mapping of course.
725 * @thread EMT.
726 */
727GVMMR0DECL(int) GVMMR0DisassociateEMTFromVM(PVM pVM)
728{
729 LogFlow(("GVMMR0DisassociateEMTFromVM: pVM=%p\n", pVM));
730 PGVMM pGVMM;
731 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
732
733 /*
734 * Validate the VM structure, state and handle.
735 */
736 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
737 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
738 AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_DESTROYING, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
739
740 RTNATIVETHREAD hEMT = RTThreadNativeSelf();
741 AssertReturn(hEMT != NIL_RTNATIVETHREAD, VERR_NOT_SUPPORTED);
742
743 const uint16_t hGVM = pVM->hSelf;
744 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
745 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
746
747 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
748 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
749
750 /*
751 * Take the lock, validate the handle and update the structure members.
752 */
753 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
754 AssertRCReturn(rc, rc);
755 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
756 AssertRC(rc);
757
758 if ( VALID_PTR(pHandle->pvObj)
759 && VALID_PTR(pHandle->pSession)
760 && VALID_PTR(pHandle->pGVM)
761 && pHandle->pGVM->u32Magic == GVM_MAGIC)
762 {
763 if ( pHandle->pVM == pVM
764 && pHandle->hEMT == hEMT)
765 {
766 pHandle->hEMT = NIL_RTNATIVETHREAD;
767 pHandle->pGVM->hEMT = NIL_RTNATIVETHREAD;
768 }
769 else
770 rc = VERR_NOT_OWNER;
771 }
772 else
773 rc = VERR_INVALID_HANDLE;
774
775 RTSemFastMutexRelease(pGVMM->UsedLock);
776 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
777 LogFlow(("GVMMR0DisassociateEMTFromVM: returns %Vrc (hEMT=%RTnthrd)\n", rc, hEMT));
778 return rc;
779}
780
781
782/**
783 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
784 *
785 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
786 * and the caller is not the EMT thread, unfortunately. For security reasons, it
787 * would've been nice if the caller was actually the EMT thread or that we somehow
788 * could've associated the calling thread with the VM up front.
789 *
790 * @returns VBox status code.
791 * @param pVM Pointer to the shared VM structure.
792 *
793 * @thread EMT if it's associated with the VM, otherwise any thread.
794 */
795GVMMR0DECL(int) GVMMR0DestroyVM(PVM pVM)
796{
797 LogFlow(("GVMMR0DestroyVM: pVM=%p\n", pVM));
798 PGVMM pGVMM;
799 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
800
801
802 /*
803 * Validate the VM structure, state and caller.
804 */
805 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
806 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
807 AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
808
809 uint32_t hGVM = pVM->hSelf;
810 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
811 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
812
813 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
814 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
815
816 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
817 AssertReturn(pHandle->hEMT == hSelf || pHandle->hEMT == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);
818
819 /*
820 * Lookup the handle and destroy the object.
821 * Since the lock isn't recursive and we'll have to leave it before dereferencing the
822 * object, we take some precautions against racing callers just in case...
823 */
824 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
825 AssertRC(rc);
826
827 /* be careful here because we might theoretically be racing someone else cleaning up. */
828 if ( pHandle->pVM == pVM
829 && ( pHandle->hEMT == hSelf
830 || pHandle->hEMT == NIL_RTNATIVETHREAD)
831 && VALID_PTR(pHandle->pvObj)
832 && VALID_PTR(pHandle->pSession)
833 && VALID_PTR(pHandle->pGVM)
834 && pHandle->pGVM->u32Magic == GVM_MAGIC)
835 {
836 void *pvObj = pHandle->pvObj;
837 pHandle->pvObj = NULL;
838 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
839
840 SUPR0ObjRelease(pvObj, pHandle->pSession);
841 }
842 else
843 {
844 SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, hEMT=%p, .pvObj=%p} pVM=%p hSelf=%p\n",
845 pHandle, pHandle->pVM, pHandle->hEMT, pHandle->pvObj, pVM, hSelf);
846 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
847 rc = VERR_INTERNAL_ERROR;
848 }
849
850 return rc;
851}
852
853
854/**
855 * Handle destructor.
856 *
857 * @param pvObj The object handle (only used for logging here).
 * @param pvGVMM The GVM instance pointer.
 * @param pvHandle The handle pointer.
859 */
860static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle)
861{
862 LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvGVMM, pvHandle));
863
864 /*
865 * Some quick, paranoid, input validation.
866 */
867 PGVMHANDLE pHandle = (PGVMHANDLE)pvHandle;
868 AssertPtr(pHandle);
869 PGVMM pGVMM = (PGVMM)pvGVMM;
870 Assert(pGVMM == g_pGVMM);
871 const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
872 if ( !iHandle
873 || iHandle >= RT_ELEMENTS(pGVMM->aHandles)
874 || iHandle != pHandle->iSelf)
875 {
876 SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
877 return;
878 }
879
880 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
881 AssertRC(rc);
882 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
883 AssertRC(rc);
884
885 /*
886 * This is a tad slow but a doubly linked list is too much hassle.
887 */
888 if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
889 {
890 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
891 RTSemFastMutexRelease(pGVMM->UsedLock);
892 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
893 return;
894 }
895
896 if (pGVMM->iUsedHead == iHandle)
897 pGVMM->iUsedHead = pHandle->iNext;
898 else
899 {
900 uint16_t iPrev = pGVMM->iUsedHead;
901 int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
902 while (iPrev)
903 {
904 if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
905 {
906 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
907 RTSemFastMutexRelease(pGVMM->UsedLock);
908 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
909 return;
910 }
911 if (RT_UNLIKELY(c-- <= 0))
912 {
913 iPrev = 0;
914 break;
915 }
916
917 if (pGVMM->aHandles[iPrev].iNext == iHandle)
918 break;
919 iPrev = pGVMM->aHandles[iPrev].iNext;
920 }
921 if (!iPrev)
922 {
923 SUPR0Printf("GVM: can't find the predecessor of handle %d!\n", pHandle->iSelf);
924 RTSemFastMutexRelease(pGVMM->UsedLock);
925 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
926 return;
927 }
928
929 pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
930 }
931 pHandle->iNext = 0;
932 pGVMM->cVMs--;
933
934 RTSemFastMutexRelease(pGVMM->UsedLock);
935
936 /*
937 * Do the global cleanup round.
938 */
939 PGVM pGVM = pHandle->pGVM;
940 if ( VALID_PTR(pGVM)
941 && pGVM->u32Magic == GVM_MAGIC)
942 {
943 /// @todo GMMR0CleanupVM(pGVM);
944
945 /*
946 * Do the GVMM cleanup - must be done last.
947 */
948 /* The VM and VM pages mappings/allocations. */
949 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
950 {
951 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
952 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
953 }
954
955 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
956 {
957 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
958 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
959 }
960
961 if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
962 {
963 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
964 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
965 }
966
967 if (pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ)
968 {
969 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */); AssertRC(rc);
970 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
971 }
972
973 /* the GVM structure itself. */
974 pGVM->u32Magic++;
975 RTMemFree(pGVM);
976 }
977 /* else: GVMMR0CreateVM cleanup. */
978
979 /*
980 * Free the handle.
981 * Reacquire the UsedLock here since we're updating handle fields.
982 */
983 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
984 AssertRC(rc);
985
986 pHandle->iNext = pGVMM->iFreeHead;
987 pGVMM->iFreeHead = iHandle;
988 ASMAtomicXchgPtr((void * volatile *)&pHandle->pGVM, NULL);
989 ASMAtomicXchgPtr((void * volatile *)&pHandle->pVM, NULL);
990 ASMAtomicXchgPtr((void * volatile *)&pHandle->pvObj, NULL);
991 ASMAtomicXchgPtr((void * volatile *)&pHandle->pSession, NULL);
992 ASMAtomicXchgSize(&pHandle->hEMT, NIL_RTNATIVETHREAD);
993
994 RTSemFastMutexRelease(pGVMM->UsedLock);
995 RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
996 LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
997}
998
999
1000/**
1001 * Lookup a GVM structure by its handle.
1002 *
1003 * @returns The GVM pointer on success, NULL on failure.
1004 * @param hGVM The global VM handle. Asserts on bad handle.
1005 */
1006GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
1007{
1008 PGVMM pGVMM;
1009 GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
1010
1011 /*
1012 * Validate.
1013 */
1014 AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
1015 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
1016
1017 /*
1018 * Look it up.
1019 */
1020 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1021 AssertPtrReturn(pHandle->pVM, NULL);
1022 AssertPtrReturn(pHandle->pvObj, NULL);
1023 PGVM pGVM = pHandle->pGVM;
1024 AssertPtrReturn(pGVM, NULL);
1025 AssertReturn(pGVM->pVM == pHandle->pVM, NULL);
1026
1027 return pHandle->pGVM;
1028}
1029
1030
1031/**
1032 * Lookup a GVM structure by the shared VM structure.
1033 *
1034 * @returns VBox status code.
1035 * @param pVM The shared VM structure (the ring-0 mapping).
1036 * @param ppGVM Where to store the GVM pointer.
1037 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1038 * @param fTakeUsedLock Whether to take the used lock or not.
1039 * Be very careful if not taking the lock as it's possible that
1040 * the VM will disappear then.
1041 *
1042 * @remark This will not assert on an invalid pVM but try to return silently.
1043 */
1044static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
1045{
1046 PGVMM pGVMM;
1047 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1048
1049 /*
1050 * Validate.
1051 */
1052 if (RT_UNLIKELY( !VALID_PTR(pVM)
1053 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1054 return VERR_INVALID_POINTER;
1055 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1056 || pVM->enmVMState >= VMSTATE_TERMINATED))
1057 return VERR_INVALID_POINTER;
1058
1059 uint16_t hGVM = pVM->hSelf;
1060 if (RT_UNLIKELY( hGVM == NIL_GVM_HANDLE
1061 || hGVM >= RT_ELEMENTS(pGVMM->aHandles)))
1062 return VERR_INVALID_HANDLE;
1063
1064 /*
1065 * Look it up.
1066 */
1067 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1068 PGVM pGVM;
1069 if (fTakeUsedLock)
1070 {
1071 int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
1072 AssertRCReturn(rc, rc);
1073
1074 pGVM = pHandle->pGVM;
1075 if (RT_UNLIKELY( pHandle->pVM != pVM
1076 || !VALID_PTR(pHandle->pvObj)
1077 || !VALID_PTR(pGVM)
1078 || pGVM->pVM != pVM))
1079 {
1080 RTSemFastMutexRelease(pGVMM->UsedLock);
1081 return VERR_INVALID_HANDLE;
1082 }
1083 }
1084 else
1085 {
1086 if (RT_UNLIKELY(pHandle->pVM != pVM))
1087 return VERR_INVALID_HANDLE;
1088 if (RT_UNLIKELY(!VALID_PTR(pHandle->pvObj)))
1089 return VERR_INVALID_HANDLE;
1090
1091 pGVM = pHandle->pGVM;
1092 if (RT_UNLIKELY(!VALID_PTR(pGVM)))
1093 return VERR_INVALID_HANDLE;
1094 if (RT_UNLIKELY(pGVM->pVM != pVM))
1095 return VERR_INVALID_HANDLE;
1096 }
1097
1098 *ppGVM = pGVM;
1099 *ppGVMM = pGVMM;
1100 return VINF_SUCCESS;
1101}
1102
1103
1104/**
1105 * Lookup a GVM structure by the shared VM structure.
1106 *
1107 * @returns The GVM pointer on success, NULL on failure.
1108 * @param pVM The shared VM structure (the ring-0 mapping).
1109 */
1110GVMMR0DECL(PGVM) GVMMR0ByVM(PVM pVM)
1111{
1112 PGVMM pGVMM;
1113 PGVM pGVM;
1114 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */);
1115 if (RT_SUCCESS(rc))
1116 return pGVM;
1117 AssertRC(rc);
1118 return NULL;
1119}
1120
1121
1122/**
1123 * Lookup a GVM structure by the shared VM structure
1124 * and ensure that the caller is the EMT thread.
1125 *
1126 * @returns VBox status code.
1127 * @param pVM The shared VM structure (the ring-0 mapping).
1128 * @param ppGVM Where to store the GVM pointer.
1129 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1130 * @thread EMT
1131 *
1132 * @remark This will assert in failure paths.
1133 */
1134static int gvmmR0ByVMAndEMT(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM)
1135{
1136 PGVMM pGVMM;
1137 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1138
1139 /*
1140 * Validate.
1141 */
1142 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1143 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1144
1145 uint16_t hGVM = pVM->hSelf;
1146 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
1147 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
1148
1149 /*
1150 * Look it up.
1151 */
1152 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1153 RTNATIVETHREAD hAllegedEMT = RTThreadNativeSelf();
1154 AssertMsgReturn(pHandle->hEMT == hAllegedEMT, ("hEMT %x hAllegedEMT %x\n", pHandle->hEMT, hAllegedEMT), VERR_NOT_OWNER);
1155 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
1156 AssertPtrReturn(pHandle->pvObj, VERR_INTERNAL_ERROR);
1157
1158 PGVM pGVM = pHandle->pGVM;
1159 AssertPtrReturn(pGVM, VERR_INTERNAL_ERROR);
1160 AssertReturn(pGVM->pVM == pVM, VERR_INTERNAL_ERROR);
1161 AssertReturn(pGVM->hEMT == hAllegedEMT, VERR_INTERNAL_ERROR);
1162
1163 *ppGVM = pGVM;
1164 *ppGVMM = pGVMM;
1165 return VINF_SUCCESS;
1166}
1167
1168
1169/**
1170 * Lookup a GVM structure by the shared VM structure
1171 * and ensure that the caller is the EMT thread.
1172 *
1173 * @returns VBox status code.
1174 * @param pVM The shared VM structure (the ring-0 mapping).
1175 * @param ppGVM Where to store the GVM pointer.
1176 * @thread EMT
1177 */
1178GVMMR0DECL(int) GVMMR0ByVMAndEMT(PVM pVM, PGVM *ppGVM)
1179{
1180 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
1181 PGVMM pGVMM;
1182 return gvmmR0ByVMAndEMT(pVM, ppGVM, &pGVMM);
1183}
1184
1185
1186/**
1187 * Lookup a VM by its global handle.
1188 *
1189 * @returns The VM handle on success, NULL on failure.
1190 * @param hGVM The global VM handle. Asserts on bad handle.
1191 */
1192GVMMR0DECL(PVM) GVMMR0GetVMByHandle(uint32_t hGVM)
1193{
1194 PGVM pGVM = GVMMR0ByHandle(hGVM);
1195 return pGVM ? pGVM->pVM : NULL;
1196}
1197
1198
1199/**
1200 * Looks up the VM belonging to the specified EMT thread.
1201 *
1202 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1203 * unnecessary kernel panics when the EMT thread hits an assertion. The
1204 * caller may or may not be an EMT thread.
1205 *
1206 * @returns The VM handle on success, NULL on failure.
1207 * @param hEMT The native thread handle of the EMT.
1208 * NIL_RTNATIVETHREAD means the current thread
1209 */
1210GVMMR0DECL(PVM) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
1211{
1212 /*
1213 * No assertions here as we're usually called in an AssertMsgN or
1214 * RTAssert* context.
1215 */
1216 PGVMM pGVMM = g_pGVMM;
1217 if ( !VALID_PTR(pGVMM)
1218 || pGVMM->u32Magic != GVMM_MAGIC)
1219 return NULL;
1220
1221 if (hEMT == NIL_RTNATIVETHREAD)
1222 hEMT = RTThreadNativeSelf();
1223
1224 /*
1225 * Search the handles in a linear fashion as we don't dare take the lock (assert).
1226 */
1227 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1228 if ( pGVMM->aHandles[i].hEMT == hEMT
1229 && pGVMM->aHandles[i].iSelf == i
1230 && VALID_PTR(pGVMM->aHandles[i].pvObj)
1231 && VALID_PTR(pGVMM->aHandles[i].pVM))
1232 return pGVMM->aHandles[i].pVM;
1233
1234 return NULL;
1235}
1236
1237
1238/**
1239 * This will wake up expired and soon-to-be-expired VMs.
1240 *
1241 * @returns The number of VMs that have been woken up.
1242 * @param pGVMM Pointer to the GVMM instance data.
1243 * @param u64Now The current time.
1244 */
1245static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
1246{
1247 /*
1248 * The first pass will wake up VMs which have actually expired
1249 * and look for VMs that should be woken up in the 2nd and 3rd passes.
1250 */
1251 unsigned cWoken = 0;
1252 unsigned cHalted = 0;
1253 unsigned cTodo2nd = 0;
1254 unsigned cTodo3rd = 0;
1255 for (unsigned i = pGVMM->iUsedHead;
1256 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1257 i = pGVMM->aHandles[i].iNext)
1258 {
1259 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1260 if ( VALID_PTR(pCurGVM)
1261 && pCurGVM->u32Magic == GVM_MAGIC)
1262 {
1263 uint64_t u64 = pCurGVM->gvmm.s.u64HaltExpire;
1264 if (u64)
1265 {
1266 if (u64 <= u64Now)
1267 {
1268 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1269 {
1270 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1271 AssertRC(rc);
1272 cWoken++;
1273 }
1274 }
1275 else
1276 {
1277 cHalted++;
1278 if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)
1279 cTodo2nd++;
1280 else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)
1281 cTodo3rd++;
1282 }
1283 }
1284 }
1285 }
1286
1287 if (cTodo2nd)
1288 {
1289 for (unsigned i = pGVMM->iUsedHead;
1290 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1291 i = pGVMM->aHandles[i].iNext)
1292 {
1293 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1294 if ( VALID_PTR(pCurGVM)
1295 && pCurGVM->u32Magic == GVM_MAGIC
1296 && pCurGVM->gvmm.s.u64HaltExpire
1297 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)
1298 {
1299 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1300 {
1301 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1302 AssertRC(rc);
1303 cWoken++;
1304 }
1305 }
1306 }
1307 }
1308
1309 if (cTodo3rd)
1310 {
1311 for (unsigned i = pGVMM->iUsedHead;
1312 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1313 i = pGVMM->aHandles[i].iNext)
1314 {
1315 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1316 if ( VALID_PTR(pCurGVM)
1317 && pCurGVM->u32Magic == GVM_MAGIC
1318 && pCurGVM->gvmm.s.u64HaltExpire
1319 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)
1320 {
1321 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1322 {
1323 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1324 AssertRC(rc);
1325 cWoken++;
1326 }
1327 }
1328 }
1329 }
1330
1331 return cWoken;
1332}
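/** @remarks Worked example (editor's addition) of the three wake-up windows
 * above, using the defaults from GVMMR0Init (nsEarlyWakeUp1 = 25000,
 * nsEarlyWakeUp2 = 50000). A VM whose u64HaltExpire is:
 * @code
 *      <= u64Now          -> woken in the 1st pass (actually expired)
 *      <= u64Now + 25000  -> woken in the 2nd pass (1st early round)
 *      <= u64Now + 50000  -> woken in the 3rd pass (2nd early round)
 *      anything later     -> left sleeping
 * @endcode
 */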
1333
1334
1335/**
1336 * Halt the EMT thread.
1337 *
1338 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
1339 * VERR_INTERRUPTED if a signal was scheduled for the thread.
1340 * @param pVM Pointer to the shared VM structure.
1341 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
1342 * @thread EMT.
1343 */
1344GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, uint64_t u64ExpireGipTime)
1345{
1346 LogFlow(("GVMMR0SchedHalt: pVM=%p\n", pVM));
1347
1348 /*
1349 * Validate the VM structure, state and handle.
1350 */
1351 PGVMM pGVMM;
1352 PGVM pGVM;
1353 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
1354 if (RT_FAILURE(rc))
1355 return rc;
1356 pGVM->gvmm.s.StatsSched.cHaltCalls++;
1357
1358 Assert(!pGVM->gvmm.s.u64HaltExpire);
1359
1360 /*
1361 * Take the UsedList semaphore, get the current time
1362 * and check if anyone needs waking up.
1363 * Interrupts must NOT be disabled at this point because we ask for GIP time!
1364 */
1365 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
1366 AssertRC(rc);
1367
1368 pGVM->gvmm.s.iCpuEmt = ASMGetApicId();
1369
1370 Assert(ASMGetFlags() & X86_EFL_IF);
1371 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1372 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1373
1374 /*
1375 * Go to sleep if we must...
1376 */
1377 if ( u64Now < u64ExpireGipTime
1378 && u64ExpireGipTime - u64Now > (pGVMM->cVMs > pGVMM->cVMsMeansCompany
1379 ? pGVMM->nsMinSleepCompany
1380 : pGVMM->nsMinSleepAlone))
1381 {
1382 pGVM->gvmm.s.StatsSched.cHaltBlocking++;
1383 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, u64ExpireGipTime);
1384 RTSemFastMutexRelease(pGVMM->UsedLock);
1385
1386 uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000;
1387 rc = RTSemEventMultiWaitNoResume(pGVM->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);
1388 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, 0);
1389 if (rc == VERR_TIMEOUT)
1390 {
1391 pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
1392 rc = VINF_SUCCESS;
1393 }
1394 }
1395 else
1396 {
1397 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
1398 RTSemFastMutexRelease(pGVMM->UsedLock);
1399 }
1400
1401 /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
1402 RTSemEventMultiReset(pGVM->gvmm.s.HaltEventMulti);
1403
1404 return rc;
1405}
1406
1407
1408/**
1409 * Wakes up the halted EMT thread so it can service a pending request.
1410 *
1411 * @returns VINF_SUCCESS if the EMT thread was woken up.
1412 * VINF_GVM_NOT_BLOCKED if the EMT thread wasn't blocked.
1413 * @param pVM Pointer to the shared VM structure.
1414 * @thread Any but EMT.
1415 */
1416GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM)
1417{
1418 /*
1419 * Validate input and take the UsedLock.
1420 */
1421 PGVM pGVM;
1422 PGVMM pGVMM;
1423 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /* fTakeUsedLock */);
1424 if (RT_SUCCESS(rc))
1425 {
1426 pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
1427
1428 /*
1429 * Signal the semaphore regardless of whether it's currently blocked on it.
1430 *
1431 * The reason for this is that there is absolutely no way we can be 100%
1432 * certain that it isn't *about to* go to sleep on it and just got
1433 * delayed a bit en route. So, we will always signal the semaphore when
1434 * it is flagged as halted in the GVMM.
1435 */
1436 if (pGVM->gvmm.s.u64HaltExpire)
1437 {
1438 rc = VINF_SUCCESS;
1439 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, 0);
1440 }
1441 else
1442 {
1443 rc = VINF_GVM_NOT_BLOCKED;
1444 pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
1445 }
1446
1447 int rc2 = RTSemEventMultiSignal(pGVM->gvmm.s.HaltEventMulti);
1448 AssertRC(rc2);
1449
1450 /*
1451 * While we're here, do a round of scheduling.
1452 */
1453 Assert(ASMGetFlags() & X86_EFL_IF);
1454 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1455 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1456
1457
1458 rc2 = RTSemFastMutexRelease(pGVMM->UsedLock);
1459 AssertRC(rc2);
1460 }
1461
1462 LogFlow(("GVMMR0SchedWakeUp: returns %Rrc\n", rc));
1463 return rc;
1464}
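/** @remarks Illustrative sketch (editor's addition) of the halt / wake-up
 * protocol between the EMT and a thread delivering a request to it:
 * @code
 *      EMT:                                 Waker:
 *      GVMMR0SchedHalt(pVM, uExpire)        GVMMR0SchedWakeUp(pVM)
 *        - publishes u64HaltExpire            - clears u64HaltExpire
 *        - waits on HaltEventMulti            - signals HaltEventMulti
 *        - resets HaltEventMulti on return
 * @endcode
 * The unconditional signal in GVMMR0SchedWakeUp covers the race where the EMT
 * has already published u64HaltExpire but hasn't actually begun waiting yet.
 */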
1465
1466
1467/**
1468 * Poll the schedule to see if someone else should get a chance to run.
1469 *
1470 * This is a bit hackish and will not work too well if the machine is
1471 * under heavy load from non-VM processes.
1472 *
1473 * @returns VINF_SUCCESS if not yielded.
1474 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
1475 * @param pVM Pointer to the shared VM structure.
1477 * @param fYield Whether to yield or not.
1478 * This is for when we're spinning in the halt loop.
1479 * @thread EMT.
1480 */
1481GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, bool fYield)
1482{
1483 /*
1484 * Validate input.
1485 */
1486 PGVM pGVM;
1487 PGVMM pGVMM;
1488 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
1489 if (RT_SUCCESS(rc))
1490 {
1491 rc = RTSemFastMutexRequest(pGVMM->UsedLock);
1492 AssertRC(rc);
1493 pGVM->gvmm.s.StatsSched.cPollCalls++;
1494
1495 Assert(ASMGetFlags() & X86_EFL_IF);
1496 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1497
1498 if (!fYield)
1499 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1500 else
1501 {
1502 /** @todo implement this... */
1503 rc = VERR_NOT_IMPLEMENTED;
1504 }
1505
1506 RTSemFastMutexRelease(pGVMM->UsedLock);
1507 }
1508
1509 LogFlow(("GVMMR0SchedPoll: returns %Rrc\n", rc));
1510 return rc;
1511}
1512
1513
1514
1515/**
1516 * Retrieves the GVMM statistics visible to the caller.
1517 *
1518 * @returns VBox status code.
1519 *
1520 * @param pStats Where to put the statistics.
1521 * @param pSession The current session.
1522 * @param pVM The VM to obtain statistics for. Optional.
1523 */
1524GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM)
1525{
1526 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pVM=%p\n", pStats, pSession, pVM));
1527
1528 /*
1529 * Validate input.
1530 */
1531 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
1532 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
1533 pStats->cVMs = 0; /* (crash before taking the sem...) */
1534
1535 /*
1536 * Take the lock and get the VM statistics.
1537 */
1538 PGVMM pGVMM;
1539 if (pVM)
1540 {
1541 PGVM pGVM;
1542 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /*fTakeUsedLock*/);
1543 if (RT_FAILURE(rc))
1544 return rc;
1545 pStats->SchedVM = pGVM->gvmm.s.StatsSched;
1546 }
1547 else
1548 {
1549 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1550 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
1551
1552 int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
1553 AssertRCReturn(rc, rc);
1554 }
1555
1556 /*
1557 * Enumerate the VMs and add the ones visible to the caller to the statistics.
1558 */
1559 pStats->cVMs = 0;
1560 memset(&pStats->SchedSum, 0, sizeof(pStats->SchedSum));
1561
1562 for (unsigned i = pGVMM->iUsedHead;
1563 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1564 i = pGVMM->aHandles[i].iNext)
1565 {
1566 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1567 void *pvObj = pGVMM->aHandles[i].pvObj;
1568 if ( VALID_PTR(pvObj)
1569 && VALID_PTR(pGVM)
1570 && pGVM->u32Magic == GVM_MAGIC
1571 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
1572 {
1573 pStats->cVMs++;
1574
1575 pStats->SchedSum.cHaltCalls += pGVM->gvmm.s.StatsSched.cHaltCalls;
1576 pStats->SchedSum.cHaltBlocking += pGVM->gvmm.s.StatsSched.cHaltBlocking;
1577 pStats->SchedSum.cHaltTimeouts += pGVM->gvmm.s.StatsSched.cHaltTimeouts;
1578 pStats->SchedSum.cHaltNotBlocking += pGVM->gvmm.s.StatsSched.cHaltNotBlocking;
1579 pStats->SchedSum.cHaltWakeUps += pGVM->gvmm.s.StatsSched.cHaltWakeUps;
1580
1581 pStats->SchedSum.cWakeUpCalls += pGVM->gvmm.s.StatsSched.cWakeUpCalls;
1582 pStats->SchedSum.cWakeUpNotHalted += pGVM->gvmm.s.StatsSched.cWakeUpNotHalted;
1583 pStats->SchedSum.cWakeUpWakeUps += pGVM->gvmm.s.StatsSched.cWakeUpWakeUps;
1584
1585 pStats->SchedSum.cPollCalls += pGVM->gvmm.s.StatsSched.cPollCalls;
1586 pStats->SchedSum.cPollHalts += pGVM->gvmm.s.StatsSched.cPollHalts;
1587 pStats->SchedSum.cPollWakeUps += pGVM->gvmm.s.StatsSched.cPollWakeUps;
1588 }
1589 }
1590
1591 RTSemFastMutexRelease(pGVMM->UsedLock);
1592
1593 return VINF_SUCCESS;
1594}
1595
1596
1597/**
1598 * VMMR0 request wrapper for GVMMR0QueryStatistics.
1599 *
1600 * @returns see GVMMR0QueryStatistics.
1601 * @param pVM Pointer to the shared VM structure. Optional.
1602 * @param pReq The request packet.
1603 */
1604GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PVM pVM, PGVMMQUERYSTATISTICSSREQ pReq)
1605{
1606 /*
1607 * Validate input and pass it on.
1608 */
1609 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1610 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1611
1612 return GVMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pVM);
1613}
1614
1615
1616/**
1617 * Resets the specified GVMM statistics.
1618 *
1619 * @returns VBox status code.
1620 *
1621 * @param pStats Which statistics to reset, that is, non-zero fields indicate which to reset.
1622 * @param pSession The current session.
1623 * @param pVM The VM to reset statistics for. Optional.
1624 */
1625GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM)
1626{
1627 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pVM=%p\n", pStats, pSession, pVM));
1628
1629 /*
1630 * Validate input.
1631 */
1632 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
1633 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
1634
1635 /*
1636 * Take the lock and get the VM statistics.
1637 */
1638 PGVMM pGVMM;
1639 if (pVM)
1640 {
1641 PGVM pGVM;
1642 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /*fTakeUsedLock*/);
1643 if (RT_FAILURE(rc))
1644 return rc;
1645# define MAYBE_RESET_FIELD(field) \
1646 do { if (pStats->SchedVM. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
1647 MAYBE_RESET_FIELD(cHaltCalls);
1648 MAYBE_RESET_FIELD(cHaltBlocking);
1649 MAYBE_RESET_FIELD(cHaltTimeouts);
1650 MAYBE_RESET_FIELD(cHaltNotBlocking);
1651 MAYBE_RESET_FIELD(cHaltWakeUps);
1652 MAYBE_RESET_FIELD(cWakeUpCalls);
1653 MAYBE_RESET_FIELD(cWakeUpNotHalted);
1654 MAYBE_RESET_FIELD(cWakeUpWakeUps);
1655 MAYBE_RESET_FIELD(cPollCalls);
1656 MAYBE_RESET_FIELD(cPollHalts);
1657 MAYBE_RESET_FIELD(cPollWakeUps);
1658# undef MAYBE_RESET_FIELD
1659 }
1660 else
1661 {
1662 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1663
1664 int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
1665 AssertRCReturn(rc, rc);
1666 }
1667
1668 /*
1669 * Enumerate the VMs and reset the statistics for the ones visible to the caller.
1670 */
1671 if (ASMMemIsAll8(&pStats->SchedSum, sizeof(pStats->SchedSum), 0))
1672 {
1673 for (unsigned i = pGVMM->iUsedHead;
1674 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1675 i = pGVMM->aHandles[i].iNext)
1676 {
1677 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1678 void *pvObj = pGVMM->aHandles[i].pvObj;
1679 if ( VALID_PTR(pvObj)
1680 && VALID_PTR(pGVM)
1681 && pGVM->u32Magic == GVM_MAGIC
1682 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
1683 {
1684# define MAYBE_RESET_FIELD(field) \
1685 do { if (pStats->SchedSum. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
1686 MAYBE_RESET_FIELD(cHaltCalls);
1687 MAYBE_RESET_FIELD(cHaltBlocking);
1688 MAYBE_RESET_FIELD(cHaltTimeouts);
1689 MAYBE_RESET_FIELD(cHaltNotBlocking);
1690 MAYBE_RESET_FIELD(cHaltWakeUps);
1691 MAYBE_RESET_FIELD(cWakeUpCalls);
1692 MAYBE_RESET_FIELD(cWakeUpNotHalted);
1693 MAYBE_RESET_FIELD(cWakeUpWakeUps);
1694 MAYBE_RESET_FIELD(cPollCalls);
1695 MAYBE_RESET_FIELD(cPollHalts);
1696 MAYBE_RESET_FIELD(cPollWakeUps);
1697# undef MAYBE_RESET_FIELD
1698 }
1699 }
1700 }
1701
1702 RTSemFastMutexRelease(pGVMM->UsedLock);
1703
1704 return VINF_SUCCESS;
1705}
1706
1707
1708/**
1709 * VMMR0 request wrapper for GVMMR0ResetStatistics.
1710 *
1711 * @returns see GVMMR0ResetStatistics.
1712 * @param pVM Pointer to the shared VM structure. Optional.
1713 * @param pReq The request packet.
1714 */
1715GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PVM pVM, PGVMMRESETSTATISTICSSREQ pReq)
1716{
1717 /*
1718 * Validate input and pass it on.
1719 */
1720 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1721 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1722
1723 return GVMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pVM);
1724}
1725