VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@ 15741

Last change on this file since 15741 was 14821, checked in by vboxsync, 16 years ago

GVMMR0.cpp: cleanup bugfix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 56.9 KB
Line 
1/* $Id: GVMMR0.cpp 14821 2008-11-30 01:08:47Z vboxsync $ */
2/** @file
3 * GVMM - Global VM Manager.
4 */
5
6/*
7 * Copyright (C) 2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/** @page pg_gvmm GVMM - The Global VM Manager
24 *
25 * The Global VM Manager lives in ring-0. It's main function at the moment
26 * is to manage a list of all running VMs, keep a ring-0 only structure (GVM)
27 * for each of them, and assign them unique identifiers (so GMM can track
28 * page owners). The idea for the future is to add an idle priority kernel
29 * thread that can take care of tasks like page sharing.
30 *
31 * The GVMM will create a ring-0 object for each VM when it's registered,
32 * this is both for session cleanup purposes and for having a point where
33 * it's possible to implement usage polices later (in SUPR0ObjRegister).
34 */
35
36
37/*******************************************************************************
38* Header Files *
39*******************************************************************************/
40#define LOG_GROUP LOG_GROUP_GVMM
41#include <VBox/gvmm.h>
42#include "GVMMR0Internal.h"
43#include <VBox/gvm.h>
44#include <VBox/vm.h>
45#include <VBox/vmm.h>
46#include <VBox/err.h>
47#include <iprt/alloc.h>
48#include <iprt/semaphore.h>
49#include <iprt/time.h>
50#include <VBox/log.h>
51#include <iprt/thread.h>
52#include <iprt/param.h>
53#include <iprt/string.h>
54#include <iprt/assert.h>
55#include <iprt/mem.h>
56#include <iprt/memobj.h>
57
58
59/*******************************************************************************
60* Structures and Typedefs *
61*******************************************************************************/
62
/**
 * Global VM handle.
 *
 * One entry in the GVMM::aHandles table.  An entry ties a VM to its ring-0
 * GVM structure, driver session and EMT thread, and doubles as a node in
 * either the free or the used singly linked handle chain (via iNext).
 */
typedef struct GVMHANDLE
{
    /** The index of the next handle in the list (free or used). (0 is nil.) */
    uint16_t volatile   iNext;
    /** Our own index / handle value. */
    uint16_t            iSelf;
    /** The pointer to the ring-0 only (aka global) VM structure. */
    PGVM                pGVM;
    /** The ring-0 mapping of the shared VM instance data. */
    PVM                 pVM;
    /** The virtual machine object (SUPR0ObjRegister). */
    void               *pvObj;
    /** The session this VM is associated with. */
    PSUPDRVSESSION      pSession;
    /** The ring-0 handle of the EMT thread.
     * This is used for assertions and similar cases where we need to find the VM handle. */
    RTNATIVETHREAD      hEMT;
} GVMHANDLE;
/** Pointer to a global VM handle. */
typedef GVMHANDLE *PGVMHANDLE;
86
/**
 * The GVMM instance data.
 *
 * There is only ever one instance (pointed to by g_pGVMM); it is created by
 * GVMMR0Init() and torn down by GVMMR0Term().
 */
typedef struct GVMM
{
    /** Eyecatcher / magic. */
    uint32_t            u32Magic;
    /** The index of the head of the free handle chain. (0 is nil.) */
    uint16_t volatile   iFreeHead;
    /** The index of the head of the active handle chain. (0 is nil.) */
    uint16_t volatile   iUsedHead;
    /** The number of VMs. */
    uint16_t volatile   cVMs;
//    /** The number of halted EMT threads. */
//    uint16_t volatile   cHaltedEMTs;
    /** The lock used to serialize VM creation, destruction and associated events that
     * isn't performance critical. Owners may acquire the list lock. */
    RTSEMFASTMUTEX      CreateDestroyLock;
    /** The lock used to serialize used list updates and accesses.
     * This indirectly includes scheduling since the scheduler will have to walk the
     * used list to examine running VMs. Owners may not acquire any other locks. */
    RTSEMFASTMUTEX      UsedLock;
    /** The handle array.
     * The size of this array defines the maximum number of currently running VMs.
     * The first entry is unused as it represents the NIL handle. */
    GVMHANDLE           aHandles[128];

    /** @gcfgm{/GVMM/cVMsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
     * The number of VMs that means we no longer consider ourselves alone on a CPU/Core.
     */
    uint32_t            cVMsMeansCompany;
    /** @gcfgm{/GVMM/MinSleepAlone, 32-bit, 0, 100000000, 750000, ns}
     * The minimum sleep time for when we're alone, in nano seconds.
     */
    uint32_t            nsMinSleepAlone;
    /** @gcfgm{/GVMM/MinSleepCompany, 32-bit, 0, 100000000, 15000, ns}
     * The minimum sleep time for when we've got company, in nano seconds.
     */
    uint32_t            nsMinSleepCompany;
    /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
     * The limit for the first round of early wakeups, given in nano seconds.
     */
    uint32_t            nsEarlyWakeUp1;
    /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
     * The limit for the second round of early wakeups, given in nano seconds.
     */
    uint32_t            nsEarlyWakeUp2;
} GVMM;
/** Pointer to the GVMM instance data. */
typedef GVMM *PGVMM;

/** The GVMM::u32Magic value (Charlie Haden). */
#define GVMM_MAGIC 0x19370806
140
141
142
143/*******************************************************************************
144* Global Variables *
145*******************************************************************************/
/** Pointer to the GVMM instance data.
 * (Just my general dislike for global variables.) */
static PGVMM g_pGVMM = NULL;

/** Macro for obtaining and validating the g_pGVMM pointer.
 * On failure it will return from the invoking function with the specified return value.
 * The magic check also catches use after GVMMR0Term(), which invalidates u32Magic.
 *
 * @param   pGVMM   The name of the pGVMM variable.
 * @param   rc      The return value on failure. Use VERR_INTERNAL_ERROR for
 *                  VBox status codes.
 */
#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
    do { \
        (pGVMM) = g_pGVMM;\
        AssertPtrReturn((pGVMM), (rc)); \
        AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
    } while (0)

/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
 * On failure it will return from the invoking function.
 *
 * @param   pGVMM   The name of the pGVMM variable.
 */
#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
    do { \
        (pGVMM) = g_pGVMM;\
        AssertPtrReturnVoid((pGVMM)); \
        AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
    } while (0)
175
176
177/*******************************************************************************
178* Internal Functions *
179*******************************************************************************/
180static void gvmmR0InitPerVMData(PGVM pGVM);
181static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
182static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
183static int gvmmR0ByVMAndEMT(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM);
184
185
186/**
187 * Initializes the GVMM.
188 *
189 * This is called while owninng the loader sempahore (see supdrvIOCtl_LdrLoad()).
190 *
191 * @returns VBox status code.
192 */
193GVMMR0DECL(int) GVMMR0Init(void)
194{
195 LogFlow(("GVMMR0Init:\n"));
196
197 /*
198 * Allocate and initialize the instance data.
199 */
200 PGVMM pGVMM = (PGVMM)RTMemAllocZ(sizeof(*pGVMM));
201 if (!pGVMM)
202 return VERR_NO_MEMORY;
203 int rc = RTSemFastMutexCreate(&pGVMM->CreateDestroyLock);
204 if (RT_SUCCESS(rc))
205 {
206 rc = RTSemFastMutexCreate(&pGVMM->UsedLock);
207 if (RT_SUCCESS(rc))
208 {
209 pGVMM->u32Magic = GVMM_MAGIC;
210 pGVMM->iUsedHead = 0;
211 pGVMM->iFreeHead = 1;
212
213 /* the nil handle */
214 pGVMM->aHandles[0].iSelf = 0;
215 pGVMM->aHandles[0].iNext = 0;
216
217 /* the tail */
218 unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
219 pGVMM->aHandles[i].iSelf = i;
220 pGVMM->aHandles[i].iNext = 0; /* nil */
221
222 /* the rest */
223 while (i-- > 1)
224 {
225 pGVMM->aHandles[i].iSelf = i;
226 pGVMM->aHandles[i].iNext = i + 1;
227 }
228
229 /* The default configuration values. */
230 pGVMM->cVMsMeansCompany = 1; /** @todo should be adjusted to relative to the cpu count or something... */
231 pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
232 pGVMM->nsMinSleepCompany = 15000 /* ns (0.015 ms) */;
233 pGVMM->nsEarlyWakeUp1 = 25000 /* ns (0.025 ms) */;
234 pGVMM->nsEarlyWakeUp2 = 50000 /* ns (0.050 ms) */;
235
236 g_pGVMM = pGVMM;
237 LogFlow(("GVMMR0Init: pGVMM=%p\n", pGVMM));
238 return VINF_SUCCESS;
239 }
240
241 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
242 }
243
244 RTMemFree(pGVMM);
245 return rc;
246}
247
248
249/**
250 * Terminates the GVM.
251 *
252 * This is called while owning the loader semaphore (see supdrvLdrFree()).
253 * And unless something is wrong, there should be absolutely no VMs
254 * registered at this point.
255 */
256GVMMR0DECL(void) GVMMR0Term(void)
257{
258 LogFlow(("GVMMR0Term:\n"));
259
260 PGVMM pGVMM = g_pGVMM;
261 g_pGVMM = NULL;
262 if (RT_UNLIKELY(!VALID_PTR(pGVMM)))
263 {
264 SUPR0Printf("GVMMR0Term: pGVMM=%p\n", pGVMM);
265 return;
266 }
267
268 pGVMM->u32Magic++;
269
270 RTSemFastMutexDestroy(pGVMM->UsedLock);
271 pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
272 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
273 pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
274
275 pGVMM->iFreeHead = 0;
276 if (pGVMM->iUsedHead)
277 {
278 SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs);
279 pGVMM->iUsedHead = 0;
280 }
281
282 RTMemFree(pGVMM);
283}
284
285
286/**
287 * A quick hack for setting global config values.
288 *
289 * @returns VBox status code.
290 *
291 * @param pSession The session handle. Used for authentication.
292 * @param pszName The variable name.
293 * @param u64Value The new value.
294 */
295GVMMR0DECL(int) GVMMR0SetConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t u64Value)
296{
297 /*
298 * Validate input.
299 */
300 PGVMM pGVMM;
301 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
302 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
303 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
304
305 /*
306 * String switch time!
307 */
308 if (strncmp(pszName, "/GVMM/", sizeof("/GVMM/") - 1))
309 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
310 int rc = VINF_SUCCESS;
311 pszName += sizeof("/GVMM/") - 1;
312 if (!strcmp(pszName, "cVMsMeansCompany"))
313 {
314 if (u64Value <= UINT32_MAX)
315 pGVMM->cVMsMeansCompany = u64Value;
316 else
317 rc = VERR_OUT_OF_RANGE;
318 }
319 else if (!strcmp(pszName, "MinSleepAlone"))
320 {
321 if (u64Value <= 100000000)
322 pGVMM->nsMinSleepAlone = u64Value;
323 else
324 rc = VERR_OUT_OF_RANGE;
325 }
326 else if (!strcmp(pszName, "MinSleepCompany"))
327 {
328 if (u64Value <= 100000000)
329 pGVMM->nsMinSleepCompany = u64Value;
330 else
331 rc = VERR_OUT_OF_RANGE;
332 }
333 else if (!strcmp(pszName, "EarlyWakeUp1"))
334 {
335 if (u64Value <= 100000000)
336 pGVMM->nsEarlyWakeUp1 = u64Value;
337 else
338 rc = VERR_OUT_OF_RANGE;
339 }
340 else if (!strcmp(pszName, "EarlyWakeUp2"))
341 {
342 if (u64Value <= 100000000)
343 pGVMM->nsEarlyWakeUp2 = u64Value;
344 else
345 rc = VERR_OUT_OF_RANGE;
346 }
347 else
348 rc = VERR_CFGM_VALUE_NOT_FOUND;
349 return rc;
350}
351
352
/**
 * A quick hack for getting global config values.
 *
 * @returns VBox status code.
 *
 * @param   pSession    The session handle. Used for authentication.
 * @param   pszName     The variable name, must start with "/GVMM/".
 * @param   pu64Value   Where to store the current value on success.
 */
GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
    /*
     * Validate input.
     */
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
    AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    AssertPtrReturn(pu64Value, VERR_INVALID_POINTER);

    /*
     * String switch time!
     */
    if (strncmp(pszName, "/GVMM/", sizeof("/GVMM/") - 1))
        return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
    int rc = VINF_SUCCESS;
    pszName += sizeof("/GVMM/") - 1;
    if (!strcmp(pszName, "cVMsMeansCompany"))
        *pu64Value = pGVMM->cVMsMeansCompany;
    else if (!strcmp(pszName, "MinSleepAlone"))
        *pu64Value = pGVMM->nsMinSleepAlone;
    else if (!strcmp(pszName, "MinSleepCompany"))
        *pu64Value = pGVMM->nsMinSleepCompany;
    else if (!strcmp(pszName, "EarlyWakeUp1"))
        *pu64Value = pGVMM->nsEarlyWakeUp1;
    else if (!strcmp(pszName, "EarlyWakeUp2"))
        *pu64Value = pGVMM->nsEarlyWakeUp2;
    else
        rc = VERR_CFGM_VALUE_NOT_FOUND;
    return rc;
}
394
395
396/**
397 * Try acquire the 'used' lock.
398 *
399 * @returns IPRT status code, see RTSemFastMutexRequest.
400 * @param pGVMM The GVMM instance data.
401 */
402DECLINLINE(int) gvmmR0UsedLock(PGVMM pGVMM)
403{
404 LogFlow(("++gvmmR0UsedLock(%p)\n", pGVMM));
405 int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
406 LogFlow(("gvmmR0UsedLock(%p)->%Rrc\n", pGVMM, rc));
407 return rc;
408}
409
410
411/**
412 * Release the 'used' lock.
413 *
414 * @returns IPRT status code, see RTSemFastMutexRelease.
415 * @param pGVMM The GVMM instance data.
416 */
417DECLINLINE(int) gvmmR0UsedUnlock(PGVMM pGVMM)
418{
419 LogFlow(("--gvmmR0UsedUnlock(%p)\n", pGVMM));
420 int rc = RTSemFastMutexRelease(pGVMM->UsedLock);
421 AssertRC(rc);
422 return rc;
423}
424
425
426/**
427 * Try acquire the 'create & destroy' lock.
428 *
429 * @returns IPRT status code, see RTSemFastMutexRequest.
430 * @param pGVMM The GVMM instance data.
431 */
432DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
433{
434 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
435 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
436 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
437 return rc;
438}
439
440
441/**
442 * Release the 'create & destroy' lock.
443 *
444 * @returns IPRT status code, see RTSemFastMutexRequest.
445 * @param pGVMM The GVMM instance data.
446 */
447DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
448{
449 LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
450 int rc = RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
451 AssertRC(rc);
452 return rc;
453}
454
455
456/**
457 * Request wrapper for the GVMMR0CreateVM API.
458 *
459 * @returns VBox status code.
460 * @param pReq The request buffer.
461 */
462GVMMR0DECL(int) GVMMR0CreateVMReq(PGVMMCREATEVMREQ pReq)
463{
464 /*
465 * Validate the request.
466 */
467 if (!VALID_PTR(pReq))
468 return VERR_INVALID_POINTER;
469 if (pReq->Hdr.cbReq != sizeof(*pReq))
470 return VERR_INVALID_PARAMETER;
471 if (!VALID_PTR(pReq->pSession))
472 return VERR_INVALID_POINTER;
473
474 /*
475 * Execute it.
476 */
477 PVM pVM;
478 pReq->pVMR0 = NULL;
479 pReq->pVMR3 = NIL_RTR3PTR;
480 int rc = GVMMR0CreateVM(pReq->pSession, pReq->cCPUs, &pVM);
481 if (RT_SUCCESS(rc))
482 {
483 pReq->pVMR0 = pVM;
484 pReq->pVMR3 = pVM->pVMR3;
485 }
486 return rc;
487}
488
489
490/**
491 * Allocates the VM structure and registers it with GVM.
492 *
493 * The caller will become the VM owner and there by the EMT.
494 *
495 * @returns VBox status code.
496 * @param pSession The support driver session.
497 * @param cCPUs Number of virtual CPUs for the new VM.
498 * @param ppVM Where to store the pointer to the VM structure.
499 *
500 * @thread EMT.
501 */
502GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCPUs, PVM *ppVM)
503{
504 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
505 PGVMM pGVMM;
506 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
507
508 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
509 *ppVM = NULL;
510
511 if ( cCPUs == 0
512 || cCPUs > VMCPU_MAX_CPU_COUNT)
513 return VERR_INVALID_PARAMETER;
514
515 RTNATIVETHREAD hEMT = RTThreadNativeSelf();
516 AssertReturn(hEMT != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR);
517
518 /*
519 * The whole allocation process is protected by the lock.
520 */
521 int rc = gvmmR0CreateDestroyLock(pGVMM);
522 AssertRCReturn(rc, rc);
523
524 /*
525 * Allocate a handle first so we don't waste resources unnecessarily.
526 */
527 uint16_t iHandle = pGVMM->iFreeHead;
528 if (iHandle)
529 {
530 PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
531
532 /* consistency checks, a bit paranoid as always. */
533 if ( !pHandle->pVM
534 && !pHandle->pGVM
535 && !pHandle->pvObj
536 && pHandle->iSelf == iHandle)
537 {
538 pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
539 if (pHandle->pvObj)
540 {
541 /*
542 * Move the handle from the free to used list and perform permission checks.
543 */
544 rc = gvmmR0UsedLock(pGVMM);
545 AssertRC(rc);
546
547 pGVMM->iFreeHead = pHandle->iNext;
548 pHandle->iNext = pGVMM->iUsedHead;
549 pGVMM->iUsedHead = iHandle;
550 pGVMM->cVMs++;
551
552 pHandle->pVM = NULL;
553 pHandle->pGVM = NULL;
554 pHandle->pSession = pSession;
555 pHandle->hEMT = NIL_RTNATIVETHREAD;
556
557 gvmmR0UsedUnlock(pGVMM);
558
559 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
560 if (RT_SUCCESS(rc))
561 {
562 /*
563 * Allocate the global VM structure (GVM) and initialize it.
564 */
565 PGVM pGVM = (PGVM)RTMemAllocZ(sizeof(*pGVM));
566 if (pGVM)
567 {
568 pGVM->u32Magic = GVM_MAGIC;
569 pGVM->hSelf = iHandle;
570 pGVM->hEMT = NIL_RTNATIVETHREAD;
571 pGVM->pVM = NULL;
572
573 gvmmR0InitPerVMData(pGVM);
574 /* GMMR0InitPerVMData(pGVM); - later */
575
576 /*
577 * Allocate the shared VM structure and associated page array.
578 */
579 const size_t cbVM = RT_UOFFSETOF(VM, aCpus[cCPUs]);
580 const size_t cPages = RT_ALIGN(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
581 rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
582 if (RT_SUCCESS(rc))
583 {
584 PVM pVM = (PVM)RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj); AssertPtr(pVM);
585 memset(pVM, 0, cPages << PAGE_SHIFT);
586 pVM->enmVMState = VMSTATE_CREATING;
587 pVM->pVMR0 = pVM;
588 pVM->pSession = pSession;
589 pVM->hSelf = iHandle;
590 pVM->cbSelf = cbVM;
591 pVM->cCPUs = cCPUs;
592 pVM->offVMCPU = RT_UOFFSETOF(VM, aCpus);
593
594 rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
595 if (RT_SUCCESS(rc))
596 {
597 PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
598 for (size_t iPage = 0; iPage < cPages; iPage++)
599 {
600 paPages[iPage].uReserved = 0;
601 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
602 Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
603 }
604
605 /*
606 * Map them into ring-3.
607 */
608 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
609 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
610 if (RT_SUCCESS(rc))
611 {
612 pVM->pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
613 AssertPtr((void *)pVM->pVMR3);
614
615 /* Initialize all the VM pointers. */
616 for (uint32_t i = 0; i < cCPUs; i++)
617 {
618 pVM->aCpus[i].pVMR0 = pVM;
619 pVM->aCpus[i].pVMR3 = pVM->pVMR3;
620 }
621
622 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1, 0,
623 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
624 if (RT_SUCCESS(rc))
625 {
626 pVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
627 AssertPtr((void *)pVM->paVMPagesR3);
628
629 /* complete the handle - take the UsedLock sem just to be careful. */
630 rc = gvmmR0UsedLock(pGVMM);
631 AssertRC(rc);
632
633 pHandle->pVM = pVM;
634 pHandle->pGVM = pGVM;
635 pHandle->hEMT = hEMT;
636 pGVM->pVM = pVM;
637 pGVM->hEMT = hEMT;
638
639 gvmmR0UsedUnlock(pGVMM);
640 gvmmR0CreateDestroyUnlock(pGVMM);
641
642 *ppVM = pVM;
643 Log(("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVM->pVMR3, pGVM, iHandle));
644 return VINF_SUCCESS;
645 }
646
647 RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
648 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
649 }
650 RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */);
651 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
652 }
653 RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */);
654 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
655 }
656 }
657 }
658 /* else: The user wasn't permitted to create this VM. */
659
660 /*
661 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
662 * object reference here. A little extra mess because of non-recursive lock.
663 */
664 void *pvObj = pHandle->pvObj;
665 pHandle->pvObj = NULL;
666 gvmmR0CreateDestroyUnlock(pGVMM);
667
668 SUPR0ObjRelease(pvObj, pSession);
669
670 SUPR0Printf("GVMMR0CreateVM: failed, rc=%d\n", rc);
671 return rc;
672 }
673
674 rc = VERR_NO_MEMORY;
675 }
676 else
677 rc = VERR_INTERNAL_ERROR;
678 }
679 else
680 rc = VERR_GVM_TOO_MANY_VMS;
681
682 gvmmR0CreateDestroyUnlock(pGVMM);
683 return rc;
684}
685
686
/**
 * Initializes the per VM data belonging to GVMM.
 *
 * Called from GVMMR0CreateVM() right after the GVM structure has been
 * zero-allocated; sets every GVMM member to its nil/initial value so the
 * cleanup code can tell what has and what hasn't been allocated yet.
 *
 * @param   pGVM    Pointer to the global VM structure.
 */
static void gvmmR0InitPerVMData(PGVM pGVM)
{
    /* The GVMM data must fit within the padding reserved for it in GVM. */
    AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
    Assert(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
    /* Memory objects for the shared VM structure and its page array. */
    pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
    pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
    pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
    pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
    /* The halt event semaphore is created later, by GVMMR0InitVM(). */
    pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
    /* Ring-0 init/term progress flags consulted by gmmR0CleanupVM(). */
    pGVM->gvmm.s.fDoneVMMR0Init = false;
    pGVM->gvmm.s.fDoneVMMR0Term = false;
}
704
705
706/**
707 * Does the VM initialization.
708 *
709 * @returns VBox status code.
710 * @param pVM Pointer to the shared VM structure.
711 */
712GVMMR0DECL(int) GVMMR0InitVM(PVM pVM)
713{
714 LogFlow(("GVMMR0InitVM: pVM=%p\n", pVM));
715
716 /*
717 * Validate the VM structure, state and handle.
718 */
719 PGVM pGVM;
720 PGVMM pGVMM;
721 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
722 if (RT_SUCCESS(rc))
723 {
724 if ( !pGVM->gvmm.s.fDoneVMMR0Init
725 && pGVM->gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
726 {
727 rc = RTSemEventMultiCreate(&pGVM->gvmm.s.HaltEventMulti);
728 if (RT_FAILURE(rc))
729 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
730 }
731 else
732 rc = VERR_WRONG_ORDER;
733 }
734
735 LogFlow(("GVMMR0InitVM: returns %Rrc\n", rc));
736 return rc;
737}
738
739
740/**
741 * Indicates that we're done with the ring-0 initialization
742 * of the VM.
743 *
744 * @param pVM Pointer to the shared VM structure.
745 */
746GVMMR0DECL(void) GVMMR0DoneInitVM(PVM pVM)
747{
748 /* Validate the VM structure, state and handle. */
749 PGVM pGVM;
750 PGVMM pGVMM;
751 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
752 AssertRCReturnVoid(rc);
753
754 /* Set the indicator. */
755 pGVM->gvmm.s.fDoneVMMR0Init = true;
756}
757
758
759/**
760 * Indicates that we're doing the ring-0 termination of the VM.
761 *
762 * @returns true if termination hasn't been done already, false if it has.
763 * @param pVM Pointer to the shared VM structure.
764 * @param pGVM Pointer to the global VM structure. Optional.
765 */
766GVMMR0DECL(bool) GVMMR0DoingTermVM(PVM pVM, PGVM pGVM)
767{
768 /* Validate the VM structure, state and handle. */
769 AssertPtrNullReturn(pGVM, false);
770 AssertReturn(!pGVM || pGVM->u32Magic == GVM_MAGIC, false);
771 if (!pGVM)
772 {
773 PGVMM pGVMM;
774 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
775 AssertRCReturn(rc, false);
776 }
777
778 /* Set the indicator. */
779 if (pGVM->gvmm.s.fDoneVMMR0Term)
780 return false;
781 pGVM->gvmm.s.fDoneVMMR0Term = true;
782 return true;
783}
784
785
/**
 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
 *
 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
 * and the caller is not the EMT thread, unfortunately. For security reasons, it
 * would've been nice if the caller was actually the EMT thread or that we somehow
 * could've associated the calling thread with the VM up front.
 *
 * @returns VBox status code.
 * @param   pVM     Where to store the pointer to the VM structure.
 *
 * @thread  EMT if it's associated with the VM, otherwise any thread.
 */
GVMMR0DECL(int) GVMMR0DestroyVM(PVM pVM)
{
    LogFlow(("GVMMR0DestroyVM: pVM=%p\n", pVM));
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);


    /*
     * Validate the VM structure, state and caller.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    /* The shared VM structure is page aligned (RTR0MemObjAllocLow). */
    AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
    AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);

    uint32_t hGVM = pVM->hSelf;
    AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
    AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);

    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);

    /* Only the EMT, or any thread while no EMT is registered, may destroy. */
    RTNATIVETHREAD hSelf = RTThreadNativeSelf();
    AssertReturn(pHandle->hEMT == hSelf || pHandle->hEMT == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);

    /*
     * Lookup the handle and destroy the object.
     * Since the lock isn't recursive and we'll have to leave it before dereferencing the
     * object, we take some precautions against racing callers just in case...
     */
    int rc = gvmmR0CreateDestroyLock(pGVMM);
    AssertRC(rc);

    /* be careful here because we might theoretically be racing someone else cleaning up. */
    if (    pHandle->pVM == pVM
        &&  (   pHandle->hEMT == hSelf
             || pHandle->hEMT == NIL_RTNATIVETHREAD)
        &&  VALID_PTR(pHandle->pvObj)
        &&  VALID_PTR(pHandle->pSession)
        &&  VALID_PTR(pHandle->pGVM)
        &&  pHandle->pGVM->u32Magic == GVM_MAGIC)
    {
        /* Clear pvObj while still under the lock, then release the last object
           reference; gvmmR0HandleObjDestructor does the actual cleanup. */
        void *pvObj = pHandle->pvObj;
        pHandle->pvObj = NULL;
        gvmmR0CreateDestroyUnlock(pGVMM);

        SUPR0ObjRelease(pvObj, pHandle->pSession);
    }
    else
    {
        SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, hEMT=%p, .pvObj=%p} pVM=%p hSelf=%p\n",
                    pHandle, pHandle->pVM, pHandle->hEMT, pHandle->pvObj, pVM, hSelf);
        gvmmR0CreateDestroyUnlock(pGVMM);
        rc = VERR_INTERNAL_ERROR;
    }

    return rc;
}
856
857
/**
 * Performs VM cleanup task as part of object destruction.
 *
 * Runs the final ring-0 termination (VMMR0TermVM) exactly once, and only if
 * ring-0 init completed, termination hasn't already been done, and the VM
 * memory object still maps to the VM we know about (paranoia check).
 *
 * NOTE(review): the 'gmmR0' prefix looks like a typo for 'gvmmR0'; a rename
 * would also touch the caller (gvmmR0HandleObjDestructor), so it's left as-is.
 *
 * @param   pGVM    The GVM pointer.
 */
static void gmmR0CleanupVM(PGVM pGVM)
{
    if (    pGVM->gvmm.s.fDoneVMMR0Init
        &&  !pGVM->gvmm.s.fDoneVMMR0Term)
    {
        if (    pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ
            &&  RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM->pVM)
        {
            LogFlow(("gmmR0CleanupVM: Calling VMMR0TermVM\n"));
            VMMR0TermVM(pGVM->pVM, pGVM);
        }
        else
            AssertMsgFailed(("gmmR0CleanupVM: VMMemObj=%p pVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM->pVM));
    }
}
878
879
880/**
881 * Handle destructor.
882 *
883 * @param pvGVMM The GVM instance pointer.
884 * @param pvHandle The handle pointer.
885 */
886static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle)
887{
888 LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvGVMM, pvHandle));
889
890 /*
891 * Some quick, paranoid, input validation.
892 */
893 PGVMHANDLE pHandle = (PGVMHANDLE)pvHandle;
894 AssertPtr(pHandle);
895 PGVMM pGVMM = (PGVMM)pvGVMM;
896 Assert(pGVMM == g_pGVMM);
897 const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
898 if ( !iHandle
899 || iHandle >= RT_ELEMENTS(pGVMM->aHandles)
900 || iHandle != pHandle->iSelf)
901 {
902 SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
903 return;
904 }
905
906 int rc = gvmmR0CreateDestroyLock(pGVMM);
907 AssertRC(rc);
908 rc = gvmmR0UsedLock(pGVMM);
909 AssertRC(rc);
910
911 /*
912 * This is a tad slow but a doubly linked list is too much hazzle.
913 */
914 if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
915 {
916 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
917 gvmmR0UsedUnlock(pGVMM);
918 gvmmR0CreateDestroyUnlock(pGVMM);
919 return;
920 }
921
922 if (pGVMM->iUsedHead == iHandle)
923 pGVMM->iUsedHead = pHandle->iNext;
924 else
925 {
926 uint16_t iPrev = pGVMM->iUsedHead;
927 int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
928 while (iPrev)
929 {
930 if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
931 {
932 SUPR0Printf("GVM: used list index %d is out of range!\n");
933 gvmmR0UsedUnlock(pGVMM);
934 gvmmR0CreateDestroyUnlock(pGVMM);
935 return;
936 }
937 if (RT_UNLIKELY(c-- <= 0))
938 {
939 iPrev = 0;
940 break;
941 }
942
943 if (pGVMM->aHandles[iPrev].iNext == iHandle)
944 break;
945 iPrev = pGVMM->aHandles[iPrev].iNext;
946 }
947 if (!iPrev)
948 {
949 SUPR0Printf("GVM: can't find the handle previous previous of %d!\n", pHandle->iSelf);
950 gvmmR0UsedUnlock(pGVMM);
951 gvmmR0CreateDestroyUnlock(pGVMM);
952 return;
953 }
954
955 Assert(pGVMM->aHandles[iPrev].iNext == iHandle);
956 pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
957 }
958 pHandle->iNext = 0;
959 pGVMM->cVMs--;
960
961 gvmmR0UsedUnlock(pGVMM);
962
963 /*
964 * Do the global cleanup round.
965 */
966 PGVM pGVM = pHandle->pGVM;
967 if ( VALID_PTR(pGVM)
968 && pGVM->u32Magic == GVM_MAGIC)
969 {
970 gmmR0CleanupVM(pGVM);
971
972 /*
973 * Do the GVMM cleanup - must be done last.
974 */
975 /* The VM and VM pages mappings/allocations. */
976 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
977 {
978 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
979 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
980 }
981
982 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
983 {
984 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
985 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
986 }
987
988 if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
989 {
990 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
991 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
992 }
993
994 if (pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ)
995 {
996 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */); AssertRC(rc);
997 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
998 }
999
1000 if (pGVM->gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI)
1001 {
1002 rc = RTSemEventMultiDestroy(pGVM->gvmm.s.HaltEventMulti); AssertRC(rc);
1003 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1004 }
1005
1006 /* the GVM structure itself. */
1007 pGVM->u32Magic |= UINT32_C(0x80000000);
1008 RTMemFree(pGVM);
1009 }
1010 /* else: GVMMR0CreateVM cleanup. */
1011
1012 /*
1013 * Free the handle.
1014 * Reacquire the UsedLock here to since we're updating handle fields.
1015 */
1016 rc = gvmmR0UsedLock(pGVMM);
1017 AssertRC(rc);
1018
1019 pHandle->iNext = pGVMM->iFreeHead;
1020 pGVMM->iFreeHead = iHandle;
1021 ASMAtomicXchgPtr((void * volatile *)&pHandle->pGVM, NULL);
1022 ASMAtomicXchgPtr((void * volatile *)&pHandle->pVM, NULL);
1023 ASMAtomicXchgPtr((void * volatile *)&pHandle->pvObj, NULL);
1024 ASMAtomicXchgPtr((void * volatile *)&pHandle->pSession, NULL);
1025 ASMAtomicXchgSize(&pHandle->hEMT, NIL_RTNATIVETHREAD);
1026
1027 gvmmR0UsedUnlock(pGVMM);
1028 gvmmR0CreateDestroyUnlock(pGVMM);
1029 LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
1030}
1031
1032
1033/**
1034 * Lookup a GVM structure by its handle.
1035 *
1036 * @returns The GVM pointer on success, NULL on failure.
1037 * @param hGVM The global VM handle. Asserts on bad handle.
1038 */
1039GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
1040{
1041 PGVMM pGVMM;
1042 GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
1043
1044 /*
1045 * Validate.
1046 */
1047 AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
1048 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
1049
1050 /*
1051 * Look it up.
1052 */
1053 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1054 AssertPtrReturn(pHandle->pVM, NULL);
1055 AssertPtrReturn(pHandle->pvObj, NULL);
1056 PGVM pGVM = pHandle->pGVM;
1057 AssertPtrReturn(pGVM, NULL);
1058 AssertReturn(pGVM->pVM == pHandle->pVM, NULL);
1059
1060 return pHandle->pGVM;
1061}
1062
1063
1064/**
1065 * Lookup a GVM structure by the shared VM structure.
1066 *
1067 * @returns VBox status code.
1068 * @param pVM The shared VM structure (the ring-0 mapping).
1069 * @param ppGVM Where to store the GVM pointer.
1070 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1071 * @param fTakeUsedLock Whether to take the used lock or not.
1072 * Be very careful if not taking the lock as it's possible that
1073 * the VM will disappear then.
1074 *
1075 * @remark This will not assert on an invalid pVM but try return sliently.
1076 */
1077static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
1078{
1079 PGVMM pGVMM;
1080 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1081
1082 /*
1083 * Validate.
1084 */
1085 if (RT_UNLIKELY( !VALID_PTR(pVM)
1086 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1087 return VERR_INVALID_POINTER;
1088 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1089 || pVM->enmVMState >= VMSTATE_TERMINATED))
1090 return VERR_INVALID_POINTER;
1091
1092 uint16_t hGVM = pVM->hSelf;
1093 if (RT_UNLIKELY( hGVM == NIL_GVM_HANDLE
1094 || hGVM >= RT_ELEMENTS(pGVMM->aHandles)))
1095 return VERR_INVALID_HANDLE;
1096
1097 /*
1098 * Look it up.
1099 */
1100 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1101 PGVM pGVM;
1102 if (fTakeUsedLock)
1103 {
1104 int rc = gvmmR0UsedLock(pGVMM);
1105 AssertRCReturn(rc, rc);
1106
1107 pGVM = pHandle->pGVM;
1108 if (RT_UNLIKELY( pHandle->pVM != pVM
1109 || !VALID_PTR(pHandle->pvObj)
1110 || !VALID_PTR(pGVM)
1111 || pGVM->pVM != pVM))
1112 {
1113 gvmmR0UsedUnlock(pGVMM);
1114 return VERR_INVALID_HANDLE;
1115 }
1116 }
1117 else
1118 {
1119 if (RT_UNLIKELY(pHandle->pVM != pVM))
1120 return VERR_INVALID_HANDLE;
1121 if (RT_UNLIKELY(!VALID_PTR(pHandle->pvObj)))
1122 return VERR_INVALID_HANDLE;
1123
1124 pGVM = pHandle->pGVM;
1125 if (RT_UNLIKELY(!VALID_PTR(pGVM)))
1126 return VERR_INVALID_HANDLE;
1127 if (RT_UNLIKELY(pGVM->pVM != pVM))
1128 return VERR_INVALID_HANDLE;
1129 }
1130
1131 *ppGVM = pGVM;
1132 *ppGVMM = pGVMM;
1133 return VINF_SUCCESS;
1134}
1135
1136
1137/**
1138 * Lookup a GVM structure by the shared VM structure.
1139 *
1140 * @returns The GVM pointer on success, NULL on failure.
1141 * @param pVM The shared VM structure (the ring-0 mapping).
1142 *
1143 * @remark This will not take the 'used'-lock because it doesn't do
1144 * nesting and this function will be used from under the lock.
1145 */
1146GVMMR0DECL(PGVM) GVMMR0ByVM(PVM pVM)
1147{
1148 PGVMM pGVMM;
1149 PGVM pGVM;
1150 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */);
1151 if (RT_SUCCESS(rc))
1152 return pGVM;
1153 AssertRC(rc);
1154 return NULL;
1155}
1156
1157
1158/**
1159 * Lookup a GVM structure by the shared VM structure
1160 * and ensuring that the caller is the EMT thread.
1161 *
1162 * @returns VBox status code.
1163 * @param pVM The shared VM structure (the ring-0 mapping).
1164 * @param ppGVM Where to store the GVM pointer.
1165 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1166 * @thread EMT
1167 *
1168 * @remark This will assert in failure paths.
1169 */
1170static int gvmmR0ByVMAndEMT(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM)
1171{
1172 PGVMM pGVMM;
1173 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1174
1175 /*
1176 * Validate.
1177 */
1178 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1179 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1180
1181 uint16_t hGVM = pVM->hSelf;
1182 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
1183 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);
1184
1185 /*
1186 * Look it up.
1187 */
1188 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1189 RTNATIVETHREAD hAllegedEMT = RTThreadNativeSelf();
1190 AssertMsgReturn(pHandle->hEMT == hAllegedEMT, ("hEMT %x hAllegedEMT %x\n", pHandle->hEMT, hAllegedEMT), VERR_NOT_OWNER);
1191 AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
1192 AssertPtrReturn(pHandle->pvObj, VERR_INTERNAL_ERROR);
1193
1194 PGVM pGVM = pHandle->pGVM;
1195 AssertPtrReturn(pGVM, VERR_INTERNAL_ERROR);
1196 AssertReturn(pGVM->pVM == pVM, VERR_INTERNAL_ERROR);
1197 AssertReturn(pGVM->hEMT == hAllegedEMT, VERR_INTERNAL_ERROR);
1198
1199 *ppGVM = pGVM;
1200 *ppGVMM = pGVMM;
1201 return VINF_SUCCESS;
1202}
1203
1204
1205/**
1206 * Lookup a GVM structure by the shared VM structure
1207 * and ensuring that the caller is the EMT thread.
1208 *
1209 * @returns VBox status code.
1210 * @param pVM The shared VM structure (the ring-0 mapping).
1211 * @param ppGVM Where to store the GVM pointer.
1212 * @thread EMT
1213 */
1214GVMMR0DECL(int) GVMMR0ByVMAndEMT(PVM pVM, PGVM *ppGVM)
1215{
1216 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
1217 PGVMM pGVMM;
1218 return gvmmR0ByVMAndEMT(pVM, ppGVM, &pGVMM);
1219}
1220
1221
1222/**
1223 * Lookup a VM by its global handle.
1224 *
1225 * @returns The VM handle on success, NULL on failure.
1226 * @param hGVM The global VM handle. Asserts on bad handle.
1227 */
1228GVMMR0DECL(PVM) GVMMR0GetVMByHandle(uint32_t hGVM)
1229{
1230 PGVM pGVM = GVMMR0ByHandle(hGVM);
1231 return pGVM ? pGVM->pVM : NULL;
1232}
1233
1234
1235/**
1236 * Looks up the VM belonging to the specified EMT thread.
1237 *
1238 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1239 * unnecessary kernel panics when the EMT thread hits an assertion. The
1240 * call may or not be an EMT thread.
1241 *
1242 * @returns The VM handle on success, NULL on failure.
1243 * @param hEMT The native thread handle of the EMT.
1244 * NIL_RTNATIVETHREAD means the current thread
1245 */
1246GVMMR0DECL(PVM) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
1247{
1248 /*
1249 * No Assertions here as we're usually called in a AssertMsgN or
1250 * RTAssert* context.
1251 */
1252 PGVMM pGVMM = g_pGVMM;
1253 if ( !VALID_PTR(pGVMM)
1254 || pGVMM->u32Magic != GVMM_MAGIC)
1255 return NULL;
1256
1257 if (hEMT == NIL_RTNATIVETHREAD)
1258 hEMT = RTThreadNativeSelf();
1259
1260 /*
1261 * Search the handles in a linear fashion as we don't dare take the lock (assert).
1262 */
1263 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1264 if ( pGVMM->aHandles[i].hEMT == hEMT
1265 && pGVMM->aHandles[i].iSelf == i
1266 && VALID_PTR(pGVMM->aHandles[i].pvObj)
1267 && VALID_PTR(pGVMM->aHandles[i].pVM))
1268 return pGVMM->aHandles[i].pVM;
1269
1270 return NULL;
1271}
1272
1273
1274/**
1275 * This is will wake up expired and soon-to-be expired VMs.
1276 *
1277 * @returns Number of VMs that has been woken up.
1278 * @param pGVMM Pointer to the GVMM instance data.
1279 * @param u64Now The current time.
1280 */
1281static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
1282{
1283 /*
1284 * The first pass will wake up VMs which has actually expired
1285 * and look for VMs that should be woken up in the 2nd and 3rd passes.
1286 */
1287 unsigned cWoken = 0;
1288 unsigned cHalted = 0;
1289 unsigned cTodo2nd = 0;
1290 unsigned cTodo3rd = 0;
1291 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1292 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1293 i = pGVMM->aHandles[i].iNext)
1294 {
1295 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1296 if ( VALID_PTR(pCurGVM)
1297 && pCurGVM->u32Magic == GVM_MAGIC)
1298 {
1299 uint64_t u64 = pCurGVM->gvmm.s.u64HaltExpire;
1300 if (u64)
1301 {
1302 if (u64 <= u64Now)
1303 {
1304 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1305 {
1306 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1307 AssertRC(rc);
1308 cWoken++;
1309 }
1310 }
1311 else
1312 {
1313 cHalted++;
1314 if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)
1315 cTodo2nd++;
1316 else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)
1317 cTodo3rd++;
1318 }
1319 }
1320 }
1321 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1322 }
1323
1324 if (cTodo2nd)
1325 {
1326 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1327 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1328 i = pGVMM->aHandles[i].iNext)
1329 {
1330 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1331 if ( VALID_PTR(pCurGVM)
1332 && pCurGVM->u32Magic == GVM_MAGIC
1333 && pCurGVM->gvmm.s.u64HaltExpire
1334 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)
1335 {
1336 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1337 {
1338 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1339 AssertRC(rc);
1340 cWoken++;
1341 }
1342 }
1343 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1344 }
1345 }
1346
1347 if (cTodo3rd)
1348 {
1349 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1350 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1351 i = pGVMM->aHandles[i].iNext)
1352 {
1353 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1354 if ( VALID_PTR(pCurGVM)
1355 && pCurGVM->u32Magic == GVM_MAGIC
1356 && pCurGVM->gvmm.s.u64HaltExpire
1357 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)
1358 {
1359 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))
1360 {
1361 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti);
1362 AssertRC(rc);
1363 cWoken++;
1364 }
1365 }
1366 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1367 }
1368 }
1369
1370 return cWoken;
1371}
1372
1373
1374/**
1375 * Halt the EMT thread.
1376 *
1377 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
1378 * VERR_INTERRUPTED if a signal was scheduled for the thread.
1379 * @param pVM Pointer to the shared VM structure.
1380 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
1381 * @thread EMT.
1382 */
1383GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, uint64_t u64ExpireGipTime)
1384{
1385 LogFlow(("GVMMR0SchedHalt: pVM=%p\n", pVM));
1386
1387 /*
1388 * Validate the VM structure, state and handle.
1389 */
1390 PGVMM pGVMM;
1391 PGVM pGVM;
1392 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
1393 if (RT_FAILURE(rc))
1394 return rc;
1395 pGVM->gvmm.s.StatsSched.cHaltCalls++;
1396
1397 Assert(!pGVM->gvmm.s.u64HaltExpire);
1398
1399 /*
1400 * Take the UsedList semaphore, get the current time
1401 * and check if anyone needs waking up.
1402 * Interrupts must NOT be disabled at this point because we ask for GIP time!
1403 */
1404 rc = gvmmR0UsedLock(pGVMM);
1405 AssertRC(rc);
1406
1407 pGVM->gvmm.s.iCpuEmt = ASMGetApicId();
1408
1409 Assert(ASMGetFlags() & X86_EFL_IF);
1410 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1411 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1412
1413 /*
1414 * Go to sleep if we must...
1415 */
1416 if ( u64Now < u64ExpireGipTime
1417 && u64ExpireGipTime - u64Now > (pGVMM->cVMs > pGVMM->cVMsMeansCompany
1418 ? pGVMM->nsMinSleepCompany
1419 : pGVMM->nsMinSleepAlone))
1420 {
1421 pGVM->gvmm.s.StatsSched.cHaltBlocking++;
1422 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, u64ExpireGipTime);
1423 gvmmR0UsedUnlock(pGVMM);
1424
1425 uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000;
1426 rc = RTSemEventMultiWaitNoResume(pGVM->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);
1427 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, 0);
1428 if (rc == VERR_TIMEOUT)
1429 {
1430 pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
1431 rc = VINF_SUCCESS;
1432 }
1433 }
1434 else
1435 {
1436 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
1437 gvmmR0UsedUnlock(pGVMM);
1438 }
1439
1440 /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
1441 RTSemEventMultiReset(pGVM->gvmm.s.HaltEventMulti);
1442
1443 return rc;
1444}
1445
1446
1447/**
1448 * Wakes up the halted EMT thread so it can service a pending request.
1449 *
1450 * @returns VINF_SUCCESS if not yielded.
1451 * VINF_GVM_NOT_BLOCKED if the EMT thread wasn't blocked.
1452 * @param pVM Pointer to the shared VM structure.
1453 * @thread Any but EMT.
1454 */
1455GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM)
1456{
1457 /*
1458 * Validate input and take the UsedLock.
1459 */
1460 PGVM pGVM;
1461 PGVMM pGVMM;
1462 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /* fTakeUsedLock */);
1463 if (RT_SUCCESS(rc))
1464 {
1465 pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
1466
1467 /*
1468 * Signal the semaphore regardless of whether it's current blocked on it.
1469 *
1470 * The reason for this is that there is absolutely no way we can be 100%
1471 * certain that it isn't *about* go to go to sleep on it and just got
1472 * delayed a bit en route. So, we will always signal the semaphore when
1473 * the it is flagged as halted in the VMM.
1474 */
1475 if (pGVM->gvmm.s.u64HaltExpire)
1476 {
1477 rc = VINF_SUCCESS;
1478 ASMAtomicXchgU64(&pGVM->gvmm.s.u64HaltExpire, 0);
1479 }
1480 else
1481 {
1482 rc = VINF_GVM_NOT_BLOCKED;
1483 pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
1484 }
1485
1486 int rc2 = RTSemEventMultiSignal(pGVM->gvmm.s.HaltEventMulti);
1487 AssertRC(rc2);
1488
1489 /*
1490 * While we're here, do a round of scheduling.
1491 */
1492 Assert(ASMGetFlags() & X86_EFL_IF);
1493 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1494 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1495
1496
1497 rc2 = gvmmR0UsedUnlock(pGVMM);
1498 AssertRC(rc2);
1499 }
1500
1501 LogFlow(("GVMMR0SchedWakeUp: returns %Rrc\n", rc));
1502 return rc;
1503}
1504
1505
1506/**
1507 * Poll the schedule to see if someone else should get a chance to run.
1508 *
1509 * This is a bit hackish and will not work too well if the machine is
1510 * under heavy load from non-VM processes.
1511 *
1512 * @returns VINF_SUCCESS if not yielded.
1513 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
1514 * @param pVM Pointer to the shared VM structure.
1515 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
1516 * @param fYield Whether to yield or not.
1517 * This is for when we're spinning in the halt loop.
1518 * @thread EMT.
1519 */
1520GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, bool fYield)
1521{
1522 /*
1523 * Validate input.
1524 */
1525 PGVM pGVM;
1526 PGVMM pGVMM;
1527 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM);
1528 if (RT_SUCCESS(rc))
1529 {
1530 rc = gvmmR0UsedLock(pGVMM);
1531 AssertRC(rc);
1532 pGVM->gvmm.s.StatsSched.cPollCalls++;
1533
1534 Assert(ASMGetFlags() & X86_EFL_IF);
1535 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1536
1537 if (!fYield)
1538 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
1539 else
1540 {
1541 /** @todo implement this... */
1542 rc = VERR_NOT_IMPLEMENTED;
1543 }
1544
1545 gvmmR0UsedUnlock(pGVMM);
1546 }
1547
1548 LogFlow(("GVMMR0SchedWakeUp: returns %Rrc\n", rc));
1549 return rc;
1550}
1551
1552
1553
1554/**
1555 * Retrieves the GVMM statistics visible to the caller.
1556 *
1557 * @returns VBox status code.
1558 *
1559 * @param pStats Where to put the statistics.
1560 * @param pSession The current session.
1561 * @param pVM The VM to obtain statistics for. Optional.
1562 */
1563GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM)
1564{
1565 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pVM=%p\n", pStats, pSession, pVM));
1566
1567 /*
1568 * Validate input.
1569 */
1570 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
1571 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
1572 pStats->cVMs = 0; /* (crash before taking the sem...) */
1573
1574 /*
1575 * Take the lock and get the VM statistics.
1576 */
1577 PGVMM pGVMM;
1578 if (pVM)
1579 {
1580 PGVM pGVM;
1581 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /*fTakeUsedLock*/);
1582 if (RT_FAILURE(rc))
1583 return rc;
1584 pStats->SchedVM = pGVM->gvmm.s.StatsSched;
1585 }
1586 else
1587 {
1588 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1589 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
1590
1591 int rc = gvmmR0UsedLock(pGVMM);
1592 AssertRCReturn(rc, rc);
1593 }
1594
1595 /*
1596 * Enumerate the VMs and add the ones visibile to the statistics.
1597 */
1598 pStats->cVMs = 0;
1599 memset(&pStats->SchedSum, 0, sizeof(pStats->SchedSum));
1600
1601 for (unsigned i = pGVMM->iUsedHead;
1602 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1603 i = pGVMM->aHandles[i].iNext)
1604 {
1605 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1606 void *pvObj = pGVMM->aHandles[i].pvObj;
1607 if ( VALID_PTR(pvObj)
1608 && VALID_PTR(pGVM)
1609 && pGVM->u32Magic == GVM_MAGIC
1610 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
1611 {
1612 pStats->cVMs++;
1613
1614 pStats->SchedSum.cHaltCalls += pGVM->gvmm.s.StatsSched.cHaltCalls;
1615 pStats->SchedSum.cHaltBlocking += pGVM->gvmm.s.StatsSched.cHaltBlocking;
1616 pStats->SchedSum.cHaltTimeouts += pGVM->gvmm.s.StatsSched.cHaltTimeouts;
1617 pStats->SchedSum.cHaltNotBlocking += pGVM->gvmm.s.StatsSched.cHaltNotBlocking;
1618 pStats->SchedSum.cHaltWakeUps += pGVM->gvmm.s.StatsSched.cHaltWakeUps;
1619
1620 pStats->SchedSum.cWakeUpCalls += pGVM->gvmm.s.StatsSched.cWakeUpCalls;
1621 pStats->SchedSum.cWakeUpNotHalted += pGVM->gvmm.s.StatsSched.cWakeUpNotHalted;
1622 pStats->SchedSum.cWakeUpWakeUps += pGVM->gvmm.s.StatsSched.cWakeUpWakeUps;
1623
1624 pStats->SchedSum.cPollCalls += pGVM->gvmm.s.StatsSched.cPollCalls;
1625 pStats->SchedSum.cPollHalts += pGVM->gvmm.s.StatsSched.cPollHalts;
1626 pStats->SchedSum.cPollWakeUps += pGVM->gvmm.s.StatsSched.cPollWakeUps;
1627 }
1628 }
1629
1630 gvmmR0UsedUnlock(pGVMM);
1631
1632 return VINF_SUCCESS;
1633}
1634
1635
1636/**
1637 * VMMR0 request wrapper for GVMMR0QueryStatistics.
1638 *
1639 * @returns see GVMMR0QueryStatistics.
1640 * @param pVM Pointer to the shared VM structure. Optional.
1641 * @param pReq The request packet.
1642 */
1643GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PVM pVM, PGVMMQUERYSTATISTICSSREQ pReq)
1644{
1645 /*
1646 * Validate input and pass it on.
1647 */
1648 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1649 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1650
1651 return GVMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pVM);
1652}
1653
1654
1655/**
1656 * Resets the specified GVMM statistics.
1657 *
1658 * @returns VBox status code.
1659 *
1660 * @param pStats Which statistics to reset, that is, non-zero fields indicates which to reset.
1661 * @param pSession The current session.
1662 * @param pVM The VM to reset statistics for. Optional.
1663 */
1664GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM)
1665{
1666 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pVM=%p\n", pStats, pSession, pVM));
1667
1668 /*
1669 * Validate input.
1670 */
1671 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
1672 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
1673
1674 /*
1675 * Take the lock and get the VM statistics.
1676 */
1677 PGVMM pGVMM;
1678 if (pVM)
1679 {
1680 PGVM pGVM;
1681 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /*fTakeUsedLock*/);
1682 if (RT_FAILURE(rc))
1683 return rc;
1684# define MAYBE_RESET_FIELD(field) \
1685 do { if (pStats->SchedVM. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
1686 MAYBE_RESET_FIELD(cHaltCalls);
1687 MAYBE_RESET_FIELD(cHaltBlocking);
1688 MAYBE_RESET_FIELD(cHaltTimeouts);
1689 MAYBE_RESET_FIELD(cHaltNotBlocking);
1690 MAYBE_RESET_FIELD(cHaltWakeUps);
1691 MAYBE_RESET_FIELD(cWakeUpCalls);
1692 MAYBE_RESET_FIELD(cWakeUpNotHalted);
1693 MAYBE_RESET_FIELD(cWakeUpWakeUps);
1694 MAYBE_RESET_FIELD(cPollCalls);
1695 MAYBE_RESET_FIELD(cPollHalts);
1696 MAYBE_RESET_FIELD(cPollWakeUps);
1697# undef MAYBE_RESET_FIELD
1698 }
1699 else
1700 {
1701 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
1702
1703 int rc = gvmmR0UsedLock(pGVMM);
1704 AssertRCReturn(rc, rc);
1705 }
1706
1707 /*
1708 * Enumerate the VMs and add the ones visibile to the statistics.
1709 */
1710 if (ASMMemIsAll8(&pStats->SchedSum, sizeof(pStats->SchedSum), 0))
1711 {
1712 for (unsigned i = pGVMM->iUsedHead;
1713 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1714 i = pGVMM->aHandles[i].iNext)
1715 {
1716 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1717 void *pvObj = pGVMM->aHandles[i].pvObj;
1718 if ( VALID_PTR(pvObj)
1719 && VALID_PTR(pGVM)
1720 && pGVM->u32Magic == GVM_MAGIC
1721 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
1722 {
1723# define MAYBE_RESET_FIELD(field) \
1724 do { if (pStats->SchedSum. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
1725 MAYBE_RESET_FIELD(cHaltCalls);
1726 MAYBE_RESET_FIELD(cHaltBlocking);
1727 MAYBE_RESET_FIELD(cHaltTimeouts);
1728 MAYBE_RESET_FIELD(cHaltNotBlocking);
1729 MAYBE_RESET_FIELD(cHaltWakeUps);
1730 MAYBE_RESET_FIELD(cWakeUpCalls);
1731 MAYBE_RESET_FIELD(cWakeUpNotHalted);
1732 MAYBE_RESET_FIELD(cWakeUpWakeUps);
1733 MAYBE_RESET_FIELD(cPollCalls);
1734 MAYBE_RESET_FIELD(cPollHalts);
1735 MAYBE_RESET_FIELD(cPollWakeUps);
1736# undef MAYBE_RESET_FIELD
1737 }
1738 }
1739 }
1740
1741 gvmmR0UsedUnlock(pGVMM);
1742
1743 return VINF_SUCCESS;
1744}
1745
1746
1747/**
1748 * VMMR0 request wrapper for GVMMR0ResetStatistics.
1749 *
1750 * @returns see GVMMR0ResetStatistics.
1751 * @param pVM Pointer to the shared VM structure. Optional.
1752 * @param pReq The request packet.
1753 */
1754GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PVM pVM, PGVMMRESETSTATISTICSSREQ pReq)
1755{
1756 /*
1757 * Validate input and pass it on.
1758 */
1759 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1760 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1761
1762 return GVMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pVM);
1763}
1764
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette