VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@90829

Last change on this file was 90829, checked in by vboxsync, 3 years ago

IPRT,VMM,SUPDrv,++: Reworked the IPRT logger structure and how the VMM ring-0 uses it. bugref:10086

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 110.8 KB
1/* $Id: GVMMR0.cpp 90829 2021-08-24 10:26:07Z vboxsync $ */
2/** @file
3 * GVMM - Global VM Manager.
4 */
5
6/*
7 * Copyright (C) 2007-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_gvmm GVMM - The Global VM Manager
20 *
21 * The Global VM Manager lives in ring-0. Its main function at the moment is
22 * to manage a list of all running VMs, keep a ring-0 only structure (GVM) for
23 * each of them, and assign them unique identifiers (so GMM can track page
24 * owners). The GVMM also manages some of the host CPU resources, like the
25 * periodic preemption timer.
26 *
27 * The GVMM will create a ring-0 object for each VM when it is registered; this
28 * is both for session cleanup purposes and for having a point where it is
29 * possible to implement usage policies later (in SUPR0ObjRegister).
30 *
31 *
32 * @section sec_gvmm_ppt Periodic Preemption Timer (PPT)
33 *
34 * On systems that sport a high resolution kernel timer API, we use per-CPU
35 * timers to generate interrupts that preempt VT-x, AMD-V and raw-mode guest
36 * execution. The timer frequency is calculated by taking the max
37 * TMCalcHostTimerFrequency for all VMs running on a CPU for the last ~160 ms
38 * (RT_ELEMENTS((PGVMMHOSTCPU)0, Ppt.aHzHistory) *
39 * GVMMHOSTCPU_PPT_HIST_INTERVAL_NS).
40 *
41 * The TMCalcHostTimerFrequency() part of things takes the max
42 * TMTimerSetFrequencyHint() value and adjusts it by the current catch-up percent,
43 * warp drive percent and some fudge factors. VMMR0.cpp reports the result via
44 * GVMMR0SchedUpdatePeriodicPreemptionTimer() before switching to the VT-x,
45 * AMD-V and raw-mode execution environments.
46 */
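/* Worked example of the ~160 ms window mentioned above: Ppt.aHzHistory has 8
 * entries and GVMMHOSTCPU_PPT_HIST_INTERVAL_NS is 20 000 000 ns (20 ms), both
 * defined further down in this file, so the timer frequency chosen for a host
 * CPU is derived from roughly 8 * 20 ms = 160 ms worth of
 * TMCalcHostTimerFrequency samples. */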
47
48
49/*********************************************************************************************************************************
50* Header Files *
51*********************************************************************************************************************************/
52#define LOG_GROUP LOG_GROUP_GVMM
53#include <VBox/vmm/gvmm.h>
54#include <VBox/vmm/gmm.h>
55#include "GVMMR0Internal.h"
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/iom.h>
58#include <VBox/vmm/pdm.h>
59#include <VBox/vmm/pgm.h>
60#include <VBox/vmm/vmm.h>
61#ifdef VBOX_WITH_NEM_R0
62# include <VBox/vmm/nem.h>
63#endif
64#include <VBox/vmm/vmcpuset.h>
65#include <VBox/vmm/vmcc.h>
66#include <VBox/param.h>
67#include <VBox/err.h>
68
69#include <iprt/asm.h>
70#include <iprt/asm-amd64-x86.h>
71#include <iprt/critsect.h>
72#include <iprt/mem.h>
73#include <iprt/semaphore.h>
74#include <iprt/time.h>
75#include <VBox/log.h>
76#include <iprt/thread.h>
77#include <iprt/process.h>
78#include <iprt/param.h>
79#include <iprt/string.h>
80#include <iprt/assert.h>
81#include <iprt/mem.h>
82#include <iprt/memobj.h>
83#include <iprt/mp.h>
84#include <iprt/cpuset.h>
85#include <iprt/spinlock.h>
86#include <iprt/timer.h>
87
88#include "dtrace/VBoxVMM.h"
89
90
91/*********************************************************************************************************************************
92* Defined Constants And Macros *
93*********************************************************************************************************************************/
94#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(DOXYGEN_RUNNING)
95/** Define this to enable the periodic preemption timer. */
96# define GVMM_SCHED_WITH_PPT
97#endif
98
99
100/** @def GVMM_CHECK_SMAP_SETUP
101 * SMAP check setup. */
102/** @def GVMM_CHECK_SMAP_CHECK
103 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
104 * it will be logged and @a a_BadExpr is executed. */
105/** @def GVMM_CHECK_SMAP_CHECK2
106 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
107 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
108 * executed. */
109#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
110# define GVMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
111# define GVMM_CHECK_SMAP_CHECK(a_BadExpr) \
112 do { \
113 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
114 { \
115 RTCCUINTREG fEflCheck = ASMGetFlags(); \
116 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
117 { /* likely */ } \
118 else \
119 { \
120 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
121 a_BadExpr; \
122 } \
123 } \
124 } while (0)
125# define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
126 do { \
127 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
128 { \
129 RTCCUINTREG fEflCheck = ASMGetFlags(); \
130 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
131 { /* likely */ } \
132 else \
133 { \
134 SUPR0BadContext((a_pGVM) ? (a_pGVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
135 a_BadExpr; \
136 } \
137 } \
138 } while (0)
139#else
140# define GVMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
141# define GVMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
142# define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
143#endif
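/* Usage sketch of the macros above (illustrative only, not lifted from a
 * particular function): the setup macro captures the kernel feature flags once
 * per entry point, after which each check verifies that EFLAGS.AC is still set
 * when SMAP is enabled:
 *
 *      GVMM_CHECK_SMAP_SETUP();
 *      GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
 *      ... work that must not clear EFLAGS.AC ...
 *      GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
 */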
144
145/** Special value that GVMMR0DeregisterVCpu sets. */
146#define GVMM_RTNATIVETHREAD_DESTROYED (~(RTNATIVETHREAD)1)
147AssertCompile(GVMM_RTNATIVETHREAD_DESTROYED != NIL_RTNATIVETHREAD);
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153
154/**
155 * Global VM handle.
156 */
157typedef struct GVMHANDLE
158{
159 /** The index of the next handle in the list (free or used). (0 is nil.) */
160 uint16_t volatile iNext;
161 /** Our own index / handle value. */
162 uint16_t iSelf;
163 /** The process ID of the handle owner.
164 * This is used for access checks. */
165 RTPROCESS ProcId;
166 /** The pointer to the ring-0 only (aka global) VM structure. */
167 PGVM pGVM;
168 /** The virtual machine object. */
169 void *pvObj;
170 /** The session this VM is associated with. */
171 PSUPDRVSESSION pSession;
172 /** The ring-0 handle of the EMT0 thread.
173 * This is used for ownership checks as well as looking up a VM handle by thread
174 * at times like assertions. */
175 RTNATIVETHREAD hEMT0;
176} GVMHANDLE;
177/** Pointer to a global VM handle. */
178typedef GVMHANDLE *PGVMHANDLE;
179
180/** Number of GVM handles (including the NIL handle). */
181#if HC_ARCH_BITS == 64
182# define GVMM_MAX_HANDLES 8192
183#else
184# define GVMM_MAX_HANDLES 128
185#endif
186
187/**
188 * Per host CPU GVMM data.
189 */
190typedef struct GVMMHOSTCPU
191{
192 /** Magic number (GVMMHOSTCPU_MAGIC). */
193 uint32_t volatile u32Magic;
194 /** The CPU ID. */
195 RTCPUID idCpu;
196 /** The CPU set index. */
197 uint32_t idxCpuSet;
198
199#ifdef GVMM_SCHED_WITH_PPT
200 /** Periodic preemption timer data. */
201 struct
202 {
203 /** The handle to the periodic preemption timer. */
204 PRTTIMER pTimer;
205 /** Spinlock protecting the data below. */
206 RTSPINLOCK hSpinlock;
207 /** The smallest Hz that we need to care about. (static) */
208 uint32_t uMinHz;
209 /** The number of ticks between each historization. */
210 uint32_t cTicksHistoriziationInterval;
211 /** The current historization tick (counting up to
212 * cTicksHistoriziationInterval and then resetting). */
213 uint32_t iTickHistorization;
214 /** The current timer interval. This is set to 0 when inactive. */
215 uint32_t cNsInterval;
216 /** The current timer frequency. This is set to 0 when inactive. */
217 uint32_t uTimerHz;
218 /** The current max frequency reported by the EMTs.
219 * This gets historicized and reset by the timer callback. This is
220 * read without holding the spinlock, so needs atomic updating. */
221 uint32_t volatile uDesiredHz;
222 /** Whether the timer was started or not. */
223 bool volatile fStarted;
224 /** Set if we're starting the timer. */
225 bool volatile fStarting;
226 /** The index of the next history entry (mod it). */
227 uint32_t iHzHistory;
228 /** Historicized uDesiredHz values. The array wraps around, new entries
229 * are added at iHzHistory. This is updated approximately every
230 * GVMMHOSTCPU_PPT_HIST_INTERVAL_NS by the timer callback. */
231 uint32_t aHzHistory[8];
232 /** Statistics counter for recording the number of interval changes. */
233 uint32_t cChanges;
234 /** Statistics counter for recording the number of timer starts. */
235 uint32_t cStarts;
236 } Ppt;
237#endif /* GVMM_SCHED_WITH_PPT */
238
239} GVMMHOSTCPU;
240/** Pointer to the per host CPU GVMM data. */
241typedef GVMMHOSTCPU *PGVMMHOSTCPU;
242/** The GVMMHOSTCPU::u32Magic value (Petra, Tanya & Rachel Haden). */
243#define GVMMHOSTCPU_MAGIC UINT32_C(0x19711011)
244/** The interval one history entry should cover (approximately), given in
245 * nanoseconds. */
246#define GVMMHOSTCPU_PPT_HIST_INTERVAL_NS UINT32_C(20000000)
247
248
249/**
250 * The GVMM instance data.
251 */
252typedef struct GVMM
253{
254 /** Eyecatcher / magic. */
255 uint32_t u32Magic;
256 /** The index of the head of the free handle chain. (0 is nil.) */
257 uint16_t volatile iFreeHead;
258 /** The index of the head of the active handle chain. (0 is nil.) */
259 uint16_t volatile iUsedHead;
260 /** The number of VMs. */
261 uint16_t volatile cVMs;
262 /** Alignment padding. */
263 uint16_t u16Reserved;
264 /** The number of EMTs. */
265 uint32_t volatile cEMTs;
266 /** The number of EMTs that have halted in GVMMR0SchedHalt. */
267 uint32_t volatile cHaltedEMTs;
268 /** Mini lock for restricting early wake-ups to one thread. */
269 bool volatile fDoingEarlyWakeUps;
270 bool afPadding[3]; /**< explicit alignment padding. */
271 /** When the next halted or sleeping EMT will wake up.
272 * This is set to 0 when it needs recalculating and to UINT64_MAX when
273 * there are no halted or sleeping EMTs in the GVMM. */
274 uint64_t uNsNextEmtWakeup;
275 /** The lock used to serialize VM creation, destruction and associated events
276 * that aren't performance critical. Owners may acquire the list lock. */
277 RTCRITSECT CreateDestroyLock;
278 /** The lock used to serialize used list updates and accesses.
279 * This indirectly includes scheduling since the scheduler will have to walk the
280 * used list to examine running VMs. Owners may not acquire any other locks. */
281 RTCRITSECTRW UsedLock;
282 /** The handle array.
283 * The size of this array defines the maximum number of currently running VMs.
284 * The first entry is unused as it represents the NIL handle. */
285 GVMHANDLE aHandles[GVMM_MAX_HANDLES];
286
287 /** @gcfgm{/GVMM/cEMTsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
288 * The number of EMTs at which we no longer consider ourselves alone on a
289 * CPU/Core.
290 */
291 uint32_t cEMTsMeansCompany;
292 /** @gcfgm{/GVMM/MinSleepAlone,32-bit, 0, 100000000, 750000, ns}
293 * The minimum sleep time for when we're alone, in nanoseconds.
294 */
295 uint32_t nsMinSleepAlone;
296 /** @gcfgm{/GVMM/MinSleepCompany,32-bit,0, 100000000, 15000, ns}
297 * The minimum sleep time for when we've got company, in nanoseconds.
298 */
299 uint32_t nsMinSleepCompany;
300 /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
301 * The limit for the first round of early wake-ups, given in nanoseconds.
302 */
303 uint32_t nsEarlyWakeUp1;
304 /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
305 * The limit for the second round of early wake-ups, given in nanoseconds.
306 */
307 uint32_t nsEarlyWakeUp2;
308
309 /** Set if we're doing early wake-ups.
310 * This reflects nsEarlyWakeUp1 and nsEarlyWakeUp2. */
311 bool volatile fDoEarlyWakeUps;
312
313 /** The number of entries in the host CPU array (aHostCpus). */
314 uint32_t cHostCpus;
315 /** Per host CPU data (variable length). */
316 GVMMHOSTCPU aHostCpus[1];
317} GVMM;
318AssertCompileMemberAlignment(GVMM, CreateDestroyLock, 8);
319AssertCompileMemberAlignment(GVMM, UsedLock, 8);
320AssertCompileMemberAlignment(GVMM, uNsNextEmtWakeup, 8);
321/** Pointer to the GVMM instance data. */
322typedef GVMM *PGVMM;
323
324/** The GVMM::u32Magic value (Charlie Haden). */
325#define GVMM_MAGIC UINT32_C(0x19370806)
326
327
328
329/*********************************************************************************************************************************
330* Global Variables *
331*********************************************************************************************************************************/
332/** Pointer to the GVMM instance data.
333 * (Just my general dislike for global variables.) */
334static PGVMM g_pGVMM = NULL;
335
336/** Macro for obtaining and validating the g_pGVMM pointer.
337 * On failure it will return from the invoking function with the specified return value.
338 *
339 * @param pGVMM The name of the pGVMM variable.
340 * @param rc The return value on failure. Use VERR_GVMM_INSTANCE for VBox
341 * status codes.
342 */
343#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
344 do { \
345 (pGVMM) = g_pGVMM;\
346 AssertPtrReturn((pGVMM), (rc)); \
347 AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
348 } while (0)
349
350/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
351 * On failure it will return from the invoking function.
352 *
353 * @param pGVMM The name of the pGVMM variable.
354 */
355#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
356 do { \
357 (pGVMM) = g_pGVMM;\
358 AssertPtrReturnVoid((pGVMM)); \
359 AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
360 } while (0)
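/* Both macros above are used as the opening statements of the public entry
 * points in this file, e.g. (as in GVMMR0SetConfig further down):
 *      PGVMM pGVMM;
 *      GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
 */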
361
362
363/*********************************************************************************************************************************
364* Internal Functions *
365*********************************************************************************************************************************/
366static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession);
367static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
368static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
369static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM);
370
371#ifdef GVMM_SCHED_WITH_PPT
372static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
373#endif
374
375
376/**
377 * Initializes the GVMM.
378 *
379 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
380 *
381 * @returns VBox status code.
382 */
383GVMMR0DECL(int) GVMMR0Init(void)
384{
385 LogFlow(("GVMMR0Init:\n"));
386
387 /*
388 * Allocate and initialize the instance data.
389 */
390 uint32_t cHostCpus = RTMpGetArraySize();
391 AssertMsgReturn(cHostCpus > 0 && cHostCpus < _64K, ("%d", (int)cHostCpus), VERR_GVMM_HOST_CPU_RANGE);
392
393 PGVMM pGVMM = (PGVMM)RTMemAllocZ(RT_UOFFSETOF_DYN(GVMM, aHostCpus[cHostCpus]));
394 if (!pGVMM)
395 return VERR_NO_MEMORY;
396 int rc = RTCritSectInitEx(&pGVMM->CreateDestroyLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
397 "GVMM-CreateDestroyLock");
398 if (RT_SUCCESS(rc))
399 {
400 rc = RTCritSectRwInitEx(&pGVMM->UsedLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "GVMM-UsedLock");
401 if (RT_SUCCESS(rc))
402 {
403 pGVMM->u32Magic = GVMM_MAGIC;
404 pGVMM->iUsedHead = 0;
405 pGVMM->iFreeHead = 1;
406
407 /* the nil handle */
408 pGVMM->aHandles[0].iSelf = 0;
409 pGVMM->aHandles[0].iNext = 0;
410
411 /* the tail */
412 unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
413 pGVMM->aHandles[i].iSelf = i;
414 pGVMM->aHandles[i].iNext = 0; /* nil */
415
416 /* the rest */
417 while (i-- > 1)
418 {
419 pGVMM->aHandles[i].iSelf = i;
420 pGVMM->aHandles[i].iNext = i + 1;
421 }
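 /* The three blocks above chain the whole array into one free list:
    iFreeHead = 1 -> 2 -> ... -> GVMM_MAX_HANDLES - 1 -> 0 (nil), with
    entry 0 reserved as the nil handle and the used list left empty. */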
422
423 /* The default configuration values. */
424 uint32_t cNsResolution = RTSemEventMultiGetResolution();
425 pGVMM->cEMTsMeansCompany = 1; /** @todo should be adjusted to relative to the cpu count or something... */
426 if (cNsResolution >= 5*RT_NS_100US)
427 {
428 pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
429 pGVMM->nsMinSleepCompany = 15000 /* ns (0.015 ms) */;
430 pGVMM->nsEarlyWakeUp1 = 25000 /* ns (0.025 ms) */;
431 pGVMM->nsEarlyWakeUp2 = 50000 /* ns (0.050 ms) */;
432 }
433 else if (cNsResolution > RT_NS_100US)
434 {
435 pGVMM->nsMinSleepAlone = cNsResolution / 2;
436 pGVMM->nsMinSleepCompany = cNsResolution / 4;
437 pGVMM->nsEarlyWakeUp1 = 0;
438 pGVMM->nsEarlyWakeUp2 = 0;
439 }
440 else
441 {
442 pGVMM->nsMinSleepAlone = 2000;
443 pGVMM->nsMinSleepCompany = 2000;
444 pGVMM->nsEarlyWakeUp1 = 0;
445 pGVMM->nsEarlyWakeUp2 = 0;
446 }
447 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
448
449 /* The host CPU data. */
450 pGVMM->cHostCpus = cHostCpus;
451 uint32_t iCpu = cHostCpus;
452 RTCPUSET PossibleSet;
453 RTMpGetSet(&PossibleSet);
454 while (iCpu-- > 0)
455 {
456 pGVMM->aHostCpus[iCpu].idxCpuSet = iCpu;
457#ifdef GVMM_SCHED_WITH_PPT
458 pGVMM->aHostCpus[iCpu].Ppt.pTimer = NULL;
459 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
460 pGVMM->aHostCpus[iCpu].Ppt.uMinHz = 5; /** @todo Add some API which figures this one out. (not *that* important) */
461 pGVMM->aHostCpus[iCpu].Ppt.cTicksHistoriziationInterval = 1;
462 //pGVMM->aHostCpus[iCpu].Ppt.iTickHistorization = 0;
463 //pGVMM->aHostCpus[iCpu].Ppt.cNsInterval = 0;
464 //pGVMM->aHostCpus[iCpu].Ppt.uTimerHz = 0;
465 //pGVMM->aHostCpus[iCpu].Ppt.uDesiredHz = 0;
466 //pGVMM->aHostCpus[iCpu].Ppt.fStarted = false;
467 //pGVMM->aHostCpus[iCpu].Ppt.fStarting = false;
468 //pGVMM->aHostCpus[iCpu].Ppt.iHzHistory = 0;
469 //pGVMM->aHostCpus[iCpu].Ppt.aHzHistory = {0};
470#endif
471
472 if (RTCpuSetIsMember(&PossibleSet, iCpu))
473 {
474 pGVMM->aHostCpus[iCpu].idCpu = RTMpCpuIdFromSetIndex(iCpu);
475 pGVMM->aHostCpus[iCpu].u32Magic = GVMMHOSTCPU_MAGIC;
476
477#ifdef GVMM_SCHED_WITH_PPT
478 rc = RTTimerCreateEx(&pGVMM->aHostCpus[iCpu].Ppt.pTimer,
479 50*1000*1000 /* whatever */,
480 RTTIMER_FLAGS_CPU(iCpu) | RTTIMER_FLAGS_HIGH_RES,
481 gvmmR0SchedPeriodicPreemptionTimerCallback,
482 &pGVMM->aHostCpus[iCpu]);
483 if (RT_SUCCESS(rc))
484 rc = RTSpinlockCreate(&pGVMM->aHostCpus[iCpu].Ppt.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "GVMM/CPU");
485 if (RT_FAILURE(rc))
486 {
487 while (iCpu < cHostCpus)
488 {
489 RTTimerDestroy(pGVMM->aHostCpus[iCpu].Ppt.pTimer);
490 RTSpinlockDestroy(pGVMM->aHostCpus[iCpu].Ppt.hSpinlock);
491 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
492 iCpu++;
493 }
494 break;
495 }
496#endif
497 }
498 else
499 {
500 pGVMM->aHostCpus[iCpu].idCpu = NIL_RTCPUID;
501 pGVMM->aHostCpus[iCpu].u32Magic = 0;
502 }
503 }
504 if (RT_SUCCESS(rc))
505 {
506 g_pGVMM = pGVMM;
507 LogFlow(("GVMMR0Init: pGVMM=%p cHostCpus=%u\n", pGVMM, cHostCpus));
508 return VINF_SUCCESS;
509 }
510
511 /* bail out. */
512 RTCritSectRwDelete(&pGVMM->UsedLock);
513 }
514 RTCritSectDelete(&pGVMM->CreateDestroyLock);
515 }
516
517 RTMemFree(pGVMM);
518 return rc;
519}
520
521
522/**
523 * Terminates the GVMM.
524 *
525 * This is called while owning the loader semaphore (see supdrvLdrFree()).
526 * And unless something is wrong, there should be absolutely no VMs
527 * registered at this point.
528 */
529GVMMR0DECL(void) GVMMR0Term(void)
530{
531 LogFlow(("GVMMR0Term:\n"));
532
533 PGVMM pGVMM = g_pGVMM;
534 g_pGVMM = NULL;
535 if (RT_UNLIKELY(!RT_VALID_PTR(pGVMM)))
536 {
537 SUPR0Printf("GVMMR0Term: pGVMM=%RKv\n", pGVMM);
538 return;
539 }
540
541 /*
542 * First of all, stop all active timers.
543 */
544 uint32_t cActiveTimers = 0;
545 uint32_t iCpu = pGVMM->cHostCpus;
546 while (iCpu-- > 0)
547 {
548 ASMAtomicWriteU32(&pGVMM->aHostCpus[iCpu].u32Magic, ~GVMMHOSTCPU_MAGIC);
549#ifdef GVMM_SCHED_WITH_PPT
550 if ( pGVMM->aHostCpus[iCpu].Ppt.pTimer != NULL
551 && RT_SUCCESS(RTTimerStop(pGVMM->aHostCpus[iCpu].Ppt.pTimer)))
552 cActiveTimers++;
553#endif
554 }
555 if (cActiveTimers)
556 RTThreadSleep(1); /* fudge */
557
558 /*
559 * Invalidate the instance data and free resources.
560 */
561 pGVMM->u32Magic = ~GVMM_MAGIC;
562 RTCritSectRwDelete(&pGVMM->UsedLock);
563 RTCritSectDelete(&pGVMM->CreateDestroyLock);
564
565 pGVMM->iFreeHead = 0;
566 if (pGVMM->iUsedHead)
567 {
568 SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x cEMTs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs, pGVMM->cEMTs);
569 pGVMM->iUsedHead = 0;
570 }
571
572#ifdef GVMM_SCHED_WITH_PPT
573 iCpu = pGVMM->cHostCpus;
574 while (iCpu-- > 0)
575 {
576 RTTimerDestroy(pGVMM->aHostCpus[iCpu].Ppt.pTimer);
577 pGVMM->aHostCpus[iCpu].Ppt.pTimer = NULL;
578 RTSpinlockDestroy(pGVMM->aHostCpus[iCpu].Ppt.hSpinlock);
579 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
580 }
581#endif
582
583 RTMemFree(pGVMM);
584}
585
586
587/**
588 * A quick hack for setting global config values.
589 *
590 * @returns VBox status code.
591 *
592 * @param pSession The session handle. Used for authentication.
593 * @param pszName The variable name.
594 * @param u64Value The new value.
595 */
596GVMMR0DECL(int) GVMMR0SetConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t u64Value)
597{
598 /*
599 * Validate input.
600 */
601 PGVMM pGVMM;
602 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
603 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
604 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
605
606 /*
607 * String switch time!
608 */
609 if (strncmp(pszName, RT_STR_TUPLE("/GVMM/")))
610 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
611 int rc = VINF_SUCCESS;
612 pszName += sizeof("/GVMM/") - 1;
613 if (!strcmp(pszName, "cEMTsMeansCompany"))
614 {
615 if (u64Value <= UINT32_MAX)
616 pGVMM->cEMTsMeansCompany = u64Value;
617 else
618 rc = VERR_OUT_OF_RANGE;
619 }
620 else if (!strcmp(pszName, "MinSleepAlone"))
621 {
622 if (u64Value <= RT_NS_100MS)
623 pGVMM->nsMinSleepAlone = u64Value;
624 else
625 rc = VERR_OUT_OF_RANGE;
626 }
627 else if (!strcmp(pszName, "MinSleepCompany"))
628 {
629 if (u64Value <= RT_NS_100MS)
630 pGVMM->nsMinSleepCompany = u64Value;
631 else
632 rc = VERR_OUT_OF_RANGE;
633 }
634 else if (!strcmp(pszName, "EarlyWakeUp1"))
635 {
636 if (u64Value <= RT_NS_100MS)
637 {
638 pGVMM->nsEarlyWakeUp1 = u64Value;
639 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
640 }
641 else
642 rc = VERR_OUT_OF_RANGE;
643 }
644 else if (!strcmp(pszName, "EarlyWakeUp2"))
645 {
646 if (u64Value <= RT_NS_100MS)
647 {
648 pGVMM->nsEarlyWakeUp2 = u64Value;
649 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
650 }
651 else
652 rc = VERR_OUT_OF_RANGE;
653 }
654 else
655 rc = VERR_CFGM_VALUE_NOT_FOUND;
656 return rc;
657}
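/* Illustrative call of the setter above (hypothetical value, shown only to
 * document the "/GVMM/" name prefix and range convention; the real callers
 * live elsewhere):
 *      rc = GVMMR0SetConfig(pSession, "/GVMM/MinSleepAlone", 500000); // 0.5 ms
 */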
658
659
660/**
661 * A quick hack for getting global config values.
662 *
663 * @returns VBox status code.
664 *
665 * @param pSession The session handle. Used for authentication.
666 * @param pszName The variable name.
667 * @param pu64Value Where to return the value.
668 */
669GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
670{
671 /*
672 * Validate input.
673 */
674 PGVMM pGVMM;
675 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
676 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
677 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
678 AssertPtrReturn(pu64Value, VERR_INVALID_POINTER);
679
680 /*
681 * String switch time!
682 */
683 if (strncmp(pszName, RT_STR_TUPLE("/GVMM/")))
684 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
685 int rc = VINF_SUCCESS;
686 pszName += sizeof("/GVMM/") - 1;
687 if (!strcmp(pszName, "cEMTsMeansCompany"))
688 *pu64Value = pGVMM->cEMTsMeansCompany;
689 else if (!strcmp(pszName, "MinSleepAlone"))
690 *pu64Value = pGVMM->nsMinSleepAlone;
691 else if (!strcmp(pszName, "MinSleepCompany"))
692 *pu64Value = pGVMM->nsMinSleepCompany;
693 else if (!strcmp(pszName, "EarlyWakeUp1"))
694 *pu64Value = pGVMM->nsEarlyWakeUp1;
695 else if (!strcmp(pszName, "EarlyWakeUp2"))
696 *pu64Value = pGVMM->nsEarlyWakeUp2;
697 else
698 rc = VERR_CFGM_VALUE_NOT_FOUND;
699 return rc;
700}
701
702
703/**
704 * Acquire the 'used' lock in shared mode.
705 *
706 * This prevents destruction of the VM while we're in ring-0.
707 *
708 * @returns IPRT status code, see RTSemFastMutexRequest.
709 * @param a_pGVMM The GVMM instance data.
710 * @sa GVMMR0_USED_SHARED_UNLOCK, GVMMR0_USED_EXCLUSIVE_LOCK
711 */
712#define GVMMR0_USED_SHARED_LOCK(a_pGVMM) RTCritSectRwEnterShared(&(a_pGVMM)->UsedLock)
713
714/**
715 * Release the 'used' lock when owning it in shared mode.
716 *
717 * @returns IPRT status code, see RTSemFastMutexRequest.
718 * @param a_pGVMM The GVMM instance data.
719 * @sa GVMMR0_USED_SHARED_LOCK
720 */
721#define GVMMR0_USED_SHARED_UNLOCK(a_pGVMM) RTCritSectRwLeaveShared(&(a_pGVMM)->UsedLock)
722
723/**
724 * Acquire the 'used' lock in exclusive mode.
725 *
726 * Only use this function when making changes to the used list.
727 *
728 * @returns IPRT status code, see RTSemFastMutexRequest.
729 * @param a_pGVMM The GVMM instance data.
730 * @sa GVMMR0_USED_EXCLUSIVE_UNLOCK
731 */
732#define GVMMR0_USED_EXCLUSIVE_LOCK(a_pGVMM) RTCritSectRwEnterExcl(&(a_pGVMM)->UsedLock)
733
734/**
735 * Release the 'used' lock when owning it in exclusive mode.
736 *
737 * @returns IPRT status code, see RTSemFastMutexRelease.
738 * @param a_pGVMM The GVMM instance data.
739 * @sa GVMMR0_USED_EXCLUSIVE_LOCK, GVMMR0_USED_SHARED_UNLOCK
740 */
741#define GVMMR0_USED_EXCLUSIVE_UNLOCK(a_pGVMM) RTCritSectRwLeaveExcl(&(a_pGVMM)->UsedLock)
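/* Note on lock order, as documented on the GVMM structure above: the
 * CreateDestroyLock may be held while taking the UsedLock (in either mode),
 * but UsedLock owners must not acquire any other locks. */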
742
743
744/**
745 * Try acquire the 'create & destroy' lock.
746 *
747 * @returns IPRT status code, see RTSemFastMutexRequest.
748 * @param pGVMM The GVMM instance data.
749 */
750DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
751{
752 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
753 int rc = RTCritSectEnter(&pGVMM->CreateDestroyLock);
754 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
755 return rc;
756}
757
758
759/**
760 * Release the 'create & destroy' lock.
761 *
762 * @returns IPRT status code, see RTSemFastMutexRequest.
763 * @param pGVMM The GVMM instance data.
764 */
765DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
766{
767 LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
768 int rc = RTCritSectLeave(&pGVMM->CreateDestroyLock);
769 AssertRC(rc);
770 return rc;
771}
772
773
774/**
775 * Request wrapper for the GVMMR0CreateVM API.
776 *
777 * @returns VBox status code.
778 * @param pReq The request buffer.
779 * @param pSession The session handle. The VM will be associated with this.
780 */
781GVMMR0DECL(int) GVMMR0CreateVMReq(PGVMMCREATEVMREQ pReq, PSUPDRVSESSION pSession)
782{
783 /*
784 * Validate the request.
785 */
786 if (!RT_VALID_PTR(pReq))
787 return VERR_INVALID_POINTER;
788 if (pReq->Hdr.cbReq != sizeof(*pReq))
789 return VERR_INVALID_PARAMETER;
790 if (pReq->pSession != pSession)
791 return VERR_INVALID_POINTER;
792
793 /*
794 * Execute it.
795 */
796 PGVM pGVM;
797 pReq->pVMR0 = NULL;
798 pReq->pVMR3 = NIL_RTR3PTR;
799 int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &pGVM);
800 if (RT_SUCCESS(rc))
801 {
802 pReq->pVMR0 = pGVM; /** @todo don't expose this to ring-3, use a unique random number instead. */
803 pReq->pVMR3 = pGVM->pVMR3;
804 }
805 return rc;
806}
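/* Rough sketch of the ring-3 side of this request, as implied by the field
 * validation above (the real code lives in VMMR3; VMMR0_DO_GVMM_CREATE_VM and
 * SUPR3CallVMMR0Ex are the standard SUP/VMM request plumbing and are assumed
 * here rather than shown in this file):
 *      GVMMCREATEVMREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.pSession     = pSession;
 *      Req.cCpus        = cCpus;
 *      Req.pVMR0        = NULL;
 *      Req.pVMR3        = NIL_RTR3PTR;
 *      int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &Req.Hdr);
 */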
807
808
809/**
810 * Allocates the VM structure and registers it with GVM.
811 *
812 * The caller will become the VM owner and thereby the EMT.
813 *
814 * @returns VBox status code.
815 * @param pSession The support driver session.
816 * @param cCpus Number of virtual CPUs for the new VM.
817 * @param ppGVM Where to store the pointer to the VM structure.
818 *
819 * @thread EMT.
820 */
821GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PGVM *ppGVM)
822{
823 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
824 PGVMM pGVMM;
825 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
826
827 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
828 *ppGVM = NULL;
829
830 if ( cCpus == 0
831 || cCpus > VMM_MAX_CPU_COUNT)
832 return VERR_INVALID_PARAMETER;
833
834 RTNATIVETHREAD hEMT0 = RTThreadNativeSelf();
835 AssertReturn(hEMT0 != NIL_RTNATIVETHREAD, VERR_GVMM_BROKEN_IPRT);
836 RTPROCESS ProcId = RTProcSelf();
837 AssertReturn(ProcId != NIL_RTPROCESS, VERR_GVMM_BROKEN_IPRT);
838
839 /*
840 * The whole allocation process is protected by the lock.
841 */
842 int rc = gvmmR0CreateDestroyLock(pGVMM);
843 AssertRCReturn(rc, rc);
844
845 /*
846 * Only one VM per session.
847 */
848 if (SUPR0GetSessionVM(pSession) != NULL)
849 {
850 gvmmR0CreateDestroyUnlock(pGVMM);
851 SUPR0Printf("GVMMR0CreateVM: The session %p already got a VM: %p\n", pSession, SUPR0GetSessionVM(pSession));
852 return VERR_ALREADY_EXISTS;
853 }
854
855 /*
856 * Allocate a handle first so we don't waste resources unnecessarily.
857 */
858 uint16_t iHandle = pGVMM->iFreeHead;
859 if (iHandle)
860 {
861 PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
862
863 /* consistency checks, a bit paranoid as always. */
864 if ( !pHandle->pGVM
865 && !pHandle->pvObj
866 && pHandle->iSelf == iHandle)
867 {
868 pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
869 if (pHandle->pvObj)
870 {
871 /*
872 * Move the handle from the free to used list and perform permission checks.
873 */
874 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
875 AssertRC(rc);
876
877 pGVMM->iFreeHead = pHandle->iNext;
878 pHandle->iNext = pGVMM->iUsedHead;
879 pGVMM->iUsedHead = iHandle;
880 pGVMM->cVMs++;
881
882 pHandle->pGVM = NULL;
883 pHandle->pSession = pSession;
884 pHandle->hEMT0 = NIL_RTNATIVETHREAD;
885 pHandle->ProcId = NIL_RTPROCESS;
886
887 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
888
889 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
890 if (RT_SUCCESS(rc))
891 {
892 /*
893 * Allocate memory for the VM structure (combined VM + GVM).
894 */
895 const uint32_t cbVM = RT_UOFFSETOF_DYN(GVM, aCpus[cCpus]);
896 const uint32_t cPages = RT_ALIGN_32(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
897 RTR0MEMOBJ hVMMemObj = NIL_RTR0MEMOBJ;
898 rc = RTR0MemObjAllocPage(&hVMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
899 if (RT_SUCCESS(rc))
900 {
901 PGVM pGVM = (PGVM)RTR0MemObjAddress(hVMMemObj);
902 AssertPtr(pGVM);
903
904 /*
905 * Initialise the structure.
906 */
907 RT_BZERO(pGVM, cPages << PAGE_SHIFT);
908 gvmmR0InitPerVMData(pGVM, iHandle, cCpus, pSession);
909 pGVM->gvmm.s.VMMemObj = hVMMemObj;
910 rc = GMMR0InitPerVMData(pGVM);
911 int rc2 = PGMR0InitPerVMData(pGVM);
912 VMMR0InitPerVMData(pGVM);
913 DBGFR0InitPerVMData(pGVM);
914 PDMR0InitPerVMData(pGVM);
915 IOMR0InitPerVMData(pGVM);
916 TMR0InitPerVMData(pGVM);
917 if (RT_SUCCESS(rc) && RT_SUCCESS(rc2))
918 {
919 /*
920 * Allocate page array.
921 * This currently has to be made available to ring-3, but this should change eventually.
922 */
923 rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
924 if (RT_SUCCESS(rc))
925 {
926 PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
927 for (uint32_t iPage = 0; iPage < cPages; iPage++)
928 {
929 paPages[iPage].uReserved = 0;
930 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
931 Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
932 }
933
934 /*
935 * Map the page array, VM and VMCPU structures into ring-3.
936 */
937 AssertCompileSizeAlignment(VM, PAGE_SIZE);
938 rc = RTR0MemObjMapUserEx(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
939 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
940 0 /*offSub*/, sizeof(VM));
941 for (VMCPUID i = 0; i < cCpus && RT_SUCCESS(rc); i++)
942 {
943 AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
944 rc = RTR0MemObjMapUserEx(&pGVM->aCpus[i].gvmm.s.VMCpuMapObj, pGVM->gvmm.s.VMMemObj,
945 (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
946 RT_UOFFSETOF_DYN(GVM, aCpus[i]), sizeof(VMCPU));
947 }
948 if (RT_SUCCESS(rc))
949 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1,
950 0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
951 NIL_RTR0PROCESS);
952 if (RT_SUCCESS(rc))
953 {
954 /*
955 * Initialize all the VM pointers.
956 */
957 PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
958 AssertMsg(RTR0MemUserIsValidAddr(pVMR3) && pVMR3 != NIL_RTR3PTR, ("%p\n", pVMR3));
959
960 for (VMCPUID i = 0; i < cCpus; i++)
961 {
962 pGVM->aCpus[i].pVMR0 = pGVM;
963 pGVM->aCpus[i].pVMR3 = pVMR3;
964 pGVM->apCpusR3[i] = RTR0MemObjAddressR3(pGVM->aCpus[i].gvmm.s.VMCpuMapObj);
965 pGVM->aCpus[i].pVCpuR3 = pGVM->apCpusR3[i];
966 pGVM->apCpusR0[i] = &pGVM->aCpus[i];
967 AssertMsg(RTR0MemUserIsValidAddr(pGVM->apCpusR3[i]) && pGVM->apCpusR3[i] != NIL_RTR3PTR,
968 ("apCpusR3[%u]=%p\n", i, pGVM->apCpusR3[i]));
969 }
970
971 pGVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
972 AssertMsg(RTR0MemUserIsValidAddr(pGVM->paVMPagesR3) && pGVM->paVMPagesR3 != NIL_RTR3PTR,
973 ("%p\n", pGVM->paVMPagesR3));
974
975 /*
976 * Complete the handle - take the UsedLock sem just to be careful.
977 */
978 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
979 AssertRC(rc);
980
981 pHandle->pGVM = pGVM;
982 pHandle->hEMT0 = hEMT0;
983 pHandle->ProcId = ProcId;
984 pGVM->pVMR3 = pVMR3;
985 pGVM->pVMR3Unsafe = pVMR3;
986 pGVM->aCpus[0].hEMT = hEMT0;
987 pGVM->aCpus[0].hNativeThreadR0 = hEMT0;
988 pGVM->aCpus[0].cEmtHashCollisions = 0;
989 uint32_t const idxHash = GVMM_EMT_HASH_1(hEMT0);
990 pGVM->aCpus[0].gvmm.s.idxEmtHash = (uint16_t)idxHash;
991 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = hEMT0;
992 pGVM->gvmm.s.aEmtHash[idxHash].idVCpu = 0;
993 pGVMM->cEMTs += cCpus;
994
995 /* Associate it with the session and create the context hook for EMT0. */
996 rc = SUPR0SetSessionVM(pSession, pGVM, pGVM);
997 if (RT_SUCCESS(rc))
998 {
999 rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[0]);
1000 if (RT_SUCCESS(rc))
1001 {
1002 /*
1003 * Done!
1004 */
1005 VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pGVM, ProcId, (void *)hEMT0, cCpus);
1006
1007 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1008 gvmmR0CreateDestroyUnlock(pGVMM);
1009
1010 CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
1011
1012 *ppGVM = pGVM;
1013 Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
1014 return VINF_SUCCESS;
1015 }
1016
1017 SUPR0SetSessionVM(pSession, NULL, NULL);
1018 }
1019 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1020 }
1021
1022 /* Cleanup mappings. */
1023 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
1024 {
1025 RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
1026 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1027 }
1028 for (VMCPUID i = 0; i < cCpus; i++)
1029 if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
1030 {
1031 RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */);
1032 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1033 }
1034 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
1035 {
1036 RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */);
1037 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1038 }
1039 }
1040 }
1041 else if (RT_SUCCESS(rc))
1042 rc = rc2;
1043 }
1044 }
1045 /* else: The user wasn't permitted to create this VM. */
1046
1047 /*
1048 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
1049 * object reference here. A little extra mess because of the non-recursive lock.
1050 */
1051 void *pvObj = pHandle->pvObj;
1052 pHandle->pvObj = NULL;
1053 gvmmR0CreateDestroyUnlock(pGVMM);
1054
1055 SUPR0ObjRelease(pvObj, pSession);
1056
1057 SUPR0Printf("GVMMR0CreateVM: failed, rc=%Rrc\n", rc);
1058 return rc;
1059 }
1060
1061 rc = VERR_NO_MEMORY;
1062 }
1063 else
1064 rc = VERR_GVMM_IPE_1;
1065 }
1066 else
1067 rc = VERR_GVM_TOO_MANY_VMS;
1068
1069 gvmmR0CreateDestroyUnlock(pGVMM);
1070 return rc;
1071}
1072
1073
1074/**
1075 * Initializes the per VM data belonging to GVMM.
1076 *
1077 * @param pGVM Pointer to the global VM structure.
1078 * @param hSelf The handle.
1079 * @param cCpus The CPU count.
1080 * @param pSession The session this VM is associated with.
1081 */
1082static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession)
1083{
1084 AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
1085 AssertCompile(RT_SIZEOFMEMB(GVMCPU,gvmm.s) <= RT_SIZEOFMEMB(GVMCPU,gvmm.padding));
1086 AssertCompileMemberAlignment(VM, cpum, 64);
1087 AssertCompileMemberAlignment(VM, tm, 64);
1088
1089 /* GVM: */
1090 pGVM->u32Magic = GVM_MAGIC;
1091 pGVM->hSelf = hSelf;
1092 pGVM->cCpus = cCpus;
1093 pGVM->pSession = pSession;
1094 pGVM->pSelf = pGVM;
1095
1096 /* VM: */
1097 pGVM->enmVMState = VMSTATE_CREATING;
1098 pGVM->hSelfUnsafe = hSelf;
1099 pGVM->pSessionUnsafe = pSession;
1100 pGVM->pVMR0ForCall = pGVM;
1101 pGVM->cCpusUnsafe = cCpus;
1102 pGVM->uCpuExecutionCap = 100; /* default is no cap. */
1103 pGVM->uStructVersion = 1;
1104 pGVM->cbSelf = sizeof(VM);
1105 pGVM->cbVCpu = sizeof(VMCPU);
1106
1107 /* GVMM: */
1108 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
1109 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1110 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
1111 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1112 pGVM->gvmm.s.fDoneVMMR0Init = false;
1113 pGVM->gvmm.s.fDoneVMMR0Term = false;
1114 for (size_t i = 0; i < RT_ELEMENTS(pGVM->gvmm.s.aEmtHash); i++)
1115 {
1116 pGVM->gvmm.s.aEmtHash[i].hNativeEmt = NIL_RTNATIVETHREAD;
1117 pGVM->gvmm.s.aEmtHash[i].idVCpu = NIL_VMCPUID;
1118 }
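 /* The aEmtHash table initialized above maps native EMT thread handles to
    virtual CPU ids; it is filled in for EMT0 by GVMMR0CreateVM and for the
    remaining EMTs by GVMMR0RegisterVCpu, which resolves collisions with
    open addressing. */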
1119
1120 /*
1121 * Per virtual CPU.
1122 */
1123 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1124 {
1125 pGVM->aCpus[i].idCpu = i;
1126 pGVM->aCpus[i].idCpuUnsafe = i;
1127 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1128 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1129 pGVM->aCpus[i].gvmm.s.idxEmtHash = UINT16_MAX;
1130 pGVM->aCpus[i].hEMT = NIL_RTNATIVETHREAD;
1131 pGVM->aCpus[i].pGVM = pGVM;
1132 pGVM->aCpus[i].idHostCpu = NIL_RTCPUID;
1133 pGVM->aCpus[i].iHostCpuSet = UINT32_MAX;
1134 pGVM->aCpus[i].hNativeThread = NIL_RTNATIVETHREAD;
1135 pGVM->aCpus[i].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1136 pGVM->aCpus[i].enmState = VMCPUSTATE_STOPPED;
1137 pGVM->aCpus[i].pVCpuR0ForVtg = &pGVM->aCpus[i];
1138 }
1139}
1140
1141
1142/**
1143 * Does the VM initialization.
1144 *
1145 * @returns VBox status code.
1146 * @param pGVM The global (ring-0) VM structure.
1147 */
1148GVMMR0DECL(int) GVMMR0InitVM(PGVM pGVM)
1149{
1150 LogFlow(("GVMMR0InitVM: pGVM=%p\n", pGVM));
1151
1152 int rc = VERR_INTERNAL_ERROR_3;
1153 if ( !pGVM->gvmm.s.fDoneVMMR0Init
1154 && pGVM->aCpus[0].gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
1155 {
1156 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1157 {
1158 rc = RTSemEventMultiCreate(&pGVM->aCpus[i].gvmm.s.HaltEventMulti);
1159 if (RT_FAILURE(rc))
1160 {
1161 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1162 break;
1163 }
1164 }
1165 }
1166 else
1167 rc = VERR_WRONG_ORDER;
1168
1169 LogFlow(("GVMMR0InitVM: returns %Rrc\n", rc));
1170 return rc;
1171}
1172
1173
1174/**
1175 * Indicates that we're done with the ring-0 initialization
1176 * of the VM.
1177 *
1178 * @param pGVM The global (ring-0) VM structure.
1179 * @thread EMT(0)
1180 */
1181GVMMR0DECL(void) GVMMR0DoneInitVM(PGVM pGVM)
1182{
1183 /* Set the indicator. */
1184 pGVM->gvmm.s.fDoneVMMR0Init = true;
1185}
1186
1187
1188/**
1189 * Indicates that we're doing the ring-0 termination of the VM.
1190 *
1191 * @returns true if termination hasn't been done already, false if it has.
1192 * @param pGVM Pointer to the global VM structure. Optional.
1193 * @thread EMT(0) or session cleanup thread.
1194 */
1195GVMMR0DECL(bool) GVMMR0DoingTermVM(PGVM pGVM)
1196{
1197 /* Validate the VM structure, state and handle. */
1198 AssertPtrReturn(pGVM, false);
1199
1200 /* Set the indicator. */
1201 if (pGVM->gvmm.s.fDoneVMMR0Term)
1202 return false;
1203 pGVM->gvmm.s.fDoneVMMR0Term = true;
1204 return true;
1205}
1206
1207
1208/**
1209 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
1210 *
1212 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
1213 * and the caller is not the EMT thread, unfortunately. For security reasons, it
1214 * would've been nice if the caller had actually been the EMT thread or if we somehow
1215 * could've associated the calling thread with the VM up front.
1215 *
1216 * @returns VBox status code.
1217 * @param pGVM The global (ring-0) VM structure.
1218 *
1219 * @thread EMT(0) if it's associated with the VM, otherwise any thread.
1220 */
1221GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM)
1222{
1223 LogFlow(("GVMMR0DestroyVM: pGVM=%p\n", pGVM));
1224 PGVMM pGVMM;
1225 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1226
1227 /*
1228 * Validate the VM structure, state and caller.
1229 */
1230 AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
1231 AssertReturn(!((uintptr_t)pGVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1232 AssertMsgReturn(pGVM->enmVMState >= VMSTATE_CREATING && pGVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pGVM->enmVMState),
1233 VERR_WRONG_ORDER);
1234
1235 uint32_t hGVM = pGVM->hSelf;
1236 ASMCompilerBarrier();
1237 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_VM_HANDLE);
1238 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_VM_HANDLE);
1239
1240 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1241 AssertReturn(pHandle->pGVM == pGVM, VERR_NOT_OWNER);
1242
1243 RTPROCESS ProcId = RTProcSelf();
1244 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1245 AssertReturn( ( pHandle->hEMT0 == hSelf
1246 && pHandle->ProcId == ProcId)
1247 || pHandle->hEMT0 == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);
1248
1249 /*
1250 * Lookup the handle and destroy the object.
1251 * Since the lock isn't recursive and we'll have to leave it before dereferencing the
1252 * object, we take some precautions against racing callers just in case...
1253 */
1254 int rc = gvmmR0CreateDestroyLock(pGVMM);
1255 AssertRC(rc);
1256
1257 /* Be careful here because we might theoretically be racing someone else cleaning up. */
1258 if ( pHandle->pGVM == pGVM
1259 && ( ( pHandle->hEMT0 == hSelf
1260 && pHandle->ProcId == ProcId)
1261 || pHandle->hEMT0 == NIL_RTNATIVETHREAD)
1262 && RT_VALID_PTR(pHandle->pvObj)
1263 && RT_VALID_PTR(pHandle->pSession)
1264 && RT_VALID_PTR(pHandle->pGVM)
1265 && pHandle->pGVM->u32Magic == GVM_MAGIC)
1266 {
1267 /* Check that other EMTs have deregistered. */
1268 uint32_t cNotDeregistered = 0;
1269 for (VMCPUID idCpu = 1; idCpu < pGVM->cCpus; idCpu++)
1270 cNotDeregistered += pGVM->aCpus[idCpu].hEMT != GVMM_RTNATIVETHREAD_DESTROYED;
1271 if (cNotDeregistered == 0)
1272 {
1273 /* Grab the object pointer. */
1274 void *pvObj = pHandle->pvObj;
1275 pHandle->pvObj = NULL;
1276 gvmmR0CreateDestroyUnlock(pGVMM);
1277
1278 SUPR0ObjRelease(pvObj, pHandle->pSession);
1279 }
1280 else
1281 {
1282 gvmmR0CreateDestroyUnlock(pGVMM);
1283 rc = VERR_GVMM_NOT_ALL_EMTS_DEREGISTERED;
1284 }
1285 }
1286 else
1287 {
1288 SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.pGVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pGVM=%p hSelf=%p\n",
1289 pHandle, pHandle->pGVM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pGVM, hSelf);
1290 gvmmR0CreateDestroyUnlock(pGVMM);
1291 rc = VERR_GVMM_IPE_2;
1292 }
1293
1294 return rc;
1295}
1296
1297
1298/**
1299 * Performs VM cleanup task as part of object destruction.
1300 *
1301 * @param pGVM The GVM pointer.
1302 */
1303static void gvmmR0CleanupVM(PGVM pGVM)
1304{
1305 if ( pGVM->gvmm.s.fDoneVMMR0Init
1306 && !pGVM->gvmm.s.fDoneVMMR0Term)
1307 {
1308 if ( pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ
1309 && RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM)
1310 {
1311 LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
1312 VMMR0TermVM(pGVM, NIL_VMCPUID);
1313 }
1314 else
1315 AssertMsgFailed(("gvmmR0CleanupVM: VMMemObj=%p pGVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM));
1316 }
1317
1318 GMMR0CleanupVM(pGVM);
1319#ifdef VBOX_WITH_NEM_R0
1320 NEMR0CleanupVM(pGVM);
1321#endif
1322 PDMR0CleanupVM(pGVM);
1323 IOMR0CleanupVM(pGVM);
1324 DBGFR0CleanupVM(pGVM);
1325 PGMR0CleanupVM(pGVM);
1326 TMR0CleanupVM(pGVM);
1327 VMMR0CleanupVM(pGVM);
1328}
1329
1330
1331/**
1332 * @callback_method_impl{FNSUPDRVDESTRUCTOR,VM handle destructor}
1333 *
1334 * pvUser1 is the GVM instance pointer.
1335 * pvUser2 is the handle pointer.
1336 */
1337static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
1338{
1339 LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvUser1, pvUser2));
1340
1341 NOREF(pvObj);
1342
1343 /*
1344 * Some quick, paranoid, input validation.
1345 */
1346 PGVMHANDLE pHandle = (PGVMHANDLE)pvUser2;
1347 AssertPtr(pHandle);
1348 PGVMM pGVMM = (PGVMM)pvUser1;
1349 Assert(pGVMM == g_pGVMM);
1350 const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
1351 if ( !iHandle
1352 || iHandle >= RT_ELEMENTS(pGVMM->aHandles)
1353 || iHandle != pHandle->iSelf)
1354 {
1355 SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
1356 return;
1357 }
1358
1359 int rc = gvmmR0CreateDestroyLock(pGVMM);
1360 AssertRC(rc);
1361 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
1362 AssertRC(rc);
1363
1364 /*
1365 * This is a tad slow but a doubly linked list is too much hassle.
1366 */
1367 if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
1368 {
1369 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
1370 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1371 gvmmR0CreateDestroyUnlock(pGVMM);
1372 return;
1373 }
1374
1375 if (pGVMM->iUsedHead == iHandle)
1376 pGVMM->iUsedHead = pHandle->iNext;
1377 else
1378 {
1379 uint16_t iPrev = pGVMM->iUsedHead;
1380 int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
1381 while (iPrev)
1382 {
1383 if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
1384 {
1385 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
1386 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1387 gvmmR0CreateDestroyUnlock(pGVMM);
1388 return;
1389 }
1390 if (RT_UNLIKELY(c-- <= 0))
1391 {
1392 iPrev = 0;
1393 break;
1394 }
1395
1396 if (pGVMM->aHandles[iPrev].iNext == iHandle)
1397 break;
1398 iPrev = pGVMM->aHandles[iPrev].iNext;
1399 }
1400 if (!iPrev)
1401 {
1402 SUPR0Printf("GVM: can't find the handle previous previous of %d!\n", pHandle->iSelf);
1403 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1404 gvmmR0CreateDestroyUnlock(pGVMM);
1405 return;
1406 }
1407
1408 Assert(pGVMM->aHandles[iPrev].iNext == iHandle);
1409 pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
1410 }
1411 pHandle->iNext = 0;
1412 pGVMM->cVMs--;
1413
1414 /*
1415 * Do the global cleanup round.
1416 */
1417 PGVM pGVM = pHandle->pGVM;
1418 if ( RT_VALID_PTR(pGVM)
1419 && pGVM->u32Magic == GVM_MAGIC)
1420 {
1421 pGVMM->cEMTs -= pGVM->cCpus;
1422
1423 if (pGVM->pSession)
1424 SUPR0SetSessionVM(pGVM->pSession, NULL, NULL);
1425
1426 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1427
1428 gvmmR0CleanupVM(pGVM);
1429
1430 /*
1431 * Do the GVMM cleanup - must be done last.
1432 */
1433 /* The VM and VM pages mappings/allocations. */
1434 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
1435 {
1436 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
1437 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1438 }
1439
1440 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
1441 {
1442 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
1443 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1444 }
1445
1446 if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
1447 {
1448 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
1449 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
1450 }
1451
1452 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1453 {
1454 if (pGVM->aCpus[i].gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI)
1455 {
1456 rc = RTSemEventMultiDestroy(pGVM->aCpus[i].gvmm.s.HaltEventMulti); AssertRC(rc);
1457 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1458 }
1459 if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
1460 {
1461 rc = RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */); AssertRC(rc);
1462 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1463 }
1464 }
1465
1466 /* the GVM structure itself. */
1467 pGVM->u32Magic |= UINT32_C(0x80000000);
1468 Assert(pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ);
1469 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, true /*fFreeMappings*/); AssertRC(rc);
1470 pGVM = NULL;
1471
1472 /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
1473 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
1474 AssertRC(rc);
1475 }
1476 /* else: GVMMR0CreateVM cleanup. */
1477
1478 /*
1479 * Free the handle.
1480 */
1481 pHandle->iNext = pGVMM->iFreeHead;
1482 pGVMM->iFreeHead = iHandle;
1483 ASMAtomicWriteNullPtr(&pHandle->pGVM);
1484 ASMAtomicWriteNullPtr(&pHandle->pvObj);
1485 ASMAtomicWriteNullPtr(&pHandle->pSession);
1486 ASMAtomicWriteHandle(&pHandle->hEMT0, NIL_RTNATIVETHREAD);
1487 ASMAtomicWriteU32(&pHandle->ProcId, NIL_RTPROCESS);
1488
1489 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1490 gvmmR0CreateDestroyUnlock(pGVMM);
1491 LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
1492}
1493
1494
1495/**
1496 * Registers the calling thread as the EMT of a Virtual CPU.
1497 *
1498 * Note that VCPU 0 is automatically registered during VM creation.
1499 *
1500 * @returns VBox status code
1501 * @param pGVM The global (ring-0) VM structure.
1502 * @param idCpu VCPU id to register the current thread as.
1503 */
1504GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, VMCPUID idCpu)
1505{
1506 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
1507
1508 /*
1509 * Validate the VM structure, state and handle.
1510 */
1511 PGVMM pGVMM;
1512 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */);
1513 if (RT_SUCCESS(rc))
1514 {
1515 if (idCpu < pGVM->cCpus)
1516 {
1517 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
1518
1519 gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
1520
1521 /* Check that the EMT isn't already assigned to a thread. */
1522 if (pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD)
1523 {
1524 Assert(pGVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
1525
1526 /* A thread may only be one EMT (this makes sure hNativeSelf isn't NIL). */
1527 for (VMCPUID iCpu = 0; iCpu < pGVM->cCpus; iCpu++)
1528 AssertBreakStmt(pGVM->aCpus[iCpu].hEMT != hNativeSelf, rc = VERR_INVALID_PARAMETER);
1529 if (RT_SUCCESS(rc))
1530 {
1531 /*
1532 * Do the assignment, then try setup the hook. Undo if that fails.
1533 */
1534 unsigned cCollisions = 0;
1535 uint32_t idxHash = GVMM_EMT_HASH_1(hNativeSelf);
1536 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != NIL_RTNATIVETHREAD)
1537 {
1538 uint32_t const idxHash2 = GVMM_EMT_HASH_2(hNativeSelf);
1539 do
1540 {
1541 cCollisions++;
1542 Assert(cCollisions < GVMM_EMT_HASH_SIZE);
1543 idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
1544 } while (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != NIL_RTNATIVETHREAD);
1545 }
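 /* The loop above resolves hash collisions by double hashing: GVMM_EMT_HASH_2
    supplies the probe step, which is repeatedly added (mod GVMM_EMT_HASH_SIZE)
    until a free aEmtHash slot turns up, while the assertion guards against
    probing more slots than the table holds. */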
1546 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = hNativeSelf;
1547 pGVM->gvmm.s.aEmtHash[idxHash].idVCpu = idCpu;
1548 pGVM->aCpus[idCpu].hNativeThreadR0 = hNativeSelf;
1549 pGVM->aCpus[idCpu].hEMT = hNativeSelf;
1550 pGVM->aCpus[idCpu].cEmtHashCollisions = (uint8_t)cCollisions;
1551 pGVM->aCpus[idCpu].gvmm.s.idxEmtHash = (uint16_t)idxHash;
1552
1553 rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[idCpu]);
1554 if (RT_SUCCESS(rc))
1555 CPUMR0RegisterVCpuThread(&pGVM->aCpus[idCpu]);
1556 else
1557 {
1558 pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1559 pGVM->aCpus[idCpu].hEMT = NIL_RTNATIVETHREAD;
1560 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = NIL_RTNATIVETHREAD;
1561 pGVM->gvmm.s.aEmtHash[idxHash].idVCpu = NIL_VMCPUID;
1562 pGVM->aCpus[idCpu].gvmm.s.idxEmtHash = UINT16_MAX;
1563 }
1564 }
1565 }
1566 else
1567 rc = VERR_ACCESS_DENIED;
1568
1569 gvmmR0CreateDestroyUnlock(pGVMM);
1570 }
1571 else
1572 rc = VERR_INVALID_CPU_ID;
1573 }
1574 return rc;
1575}
1576
1577
1578/**
1579 * Deregisters the calling thread as the EMT of a Virtual CPU.
1580 *
1581 * Note that VCPU 0 shall call GVMMR0DestroyVM instead of this API.
1582 *
1583 * @returns VBox status code
1584 * @param pGVM The global (ring-0) VM structure.
1585 * @param idCpu VCPU id to deregister the current thread from.
1586 */
1587GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, VMCPUID idCpu)
1588{
1589 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
1590
1591 /*
1592 * Validate the VM structure, state and handle.
1593 */
1594 PGVMM pGVMM;
1595 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
1596 if (RT_SUCCESS(rc))
1597 {
1598 /*
1599 * Take the destruction lock and recheck the handle state to
1600 * prevent racing GVMMR0DestroyVM.
1601 */
1602 gvmmR0CreateDestroyLock(pGVMM);
1603
1604 uint32_t hSelf = pGVM->hSelf;
1605 ASMCompilerBarrier();
1606 if ( hSelf < RT_ELEMENTS(pGVMM->aHandles)
1607 && pGVMM->aHandles[hSelf].pvObj != NULL
1608 && pGVMM->aHandles[hSelf].pGVM == pGVM)
1609 {
1610 /*
1611 * Do per-EMT cleanups.
1612 */
1613 VMMR0ThreadCtxHookDestroyForEmt(&pGVM->aCpus[idCpu]);
1614
1615 /*
1616 * Invalidate hEMT. We don't use NIL here as that would allow
1617 * GVMMR0RegisterVCpu to be called again, and we don't want that.
1618 */
1619 pGVM->aCpus[idCpu].hEMT = GVMM_RTNATIVETHREAD_DESTROYED;
1620 pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1621
1622 uint32_t const idxHash = pGVM->aCpus[idCpu].gvmm.s.idxEmtHash;
1623 if (idxHash < RT_ELEMENTS(pGVM->gvmm.s.aEmtHash))
1624 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = GVMM_RTNATIVETHREAD_DESTROYED;
1625 }
1626
1627 gvmmR0CreateDestroyUnlock(pGVMM);
1628 }
1629 return rc;
1630}
1631
1632
1633/**
1634 * Lookup a GVM structure by its handle.
1635 *
1636 * @returns The GVM pointer on success, NULL on failure.
1637 * @param hGVM The global VM handle. Asserts on bad handle.
1638 */
1639GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
1640{
1641 PGVMM pGVMM;
1642 GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
1643
1644 /*
1645 * Validate.
1646 */
1647 AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
1648 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
1649
1650 /*
1651 * Look it up.
1652 */
1653 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1654 AssertPtrReturn(pHandle->pvObj, NULL);
1655 PGVM pGVM = pHandle->pGVM;
1656 AssertPtrReturn(pGVM, NULL);
1657
1658 return pGVM;
1659}
1660
1661
1662/**
1663 * Check that the given GVM and VM structures match up.
1664 *
1665 * The calling thread must be in the same process as the VM. All current lookups
1666 * are by threads inside the same process, so this will not be an issue.
1667 *
1668 * @returns VBox status code.
1669 * @param pGVM The global (ring-0) VM structure.
1670 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1671 * @param fTakeUsedLock Whether to take the used lock or not. We take it in
1672 * shared mode when requested.
1673 *
1674 * Be very careful if not taking the lock as it's
1675 * possible that the VM will disappear then!
1676 *
1677 * @remark This will not assert on an invalid pGVM but will try to return silently.
1678 */
1679static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
1680{
1681 /*
1682 * Check the pointers.
1683 */
1684 int rc;
1685 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1686 && ((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0 ))
1687 {
1688 /*
1689 * Get the pGVMM instance and check the VM handle.
1690 */
1691 PGVMM pGVMM;
1692 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1693
1694 uint16_t hGVM = pGVM->hSelf;
1695 if (RT_LIKELY( hGVM != NIL_GVM_HANDLE
1696 && hGVM < RT_ELEMENTS(pGVMM->aHandles)))
1697 {
1698 RTPROCESS const pidSelf = RTProcSelf();
1699 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1700 if (fTakeUsedLock)
1701 {
1702 rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
1703 AssertRCReturn(rc, rc);
1704 }
1705
1706 if (RT_LIKELY( pHandle->pGVM == pGVM
1707 && pHandle->ProcId == pidSelf
1708 && RT_VALID_PTR(pHandle->pvObj)))
1709 {
1710 /*
1711 * Some more VM data consistency checks.
1712 */
1713 if (RT_LIKELY( pGVM->cCpusUnsafe == pGVM->cCpus
1714 && pGVM->hSelfUnsafe == hGVM
1715 && pGVM->pSelf == pGVM))
1716 {
1717 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1718 && pGVM->enmVMState <= VMSTATE_TERMINATED))
1719 {
1720 *ppGVMM = pGVMM;
1721 return VINF_SUCCESS;
1722 }
1723 rc = VERR_INCONSISTENT_VM_HANDLE;
1724 }
1725 else
1726 rc = VERR_INCONSISTENT_VM_HANDLE;
1727 }
1728 else
1729 rc = VERR_INVALID_VM_HANDLE;
1730
1731 if (fTakeUsedLock)
1732 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
1733 }
1734 else
1735 rc = VERR_INVALID_VM_HANDLE;
1736 }
1737 else
1738 rc = VERR_INVALID_POINTER;
1739 return rc;
1740}
1741
1742
1743/**
1744 * Validates a GVM/VM pair.
1745 *
1746 * @returns VBox status code.
1747 * @param pGVM The global (ring-0) VM structure.
1748 */
1749GVMMR0DECL(int) GVMMR0ValidateGVM(PGVM pGVM)
1750{
1751 PGVMM pGVMM;
1752 return gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
1753}
1754
1755
1756/**
1757 * Check that the given GVM and VM structures match up.
1758 *
1759 * The calling thread must be in the same process as the VM. All current lookups
1760 * are by threads inside the same process, so this will not be an issue.
1761 *
1762 * @returns VBox status code.
1763 * @param pGVM The global (ring-0) VM structure.
1764 * @param idCpu The (alleged) Virtual CPU ID of the calling EMT.
1765 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1766 * @thread EMT
1767 *
1768 * @remarks This will assert in all failure paths.
1769 */
1770static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM)
1771{
1772 /*
1773 * Check the pointers.
1774 */
1775 AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
1776 AssertReturn(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER);
1777
1778 /*
1779 * Get the pGVMM instance and check the VM handle.
1780 */
1781 PGVMM pGVMM;
1782 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1783
1784 uint16_t hGVM = pGVM->hSelf;
1785 ASMCompilerBarrier();
1786 AssertReturn( hGVM != NIL_GVM_HANDLE
1787 && hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_VM_HANDLE);
1788
1789 RTPROCESS const pidSelf = RTProcSelf();
1790 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1791 AssertReturn( pHandle->pGVM == pGVM
1792 && pHandle->ProcId == pidSelf
1793 && RT_VALID_PTR(pHandle->pvObj),
1794 VERR_INVALID_HANDLE);
1795
1796 /*
1797 * Check the EMT claim.
1798 */
1799 RTNATIVETHREAD const hAllegedEMT = RTThreadNativeSelf();
1800 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
1801 AssertReturn(pGVM->aCpus[idCpu].hEMT == hAllegedEMT, VERR_NOT_OWNER);
1802
1803 /*
1804 * Some more VM data consistency checks.
1805 */
1806 AssertReturn(pGVM->cCpusUnsafe == pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE);
1807 AssertReturn(pGVM->hSelfUnsafe == hGVM, VERR_INCONSISTENT_VM_HANDLE);
1808 AssertReturn( pGVM->enmVMState >= VMSTATE_CREATING
1809 && pGVM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE);
1810
1811 *ppGVMM = pGVMM;
1812 return VINF_SUCCESS;
1813}
1814
1815
1816/**
1817 * Validates a GVM/EMT pair.
1818 *
1819 * @returns VBox status code.
1820 * @param pGVM The global (ring-0) VM structure.
1821 * @param idCpu The Virtual CPU ID of the calling EMT.
1822 * @thread EMT(idCpu)
1823 */
1824GVMMR0DECL(int) GVMMR0ValidateGVMandEMT(PGVM pGVM, VMCPUID idCpu)
1825{
1826 PGVMM pGVMM;
1827 return gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
1828}
1829
1830
1831/**
1832 * Looks up the VM belonging to the specified EMT thread.
1833 *
1834 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1835 * unnecessary kernel panics when the EMT thread hits an assertion. The
1836 * caller may or may not be an EMT thread.
1837 *
1838 * @returns Pointer to the VM on success, NULL on failure.
1839 * @param hEMT The native thread handle of the EMT.
1840 * NIL_RTNATIVETHREAD means the current thread
1841 */
1842GVMMR0DECL(PVMCC) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
1843{
1844 /*
1845 * No assertions here as we're usually called in an AssertMsgN or
1846 * RTAssert* context.
1847 */
1848 PGVMM pGVMM = g_pGVMM;
1849 if ( !RT_VALID_PTR(pGVMM)
1850 || pGVMM->u32Magic != GVMM_MAGIC)
1851 return NULL;
1852
1853 if (hEMT == NIL_RTNATIVETHREAD)
1854 hEMT = RTThreadNativeSelf();
1855 RTPROCESS ProcId = RTProcSelf();
1856
1857 /*
1858 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
1859 */
1860/** @todo introduce some pid hash table here, please. */
1861 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1862 {
1863 if ( pGVMM->aHandles[i].iSelf == i
1864 && pGVMM->aHandles[i].ProcId == ProcId
1865 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
1866 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
1867 {
1868 if (pGVMM->aHandles[i].hEMT0 == hEMT)
1869 return pGVMM->aHandles[i].pGVM;
1870
1871 /* This is fairly safe with the current process-per-VM approach. */
1872 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1873 VMCPUID const cCpus = pGVM->cCpus;
1874 ASMCompilerBarrier();
1875 if ( cCpus < 1
1876 || cCpus > VMM_MAX_CPU_COUNT)
1877 continue;
1878 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
1879 if (pGVM->aCpus[idCpu].hEMT == hEMT)
1880 return pGVMM->aHandles[i].pGVM;
1881 }
1882 }
1883 return NULL;
1884}
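
/*
 * Usage sketch (illustrative only, kept out of the build): how assertion or
 * logging code might resolve the VM owning the current thread. The helper
 * name myR0AssertHelper is an assumption made up for this example.
 */
#if 0
static void myR0AssertHelper(void)
{
    /* NIL_RTNATIVETHREAD means "look up the calling thread". */
    PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        SUPR0Printf("Assertion hit on an EMT of VM %p\n", pVM);
}
#endif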
1885
1886
1887/**
1888 * Looks up the GVMCPU belonging to the specified EMT thread.
1889 *
1890 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1891 * unnecessary kernel panics when the EMT thread hits an assertion. The
1892 * caller may or may not be an EMT thread.
1893 *
1894 * @returns Pointer to the GVMCPU on success, NULL on failure.
1895 * @param hEMT The native thread handle of the EMT.
1896 * NIL_RTNATIVETHREAD means the current thread
1897 */
1898GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByEMT(RTNATIVETHREAD hEMT)
1899{
1900 /*
1901 * No assertions here as we're usually called in an AssertMsgN,
1902 * RTAssert*, Log and LogRel contexts.
1903 */
1904 PGVMM pGVMM = g_pGVMM;
1905 if ( !RT_VALID_PTR(pGVMM)
1906 || pGVMM->u32Magic != GVMM_MAGIC)
1907 return NULL;
1908
1909 if (hEMT == NIL_RTNATIVETHREAD)
1910 hEMT = RTThreadNativeSelf();
1911 RTPROCESS ProcId = RTProcSelf();
1912
1913 /*
1914 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
1915 */
1916/** @todo introduce some pid hash table here, please. */
1917 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1918 {
1919 if ( pGVMM->aHandles[i].iSelf == i
1920 && pGVMM->aHandles[i].ProcId == ProcId
1921 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
1922 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
1923 {
1924 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1925 if (pGVMM->aHandles[i].hEMT0 == hEMT)
1926 return &pGVM->aCpus[0];
1927
1928 /* This is fairly safe with the current process-per-VM approach. */
1929 VMCPUID const cCpus = pGVM->cCpus;
1930 ASMCompilerBarrier();
1931 ASMCompilerBarrier();
1932 if ( cCpus < 1
1933 || cCpus > VMM_MAX_CPU_COUNT)
1934 continue;
1935 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
1936 if (pGVM->aCpus[idCpu].hEMT == hEMT)
1937 return &pGVM->aCpus[idCpu];
1938 }
1939 }
1940 return NULL;
1941}
1942
1943
1944/**
1945 * Get the GVMCPU structure for the given EMT.
1946 *
1947 * @returns The VCpu structure for @a hEMT, NULL if not an EMT.
1948 * @param pGVM The global (ring-0) VM structure.
1949 * @param hEMT The native thread handle of the EMT.
1950 * NIL_RTNATIVETHREAD means the current thread
1951 */
1952GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByGVMandEMT(PGVM pGVM, RTNATIVETHREAD hEMT)
1953{
1954 /*
1955 * Validate & adjust input.
1956 */
1957 AssertPtr(pGVM);
1958 Assert(pGVM->u32Magic == GVM_MAGIC);
1959 if (hEMT == NIL_RTNATIVETHREAD /* likely */)
1960 {
1961 hEMT = RTThreadNativeSelf();
1962 AssertReturn(hEMT != NIL_RTNATIVETHREAD, NULL);
1963 }
1964
1965 /*
1966 * Find the matching hash table entry.
1967 */
1968 uint32_t idxHash = GVMM_EMT_HASH_1(hEMT);
1969 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hEMT)
1970 { /* likely */ }
1971 else
1972 {
1973#ifdef VBOX_STRICT
1974 unsigned cCollisions = 0;
1975#endif
1976 uint32_t const idxHash2 = GVMM_EMT_HASH_2(hEMT);
1977 for (;;)
1978 {
1979 Assert(cCollisions++ < GVMM_EMT_HASH_SIZE);
1980 idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
1981 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hEMT)
1982 break;
1983 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == NIL_RTNATIVETHREAD)
1984 {
1985#ifdef VBOX_STRICT
1986 uint32_t idxCpu = pGVM->cCpus;
1987 AssertStmt(idxCpu < VMM_MAX_CPU_COUNT, idxCpu = VMM_MAX_CPU_COUNT);
1988 while (idxCpu-- > 0)
1989 Assert(pGVM->aCpus[idxCpu].hNativeThreadR0 != hEMT);
1990#endif
1991 return NULL;
1992 }
1993 }
1994 }
1995
1996 /*
1997 * Validate the VCpu number and translate it into a pointer.
1998 */
1999 VMCPUID const idCpu = pGVM->gvmm.s.aEmtHash[idxHash].idVCpu;
2000 AssertReturn(idCpu < pGVM->cCpus, NULL);
2001 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2002 Assert(pGVCpu->hNativeThreadR0 == hEMT);
2003 Assert(pGVCpu->gvmm.s.idxEmtHash == idxHash);
2004 return pGVCpu;
2005}
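
/*
 * Usage sketch (illustrative only, kept out of the build): resolving the
 * calling EMT's GVMCPU via the EMT hash. The function name myR0LogCurrentVCpu
 * is an assumption made up for this example.
 */
#if 0
static void myR0LogCurrentVCpu(PGVM pGVM)
{
    PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD /* current thread */);
    if (pGVCpu)
        Log(("Called on EMT, idCpu=%u idxEmtHash=%u\n", pGVCpu->idCpu, pGVCpu->gvmm.s.idxEmtHash));
    else
        Log(("Not called on an EMT of this VM\n"));
}
#endif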
2006
2007
2008/**
2009 * This will wake up expired and soon-to-be-expired VMs.
2010 *
2011 * @returns Number of VMs that have been woken up.
2012 * @param pGVMM Pointer to the GVMM instance data.
2013 * @param u64Now The current time.
2014 */
2015static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
2016{
2017 /*
2018 * Skip this if we've been disabled because of high resolution wakeups or by
2019 * the user.
2020 */
2021 if (!pGVMM->fDoEarlyWakeUps)
2022 return 0;
2023
2024/** @todo Rewrite this algorithm. See performance defect XYZ. */
2025
2026 /*
2027 * A cheap optimization to stop wasting so much time here on big setups.
2028 */
2029 const uint64_t uNsEarlyWakeUp2 = u64Now + pGVMM->nsEarlyWakeUp2;
2030 if ( pGVMM->cHaltedEMTs == 0
2031 || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
2032 return 0;
2033
2034 /*
2035 * Only one thread doing this at a time.
2036 */
2037 if (!ASMAtomicCmpXchgBool(&pGVMM->fDoingEarlyWakeUps, true, false))
2038 return 0;
2039
2040 /*
2041 * The first pass will wake up VMs which have actually expired
2042 * and look for VMs that should be woken up in the 2nd and 3rd passes.
2043 */
2044 const uint64_t uNsEarlyWakeUp1 = u64Now + pGVMM->nsEarlyWakeUp1;
2045 uint64_t u64Min = UINT64_MAX;
2046 unsigned cWoken = 0;
2047 unsigned cHalted = 0;
2048 unsigned cTodo2nd = 0;
2049 unsigned cTodo3rd = 0;
2050 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2051 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2052 i = pGVMM->aHandles[i].iNext)
2053 {
2054 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2055 if ( RT_VALID_PTR(pCurGVM)
2056 && pCurGVM->u32Magic == GVM_MAGIC)
2057 {
2058 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2059 {
2060 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2061 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2062 if (u64)
2063 {
2064 if (u64 <= u64Now)
2065 {
2066 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2067 {
2068 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2069 AssertRC(rc);
2070 cWoken++;
2071 }
2072 }
2073 else
2074 {
2075 cHalted++;
2076 if (u64 <= uNsEarlyWakeUp1)
2077 cTodo2nd++;
2078 else if (u64 <= uNsEarlyWakeUp2)
2079 cTodo3rd++;
2080 else if (u64 < u64Min)
2081 u64Min = u64;
2082 }
2083 }
2084 }
2085 }
2086 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2087 }
2088
2089 if (cTodo2nd)
2090 {
2091 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2092 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2093 i = pGVMM->aHandles[i].iNext)
2094 {
2095 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2096 if ( RT_VALID_PTR(pCurGVM)
2097 && pCurGVM->u32Magic == GVM_MAGIC)
2098 {
2099 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2100 {
2101 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2102 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2103 if ( u64
2104 && u64 <= uNsEarlyWakeUp1)
2105 {
2106 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2107 {
2108 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2109 AssertRC(rc);
2110 cWoken++;
2111 }
2112 }
2113 }
2114 }
2115 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2116 }
2117 }
2118
2119 if (cTodo3rd)
2120 {
2121 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2122 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2123 i = pGVMM->aHandles[i].iNext)
2124 {
2125 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2126 if ( RT_VALID_PTR(pCurGVM)
2127 && pCurGVM->u32Magic == GVM_MAGIC)
2128 {
2129 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2130 {
2131 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2132 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2133 if ( u64
2134 && u64 <= uNsEarlyWakeUp2)
2135 {
2136 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2137 {
2138 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2139 AssertRC(rc);
2140 cWoken++;
2141 }
2142 }
2143 }
2144 }
2145 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2146 }
2147 }
2148
2149 /*
2150 * Set the minimum value.
2151 */
2152 pGVMM->uNsNextEmtWakeup = u64Min;
2153
2154 ASMAtomicWriteBool(&pGVMM->fDoingEarlyWakeUps, false);
2155 return cWoken;
2156}
2157
2158
2159/**
2160 * Halt the EMT thread.
2161 *
2162 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
2163 * VERR_INTERRUPTED if a signal was scheduled for the thread.
2164 * @param pGVM The global (ring-0) VM structure.
2165 * @param pGVCpu The global (ring-0) CPU structure of the calling
2166 * EMT.
2167 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
2168 * @thread EMT(pGVCpu).
2169 */
2170GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime)
2171{
2172 LogFlow(("GVMMR0SchedHalt: pGVM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n",
2173 pGVM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime));
2174 GVMM_CHECK_SMAP_SETUP();
2175 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2176
2177 PGVMM pGVMM;
2178 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2179
2180 pGVM->gvmm.s.StatsSched.cHaltCalls++;
2181 Assert(!pGVCpu->gvmm.s.u64HaltExpire);
2182
2183 /*
2184 * If we're doing early wake-ups, we must take the UsedList lock before we
2185 * start querying the current time.
2186 * Note! Interrupts must NOT be disabled at this point because we ask for GIP time!
2187 */
2188 bool const fDoEarlyWakeUps = pGVMM->fDoEarlyWakeUps;
2189 if (fDoEarlyWakeUps)
2190 {
2191 int rc2 = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc2);
2192 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2193 }
2194
2195 pGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
2196
2197 /* GIP hack: We might be frequently sleeping for short intervals where the
2198 difference between GIP and system time matters on systems with high resolution
2199 system time. So, convert the input from GIP to System time in that case. */
2200 Assert(ASMGetFlags() & X86_EFL_IF);
2201 const uint64_t u64NowSys = RTTimeSystemNanoTS();
2202 const uint64_t u64NowGip = RTTimeNanoTS();
2203 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2204
2205 if (fDoEarlyWakeUps)
2206 {
2207 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
2208 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2209 }
2210
2211 /*
2212 * Go to sleep if we must...
2213 * Cap the sleep time to 1 second to be on the safe side.
2214 */
2215 int rc;
2216 uint64_t cNsInterval = u64ExpireGipTime - u64NowGip;
2217 if ( u64NowGip < u64ExpireGipTime
2218 && cNsInterval >= (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
2219 ? pGVMM->nsMinSleepCompany
2220 : pGVMM->nsMinSleepAlone))
2221 {
2222 pGVM->gvmm.s.StatsSched.cHaltBlocking++;
2223 if (cNsInterval > RT_NS_1SEC)
2224 u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
2225 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
2226 ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
2227 if (fDoEarlyWakeUps)
2228 {
2229 if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
2230 pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
2231 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2232 }
2233 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2234
2235 rc = RTSemEventMultiWaitEx(pGVCpu->gvmm.s.HaltEventMulti,
2236 RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
2237 u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
2238 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2239
2240 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
2241 ASMAtomicDecU32(&pGVMM->cHaltedEMTs);
2242
2243 /* Reset the semaphore to try to prevent a few false wake-ups. */
2244 if (rc == VINF_SUCCESS)
2245 {
2246 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
2247 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2248 }
2249 else if (rc == VERR_TIMEOUT)
2250 {
2251 pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
2252 rc = VINF_SUCCESS;
2253 }
2254 }
2255 else
2256 {
2257 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
2258 if (fDoEarlyWakeUps)
2259 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2260 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2261 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
2262 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2263 rc = VINF_SUCCESS;
2264 }
2265
2266 return rc;
2267}
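
/*
 * Usage sketch (illustrative only, kept out of the build): halting the calling
 * EMT for at most one millisecond. The expiry is an absolute GIP timestamp, so
 * the caller adds its timeout to RTTimeNanoTS(). Assumes it runs on the EMT
 * itself with interrupts enabled; myR0HaltBriefly is a made-up name.
 */
#if 0
static int myR0HaltBriefly(PGVM pGVM, PGVMCPU pGVCpu)
{
    uint64_t const u64ExpireGipTime = RTTimeNanoTS() + UINT64_C(1000000) /* 1 ms */;
    return GVMMR0SchedHalt(pGVM, pGVCpu, u64ExpireGipTime);
}
#endif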
2268
2269
2270/**
2271 * Halt the EMT thread.
2272 *
2273 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
2274 * VERR_INTERRUPTED if a signal was scheduled for the thread.
2275 * @param pGVM The global (ring-0) VM structure.
2276 * @param idCpu The Virtual CPU ID of the calling EMT.
2277 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
2278 * @thread EMT(idCpu).
2279 */
2280GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
2281{
2282 GVMM_CHECK_SMAP_SETUP();
2283 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2284 PGVMM pGVMM;
2285 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
2286 if (RT_SUCCESS(rc))
2287 {
2288 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2289 rc = GVMMR0SchedHalt(pGVM, &pGVM->aCpus[idCpu], u64ExpireGipTime);
2290 }
2291 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2292 return rc;
2293}
2294
2295
2296
2297/**
2298 * Worker for GVMMR0SchedWakeUp and GVMMR0SchedWakeUpAndPokeCpus that wakes up
2299 * a sleeping EMT.
2300 *
2301 * @retval VINF_SUCCESS if successfully woken up.
2302 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2303 *
2304 * @param pGVM The global (ring-0) VM structure.
2305 * @param pGVCpu The global (ring-0) VCPU structure.
2306 */
2307DECLINLINE(int) gvmmR0SchedWakeUpOne(PGVM pGVM, PGVMCPU pGVCpu)
2308{
2309 pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
2310
2311 /*
2312 * Signal the semaphore regardless of whether it's currently blocked on it.
2313 *
2314 * The reason for this is that there is absolutely no way we can be 100%
2315 * certain that it isn't *about* to go to sleep on it and just got
2316 * delayed a bit en route. So, we will always signal the semaphore when
2317 * it is flagged as halted in the VMM.
2318 */
2319/** @todo we can optimize some of that by means of the pVCpu->enmState now. */
2320 int rc;
2321 if (pGVCpu->gvmm.s.u64HaltExpire)
2322 {
2323 rc = VINF_SUCCESS;
2324 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
2325 }
2326 else
2327 {
2328 rc = VINF_GVM_NOT_BLOCKED;
2329 pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
2330 }
2331
2332 int rc2 = RTSemEventMultiSignal(pGVCpu->gvmm.s.HaltEventMulti);
2333 AssertRC(rc2);
2334
2335 return rc;
2336}
2337
2338
2339/**
2340 * Wakes up the halted EMT thread so it can service a pending request.
2341 *
2342 * @returns VBox status code.
2343 * @retval VINF_SUCCESS if successfully woken up.
2344 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2345 *
2346 * @param pGVM The global (ring-0) VM structure.
2347 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2348 * @param fTakeUsedLock Take the used lock or not
2349 * @thread Any but EMT(idCpu).
2350 */
2351GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
2352{
2353 GVMM_CHECK_SMAP_SETUP();
2354 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2355
2356 /*
2357 * Validate input and take the UsedLock.
2358 */
2359 PGVMM pGVMM;
2360 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
2361 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2362 if (RT_SUCCESS(rc))
2363 {
2364 if (idCpu < pGVM->cCpus)
2365 {
2366 /*
2367 * Do the actual job.
2368 */
2369 rc = gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2370 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2371
2372 if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps)
2373 {
2374 /*
2375 * While we're here, do a round of scheduling.
2376 */
2377 Assert(ASMGetFlags() & X86_EFL_IF);
2378 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
2379 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
2380 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2381 }
2382 }
2383 else
2384 rc = VERR_INVALID_CPU_ID;
2385
2386 if (fTakeUsedLock)
2387 {
2388 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2389 AssertRC(rc2);
2390 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2391 }
2392 }
2393
2394 LogFlow(("GVMMR0SchedWakeUpEx: returns %Rrc\n", rc));
2395 return rc;
2396}
2397
2398
2399/**
2400 * Wakes up the halted EMT thread so it can service a pending request.
2401 *
2402 * @returns VBox status code.
2403 * @retval VINF_SUCCESS if successfully woken up.
2404 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2405 *
2406 * @param pGVM The global (ring-0) VM structure.
2407 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2408 * @thread Any but EMT(idCpu).
2409 */
2410GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, VMCPUID idCpu)
2411{
2412 return GVMMR0SchedWakeUpEx(pGVM, idCpu, true /* fTakeUsedLock */);
2413}
2414
2415
2416/**
2417 * Wakes up the halted EMT thread so it can service a pending request, no GVM
2418 * parameter and no used locking.
2419 *
2420 * @returns VBox status code.
2421 * @retval VINF_SUCCESS if successfully woken up.
2422 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2423 *
2424 * @param pGVM The global (ring-0) VM structure.
2425 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2426 * @thread Any but EMT(idCpu).
2427 * @deprecated Don't use in new code if possible! Use the GVM variant.
2428 */
2429GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
2430{
2431 GVMM_CHECK_SMAP_SETUP();
2432 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2433 PGVMM pGVMM;
2434 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
2435 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2436 if (RT_SUCCESS(rc))
2437 rc = GVMMR0SchedWakeUpEx(pGVM, idCpu, false /*fTakeUsedLock*/);
2438 return rc;
2439}
2440
2441
2442/**
2443 * Worker common to GVMMR0SchedPoke and GVMMR0SchedWakeUpAndPokeCpus that pokes
2444 * the Virtual CPU if it's still busy executing guest code.
2445 *
2446 * @returns VBox status code.
2447 * @retval VINF_SUCCESS if poked successfully.
2448 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2449 *
2450 * @param pGVM The global (ring-0) VM structure.
2451 * @param pVCpu The cross context virtual CPU structure.
2452 */
2453DECLINLINE(int) gvmmR0SchedPokeOne(PGVM pGVM, PVMCPUCC pVCpu)
2454{
2455 pGVM->gvmm.s.StatsSched.cPokeCalls++;
2456
2457 RTCPUID idHostCpu = pVCpu->idHostCpu;
2458 if ( idHostCpu == NIL_RTCPUID
2459 || VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_EXEC)
2460 {
2461 pGVM->gvmm.s.StatsSched.cPokeNotBusy++;
2462 return VINF_GVM_NOT_BUSY_IN_GC;
2463 }
2464
2465 /* Note: this function is not implemented on Darwin and Linux (kernel < 2.6.19) */
2466 RTMpPokeCpu(idHostCpu);
2467 return VINF_SUCCESS;
2468}
2469
2470
2471/**
2472 * Pokes an EMT if it's still busy running guest code.
2473 *
2474 * @returns VBox status code.
2475 * @retval VINF_SUCCESS if poked successfully.
2476 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2477 *
2478 * @param pGVM The global (ring-0) VM structure.
2479 * @param idCpu The ID of the virtual CPU to poke.
2480 * @param fTakeUsedLock Take the used lock or not
2481 */
2482GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
2483{
2484 /*
2485 * Validate input and take the UsedLock.
2486 */
2487 PGVMM pGVMM;
2488 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
2489 if (RT_SUCCESS(rc))
2490 {
2491 if (idCpu < pGVM->cCpus)
2492 rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2493 else
2494 rc = VERR_INVALID_CPU_ID;
2495
2496 if (fTakeUsedLock)
2497 {
2498 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2499 AssertRC(rc2);
2500 }
2501 }
2502
2503 LogFlow(("GVMMR0SchedPokeEx: returns %Rrc\n", rc));
2504 return rc;
2505}
2506
2507
2508/**
2509 * Pokes an EMT if it's still busy running guest code.
2510 *
2511 * @returns VBox status code.
2512 * @retval VINF_SUCCESS if poked successfully.
2513 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2514 *
2515 * @param pGVM The global (ring-0) VM structure.
2516 * @param idCpu The ID of the virtual CPU to poke.
2517 */
2518GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, VMCPUID idCpu)
2519{
2520 return GVMMR0SchedPokeEx(pGVM, idCpu, true /* fTakeUsedLock */);
2521}
2522
2523
2524/**
2525 * Pokes an EMT if it's still busy running guest code, no GVM parameter and no
2526 * used locking.
2527 *
2528 * @returns VBox status code.
2529 * @retval VINF_SUCCESS if poked successfully.
2530 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2531 *
2532 * @param pGVM The global (ring-0) VM structure.
2533 * @param idCpu The ID of the virtual CPU to poke.
2534 *
2535 * @deprecated Don't use in new code if possible! Use the GVM variant.
2536 */
2537GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
2538{
2539 PGVMM pGVMM;
2540 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
2541 if (RT_SUCCESS(rc))
2542 {
2543 if (idCpu < pGVM->cCpus)
2544 rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2545 else
2546 rc = VERR_INVALID_CPU_ID;
2547 }
2548 return rc;
2549}
2550
2551
2552/**
2553 * Wakes up a set of halted EMT threads so they can service pending requests.
2554 *
2555 * @returns VBox status code, no informational stuff.
2556 *
2557 * @param pGVM The global (ring-0) VM structure.
2558 * @param pSleepSet The set of sleepers to wake up.
2559 * @param pPokeSet The set of CPUs to poke.
2560 */
2561GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet)
2562{
2563 AssertPtrReturn(pSleepSet, VERR_INVALID_POINTER);
2564 AssertPtrReturn(pPokeSet, VERR_INVALID_POINTER);
2565 GVMM_CHECK_SMAP_SETUP();
2566 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2567 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
2568
2569 /*
2570 * Validate input and take the UsedLock.
2571 */
2572 PGVMM pGVMM;
2573 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /* fTakeUsedLock */);
2574 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2575 if (RT_SUCCESS(rc))
2576 {
2577 rc = VINF_SUCCESS;
2578 VMCPUID idCpu = pGVM->cCpus;
2579 while (idCpu-- > 0)
2580 {
2581 /* Don't try to poke or wake up ourselves. */
2582 if (pGVM->aCpus[idCpu].hEMT == hSelf)
2583 continue;
2584
2585 /* just ignore errors for now. */
2586 if (VMCPUSET_IS_PRESENT(pSleepSet, idCpu))
2587 {
2588 gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2589 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2590 }
2591 else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu))
2592 {
2593 gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2594 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2595 }
2596 }
2597
2598 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2599 AssertRC(rc2);
2600 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2601 }
2602
2603 LogFlow(("GVMMR0SchedWakeUpAndPokeCpus: returns %Rrc\n", rc));
2604 return rc;
2605}
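
/*
 * Usage sketch (illustrative only, kept out of the build): waking one sleeping
 * EMT and poking another that is busy executing guest code. VMCPUSET_EMPTY and
 * VMCPUSET_ADD are assumed to come from VBox/vmm/vmcpuset.h; myR0KickTwoCpus
 * is a made-up name.
 */
#if 0
static int myR0KickTwoCpus(PGVM pGVM, VMCPUID idCpuSleeping, VMCPUID idCpuBusy)
{
    VMCPUSET SleepSet, PokeSet;
    VMCPUSET_EMPTY(&SleepSet);
    VMCPUSET_EMPTY(&PokeSet);
    VMCPUSET_ADD(&SleepSet, idCpuSleeping);
    VMCPUSET_ADD(&PokeSet, idCpuBusy);
    return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &SleepSet, &PokeSet);
}
#endif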
2606
2607
2608/**
2609 * VMMR0 request wrapper for GVMMR0SchedWakeUpAndPokeCpus.
2610 *
2611 * @returns see GVMMR0SchedWakeUpAndPokeCpus.
2612 * @param pGVM The global (ring-0) VM structure.
2613 * @param pReq Pointer to the request packet.
2614 */
2615GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq)
2616{
2617 /*
2618 * Validate input and pass it on.
2619 */
2620 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2621 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
2622
2623 return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &pReq->SleepSet, &pReq->PokeSet);
2624}
2625
2626
2627
2628/**
2629 * Poll the schedule to see if someone else should get a chance to run.
2630 *
2631 * This is a bit hackish and will not work too well if the machine is
2632 * under heavy load from non-VM processes.
2633 *
2634 * @returns VINF_SUCCESS if not yielded.
2635 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
2636 * @param pGVM The global (ring-0) VM structure.
2637 * @param idCpu The Virtual CPU ID of the calling EMT.
2638 * @param fYield Whether to yield or not.
2639 * This is for when we're spinning in the halt loop.
2640 * @thread EMT(idCpu).
2641 */
2642GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, VMCPUID idCpu, bool fYield)
2643{
2644 /*
2645 * Validate input.
2646 */
2647 PGVMM pGVMM;
2648 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
2649 if (RT_SUCCESS(rc))
2650 {
2651 /*
2652 * We currently only implement helping with wake-ups (fYield = false), so don't
2653 * bother taking the lock if gvmmR0SchedDoWakeUps is not going to do anything.
2654 */
2655 if (!fYield && pGVMM->fDoEarlyWakeUps)
2656 {
2657 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
2658 pGVM->gvmm.s.StatsSched.cPollCalls++;
2659
2660 Assert(ASMGetFlags() & X86_EFL_IF);
2661 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
2662
2663 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
2664
2665 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2666 }
2667 /*
2668 * Not quite sure what we could do here...
2669 */
2670 else if (fYield)
2671 rc = VERR_NOT_IMPLEMENTED; /** @todo implement this... */
2672 else
2673 rc = VINF_SUCCESS;
2674 }
2675
2676 LogFlow(("GVMMR0SchedPoll: returns %Rrc\n", rc));
2677 return rc;
2678}
2679
2680
2681#ifdef GVMM_SCHED_WITH_PPT
2682/**
2683 * Timer callback for the periodic preemption timer.
2684 *
2685 * @param pTimer The timer handle.
2686 * @param pvUser Pointer to the per cpu structure.
2687 * @param iTick The current tick.
2688 */
2689static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
2690{
2691 PGVMMHOSTCPU pCpu = (PGVMMHOSTCPU)pvUser;
2692 NOREF(pTimer); NOREF(iTick);
2693
2694 /*
2695 * Termination check
2696 */
2697 if (pCpu->u32Magic != GVMMHOSTCPU_MAGIC)
2698 return;
2699
2700 /*
2701 * Do the house keeping.
2702 */
2703 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2704
2705 if (++pCpu->Ppt.iTickHistorization >= pCpu->Ppt.cTicksHistoriziationInterval)
2706 {
2707 /*
2708 * Historicize the max frequency.
2709 */
2710 uint32_t iHzHistory = ++pCpu->Ppt.iHzHistory % RT_ELEMENTS(pCpu->Ppt.aHzHistory);
2711 pCpu->Ppt.aHzHistory[iHzHistory] = pCpu->Ppt.uDesiredHz;
2712 pCpu->Ppt.iTickHistorization = 0;
2713 pCpu->Ppt.uDesiredHz = 0;
2714
2715 /*
2716 * Check whether the current timer frequency needs adjusting.
2717 */
2718 uint32_t uHistMaxHz = 0;
2719 for (uint32_t i = 0; i < RT_ELEMENTS(pCpu->Ppt.aHzHistory); i++)
2720 if (pCpu->Ppt.aHzHistory[i] > uHistMaxHz)
2721 uHistMaxHz = pCpu->Ppt.aHzHistory[i];
2722 if (uHistMaxHz == pCpu->Ppt.uTimerHz)
2723 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2724 else if (uHistMaxHz)
2725 {
2726 /*
2727 * Reprogram it.
2728 */
2729 pCpu->Ppt.cChanges++;
2730 pCpu->Ppt.iTickHistorization = 0;
2731 pCpu->Ppt.uTimerHz = uHistMaxHz;
2732 uint32_t const cNsInterval = RT_NS_1SEC / uHistMaxHz;
2733 pCpu->Ppt.cNsInterval = cNsInterval;
2734 if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2735 pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
2736 + GVMMHOSTCPU_PPT_HIST_INTERVAL_NS / 2 - 1)
2737 / cNsInterval;
2738 else
2739 pCpu->Ppt.cTicksHistoriziationInterval = 1;
2740 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2741
2742 /*SUPR0Printf("Cpu%u: change to %u Hz / %u ns\n", pCpu->idxCpuSet, uHistMaxHz, cNsInterval);*/
2743 RTTimerChangeInterval(pTimer, cNsInterval);
2744 }
2745 else
2746 {
2747 /*
2748 * Stop it.
2749 */
2750 pCpu->Ppt.fStarted = false;
2751 pCpu->Ppt.uTimerHz = 0;
2752 pCpu->Ppt.cNsInterval = 0;
2753 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2754
2755 /*SUPR0Printf("Cpu%u: stopping (%u Hz)\n", pCpu->idxCpuSet, uHistMaxHz);*/
2756 RTTimerStop(pTimer);
2757 }
2758 }
2759 else
2760 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2761}
2762#endif /* GVMM_SCHED_WITH_PPT */
2763
2764
2765/**
2766 * Updates the periodic preemption timer for the calling CPU.
2767 *
2768 * The caller must have disabled preemption!
2769 * The caller must check that the host can do high resolution timers.
2770 *
2771 * @param pGVM The global (ring-0) VM structure.
2772 * @param idHostCpu The current host CPU id.
2773 * @param uHz The desired frequency.
2774 */
2775GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(PGVM pGVM, RTCPUID idHostCpu, uint32_t uHz)
2776{
2777 NOREF(pGVM);
2778#ifdef GVMM_SCHED_WITH_PPT
2779 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2780 Assert(RTTimerCanDoHighResolution());
2781
2782 /*
2783 * Resolve the per CPU data.
2784 */
2785 uint32_t iCpu = RTMpCpuIdToSetIndex(idHostCpu);
2786 PGVMM pGVMM = g_pGVMM;
2787 if ( !RT_VALID_PTR(pGVMM)
2788 || pGVMM->u32Magic != GVMM_MAGIC)
2789 return;
2790 AssertMsgReturnVoid(iCpu < pGVMM->cHostCpus, ("iCpu=%d cHostCpus=%d\n", iCpu, pGVMM->cHostCpus));
2791 PGVMMHOSTCPU pCpu = &pGVMM->aHostCpus[iCpu];
2792 AssertMsgReturnVoid( pCpu->u32Magic == GVMMHOSTCPU_MAGIC
2793 && pCpu->idCpu == idHostCpu,
2794 ("u32Magic=%#x idCpu=%d idHostCpu=%d\n", pCpu->u32Magic, pCpu->idCpu, idHostCpu));
2795
2796 /*
2797 * Check whether we need to do anything about the timer.
2798 * We have to be a little bit careful since we might be racing the timer
2799 * callback here.
2800 */
2801 if (uHz > 16384)
2802 uHz = 16384; /** @todo add a query method for this! */
2803 if (RT_UNLIKELY( uHz > ASMAtomicReadU32(&pCpu->Ppt.uDesiredHz)
2804 && uHz >= pCpu->Ppt.uMinHz
2805 && !pCpu->Ppt.fStarting /* solaris paranoia */))
2806 {
2807 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2808
2809 pCpu->Ppt.uDesiredHz = uHz;
2810 uint32_t cNsInterval = 0;
2811 if (!pCpu->Ppt.fStarted)
2812 {
2813 pCpu->Ppt.cStarts++;
2814 pCpu->Ppt.fStarted = true;
2815 pCpu->Ppt.fStarting = true;
2816 pCpu->Ppt.iTickHistorization = 0;
2817 pCpu->Ppt.uTimerHz = uHz;
2818 pCpu->Ppt.cNsInterval = cNsInterval = RT_NS_1SEC / uHz;
2819 if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2820 pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
2821 + GVMMHOSTCPU_PPT_HIST_INTERVAL_NS / 2 - 1)
2822 / cNsInterval;
2823 else
2824 pCpu->Ppt.cTicksHistoriziationInterval = 1;
2825 }
2826
2827 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2828
2829 if (cNsInterval)
2830 {
2831 RTTimerChangeInterval(pCpu->Ppt.pTimer, cNsInterval);
2832 int rc = RTTimerStart(pCpu->Ppt.pTimer, cNsInterval);
2833 AssertRC(rc);
2834
2835 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2836 if (RT_FAILURE(rc))
2837 pCpu->Ppt.fStarted = false;
2838 pCpu->Ppt.fStarting = false;
2839 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2840 }
2841 }
2842#else /* !GVMM_SCHED_WITH_PPT */
2843 NOREF(idHostCpu); NOREF(uHz);
2844#endif /* !GVMM_SCHED_WITH_PPT */
2845}
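
/*
 * Usage sketch (illustrative only, kept out of the build): reporting a desired
 * preemption timer frequency for the current host CPU with preemption
 * disabled, as the function requires. RTThreadPreemptDisable/Restore and
 * RTMpCpuId are standard IPRT calls assumed available here; myR0ReportTimerHz
 * and the 2000 Hz value are made up for this example.
 */
#if 0
static void myR0ReportTimerHz(PGVM pGVM)
{
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);
    if (RTTimerCanDoHighResolution())
        GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, RTMpCpuId(), 2000 /*uHz*/);
    RTThreadPreemptRestore(&PreemptState);
}
#endif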
2846
2847
2848/**
2849 * Calls @a pfnCallback for each VM in the system.
2850 *
2851 * This will enumerate the VMs while holding the global VM used list lock in
2852 * shared mode. So, only suitable for simple work. If more expensive work
2853 * needs doing, a different approach must be taken as using this API would
2854 * otherwise block VM creation and destruction.
2855 *
2856 * @returns VBox status code.
2857 * @param pfnCallback The callback function.
2858 * @param pvUser User argument to the callback.
2859 */
2860GVMMR0DECL(int) GVMMR0EnumVMs(PFNGVMMR0ENUMCALLBACK pfnCallback, void *pvUser)
2861{
2862 PGVMM pGVMM;
2863 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2864
2865 int rc = VINF_SUCCESS;
2866 GVMMR0_USED_SHARED_LOCK(pGVMM);
2867 for (unsigned i = pGVMM->iUsedHead, cLoops = 0;
2868 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2869 i = pGVMM->aHandles[i].iNext, cLoops++)
2870 {
2871 PGVM pGVM = pGVMM->aHandles[i].pGVM;
2872 if ( RT_VALID_PTR(pGVM)
2873 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
2874 && pGVM->u32Magic == GVM_MAGIC)
2875 {
2876 rc = pfnCallback(pGVM, pvUser);
2877 if (rc != VINF_SUCCESS)
2878 break;
2879 }
2880
2881 AssertBreak(cLoops < RT_ELEMENTS(pGVMM->aHandles) * 4); /* paranoia */
2882 }
2883 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2884 return rc;
2885}
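
/*
 * Usage sketch (illustrative only, kept out of the build): counting the VMs
 * currently registered with the GVMM. The callback signature mirrors the
 * pfnCallback invocation above (PGVM, pvUser); the exact PFNGVMMR0ENUMCALLBACK
 * typedef lives in VBox/vmm/gvmm.h. The myR0CountVMs* names are made up.
 */
#if 0
static DECLCALLBACK(int) myR0CountVMsCallback(PGVM pGVM, void *pvUser)
{
    NOREF(pGVM);
    *(uint32_t *)pvUser += 1;
    return VINF_SUCCESS; /* anything else stops the enumeration */
}

static uint32_t myR0CountVMs(void)
{
    uint32_t cVMs = 0;
    GVMMR0EnumVMs(myR0CountVMsCallback, &cVMs);
    return cVMs;
}
#endif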
2886
2887
2888/**
2889 * Retrieves the GVMM statistics visible to the caller.
2890 *
2891 * @returns VBox status code.
2892 *
2893 * @param pStats Where to put the statistics.
2894 * @param pSession The current session.
2895 * @param pGVM The GVM to obtain statistics for. Optional.
2896 */
2897GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
2898{
2899 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
2900
2901 /*
2902 * Validate input.
2903 */
2904 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
2905 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
2906 pStats->cVMs = 0; /* (crash before taking the sem...) */
2907
2908 /*
2909 * Take the lock and get the VM statistics.
2910 */
2911 PGVMM pGVMM;
2912 if (pGVM)
2913 {
2914 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
2915 if (RT_FAILURE(rc))
2916 return rc;
2917 pStats->SchedVM = pGVM->gvmm.s.StatsSched;
2918 }
2919 else
2920 {
2921 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2922 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
2923
2924 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
2925 AssertRCReturn(rc, rc);
2926 }
2927
2928 /*
2929 * Enumerate the VMs and add the ones visible to the statistics.
2930 */
2931 pStats->cVMs = 0;
2932 pStats->cEMTs = 0;
2933 memset(&pStats->SchedSum, 0, sizeof(pStats->SchedSum));
2934
2935 for (unsigned i = pGVMM->iUsedHead;
2936 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2937 i = pGVMM->aHandles[i].iNext)
2938 {
2939 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
2940 void *pvObj = pGVMM->aHandles[i].pvObj;
2941 if ( RT_VALID_PTR(pvObj)
2942 && RT_VALID_PTR(pOtherGVM)
2943 && pOtherGVM->u32Magic == GVM_MAGIC
2944 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
2945 {
2946 pStats->cVMs++;
2947 pStats->cEMTs += pOtherGVM->cCpus;
2948
2949 pStats->SchedSum.cHaltCalls += pOtherGVM->gvmm.s.StatsSched.cHaltCalls;
2950 pStats->SchedSum.cHaltBlocking += pOtherGVM->gvmm.s.StatsSched.cHaltBlocking;
2951 pStats->SchedSum.cHaltTimeouts += pOtherGVM->gvmm.s.StatsSched.cHaltTimeouts;
2952 pStats->SchedSum.cHaltNotBlocking += pOtherGVM->gvmm.s.StatsSched.cHaltNotBlocking;
2953 pStats->SchedSum.cHaltWakeUps += pOtherGVM->gvmm.s.StatsSched.cHaltWakeUps;
2954
2955 pStats->SchedSum.cWakeUpCalls += pOtherGVM->gvmm.s.StatsSched.cWakeUpCalls;
2956 pStats->SchedSum.cWakeUpNotHalted += pOtherGVM->gvmm.s.StatsSched.cWakeUpNotHalted;
2957 pStats->SchedSum.cWakeUpWakeUps += pOtherGVM->gvmm.s.StatsSched.cWakeUpWakeUps;
2958
2959 pStats->SchedSum.cPokeCalls += pOtherGVM->gvmm.s.StatsSched.cPokeCalls;
2960 pStats->SchedSum.cPokeNotBusy += pOtherGVM->gvmm.s.StatsSched.cPokeNotBusy;
2961
2962 pStats->SchedSum.cPollCalls += pOtherGVM->gvmm.s.StatsSched.cPollCalls;
2963 pStats->SchedSum.cPollHalts += pOtherGVM->gvmm.s.StatsSched.cPollHalts;
2964 pStats->SchedSum.cPollWakeUps += pOtherGVM->gvmm.s.StatsSched.cPollWakeUps;
2965 }
2966 }
2967
2968 /*
2969 * Copy out the per host CPU statistics.
2970 */
2971 uint32_t iDstCpu = 0;
2972 uint32_t cSrcCpus = pGVMM->cHostCpus;
2973 for (uint32_t iSrcCpu = 0; iSrcCpu < cSrcCpus; iSrcCpu++)
2974 {
2975 if (pGVMM->aHostCpus[iSrcCpu].idCpu != NIL_RTCPUID)
2976 {
2977 pStats->aHostCpus[iDstCpu].idCpu = pGVMM->aHostCpus[iSrcCpu].idCpu;
2978 pStats->aHostCpus[iDstCpu].idxCpuSet = pGVMM->aHostCpus[iSrcCpu].idxCpuSet;
2979#ifdef GVMM_SCHED_WITH_PPT
2980 pStats->aHostCpus[iDstCpu].uDesiredHz = pGVMM->aHostCpus[iSrcCpu].Ppt.uDesiredHz;
2981 pStats->aHostCpus[iDstCpu].uTimerHz = pGVMM->aHostCpus[iSrcCpu].Ppt.uTimerHz;
2982 pStats->aHostCpus[iDstCpu].cChanges = pGVMM->aHostCpus[iSrcCpu].Ppt.cChanges;
2983 pStats->aHostCpus[iDstCpu].cStarts = pGVMM->aHostCpus[iSrcCpu].Ppt.cStarts;
2984#else
2985 pStats->aHostCpus[iDstCpu].uDesiredHz = 0;
2986 pStats->aHostCpus[iDstCpu].uTimerHz = 0;
2987 pStats->aHostCpus[iDstCpu].cChanges = 0;
2988 pStats->aHostCpus[iDstCpu].cStarts = 0;
2989#endif
2990 iDstCpu++;
2991 if (iDstCpu >= RT_ELEMENTS(pStats->aHostCpus))
2992 break;
2993 }
2994 }
2995 pStats->cHostCpus = iDstCpu;
2996
2997 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2998
2999 return VINF_SUCCESS;
3000}
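
/*
 * Usage sketch (illustrative only, kept out of the build): querying the
 * scheduler statistics for one VM. The GVMMSTATS structure is sizeable, so it
 * is heap allocated rather than placed on the ring-0 stack; myR0DumpSchedStats
 * is a made-up name.
 */
#if 0
static void myR0DumpSchedStats(PGVM pGVM, PSUPDRVSESSION pSession)
{
    PGVMMSTATS pStats = (PGVMMSTATS)RTMemAllocZ(sizeof(*pStats));
    if (pStats)
    {
        if (RT_SUCCESS(GVMMR0QueryStatistics(pStats, pSession, pGVM)))
            SUPR0Printf("cVMs=%u cEMTs=%u\n", (unsigned)pStats->cVMs, (unsigned)pStats->cEMTs);
        RTMemFree(pStats);
    }
}
#endif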
3001
3002
3003/**
3004 * VMMR0 request wrapper for GVMMR0QueryStatistics.
3005 *
3006 * @returns see GVMMR0QueryStatistics.
3007 * @param pGVM The global (ring-0) VM structure. Optional.
3008 * @param pReq Pointer to the request packet.
3009 * @param pSession The current session.
3010 */
3011GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
3012{
3013 /*
3014 * Validate input and pass it on.
3015 */
3016 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3017 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3018 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
3019
3020 return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM);
3021}
3022
3023
3024/**
3025 * Resets the specified GVMM statistics.
3026 *
3027 * @returns VBox status code.
3028 *
3029 * @param pStats Which statistics to reset, that is, non-zero fields indicate which to reset.
3030 * @param pSession The current session.
3031 * @param pGVM The GVM to reset statistics for. Optional.
3032 */
3033GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
3034{
3035 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
3036
3037 /*
3038 * Validate input.
3039 */
3040 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
3041 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
3042
3043 /*
3044 * Take the lock and get the VM statistics.
3045 */
3046 PGVMM pGVMM;
3047 if (pGVM)
3048 {
3049 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
3050 if (RT_FAILURE(rc))
3051 return rc;
3052# define MAYBE_RESET_FIELD(field) \
3053 do { if (pStats->SchedVM. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
3054 MAYBE_RESET_FIELD(cHaltCalls);
3055 MAYBE_RESET_FIELD(cHaltBlocking);
3056 MAYBE_RESET_FIELD(cHaltTimeouts);
3057 MAYBE_RESET_FIELD(cHaltNotBlocking);
3058 MAYBE_RESET_FIELD(cHaltWakeUps);
3059 MAYBE_RESET_FIELD(cWakeUpCalls);
3060 MAYBE_RESET_FIELD(cWakeUpNotHalted);
3061 MAYBE_RESET_FIELD(cWakeUpWakeUps);
3062 MAYBE_RESET_FIELD(cPokeCalls);
3063 MAYBE_RESET_FIELD(cPokeNotBusy);
3064 MAYBE_RESET_FIELD(cPollCalls);
3065 MAYBE_RESET_FIELD(cPollHalts);
3066 MAYBE_RESET_FIELD(cPollWakeUps);
3067# undef MAYBE_RESET_FIELD
3068 }
3069 else
3070 {
3071 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
3072
3073 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
3074 AssertRCReturn(rc, rc);
3075 }
3076
3077 /*
3078 * Enumerate the VMs and add the ones visible to the statistics.
3079 */
3080 if (!ASMMemIsZero(&pStats->SchedSum, sizeof(pStats->SchedSum)))
3081 {
3082 for (unsigned i = pGVMM->iUsedHead;
3083 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
3084 i = pGVMM->aHandles[i].iNext)
3085 {
3086 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
3087 void *pvObj = pGVMM->aHandles[i].pvObj;
3088 if ( RT_VALID_PTR(pvObj)
3089 && RT_VALID_PTR(pOtherGVM)
3090 && pOtherGVM->u32Magic == GVM_MAGIC
3091 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
3092 {
3093# define MAYBE_RESET_FIELD(field) \
3094 do { if (pStats->SchedSum. field ) { pOtherGVM->gvmm.s.StatsSched. field = 0; } } while (0)
3095 MAYBE_RESET_FIELD(cHaltCalls);
3096 MAYBE_RESET_FIELD(cHaltBlocking);
3097 MAYBE_RESET_FIELD(cHaltTimeouts);
3098 MAYBE_RESET_FIELD(cHaltNotBlocking);
3099 MAYBE_RESET_FIELD(cHaltWakeUps);
3100 MAYBE_RESET_FIELD(cWakeUpCalls);
3101 MAYBE_RESET_FIELD(cWakeUpNotHalted);
3102 MAYBE_RESET_FIELD(cWakeUpWakeUps);
3103 MAYBE_RESET_FIELD(cPokeCalls);
3104 MAYBE_RESET_FIELD(cPokeNotBusy);
3105 MAYBE_RESET_FIELD(cPollCalls);
3106 MAYBE_RESET_FIELD(cPollHalts);
3107 MAYBE_RESET_FIELD(cPollWakeUps);
3108# undef MAYBE_RESET_FIELD
3109 }
3110 }
3111 }
3112
3113 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
3114
3115 return VINF_SUCCESS;
3116}
3117
3118
3119/**
3120 * VMMR0 request wrapper for GVMMR0ResetStatistics.
3121 *
3122 * @returns see GVMMR0ResetStatistics.
3123 * @param pGVM The global (ring-0) VM structure. Optional.
3124 * @param pReq Pointer to the request packet.
3125 * @param pSession The current session.
3126 */
3127GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
3128{
3129 /*
3130 * Validate input and pass it on.
3131 */
3132 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3133 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3134 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
3135
3136 return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM);
3137}
3138