VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@82968

Last change on this file since 82968 was 82968, checked in by vboxsync, 5 years ago: Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 104.9 KB
1/* $Id: GVMMR0.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * GVMM - Global VM Manager.
4 */
5
6/*
7 * Copyright (C) 2007-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_gvmm GVMM - The Global VM Manager
20 *
21 * The Global VM Manager lives in ring-0. Its main function at the moment is
22 * to manage a list of all running VMs, keep a ring-0 only structure (GVM) for
23 * each of them, and assign them unique identifiers (so GMM can track page
24 * owners). The GVMM also manages some of the host CPU resources, like the
25 * periodic preemption timer.
26 *
27 * The GVMM will create a ring-0 object for each VM when it is registered; this
28 * is both for session cleanup purposes and for having a point where it is
29 * possible to implement usage policies later (in SUPR0ObjRegister).
30 *
31 *
32 * @section sec_gvmm_ppt Periodic Preemption Timer (PPT)
33 *
34 * On systems that sport a high resolution kernel timer API, we use per-CPU
35 * timers to generate interrupts that preempt VT-x, AMD-V and raw-mode guest
36 * execution. The timer frequency is calculated by taking the max
37 * TMCalcHostTimerFrequency for all VMs running on a CPU over the last ~160 ms
38 * (RT_ELEMENTS(((PGVMMHOSTCPU)0)->Ppt.aHzHistory) *
39 * GVMMHOSTCPU_PPT_HIST_INTERVAL_NS).
40 *
41 * The TMCalcHostTimerFrequency() part of things takes the max
42 * TMTimerSetFrequencyHint() value and adjusts by the current catch-up percent,
43 * warp drive percent and some fudge factors. VMMR0.cpp reports the result via
44 * GVMMR0SchedUpdatePeriodicPreemptionTimer() before switching to the VT-x,
45 * AMD-V and raw-mode execution environments.
46 */
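/* Illustrative sketch only (not part of the original source): per the
 * description above, the effective timer frequency for a host CPU boils down
 * to the maximum of the recently historized uDesiredHz values; pCpu is an
 * assumed PGVMMHOSTCPU pointer used just for illustration:
 *
 *     uint32_t uHz = pCpu->Ppt.uDesiredHz;
 *     for (uint32_t i = 0; i < RT_ELEMENTS(pCpu->Ppt.aHzHistory); i++)
 *         uHz = RT_MAX(uHz, pCpu->Ppt.aHzHistory[i]);
 */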
47
48
49/*********************************************************************************************************************************
50* Header Files *
51*********************************************************************************************************************************/
52#define LOG_GROUP LOG_GROUP_GVMM
53#include <VBox/vmm/gvmm.h>
54#include <VBox/vmm/gmm.h>
55#include "GVMMR0Internal.h"
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/pdm.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/vmm.h>
60#ifdef VBOX_WITH_NEM_R0
61# include <VBox/vmm/nem.h>
62#endif
63#include <VBox/vmm/vmcpuset.h>
64#include <VBox/vmm/vmcc.h>
65#include <VBox/param.h>
66#include <VBox/err.h>
67
68#include <iprt/asm.h>
69#include <iprt/asm-amd64-x86.h>
70#include <iprt/critsect.h>
71#include <iprt/mem.h>
72#include <iprt/semaphore.h>
73#include <iprt/time.h>
74#include <VBox/log.h>
75#include <iprt/thread.h>
76#include <iprt/process.h>
77#include <iprt/param.h>
78#include <iprt/string.h>
79#include <iprt/assert.h>
80#include <iprt/mem.h>
81#include <iprt/memobj.h>
82#include <iprt/mp.h>
83#include <iprt/cpuset.h>
84#include <iprt/spinlock.h>
85#include <iprt/timer.h>
86
87#include "dtrace/VBoxVMM.h"
88
89
90/*********************************************************************************************************************************
91* Defined Constants And Macros *
92*********************************************************************************************************************************/
93#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(DOXYGEN_RUNNING)
94/** Define this to enable the periodic preemption timer. */
95# define GVMM_SCHED_WITH_PPT
96#endif
97
98
99/** @def GVMM_CHECK_SMAP_SETUP
100 * SMAP check setup. */
101/** @def GVMM_CHECK_SMAP_CHECK
102 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
103 * it will be logged and @a a_BadExpr is executed. */
104/** @def GVMM_CHECK_SMAP_CHECK2
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
106 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
107 * executed. */
108#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
109# define GVMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
110# define GVMM_CHECK_SMAP_CHECK(a_BadExpr) \
111 do { \
112 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
113 { \
114 RTCCUINTREG fEflCheck = ASMGetFlags(); \
115 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
116 { /* likely */ } \
117 else \
118 { \
119 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
120 a_BadExpr; \
121 } \
122 } \
123 } while (0)
124# define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
125 do { \
126 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
127 { \
128 RTCCUINTREG fEflCheck = ASMGetFlags(); \
129 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
130 { /* likely */ } \
131 else \
132 { \
133 SUPR0BadContext((a_pGVM) ? (a_pGVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
134 a_BadExpr; \
135 } \
136 } \
137 } while (0)
138#else
139# define GVMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
140# define GVMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
141# define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
142#endif
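/* Usage sketch (illustrative, assumed pattern): ring-0 entry points in this
 * file pair the setup macro with checks around sensitive work, e.g.:
 *
 *     GVMM_CHECK_SMAP_SETUP();
 *     GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
 *     // ... do the ring-0 work ...
 *     GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
 */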
143
144
145
146/*********************************************************************************************************************************
147* Structures and Typedefs *
148*********************************************************************************************************************************/
149
150/**
151 * Global VM handle.
152 */
153typedef struct GVMHANDLE
154{
155 /** The index of the next handle in the list (free or used). (0 is nil.) */
156 uint16_t volatile iNext;
157 /** Our own index / handle value. */
158 uint16_t iSelf;
159 /** The process ID of the handle owner.
160 * This is used for access checks. */
161 RTPROCESS ProcId;
162 /** The pointer to the ring-0 only (aka global) VM structure. */
163 PGVM pGVM;
164 /** The virtual machine object. */
165 void *pvObj;
166 /** The session this VM is associated with. */
167 PSUPDRVSESSION pSession;
168 /** The ring-0 handle of the EMT0 thread.
169 * This is used for ownership checks as well as looking up a VM handle by thread
170 * at times like assertions. */
171 RTNATIVETHREAD hEMT0;
172} GVMHANDLE;
173/** Pointer to a global VM handle. */
174typedef GVMHANDLE *PGVMHANDLE;
175
176/** Number of GVM handles (including the NIL handle). */
177#if HC_ARCH_BITS == 64
178# define GVMM_MAX_HANDLES 8192
179#else
180# define GVMM_MAX_HANDLES 128
181#endif
182
183/**
184 * Per host CPU GVMM data.
185 */
186typedef struct GVMMHOSTCPU
187{
188 /** Magic number (GVMMHOSTCPU_MAGIC). */
189 uint32_t volatile u32Magic;
190 /** The CPU ID. */
191 RTCPUID idCpu;
192 /** The CPU set index. */
193 uint32_t idxCpuSet;
194
195#ifdef GVMM_SCHED_WITH_PPT
196 /** Periodic preemption timer data. */
197 struct
198 {
199 /** The handle to the periodic preemption timer. */
200 PRTTIMER pTimer;
201 /** Spinlock protecting the data below. */
202 RTSPINLOCK hSpinlock;
203 /** The smallest Hz that we need to care about. (static) */
204 uint32_t uMinHz;
205 /** The number of ticks between each historization. */
206 uint32_t cTicksHistoriziationInterval;
207 /** The current historization tick (counting up to
208 * cTicksHistoriziationInterval and then resetting). */
209 uint32_t iTickHistorization;
210 /** The current timer interval. This is set to 0 when inactive. */
211 uint32_t cNsInterval;
212 /** The current timer frequency. This is set to 0 when inactive. */
213 uint32_t uTimerHz;
214 /** The current max frequency reported by the EMTs.
215 * This gets historicized and reset by the timer callback. This is
216 * read without holding the spinlock, so needs atomic updating. */
217 uint32_t volatile uDesiredHz;
218 /** Whether the timer was started or not. */
219 bool volatile fStarted;
220 /** Set if we're starting the timer. */
221 bool volatile fStarting;
222 /** The index of the next history entry (mod it). */
223 uint32_t iHzHistory;
224 /** Historicized uDesiredHz values. The array wraps around; new entries
225 * are added at iHzHistory. This is updated approximately every
226 * GVMMHOSTCPU_PPT_HIST_INTERVAL_NS by the timer callback. */
227 uint32_t aHzHistory[8];
228 /** Statistics counter for recording the number of interval changes. */
229 uint32_t cChanges;
230 /** Statistics counter for recording the number of timer starts. */
231 uint32_t cStarts;
232 } Ppt;
233#endif /* GVMM_SCHED_WITH_PPT */
234
235} GVMMHOSTCPU;
236/** Pointer to the per host CPU GVMM data. */
237typedef GVMMHOSTCPU *PGVMMHOSTCPU;
238/** The GVMMHOSTCPU::u32Magic value (Petra, Tanya & Rachel Haden). */
239#define GVMMHOSTCPU_MAGIC UINT32_C(0x19711011)
240/** The interval one history entry should cover (approximately), given in
241 * nanoseconds. */
242#define GVMMHOSTCPU_PPT_HIST_INTERVAL_NS UINT32_C(20000000)
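/* Note: 8 aHzHistory entries of GVMMHOSTCPU_PPT_HIST_INTERVAL_NS (20 ms) each
 * give a history window of roughly 8 * 20 ms = 160 ms, matching the "~160 ms"
 * figure quoted in the @page docs above. */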
243
244
245/**
246 * The GVMM instance data.
247 */
248typedef struct GVMM
249{
250 /** Eyecatcher / magic. */
251 uint32_t u32Magic;
252 /** The index of the head of the free handle chain. (0 is nil.) */
253 uint16_t volatile iFreeHead;
254 /** The index of the head of the active handle chain. (0 is nil.) */
255 uint16_t volatile iUsedHead;
256 /** The number of VMs. */
257 uint16_t volatile cVMs;
258 /** Alignment padding. */
259 uint16_t u16Reserved;
260 /** The number of EMTs. */
261 uint32_t volatile cEMTs;
262 /** The number of EMTs that have halted in GVMMR0SchedHalt. */
263 uint32_t volatile cHaltedEMTs;
264 /** Mini lock for restricting early wake-ups to one thread. */
265 bool volatile fDoingEarlyWakeUps;
266 bool afPadding[3]; /**< explicit alignment padding. */
267 /** When the next halted or sleeping EMT will wake up.
268 * This is set to 0 when it needs recalculating and to UINT64_MAX when
269 * there are no halted or sleeping EMTs in the GVMM. */
270 uint64_t uNsNextEmtWakeup;
271 /** The lock used to serialize VM creation, destruction and associated events that
272 * aren't performance critical. Owners may acquire the list lock. */
273 RTCRITSECT CreateDestroyLock;
274 /** The lock used to serialize used list updates and accesses.
275 * This indirectly includes scheduling since the scheduler will have to walk the
276 * used list to examine running VMs. Owners may not acquire any other locks. */
277 RTCRITSECTRW UsedLock;
278 /** The handle array.
279 * The size of this array defines the maximum number of currently running VMs.
280 * The first entry is unused as it represents the NIL handle. */
281 GVMHANDLE aHandles[GVMM_MAX_HANDLES];
282
283 /** @gcfgm{/GVMM/cEMTsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
284 * The number of EMTs that means we no longer consider ourselves alone on a
285 * CPU/Core.
286 */
287 uint32_t cEMTsMeansCompany;
288 /** @gcfgm{/GVMM/MinSleepAlone,32-bit, 0, 100000000, 750000, ns}
289 * The minimum sleep time for when we're alone, in nanoseconds.
290 */
291 uint32_t nsMinSleepAlone;
292 /** @gcfgm{/GVMM/MinSleepCompany,32-bit,0, 100000000, 15000, ns}
293 * The minimum sleep time for when we've got company, in nanoseconds.
294 */
295 uint32_t nsMinSleepCompany;
296 /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
297 * The limit for the first round of early wake-ups, given in nanoseconds.
298 */
299 uint32_t nsEarlyWakeUp1;
300 /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
301 * The limit for the second round of early wake-ups, given in nanoseconds.
302 */
303 uint32_t nsEarlyWakeUp2;
304
305 /** Set if we're doing early wake-ups.
306 * This reflects nsEarlyWakeUp1 and nsEarlyWakeUp2. */
307 bool volatile fDoEarlyWakeUps;
308
309 /** The number of entries in the host CPU array (aHostCpus). */
310 uint32_t cHostCpus;
311 /** Per host CPU data (variable length). */
312 GVMMHOSTCPU aHostCpus[1];
313} GVMM;
314AssertCompileMemberAlignment(GVMM, CreateDestroyLock, 8);
315AssertCompileMemberAlignment(GVMM, UsedLock, 8);
316AssertCompileMemberAlignment(GVMM, uNsNextEmtWakeup, 8);
317/** Pointer to the GVMM instance data. */
318typedef GVMM *PGVMM;
319
320/** The GVMM::u32Magic value (Charlie Haden). */
321#define GVMM_MAGIC UINT32_C(0x19370806)
322
323
324
325/*********************************************************************************************************************************
326* Global Variables *
327*********************************************************************************************************************************/
328/** Pointer to the GVMM instance data.
329 * (Just my general dislike for global variables.) */
330static PGVMM g_pGVMM = NULL;
331
332/** Macro for obtaining and validating the g_pGVMM pointer.
333 * On failure it will return from the invoking function with the specified return value.
334 *
335 * @param pGVMM The name of the pGVMM variable.
336 * @param rc The return value on failure. Use VERR_GVMM_INSTANCE for VBox
337 * status codes.
338 */
339#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
340 do { \
341 (pGVMM) = g_pGVMM;\
342 AssertPtrReturn((pGVMM), (rc)); \
343 AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
344 } while (0)
345
346/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
347 * On failure it will return from the invoking function.
348 *
349 * @param pGVMM The name of the pGVMM variable.
350 */
351#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
352 do { \
353 (pGVMM) = g_pGVMM;\
354 AssertPtrReturnVoid((pGVMM)); \
355 AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
356 } while (0)
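/* Usage sketch: the non-void variant sits at the top of ring-0 entry points
 * that return a status code (see GVMMR0SetConfig below):
 *
 *     PGVMM pGVMM;
 *     GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
 */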
357
358
359/*********************************************************************************************************************************
360* Internal Functions *
361*********************************************************************************************************************************/
362static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession);
363static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
364static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
365static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM);
366
367#ifdef GVMM_SCHED_WITH_PPT
368static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
369#endif
370
371
372/**
373 * Initializes the GVMM.
374 *
375 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
376 *
377 * @returns VBox status code.
378 */
379GVMMR0DECL(int) GVMMR0Init(void)
380{
381 LogFlow(("GVMMR0Init:\n"));
382
383 /*
384 * Allocate and initialize the instance data.
385 */
386 uint32_t cHostCpus = RTMpGetArraySize();
387 AssertMsgReturn(cHostCpus > 0 && cHostCpus < _64K, ("%d", (int)cHostCpus), VERR_GVMM_HOST_CPU_RANGE);
388
389 PGVMM pGVMM = (PGVMM)RTMemAllocZ(RT_UOFFSETOF_DYN(GVMM, aHostCpus[cHostCpus]));
390 if (!pGVMM)
391 return VERR_NO_MEMORY;
392 int rc = RTCritSectInitEx(&pGVMM->CreateDestroyLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
393 "GVMM-CreateDestroyLock");
394 if (RT_SUCCESS(rc))
395 {
396 rc = RTCritSectRwInitEx(&pGVMM->UsedLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "GVMM-UsedLock");
397 if (RT_SUCCESS(rc))
398 {
399 pGVMM->u32Magic = GVMM_MAGIC;
400 pGVMM->iUsedHead = 0;
401 pGVMM->iFreeHead = 1;
402
403 /* the nil handle */
404 pGVMM->aHandles[0].iSelf = 0;
405 pGVMM->aHandles[0].iNext = 0;
406
407 /* the tail */
408 unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
409 pGVMM->aHandles[i].iSelf = i;
410 pGVMM->aHandles[i].iNext = 0; /* nil */
411
412 /* the rest */
413 while (i-- > 1)
414 {
415 pGVMM->aHandles[i].iSelf = i;
416 pGVMM->aHandles[i].iNext = i + 1;
417 }
418
419 /* The default configuration values. */
420 uint32_t cNsResolution = RTSemEventMultiGetResolution();
421 pGVMM->cEMTsMeansCompany = 1; /** @todo should be adjusted relative to the cpu count or something... */
422 if (cNsResolution >= 5*RT_NS_100US)
423 {
424 pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
425 pGVMM->nsMinSleepCompany = 15000 /* ns (0.015 ms) */;
426 pGVMM->nsEarlyWakeUp1 = 25000 /* ns (0.025 ms) */;
427 pGVMM->nsEarlyWakeUp2 = 50000 /* ns (0.050 ms) */;
428 }
429 else if (cNsResolution > RT_NS_100US)
430 {
431 pGVMM->nsMinSleepAlone = cNsResolution / 2;
432 pGVMM->nsMinSleepCompany = cNsResolution / 4;
433 pGVMM->nsEarlyWakeUp1 = 0;
434 pGVMM->nsEarlyWakeUp2 = 0;
435 }
436 else
437 {
438 pGVMM->nsMinSleepAlone = 2000;
439 pGVMM->nsMinSleepCompany = 2000;
440 pGVMM->nsEarlyWakeUp1 = 0;
441 pGVMM->nsEarlyWakeUp2 = 0;
442 }
443 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
444
445 /* The host CPU data. */
446 pGVMM->cHostCpus = cHostCpus;
447 uint32_t iCpu = cHostCpus;
448 RTCPUSET PossibleSet;
449 RTMpGetSet(&PossibleSet);
450 while (iCpu-- > 0)
451 {
452 pGVMM->aHostCpus[iCpu].idxCpuSet = iCpu;
453#ifdef GVMM_SCHED_WITH_PPT
454 pGVMM->aHostCpus[iCpu].Ppt.pTimer = NULL;
455 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
456 pGVMM->aHostCpus[iCpu].Ppt.uMinHz = 5; /** @todo Add some API which figures this one out. (not *that* important) */
457 pGVMM->aHostCpus[iCpu].Ppt.cTicksHistoriziationInterval = 1;
458 //pGVMM->aHostCpus[iCpu].Ppt.iTickHistorization = 0;
459 //pGVMM->aHostCpus[iCpu].Ppt.cNsInterval = 0;
460 //pGVMM->aHostCpus[iCpu].Ppt.uTimerHz = 0;
461 //pGVMM->aHostCpus[iCpu].Ppt.uDesiredHz = 0;
462 //pGVMM->aHostCpus[iCpu].Ppt.fStarted = false;
463 //pGVMM->aHostCpus[iCpu].Ppt.fStarting = false;
464 //pGVMM->aHostCpus[iCpu].Ppt.iHzHistory = 0;
465 //pGVMM->aHostCpus[iCpu].Ppt.aHzHistory = {0};
466#endif
467
468 if (RTCpuSetIsMember(&PossibleSet, iCpu))
469 {
470 pGVMM->aHostCpus[iCpu].idCpu = RTMpCpuIdFromSetIndex(iCpu);
471 pGVMM->aHostCpus[iCpu].u32Magic = GVMMHOSTCPU_MAGIC;
472
473#ifdef GVMM_SCHED_WITH_PPT
474 rc = RTTimerCreateEx(&pGVMM->aHostCpus[iCpu].Ppt.pTimer,
475 50*1000*1000 /* whatever */,
476 RTTIMER_FLAGS_CPU(iCpu) | RTTIMER_FLAGS_HIGH_RES,
477 gvmmR0SchedPeriodicPreemptionTimerCallback,
478 &pGVMM->aHostCpus[iCpu]);
479 if (RT_SUCCESS(rc))
480 rc = RTSpinlockCreate(&pGVMM->aHostCpus[iCpu].Ppt.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "GVMM/CPU");
481 if (RT_FAILURE(rc))
482 {
483 while (iCpu < cHostCpus)
484 {
485 RTTimerDestroy(pGVMM->aHostCpus[iCpu].Ppt.pTimer);
486 RTSpinlockDestroy(pGVMM->aHostCpus[iCpu].Ppt.hSpinlock);
487 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
488 iCpu++;
489 }
490 break;
491 }
492#endif
493 }
494 else
495 {
496 pGVMM->aHostCpus[iCpu].idCpu = NIL_RTCPUID;
497 pGVMM->aHostCpus[iCpu].u32Magic = 0;
498 }
499 }
500 if (RT_SUCCESS(rc))
501 {
502 g_pGVMM = pGVMM;
503 LogFlow(("GVMMR0Init: pGVMM=%p cHostCpus=%u\n", pGVMM, cHostCpus));
504 return VINF_SUCCESS;
505 }
506
507 /* bail out. */
508 RTCritSectRwDelete(&pGVMM->UsedLock);
509 }
510 RTCritSectDelete(&pGVMM->CreateDestroyLock);
511 }
512
513 RTMemFree(pGVMM);
514 return rc;
515}
516
517
518/**
519 * Terminates the GVMM.
520 *
521 * This is called while owning the loader semaphore (see supdrvLdrFree()).
522 * And unless something is wrong, there should be absolutely no VMs
523 * registered at this point.
524 */
525GVMMR0DECL(void) GVMMR0Term(void)
526{
527 LogFlow(("GVMMR0Term:\n"));
528
529 PGVMM pGVMM = g_pGVMM;
530 g_pGVMM = NULL;
531 if (RT_UNLIKELY(!RT_VALID_PTR(pGVMM)))
532 {
533 SUPR0Printf("GVMMR0Term: pGVMM=%RKv\n", pGVMM);
534 return;
535 }
536
537 /*
538 * First of all, stop all active timers.
539 */
540 uint32_t cActiveTimers = 0;
541 uint32_t iCpu = pGVMM->cHostCpus;
542 while (iCpu-- > 0)
543 {
544 ASMAtomicWriteU32(&pGVMM->aHostCpus[iCpu].u32Magic, ~GVMMHOSTCPU_MAGIC);
545#ifdef GVMM_SCHED_WITH_PPT
546 if ( pGVMM->aHostCpus[iCpu].Ppt.pTimer != NULL
547 && RT_SUCCESS(RTTimerStop(pGVMM->aHostCpus[iCpu].Ppt.pTimer)))
548 cActiveTimers++;
549#endif
550 }
551 if (cActiveTimers)
552 RTThreadSleep(1); /* fudge */
553
554 /*
555 * Invalidate the instance data and free resources.
556 */
557 pGVMM->u32Magic = ~GVMM_MAGIC;
558 RTCritSectRwDelete(&pGVMM->UsedLock);
559 RTCritSectDelete(&pGVMM->CreateDestroyLock);
560
561 pGVMM->iFreeHead = 0;
562 if (pGVMM->iUsedHead)
563 {
564 SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x cEMTs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs, pGVMM->cEMTs);
565 pGVMM->iUsedHead = 0;
566 }
567
568#ifdef GVMM_SCHED_WITH_PPT
569 iCpu = pGVMM->cHostCpus;
570 while (iCpu-- > 0)
571 {
572 RTTimerDestroy(pGVMM->aHostCpus[iCpu].Ppt.pTimer);
573 pGVMM->aHostCpus[iCpu].Ppt.pTimer = NULL;
574 RTSpinlockDestroy(pGVMM->aHostCpus[iCpu].Ppt.hSpinlock);
575 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
576 }
577#endif
578
579 RTMemFree(pGVMM);
580}
581
582
583/**
584 * A quick hack for setting global config values.
585 *
586 * @returns VBox status code.
587 *
588 * @param pSession The session handle. Used for authentication.
589 * @param pszName The variable name.
590 * @param u64Value The new value.
591 */
592GVMMR0DECL(int) GVMMR0SetConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t u64Value)
593{
594 /*
595 * Validate input.
596 */
597 PGVMM pGVMM;
598 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
599 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
600 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
601
602 /*
603 * String switch time!
604 */
605 if (strncmp(pszName, RT_STR_TUPLE("/GVMM/")))
606 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
607 int rc = VINF_SUCCESS;
608 pszName += sizeof("/GVMM/") - 1;
609 if (!strcmp(pszName, "cEMTsMeansCompany"))
610 {
611 if (u64Value <= UINT32_MAX)
612 pGVMM->cEMTsMeansCompany = u64Value;
613 else
614 rc = VERR_OUT_OF_RANGE;
615 }
616 else if (!strcmp(pszName, "MinSleepAlone"))
617 {
618 if (u64Value <= RT_NS_100MS)
619 pGVMM->nsMinSleepAlone = u64Value;
620 else
621 rc = VERR_OUT_OF_RANGE;
622 }
623 else if (!strcmp(pszName, "MinSleepCompany"))
624 {
625 if (u64Value <= RT_NS_100MS)
626 pGVMM->nsMinSleepCompany = u64Value;
627 else
628 rc = VERR_OUT_OF_RANGE;
629 }
630 else if (!strcmp(pszName, "EarlyWakeUp1"))
631 {
632 if (u64Value <= RT_NS_100MS)
633 {
634 pGVMM->nsEarlyWakeUp1 = u64Value;
635 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
636 }
637 else
638 rc = VERR_OUT_OF_RANGE;
639 }
640 else if (!strcmp(pszName, "EarlyWakeUp2"))
641 {
642 if (u64Value <= RT_NS_100MS)
643 {
644 pGVMM->nsEarlyWakeUp2 = u64Value;
645 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
646 }
647 else
648 rc = VERR_OUT_OF_RANGE;
649 }
650 else
651 rc = VERR_CFGM_VALUE_NOT_FOUND;
652 return rc;
653}
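/* Example (sketch, based on the name parsing above): raising the first early
 * wake-up limit to 30 microseconds for a session:
 *
 *     int rc = GVMMR0SetConfig(pSession, "/GVMM/EarlyWakeUp1", 30000);
 *     AssertRC(rc);
 */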
654
655
656/**
657 * A quick hack for getting global config values.
658 *
659 * @returns VBox status code.
660 *
661 * @param pSession The session handle. Used for authentication.
662 * @param pszName The variable name.
663 * @param pu64Value Where to return the value.
664 */
665GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
666{
667 /*
668 * Validate input.
669 */
670 PGVMM pGVMM;
671 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
672 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
673 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
674 AssertPtrReturn(pu64Value, VERR_INVALID_POINTER);
675
676 /*
677 * String switch time!
678 */
679 if (strncmp(pszName, RT_STR_TUPLE("/GVMM/")))
680 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
681 int rc = VINF_SUCCESS;
682 pszName += sizeof("/GVMM/") - 1;
683 if (!strcmp(pszName, "cEMTsMeansCompany"))
684 *pu64Value = pGVMM->cEMTsMeansCompany;
685 else if (!strcmp(pszName, "MinSleepAlone"))
686 *pu64Value = pGVMM->nsMinSleepAlone;
687 else if (!strcmp(pszName, "MinSleepCompany"))
688 *pu64Value = pGVMM->nsMinSleepCompany;
689 else if (!strcmp(pszName, "EarlyWakeUp1"))
690 *pu64Value = pGVMM->nsEarlyWakeUp1;
691 else if (!strcmp(pszName, "EarlyWakeUp2"))
692 *pu64Value = pGVMM->nsEarlyWakeUp2;
693 else
694 rc = VERR_CFGM_VALUE_NOT_FOUND;
695 return rc;
696}
697
698
699/**
700 * Acquire the 'used' lock in shared mode.
701 *
702 * This prevents destruction of the VM while we're in ring-0.
703 *
704 * @returns IPRT status code, see RTSemFastMutexRequest.
705 * @param a_pGVMM The GVMM instance data.
706 * @sa GVMMR0_USED_SHARED_UNLOCK, GVMMR0_USED_EXCLUSIVE_LOCK
707 */
708#define GVMMR0_USED_SHARED_LOCK(a_pGVMM) RTCritSectRwEnterShared(&(a_pGVMM)->UsedLock)
709
710/**
711 * Release the 'used' lock when owning it in shared mode.
712 *
713 * @returns IPRT status code, see RTSemFastMutexRequest.
714 * @param a_pGVMM The GVMM instance data.
715 * @sa GVMMR0_USED_SHARED_LOCK
716 */
717#define GVMMR0_USED_SHARED_UNLOCK(a_pGVMM) RTCritSectRwLeaveShared(&(a_pGVMM)->UsedLock)
718
719/**
720 * Acquire the 'used' lock in exclusive mode.
721 *
722 * Only use this function when making changes to the used list.
723 *
724 * @returns IPRT status code, see RTSemFastMutexRequest.
725 * @param a_pGVMM The GVMM instance data.
726 * @sa GVMMR0_USED_EXCLUSIVE_UNLOCK
727 */
728#define GVMMR0_USED_EXCLUSIVE_LOCK(a_pGVMM) RTCritSectRwEnterExcl(&(a_pGVMM)->UsedLock)
729
730/**
731 * Release the 'used' lock when owning it in exclusive mode.
732 *
733 * @returns IPRT status code, see RTSemFastMutexRelease.
734 * @param a_pGVMM The GVMM instance data.
735 * @sa GVMMR0_USED_EXCLUSIVE_LOCK, GVMMR0_USED_SHARED_UNLOCK
736 */
737#define GVMMR0_USED_EXCLUSIVE_UNLOCK(a_pGVMM) RTCritSectRwLeaveExcl(&(a_pGVMM)->UsedLock)
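/* Locking pattern sketch (illustrative): take the lock, walk or update the
 * used list, release it in the same function; shared mode for lookups,
 * exclusive mode for used-list updates:
 *
 *     int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
 *     AssertRC(rc);
 *     // ... walk the used list via pGVMM->iUsedHead / aHandles[] ...
 *     GVMMR0_USED_SHARED_UNLOCK(pGVMM);
 */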
738
739
740/**
741 * Try acquire the 'create & destroy' lock.
742 *
743 * @returns IPRT status code, see RTSemFastMutexRequest.
744 * @param pGVMM The GVMM instance data.
745 */
746DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
747{
748 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
749 int rc = RTCritSectEnter(&pGVMM->CreateDestroyLock);
750 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
751 return rc;
752}
753
754
755/**
756 * Release the 'create & destroy' lock.
757 *
758 * @returns IPRT status code, see RTSemFastMutexRequest.
759 * @param pGVMM The GVMM instance data.
760 */
761DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
762{
763 LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
764 int rc = RTCritSectLeave(&pGVMM->CreateDestroyLock);
765 AssertRC(rc);
766 return rc;
767}
768
769
770/**
771 * Request wrapper for the GVMMR0CreateVM API.
772 *
773 * @returns VBox status code.
774 * @param pReq The request buffer.
775 * @param pSession The session handle. The VM will be associated with this.
776 */
777GVMMR0DECL(int) GVMMR0CreateVMReq(PGVMMCREATEVMREQ pReq, PSUPDRVSESSION pSession)
778{
779 /*
780 * Validate the request.
781 */
782 if (!RT_VALID_PTR(pReq))
783 return VERR_INVALID_POINTER;
784 if (pReq->Hdr.cbReq != sizeof(*pReq))
785 return VERR_INVALID_PARAMETER;
786 if (pReq->pSession != pSession)
787 return VERR_INVALID_POINTER;
788
789 /*
790 * Execute it.
791 */
792 PGVM pGVM;
793 pReq->pVMR0 = NULL;
794 pReq->pVMR3 = NIL_RTR3PTR;
795 int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &pGVM);
796 if (RT_SUCCESS(rc))
797 {
798 pReq->pVMR0 = pGVM; /** @todo don't expose this to ring-3, use a unique random number instead. */
799 pReq->pVMR3 = pGVM->pVMR3;
800 }
801 return rc;
802}
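/* Call-path sketch (assumed; the ring-3 side is not part of this file): ring-3
 * fills in a GVMMCREATEVMREQ and issues the VMMR0_DO_GVMM_CREATE_VM operation
 * through the support driver, which dispatches to GVMMR0CreateVMReq() above:
 *
 *     GVMMCREATEVMREQ CreateVMReq;
 *     CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     CreateVMReq.Hdr.cbReq    = sizeof(CreateVMReq);
 *     CreateVMReq.pSession     = pSession;
 *     CreateVMReq.cCpus        = cCpus;
 *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID,
 *                               VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
 */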
803
804
805/**
806 * Allocates the VM structure and registers it with GVM.
807 *
808 * The caller will become the VM owner and thereby the EMT.
809 *
810 * @returns VBox status code.
811 * @param pSession The support driver session.
812 * @param cCpus Number of virtual CPUs for the new VM.
813 * @param ppGVM Where to store the pointer to the VM structure.
814 *
815 * @thread EMT.
816 */
817GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PGVM *ppGVM)
818{
819 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
820 PGVMM pGVMM;
821 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
822
823 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
824 *ppGVM = NULL;
825
826 if ( cCpus == 0
827 || cCpus > VMM_MAX_CPU_COUNT)
828 return VERR_INVALID_PARAMETER;
829
830 RTNATIVETHREAD hEMT0 = RTThreadNativeSelf();
831 AssertReturn(hEMT0 != NIL_RTNATIVETHREAD, VERR_GVMM_BROKEN_IPRT);
832 RTPROCESS ProcId = RTProcSelf();
833 AssertReturn(ProcId != NIL_RTPROCESS, VERR_GVMM_BROKEN_IPRT);
834
835 /*
836 * The whole allocation process is protected by the lock.
837 */
838 int rc = gvmmR0CreateDestroyLock(pGVMM);
839 AssertRCReturn(rc, rc);
840
841 /*
842 * Only one VM per session.
843 */
844 if (SUPR0GetSessionVM(pSession) != NULL)
845 {
846 gvmmR0CreateDestroyUnlock(pGVMM);
847 SUPR0Printf("GVMMR0CreateVM: The session %p already got a VM: %p\n", pSession, SUPR0GetSessionVM(pSession));
848 return VERR_ALREADY_EXISTS;
849 }
850
851 /*
852 * Allocate a handle first so we don't waste resources unnecessarily.
853 */
854 uint16_t iHandle = pGVMM->iFreeHead;
855 if (iHandle)
856 {
857 PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
858
859 /* consistency checks, a bit paranoid as always. */
860 if ( !pHandle->pGVM
861 && !pHandle->pvObj
862 && pHandle->iSelf == iHandle)
863 {
864 pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
865 if (pHandle->pvObj)
866 {
867 /*
868 * Move the handle from the free to used list and perform permission checks.
869 */
870 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
871 AssertRC(rc);
872
873 pGVMM->iFreeHead = pHandle->iNext;
874 pHandle->iNext = pGVMM->iUsedHead;
875 pGVMM->iUsedHead = iHandle;
876 pGVMM->cVMs++;
877
878 pHandle->pGVM = NULL;
879 pHandle->pSession = pSession;
880 pHandle->hEMT0 = NIL_RTNATIVETHREAD;
881 pHandle->ProcId = NIL_RTPROCESS;
882
883 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
884
885 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
886 if (RT_SUCCESS(rc))
887 {
888 /*
889 * Allocate memory for the VM structure (combined VM + GVM).
890 */
891 const uint32_t cbVM = RT_UOFFSETOF_DYN(GVM, aCpus[cCpus]);
892 const uint32_t cPages = RT_ALIGN_32(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
893 RTR0MEMOBJ hVMMemObj = NIL_RTR0MEMOBJ;
894 rc = RTR0MemObjAllocPage(&hVMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
895 if (RT_SUCCESS(rc))
896 {
897 PGVM pGVM = (PGVM)RTR0MemObjAddress(hVMMemObj);
898 AssertPtr(pGVM);
899
900 /*
901 * Initialise the structure.
902 */
903 RT_BZERO(pGVM, cPages << PAGE_SHIFT);
904 gvmmR0InitPerVMData(pGVM, iHandle, cCpus, pSession);
905 pGVM->gvmm.s.VMMemObj = hVMMemObj;
906 GMMR0InitPerVMData(pGVM);
907 rc = PGMR0InitPerVMData(pGVM);
908 PDMR0InitPerVMData(pGVM);
909 IOMR0InitPerVMData(pGVM);
910 if (RT_SUCCESS(rc))
911 {
912 /*
913 * Allocate page array.
914 * This currently has to be made available to ring-3, but this should change eventually.
915 */
916 rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
917 if (RT_SUCCESS(rc))
918 {
919 PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
920 for (uint32_t iPage = 0; iPage < cPages; iPage++)
921 {
922 paPages[iPage].uReserved = 0;
923 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
924 Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
925 }
926
927 /*
928 * Map the page array, VM and VMCPU structures into ring-3.
929 */
930 AssertCompileSizeAlignment(VM, PAGE_SIZE);
931 rc = RTR0MemObjMapUserEx(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
932 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
933 0 /*offSub*/, sizeof(VM));
934 for (VMCPUID i = 0; i < cCpus && RT_SUCCESS(rc); i++)
935 {
936 AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
937 rc = RTR0MemObjMapUserEx(&pGVM->aCpus[i].gvmm.s.VMCpuMapObj, pGVM->gvmm.s.VMMemObj,
938 (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
939 RT_UOFFSETOF_DYN(GVM, aCpus[i]), sizeof(VMCPU));
940 }
941 if (RT_SUCCESS(rc))
942 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1,
943 0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
944 NIL_RTR0PROCESS);
945 if (RT_SUCCESS(rc))
946 {
947 /*
948 * Initialize all the VM pointers.
949 */
950 PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
951 AssertPtr((void *)pVMR3);
952
953 for (VMCPUID i = 0; i < cCpus; i++)
954 {
955 pGVM->aCpus[i].pVMR0 = pGVM;
956 pGVM->aCpus[i].pVMR3 = pVMR3;
957 pGVM->apCpusR3[i] = RTR0MemObjAddressR3(pGVM->aCpus[i].gvmm.s.VMCpuMapObj);
958 pGVM->aCpus[i].pVCpuR3 = pGVM->apCpusR3[i];
959 pGVM->apCpusR0[i] = &pGVM->aCpus[i];
960 AssertPtr((void *)pGVM->apCpusR3[i]);
961 }
962
963 pGVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
964 AssertPtr((void *)pGVM->paVMPagesR3);
965
966 /*
967 * Complete the handle - take the UsedLock sem just to be careful.
968 */
969 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
970 AssertRC(rc);
971
972 pHandle->pGVM = pGVM;
973 pHandle->hEMT0 = hEMT0;
974 pHandle->ProcId = ProcId;
975 pGVM->pVMR3 = pVMR3;
976 pGVM->pVMR3Unsafe = pVMR3;
977 pGVM->aCpus[0].hEMT = hEMT0;
978 pGVM->aCpus[0].hNativeThreadR0 = hEMT0;
979 pGVMM->cEMTs += cCpus;
980
981 /* Associate it with the session and create the context hook for EMT0. */
982 rc = SUPR0SetSessionVM(pSession, pGVM, pGVM);
983 if (RT_SUCCESS(rc))
984 {
985 rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[0]);
986 if (RT_SUCCESS(rc))
987 {
988 /*
989 * Done!
990 */
991 VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pGVM, ProcId, (void *)hEMT0, cCpus);
992
993 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
994 gvmmR0CreateDestroyUnlock(pGVMM);
995
996 CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
997
998 *ppGVM = pGVM;
999 Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
1000 return VINF_SUCCESS;
1001 }
1002
1003 SUPR0SetSessionVM(pSession, NULL, NULL);
1004 }
1005 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1006 }
1007
1008 /* Cleanup mappings. */
1009 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
1010 {
1011 RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
1012 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1013 }
1014 for (VMCPUID i = 0; i < cCpus; i++)
1015 if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
1016 {
1017 RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */);
1018 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1019 }
1020 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
1021 {
1022 RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */);
1023 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1024 }
1025 }
1026 }
1027 }
1028 }
1029 /* else: The user wasn't permitted to create this VM. */
1030
1031 /*
1032 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
1033 * object reference here. A little extra mess because of the non-recursive lock.
1034 */
1035 void *pvObj = pHandle->pvObj;
1036 pHandle->pvObj = NULL;
1037 gvmmR0CreateDestroyUnlock(pGVMM);
1038
1039 SUPR0ObjRelease(pvObj, pSession);
1040
1041 SUPR0Printf("GVMMR0CreateVM: failed, rc=%Rrc\n", rc);
1042 return rc;
1043 }
1044
1045 rc = VERR_NO_MEMORY;
1046 }
1047 else
1048 rc = VERR_GVMM_IPE_1;
1049 }
1050 else
1051 rc = VERR_GVM_TOO_MANY_VMS;
1052
1053 gvmmR0CreateDestroyUnlock(pGVMM);
1054 return rc;
1055}
1056
1057
1058/**
1059 * Initializes the per VM data belonging to GVMM.
1060 *
1061 * @param pGVM Pointer to the global VM structure.
1062 * @param hSelf The handle.
1063 * @param cCpus The CPU count.
1064 * @param pSession The session this VM is associated with.
1065 */
1066static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession)
1067{
1068 AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
1069 AssertCompile(RT_SIZEOFMEMB(GVMCPU,gvmm.s) <= RT_SIZEOFMEMB(GVMCPU,gvmm.padding));
1070 AssertCompileMemberAlignment(VM, cpum, 64);
1071 AssertCompileMemberAlignment(VM, tm, 64);
1072
1073 /* GVM: */
1074 pGVM->u32Magic = GVM_MAGIC;
1075 pGVM->hSelf = hSelf;
1076 pGVM->cCpus = cCpus;
1077 pGVM->pSession = pSession;
1078 pGVM->pSelf = pGVM;
1079
1080 /* VM: */
1081 pGVM->enmVMState = VMSTATE_CREATING;
1082 pGVM->hSelfUnsafe = hSelf;
1083 pGVM->pSessionUnsafe = pSession;
1084 pGVM->pVMR0ForCall = pGVM;
1085 pGVM->cCpusUnsafe = cCpus;
1086 pGVM->uCpuExecutionCap = 100; /* default is no cap. */
1087 pGVM->uStructVersion = 1;
1088 pGVM->cbSelf = sizeof(VM);
1089 pGVM->cbVCpu = sizeof(VMCPU);
1090
1091 /* GVMM: */
1092 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
1093 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1094 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
1095 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1096 pGVM->gvmm.s.fDoneVMMR0Init = false;
1097 pGVM->gvmm.s.fDoneVMMR0Term = false;
1098
1099 /*
1100 * Per virtual CPU.
1101 */
1102 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1103 {
1104 pGVM->aCpus[i].idCpu = i;
1105 pGVM->aCpus[i].idCpuUnsafe = i;
1106 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1107 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1108 pGVM->aCpus[i].hEMT = NIL_RTNATIVETHREAD;
1109 pGVM->aCpus[i].pGVM = pGVM;
1110 pGVM->aCpus[i].idHostCpu = NIL_RTCPUID;
1111 pGVM->aCpus[i].iHostCpuSet = UINT32_MAX;
1112 pGVM->aCpus[i].hNativeThread = NIL_RTNATIVETHREAD;
1113 pGVM->aCpus[i].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1114 pGVM->aCpus[i].enmState = VMCPUSTATE_STOPPED;
1115 pGVM->aCpus[i].pVCpuR0ForVtg = &pGVM->aCpus[i];
1116 }
1117}
1118
1119
1120/**
1121 * Does the VM initialization.
1122 *
1123 * @returns VBox status code.
1124 * @param pGVM The global (ring-0) VM structure.
1125 */
1126GVMMR0DECL(int) GVMMR0InitVM(PGVM pGVM)
1127{
1128 LogFlow(("GVMMR0InitVM: pGVM=%p\n", pGVM));
1129
1130 int rc = VERR_INTERNAL_ERROR_3;
1131 if ( !pGVM->gvmm.s.fDoneVMMR0Init
1132 && pGVM->aCpus[0].gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
1133 {
1134 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1135 {
1136 rc = RTSemEventMultiCreate(&pGVM->aCpus[i].gvmm.s.HaltEventMulti);
1137 if (RT_FAILURE(rc))
1138 {
1139 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1140 break;
1141 }
1142 }
1143 }
1144 else
1145 rc = VERR_WRONG_ORDER;
1146
1147 LogFlow(("GVMMR0InitVM: returns %Rrc\n", rc));
1148 return rc;
1149}
1150
1151
1152/**
1153 * Indicates that we're done with the ring-0 initialization
1154 * of the VM.
1155 *
1156 * @param pGVM The global (ring-0) VM structure.
1157 * @thread EMT(0)
1158 */
1159GVMMR0DECL(void) GVMMR0DoneInitVM(PGVM pGVM)
1160{
1161 /* Set the indicator. */
1162 pGVM->gvmm.s.fDoneVMMR0Init = true;
1163}
1164
1165
1166/**
1167 * Indicates that we're doing the ring-0 termination of the VM.
1168 *
1169 * @returns true if termination hasn't been done already, false if it has.
1170 * @param pGVM Pointer to the global VM structure. Optional.
1171 * @thread EMT(0) or session cleanup thread.
1172 */
1173GVMMR0DECL(bool) GVMMR0DoingTermVM(PGVM pGVM)
1174{
1175 /* Validate the VM structure, state and handle. */
1176 AssertPtrReturn(pGVM, false);
1177
1178 /* Set the indicator. */
1179 if (pGVM->gvmm.s.fDoneVMMR0Term)
1180 return false;
1181 pGVM->gvmm.s.fDoneVMMR0Term = true;
1182 return true;
1183}
1184
1185
1186/**
1187 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
1188 *
1189 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
1190 * and the caller is not the EMT thread, unfortunately. For security reasons, it
1191 * would've been nice if the caller was actually the EMT thread or that we somehow
1192 * could've associated the calling thread with the VM up front.
1193 *
1194 * @returns VBox status code.
1195 * @param pGVM The global (ring-0) VM structure.
1196 *
1197 * @thread EMT(0) if it's associated with the VM, otherwise any thread.
1198 */
1199GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM)
1200{
1201 LogFlow(("GVMMR0DestroyVM: pGVM=%p\n", pGVM));
1202 PGVMM pGVMM;
1203 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1204
1205 /*
1206 * Validate the VM structure, state and caller.
1207 */
1208 AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
1209 AssertReturn(!((uintptr_t)pGVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1210 AssertMsgReturn(pGVM->enmVMState >= VMSTATE_CREATING && pGVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pGVM->enmVMState),
1211 VERR_WRONG_ORDER);
1212
1213 uint32_t hGVM = pGVM->hSelf;
1214 ASMCompilerBarrier();
1215 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_VM_HANDLE);
1216 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_VM_HANDLE);
1217
1218 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1219 AssertReturn(pHandle->pGVM == pGVM, VERR_NOT_OWNER);
1220
1221 RTPROCESS ProcId = RTProcSelf();
1222 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1223 AssertReturn( ( pHandle->hEMT0 == hSelf
1224 && pHandle->ProcId == ProcId)
1225 || pHandle->hEMT0 == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);
1226
1227 /*
1228 * Lookup the handle and destroy the object.
1229 * Since the lock isn't recursive and we'll have to leave it before dereferencing the
1230 * object, we take some precautions against racing callers just in case...
1231 */
1232 int rc = gvmmR0CreateDestroyLock(pGVMM);
1233 AssertRC(rc);
1234
1235 /* Be careful here because we might theoretically be racing someone else cleaning up. */
1236 if ( pHandle->pGVM == pGVM
1237 && ( ( pHandle->hEMT0 == hSelf
1238 && pHandle->ProcId == ProcId)
1239 || pHandle->hEMT0 == NIL_RTNATIVETHREAD)
1240 && RT_VALID_PTR(pHandle->pvObj)
1241 && RT_VALID_PTR(pHandle->pSession)
1242 && RT_VALID_PTR(pHandle->pGVM)
1243 && pHandle->pGVM->u32Magic == GVM_MAGIC)
1244 {
1245 /* Check that other EMTs have deregistered. */
1246 uint32_t cNotDeregistered = 0;
1247 for (VMCPUID idCpu = 1; idCpu < pGVM->cCpus; idCpu++)
1248 cNotDeregistered += pGVM->aCpus[idCpu].hEMT != ~(RTNATIVETHREAD)1; /* see GVMMR0DeregisterVCpu for the value */
1249 if (cNotDeregistered == 0)
1250 {
1251 /* Grab the object pointer. */
1252 void *pvObj = pHandle->pvObj;
1253 pHandle->pvObj = NULL;
1254 gvmmR0CreateDestroyUnlock(pGVMM);
1255
1256 SUPR0ObjRelease(pvObj, pHandle->pSession);
1257 }
1258 else
1259 {
1260 gvmmR0CreateDestroyUnlock(pGVMM);
1261 rc = VERR_GVMM_NOT_ALL_EMTS_DEREGISTERED;
1262 }
1263 }
1264 else
1265 {
1266 SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.pGVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pGVM=%p hSelf=%p\n",
1267 pHandle, pHandle->pGVM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pGVM, hSelf);
1268 gvmmR0CreateDestroyUnlock(pGVMM);
1269 rc = VERR_GVMM_IPE_2;
1270 }
1271
1272 return rc;
1273}
1274
1275
1276/**
1277 * Performs VM cleanup task as part of object destruction.
1278 *
1279 * @param pGVM The GVM pointer.
1280 */
1281static void gvmmR0CleanupVM(PGVM pGVM)
1282{
1283 if ( pGVM->gvmm.s.fDoneVMMR0Init
1284 && !pGVM->gvmm.s.fDoneVMMR0Term)
1285 {
1286 if ( pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ
1287 && RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM)
1288 {
1289 LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
1290 VMMR0TermVM(pGVM, NIL_VMCPUID);
1291 }
1292 else
1293 AssertMsgFailed(("gvmmR0CleanupVM: VMMemObj=%p pGVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM));
1294 }
1295
1296 GMMR0CleanupVM(pGVM);
1297#ifdef VBOX_WITH_NEM_R0
1298 NEMR0CleanupVM(pGVM);
1299#endif
1300 PDMR0CleanupVM(pGVM);
1301 IOMR0CleanupVM(pGVM);
1302 PGMR0CleanupVM(pGVM);
1303
1304 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
1305 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
1306 {
1307 /** @todo Can we busy wait here for all thread-context hooks to be
1308 * deregistered before releasing (destroying) it? Only until we find a
1309 * solution for not deregistering hooks every time we're leaving HMR0
1310 * context. */
1311 VMMR0ThreadCtxHookDestroyForEmt(&pGVM->aCpus[idCpu]);
1312 }
1313}
1314
1315
1316/**
1317 * @callback_method_impl{FNSUPDRVDESTRUCTOR,VM handle destructor}
1318 *
1319 * pvUser1 is the GVM instance pointer.
1320 * pvUser2 is the handle pointer.
1321 */
1322static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
1323{
1324 LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvUser1, pvUser2));
1325
1326 NOREF(pvObj);
1327
1328 /*
1329 * Some quick, paranoid, input validation.
1330 */
1331 PGVMHANDLE pHandle = (PGVMHANDLE)pvUser2;
1332 AssertPtr(pHandle);
1333 PGVMM pGVMM = (PGVMM)pvUser1;
1334 Assert(pGVMM == g_pGVMM);
1335 const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
1336 if ( !iHandle
1337 || iHandle >= RT_ELEMENTS(pGVMM->aHandles)
1338 || iHandle != pHandle->iSelf)
1339 {
1340 SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
1341 return;
1342 }
1343
1344 int rc = gvmmR0CreateDestroyLock(pGVMM);
1345 AssertRC(rc);
1346 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
1347 AssertRC(rc);
1348
1349 /*
1350 * This is a tad slow but a doubly linked list is too much hassle.
1351 */
1352 if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
1353 {
1354 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
1355 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1356 gvmmR0CreateDestroyUnlock(pGVMM);
1357 return;
1358 }
1359
1360 if (pGVMM->iUsedHead == iHandle)
1361 pGVMM->iUsedHead = pHandle->iNext;
1362 else
1363 {
1364 uint16_t iPrev = pGVMM->iUsedHead;
1365 int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
1366 while (iPrev)
1367 {
1368 if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
1369 {
1370 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
1371 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1372 gvmmR0CreateDestroyUnlock(pGVMM);
1373 return;
1374 }
1375 if (RT_UNLIKELY(c-- <= 0))
1376 {
1377 iPrev = 0;
1378 break;
1379 }
1380
1381 if (pGVMM->aHandles[iPrev].iNext == iHandle)
1382 break;
1383 iPrev = pGVMM->aHandles[iPrev].iNext;
1384 }
1385 if (!iPrev)
1386 {
1387 SUPR0Printf("GVM: can't find the previous handle of %d!\n", pHandle->iSelf);
1388 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1389 gvmmR0CreateDestroyUnlock(pGVMM);
1390 return;
1391 }
1392
1393 Assert(pGVMM->aHandles[iPrev].iNext == iHandle);
1394 pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
1395 }
1396 pHandle->iNext = 0;
1397 pGVMM->cVMs--;
1398
1399 /*
1400 * Do the global cleanup round.
1401 */
1402 PGVM pGVM = pHandle->pGVM;
1403 if ( RT_VALID_PTR(pGVM)
1404 && pGVM->u32Magic == GVM_MAGIC)
1405 {
1406 pGVMM->cEMTs -= pGVM->cCpus;
1407
1408 if (pGVM->pSession)
1409 SUPR0SetSessionVM(pGVM->pSession, NULL, NULL);
1410
1411 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1412
1413 gvmmR0CleanupVM(pGVM);
1414
1415 /*
1416 * Do the GVMM cleanup - must be done last.
1417 */
1418 /* The VM and VM pages mappings/allocations. */
1419 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
1420 {
1421 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
1422 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1423 }
1424
1425 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
1426 {
1427 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
1428 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1429 }
1430
1431 if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
1432 {
1433 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
1434 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
1435 }
1436
1437 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1438 {
1439 if (pGVM->aCpus[i].gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI)
1440 {
1441 rc = RTSemEventMultiDestroy(pGVM->aCpus[i].gvmm.s.HaltEventMulti); AssertRC(rc);
1442 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1443 }
1444 if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
1445 {
1446 rc = RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */); AssertRC(rc);
1447 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1448 }
1449 }
1450
1451 /* the GVM structure itself. */
1452 pGVM->u32Magic |= UINT32_C(0x80000000);
1453 Assert(pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ);
1454 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, true /*fFreeMappings*/); AssertRC(rc);
1455 pGVM = NULL;
1456
1457 /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
1458 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
1459 AssertRC(rc);
1460 }
1461 /* else: GVMMR0CreateVM cleanup. */
1462
1463 /*
1464 * Free the handle.
1465 */
1466 pHandle->iNext = pGVMM->iFreeHead;
1467 pGVMM->iFreeHead = iHandle;
1468 ASMAtomicWriteNullPtr(&pHandle->pGVM);
1469 ASMAtomicWriteNullPtr(&pHandle->pvObj);
1470 ASMAtomicWriteNullPtr(&pHandle->pSession);
1471 ASMAtomicWriteHandle(&pHandle->hEMT0, NIL_RTNATIVETHREAD);
1472 ASMAtomicWriteU32(&pHandle->ProcId, NIL_RTPROCESS);
1473
1474 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1475 gvmmR0CreateDestroyUnlock(pGVMM);
1476 LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
1477}
1478
1479
1480/**
1481 * Registers the calling thread as the EMT of a Virtual CPU.
1482 *
1483 * Note that VCPU 0 is automatically registered during VM creation.
1484 *
1485 * @returns VBox status code
1486 * @param pGVM The global (ring-0) VM structure.
1487 * @param idCpu VCPU id to register the current thread as.
1488 */
1489GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, VMCPUID idCpu)
1490{
1491 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
1492
1493 /*
1494 * Validate the VM structure, state and handle.
1495 */
1496 PGVMM pGVMM;
1497 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */
1498 if (RT_SUCCESS(rc))
1499 {
1500 if (idCpu < pGVM->cCpus)
1501 {
1502 /* Check that the EMT isn't already assigned to a thread. */
1503 if (pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD)
1504 {
1505 Assert(pGVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
1506
1507 /* A thread may only be one EMT. */
1508 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
1509 for (VMCPUID iCpu = 0; iCpu < pGVM->cCpus; iCpu++)
1510 AssertBreakStmt(pGVM->aCpus[iCpu].hEMT != hNativeSelf, rc = VERR_INVALID_PARAMETER);
1511 if (RT_SUCCESS(rc))
1512 {
1513 /*
1514 * Do the assignment, then try setup the hook. Undo if that fails.
1515 */
1516 pGVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = RTThreadNativeSelf();
1517
1518 rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[idCpu]);
1519 if (RT_SUCCESS(rc))
1520 CPUMR0RegisterVCpuThread(&pGVM->aCpus[idCpu]);
1521 else
1522 pGVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = NIL_RTNATIVETHREAD;
1523 }
1524 }
1525 else
1526 rc = VERR_ACCESS_DENIED;
1527 }
1528 else
1529 rc = VERR_INVALID_CPU_ID;
1530 }
1531 return rc;
1532}
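/* Usage sketch: each additional EMT (idCpu > 0) registers itself, on its own
 * thread, before doing any other per-VCPU ring-0 work:
 *
 *     int rc = GVMMR0RegisterVCpu(pGVM, idCpu); // must be called by the EMT thread itself
 *     AssertRCReturn(rc, rc);
 */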
1533
1534
1535/**
1536 * Deregisters the calling thread as the EMT of a Virtual CPU.
1537 *
1538 * Note that VCPU 0 shall call GVMMR0DestroyVM instead of this API.
1539 *
1540 * @returns VBox status code
1541 * @param pGVM The global (ring-0) VM structure.
1542 * @param idCpu VCPU id to deregister the current thread from.
1543 */
1544GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, VMCPUID idCpu)
1545{
1546 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
1547
1548 /*
1549 * Validate the VM structure, state and handle.
1550 */
1551 PGVMM pGVMM;
1552 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
1553 if (RT_SUCCESS(rc))
1554 {
1555 /*
1556 * Take the destruction lock and recheck the handle state to
1557 * prevent racing GVMMR0DestroyVM.
1558 */
1559 gvmmR0CreateDestroyLock(pGVMM);
1560 uint32_t hSelf = pGVM->hSelf;
1561 ASMCompilerBarrier();
1562 if ( hSelf < RT_ELEMENTS(pGVMM->aHandles)
1563 && pGVMM->aHandles[hSelf].pvObj != NULL
1564 && pGVMM->aHandles[hSelf].pGVM == pGVM)
1565 {
1566 /*
1567 * Do per-EMT cleanups.
1568 */
1569 VMMR0ThreadCtxHookDestroyForEmt(&pGVM->aCpus[idCpu]);
1570
1571 /*
1572 * Invalidate hEMT. We don't use NIL here as that would allow
1573 * GVMMR0RegisterVCpu to be called again, and we don't want that.
1574 */
1575 AssertCompile(~(RTNATIVETHREAD)1 != NIL_RTNATIVETHREAD);
1576 pGVM->aCpus[idCpu].hEMT = ~(RTNATIVETHREAD)1;
1577 pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1578 }
1579
1580 gvmmR0CreateDestroyUnlock(pGVMM);
1581 }
1582 return rc;
1583}
1584
1585
1586/**
1587 * Lookup a GVM structure by its handle.
1588 *
1589 * @returns The GVM pointer on success, NULL on failure.
1590 * @param hGVM The global VM handle. Asserts on bad handle.
1591 */
1592GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
1593{
1594 PGVMM pGVMM;
1595 GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
1596
1597 /*
1598 * Validate.
1599 */
1600 AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
1601 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
1602
1603 /*
1604 * Look it up.
1605 */
1606 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1607 AssertPtrReturn(pHandle->pvObj, NULL);
1608 PGVM pGVM = pHandle->pGVM;
1609 AssertPtrReturn(pGVM, NULL);
1610
1611 return pGVM;
1612}
1613
1614
1615/**
1616 * Check that the given GVM and VM structures match up.
1617 *
1618 * The calling thread must be in the same process as the VM. All current lookups
1619 * are by threads inside the same process, so this will not be an issue.
1620 *
1621 * @returns VBox status code.
1622 * @param pGVM The global (ring-0) VM structure.
1623 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1624 * @param fTakeUsedLock Whether to take the used lock or not. We take it in
1625 * shared mode when requested.
1626 *
1627 * Be very careful if not taking the lock as it's
1628 * possible that the VM will disappear then!
1629 *
1630 * @remark This will not assert on an invalid pGVM but try to return silently.
1631 */
1632static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
1633{
1634 /*
1635 * Check the pointers.
1636 */
1637 int rc;
1638 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1639 && ((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0 ))
1640 {
1641 /*
1642 * Get the pGVMM instance and check the VM handle.
1643 */
1644 PGVMM pGVMM;
1645 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1646
1647 uint16_t hGVM = pGVM->hSelf;
1648 if (RT_LIKELY( hGVM != NIL_GVM_HANDLE
1649 && hGVM < RT_ELEMENTS(pGVMM->aHandles)))
1650 {
1651 RTPROCESS const pidSelf = RTProcSelf();
1652 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1653 if (fTakeUsedLock)
1654 {
1655 rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
1656 AssertRCReturn(rc, rc);
1657 }
1658
1659 if (RT_LIKELY( pHandle->pGVM == pGVM
1660 && pHandle->ProcId == pidSelf
1661 && RT_VALID_PTR(pHandle->pvObj)))
1662 {
1663 /*
1664 * Some more VM data consistency checks.
1665 */
1666 if (RT_LIKELY( pGVM->cCpusUnsafe == pGVM->cCpus
1667 && pGVM->hSelfUnsafe == hGVM
1668 && pGVM->pSelf == pGVM))
1669 {
1670 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1671 && pGVM->enmVMState <= VMSTATE_TERMINATED))
1672 {
1673 *ppGVMM = pGVMM;
1674 return VINF_SUCCESS;
1675 }
1676 rc = VERR_INCONSISTENT_VM_HANDLE;
1677 }
1678 else
1679 rc = VERR_INCONSISTENT_VM_HANDLE;
1680 }
1681 else
1682 rc = VERR_INVALID_VM_HANDLE;
1683
1684 if (fTakeUsedLock)
1685 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
1686 }
1687 else
1688 rc = VERR_INVALID_VM_HANDLE;
1689 }
1690 else
1691 rc = VERR_INVALID_POINTER;
1692 return rc;
1693}
1694
1695
1696/**
1697 * Validates a GVM/VM pair.
1698 *
1699 * @returns VBox status code.
1700 * @param pGVM The global (ring-0) VM structure.
1701 */
1702GVMMR0DECL(int) GVMMR0ValidateGVM(PGVM pGVM)
1703{
1704 PGVMM pGVMM;
1705 return gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
1706}
1707
1708
1709/**
1710 * Check that the given GVM and VM structures match up.
1711 *
1712 * The calling thread must be in the same process as the VM. All current lookups
1713 * are by threads inside the same process, so this will not be an issue.
1714 *
1715 * @returns VBox status code.
1716 * @param pGVM The global (ring-0) VM structure.
1717 * @param idCpu The (alleged) Virtual CPU ID of the calling EMT.
1718 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1719 * @thread EMT
1720 *
1721 * @remarks This will assert in all failure paths.
1722 */
1723static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM)
1724{
1725 /*
1726 * Check the pointers.
1727 */
1728 AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
1729 AssertReturn(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER);
1730
1731 /*
1732 * Get the pGVMM instance and check the VM handle.
1733 */
1734 PGVMM pGVMM;
1735 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1736
1737 uint16_t hGVM = pGVM->hSelf;
1738 ASMCompilerBarrier();
1739 AssertReturn( hGVM != NIL_GVM_HANDLE
1740 && hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_VM_HANDLE);
1741
1742 RTPROCESS const pidSelf = RTProcSelf();
1743 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1744 AssertReturn( pHandle->pGVM == pGVM
1745 && pHandle->ProcId == pidSelf
1746 && RT_VALID_PTR(pHandle->pvObj),
1747 VERR_INVALID_HANDLE);
1748
1749 /*
1750 * Check the EMT claim.
1751 */
1752 RTNATIVETHREAD const hAllegedEMT = RTThreadNativeSelf();
1753 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
1754 AssertReturn(pGVM->aCpus[idCpu].hEMT == hAllegedEMT, VERR_NOT_OWNER);
1755
1756 /*
1757 * Some more VM data consistency checks.
1758 */
1759 AssertReturn(pGVM->cCpusUnsafe == pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE);
1760 AssertReturn(pGVM->hSelfUnsafe == hGVM, VERR_INCONSISTENT_VM_HANDLE);
1761 AssertReturn( pGVM->enmVMState >= VMSTATE_CREATING
1762 && pGVM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE);
1763
1764 *ppGVMM = pGVMM;
1765 return VINF_SUCCESS;
1766}
1767
1768
1769/**
1770 * Validates a GVM/EMT pair.
1771 *
1772 * @returns VBox status code.
1773 * @param pGVM The global (ring-0) VM structure.
1774 * @param idCpu The Virtual CPU ID of the calling EMT.
1775 * @thread EMT(idCpu)
1776 */
1777GVMMR0DECL(int) GVMMR0ValidateGVMandEMT(PGVM pGVM, VMCPUID idCpu)
1778{
1779 PGVMM pGVMM;
1780 return gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
1781}
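
/*
 * Editorial illustration (not part of the original source): a hypothetical
 * EMT-only operation would validate the GVM/EMT pair first, the way
 * GVMMR0SchedHaltReq further down does.  No lock is needed; the calling EMT
 * itself keeps the VM alive.
 *
 *      static int exampleEmtOnlyOp(PGVM pGVM, VMCPUID idCpu)
 *      {
 *          PGVMM pGVMM;
 *          int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
 *          if (RT_SUCCESS(rc))
 *          {
 *              // idCpu is now known to be < pGVM->cCpus and owned by this thread.
 *              PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
 *              NOREF(pGVCpu); // ... per-EMT work goes here ...
 *          }
 *          return rc;
 *      }
 */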
1782
1783
1784/**
1785 * Looks up the VM belonging to the specified EMT thread.
1786 *
1787 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1788 * unnecessary kernel panics when the EMT thread hits an assertion. The
1789 * caller may or may not be an EMT thread.
1790 *
1791 * @returns Pointer to the VM on success, NULL on failure.
1792 * @param hEMT The native thread handle of the EMT.
1793 * NIL_RTNATIVETHREAD means the current thread
1794 */
1795GVMMR0DECL(PVMCC) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
1796{
1797 /*
1798 * No Assertions here as we're usually called in an AssertMsgN or
1799 * RTAssert* context.
1800 */
1801 PGVMM pGVMM = g_pGVMM;
1802 if ( !RT_VALID_PTR(pGVMM)
1803 || pGVMM->u32Magic != GVMM_MAGIC)
1804 return NULL;
1805
1806 if (hEMT == NIL_RTNATIVETHREAD)
1807 hEMT = RTThreadNativeSelf();
1808 RTPROCESS ProcId = RTProcSelf();
1809
1810 /*
1811 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
1812 */
1813/** @todo introduce some pid hash table here, please. */
1814 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1815 {
1816 if ( pGVMM->aHandles[i].iSelf == i
1817 && pGVMM->aHandles[i].ProcId == ProcId
1818 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
1819 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
1820 {
1821 if (pGVMM->aHandles[i].hEMT0 == hEMT)
1822 return pGVMM->aHandles[i].pGVM;
1823
1824            /* This is fairly safe with the current process-per-VM approach. */
1825 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1826 VMCPUID const cCpus = pGVM->cCpus;
1827 ASMCompilerBarrier();
1828 if ( cCpus < 1
1829 || cCpus > VMM_MAX_CPU_COUNT)
1830 continue;
1831 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
1832 if (pGVM->aCpus[idCpu].hEMT == hEMT)
1833 return pGVMM->aHandles[i].pGVM;
1834 }
1835 }
1836 return NULL;
1837}
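
/*
 * Editorial illustration (not part of the original source): how an assertion
 * or logging path might use the lookup above; it deliberately takes no locks.
 * The helper name is hypothetical.
 *
 *      static void exampleAssertHelper(void)
 *      {
 *          PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD); // current thread
 *          if (pVM)
 *              SUPR0Printf("VM %p state %d\n", pVM, pVM->enmVMState);
 *      }
 */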
1838
1839
1840/**
1841 * Looks up the GVMCPU belonging to the specified EMT thread.
1842 *
1843 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1844 * unnecessary kernel panics when the EMT thread hits an assertion. The
1845 * caller may or may not be an EMT thread.
1846 *
1847 * @returns Pointer to the VM on success, NULL on failure.
1848 * @param hEMT The native thread handle of the EMT.
1849 * NIL_RTNATIVETHREAD means the current thread
1850 */
1851GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByEMT(RTNATIVETHREAD hEMT)
1852{
1853 /*
1854 * No Assertions here as we're usually called in an AssertMsgN,
1855 * RTAssert*, Log and LogRel contexts.
1856 */
1857 PGVMM pGVMM = g_pGVMM;
1858 if ( !RT_VALID_PTR(pGVMM)
1859 || pGVMM->u32Magic != GVMM_MAGIC)
1860 return NULL;
1861
1862 if (hEMT == NIL_RTNATIVETHREAD)
1863 hEMT = RTThreadNativeSelf();
1864 RTPROCESS ProcId = RTProcSelf();
1865
1866 /*
1867 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
1868 */
1869/** @todo introduce some pid hash table here, please. */
1870 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1871 {
1872 if ( pGVMM->aHandles[i].iSelf == i
1873 && pGVMM->aHandles[i].ProcId == ProcId
1874 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
1875 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
1876 {
1877 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1878 if (pGVMM->aHandles[i].hEMT0 == hEMT)
1879 return &pGVM->aCpus[0];
1880
1881            /* This is fairly safe with the current process-per-VM approach. */
1882 VMCPUID const cCpus = pGVM->cCpus;
1883 ASMCompilerBarrier();
1884 ASMCompilerBarrier();
1885 if ( cCpus < 1
1886 || cCpus > VMM_MAX_CPU_COUNT)
1887 continue;
1888 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
1889 if (pGVM->aCpus[idCpu].hEMT == hEMT)
1890 return &pGVM->aCpus[idCpu];
1891 }
1892 }
1893 return NULL;
1894}
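
/*
 * Editorial illustration (not part of the original source): the per-vCPU
 * variant is handy when a log statement wants the virtual CPU id of the
 * current thread, e.g.:
 *
 *      PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
 *      SUPR0Printf("EMT%u\n", pGVCpu ? pGVCpu->idCpu : UINT32_MAX);
 */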
1895
1896
1897/**
1898 * This will wake up expired and soon-to-be-expired VMs.
1899 *
1900 * @returns Number of VMs that have been woken up.
1901 * @param pGVMM Pointer to the GVMM instance data.
1902 * @param u64Now The current time.
1903 */
1904static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
1905{
1906 /*
1907 * Skip this if we've been disabled, either because of high resolution wake-ups
1908 * or by the user.
1909 */
1910 if (!pGVMM->fDoEarlyWakeUps)
1911 return 0;
1912
1913/** @todo Rewrite this algorithm. See performance defect XYZ. */
1914
1915 /*
1916 * A cheap optimization to stop wasting so much time here on big setups.
1917 */
1918 const uint64_t uNsEarlyWakeUp2 = u64Now + pGVMM->nsEarlyWakeUp2;
1919 if ( pGVMM->cHaltedEMTs == 0
1920 || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
1921 return 0;
1922
1923 /*
1924 * Only one thread doing this at a time.
1925 */
1926 if (!ASMAtomicCmpXchgBool(&pGVMM->fDoingEarlyWakeUps, true, false))
1927 return 0;
1928
1929 /*
1930 * The first pass will wake up VMs which have actually expired
1931 * and look for VMs that should be woken up in the 2nd and 3rd passes.
1932 */
1933 const uint64_t uNsEarlyWakeUp1 = u64Now + pGVMM->nsEarlyWakeUp1;
1934 uint64_t u64Min = UINT64_MAX;
1935 unsigned cWoken = 0;
1936 unsigned cHalted = 0;
1937 unsigned cTodo2nd = 0;
1938 unsigned cTodo3rd = 0;
1939 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1940 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1941 i = pGVMM->aHandles[i].iNext)
1942 {
1943 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1944 if ( RT_VALID_PTR(pCurGVM)
1945 && pCurGVM->u32Magic == GVM_MAGIC)
1946 {
1947 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1948 {
1949 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1950 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
1951 if (u64)
1952 {
1953 if (u64 <= u64Now)
1954 {
1955 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
1956 {
1957 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
1958 AssertRC(rc);
1959 cWoken++;
1960 }
1961 }
1962 else
1963 {
1964 cHalted++;
1965 if (u64 <= uNsEarlyWakeUp1)
1966 cTodo2nd++;
1967 else if (u64 <= uNsEarlyWakeUp2)
1968 cTodo3rd++;
1969 else if (u64 < u64Min)
1970                             u64Min = u64;
1971 }
1972 }
1973 }
1974 }
1975 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
1976 }
1977
1978 if (cTodo2nd)
1979 {
1980 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1981 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
1982 i = pGVMM->aHandles[i].iNext)
1983 {
1984 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
1985 if ( RT_VALID_PTR(pCurGVM)
1986 && pCurGVM->u32Magic == GVM_MAGIC)
1987 {
1988 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1989 {
1990 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1991 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
1992 if ( u64
1993 && u64 <= uNsEarlyWakeUp1)
1994 {
1995 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
1996 {
1997 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
1998 AssertRC(rc);
1999 cWoken++;
2000 }
2001 }
2002 }
2003 }
2004 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2005 }
2006 }
2007
2008 if (cTodo3rd)
2009 {
2010 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2011 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2012 i = pGVMM->aHandles[i].iNext)
2013 {
2014 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2015 if ( RT_VALID_PTR(pCurGVM)
2016 && pCurGVM->u32Magic == GVM_MAGIC)
2017 {
2018 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2019 {
2020 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2021 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2022 if ( u64
2023 && u64 <= uNsEarlyWakeUp2)
2024 {
2025 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2026 {
2027 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2028 AssertRC(rc);
2029 cWoken++;
2030 }
2031 }
2032 }
2033 }
2034 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2035 }
2036 }
2037
2038 /*
2039 * Set the minimum value.
2040 */
2041 pGVMM->uNsNextEmtWakeup = u64Min;
2042
2043 ASMAtomicWriteBool(&pGVMM->fDoingEarlyWakeUps, false);
2044 return cWoken;
2045}
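
/*
 * Editorial illustration (not part of the original source): which pass of
 * gvmmR0SchedDoWakeUps above would wake a halted EMT, given its halt expiry
 * and the current time.  Pure classification sketch, no locking.
 *
 *      static unsigned exampleWakeUpPass(PGVMM pGVMM, uint64_t u64HaltExpire, uint64_t u64Now)
 *      {
 *          if (u64HaltExpire <= u64Now)                          return 1; // already expired
 *          if (u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)  return 2; // 1st early wake-up threshold
 *          if (u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)  return 3; // 2nd early wake-up threshold
 *          return 0;                                             // left sleeping
 *      }
 */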
2046
2047
2048/**
2049 * Halt the EMT thread.
2050 *
2051 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
2052 * VERR_INTERRUPTED if a signal was scheduled for the thread.
2053 * @param pGVM The global (ring-0) VM structure.
2054 * @param pGVCpu The global (ring-0) CPU structure of the calling
2055 * EMT.
2056 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
2057 * @thread EMT(pGVCpu).
2058 */
2059GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime)
2060{
2061 LogFlow(("GVMMR0SchedHalt: pGVM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n",
2062 pGVM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime));
2063 GVMM_CHECK_SMAP_SETUP();
2064 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2065
2066 PGVMM pGVMM;
2067 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2068
2069 pGVM->gvmm.s.StatsSched.cHaltCalls++;
2070 Assert(!pGVCpu->gvmm.s.u64HaltExpire);
2071
2072 /*
2073 * If we're doing early wake-ups, we must take the UsedList lock before we
2074 * start querying the current time.
2075 * Note! Interrupts must NOT be disabled at this point because we ask for GIP time!
2076 */
2077 bool const fDoEarlyWakeUps = pGVMM->fDoEarlyWakeUps;
2078 if (fDoEarlyWakeUps)
2079 {
2080 int rc2 = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc2);
2081 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2082 }
2083
2084 pGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
2085
2086    /* GIP hack: We may frequently be sleeping for short intervals where the
2087 difference between GIP and system time matters on systems with high resolution
2088 system time. So, convert the input from GIP to System time in that case. */
2089 Assert(ASMGetFlags() & X86_EFL_IF);
2090 const uint64_t u64NowSys = RTTimeSystemNanoTS();
2091 const uint64_t u64NowGip = RTTimeNanoTS();
2092 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2093
2094 if (fDoEarlyWakeUps)
2095 {
2096 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
2097 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2098 }
2099
2100 /*
2101 * Go to sleep if we must...
2102 * Cap the sleep time to 1 second to be on the safe side.
2103 */
2104 int rc;
2105 uint64_t cNsInterval = u64ExpireGipTime - u64NowGip;
2106 if ( u64NowGip < u64ExpireGipTime
2107 && cNsInterval >= (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
2108 ? pGVMM->nsMinSleepCompany
2109 : pGVMM->nsMinSleepAlone))
2110 {
2111 pGVM->gvmm.s.StatsSched.cHaltBlocking++;
2112 if (cNsInterval > RT_NS_1SEC)
2113 u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
2114 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
2115 ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
2116 if (fDoEarlyWakeUps)
2117 {
2118 if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
2119 pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
2120 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2121 }
2122 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2123
2124 rc = RTSemEventMultiWaitEx(pGVCpu->gvmm.s.HaltEventMulti,
2125 RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
2126 u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
2127 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2128
2129 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
2130 ASMAtomicDecU32(&pGVMM->cHaltedEMTs);
2131
2132        /* Reset the semaphore to help prevent a few false wake-ups. */
2133 if (rc == VINF_SUCCESS)
2134 {
2135 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
2136 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2137 }
2138 else if (rc == VERR_TIMEOUT)
2139 {
2140 pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
2141 rc = VINF_SUCCESS;
2142 }
2143 }
2144 else
2145 {
2146 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
2147 if (fDoEarlyWakeUps)
2148 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2149 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2150 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
2151 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2152 rc = VINF_SUCCESS;
2153 }
2154
2155 return rc;
2156}
2157
2158
2159/**
2160 * Halt the EMT thread.
2161 *
2162 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
2163 * VERR_INTERRUPTED if a signal was scheduled for the thread.
2164 * @param pGVM The global (ring-0) VM structure.
2165 * @param idCpu The Virtual CPU ID of the calling EMT.
2166 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
2167 * @thread EMT(idCpu).
2168 */
2169GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
2170{
2171 GVMM_CHECK_SMAP_SETUP();
2172 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2173 PGVMM pGVMM;
2174 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
2175 if (RT_SUCCESS(rc))
2176 {
2177 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2178 rc = GVMMR0SchedHalt(pGVM, &pGVM->aCpus[idCpu], u64ExpireGipTime);
2179 }
2180 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2181 return rc;
2182}
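
/*
 * Editorial illustration (not part of the original source): an EMT wishing to
 * block for roughly 250 microseconds would pass an absolute GIP deadline,
 * along these lines (the 250us figure is arbitrary):
 *
 *      uint64_t const u64Expire = RTTimeNanoTS() + 250000; // GIP now + 250us
 *      int rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Expire);
 *      // VINF_SUCCESS: timed out or woken up; VERR_INTERRUPTED: signal pending.
 */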
2183
2184
2185
2186/**
2187 * Worker for GVMMR0SchedWakeUp and GVMMR0SchedWakeUpAndPokeCpus that wakes up
2188 * a sleeping EMT.
2189 *
2190 * @retval VINF_SUCCESS if successfully woken up.
2191 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2192 *
2193 * @param pGVM The global (ring-0) VM structure.
2194 * @param pGVCpu The global (ring-0) VCPU structure.
2195 */
2196DECLINLINE(int) gvmmR0SchedWakeUpOne(PGVM pGVM, PGVMCPU pGVCpu)
2197{
2198 pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
2199
2200 /*
2201 * Signal the semaphore regardless of whether it's currently blocked on it.
2202 *
2203 * The reason for this is that there is absolutely no way we can be 100%
2204 * certain that it isn't *about* to go to sleep on it and just got
2205 * delayed a bit en route. So, we will always signal the semaphore when
2206 * it is flagged as halted in the VMM.
2207 */
2208/** @todo we can optimize some of that by means of the pVCpu->enmState now. */
2209 int rc;
2210 if (pGVCpu->gvmm.s.u64HaltExpire)
2211 {
2212 rc = VINF_SUCCESS;
2213 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
2214 }
2215 else
2216 {
2217 rc = VINF_GVM_NOT_BLOCKED;
2218 pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
2219 }
2220
2221 int rc2 = RTSemEventMultiSignal(pGVCpu->gvmm.s.HaltEventMulti);
2222 AssertRC(rc2);
2223
2224 return rc;
2225}
2226
2227
2228/**
2229 * Wakes up the halted EMT thread so it can service a pending request.
2230 *
2231 * @returns VBox status code.
2232 * @retval VINF_SUCCESS if successfully woken up.
2233 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2234 *
2235 * @param pGVM The global (ring-0) VM structure.
2236 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2237 * @param fTakeUsedLock Take the used lock or not
2238 * @thread Any but EMT(idCpu).
2239 */
2240GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
2241{
2242 GVMM_CHECK_SMAP_SETUP();
2243 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2244
2245 /*
2246 * Validate input and take the UsedLock.
2247 */
2248 PGVMM pGVMM;
2249 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
2250 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2251 if (RT_SUCCESS(rc))
2252 {
2253 if (idCpu < pGVM->cCpus)
2254 {
2255 /*
2256 * Do the actual job.
2257 */
2258 rc = gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2259 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2260
2261 if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps)
2262 {
2263 /*
2264 * While we're here, do a round of scheduling.
2265 */
2266 Assert(ASMGetFlags() & X86_EFL_IF);
2267 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
2268 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
2269 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2270 }
2271 }
2272 else
2273 rc = VERR_INVALID_CPU_ID;
2274
2275 if (fTakeUsedLock)
2276 {
2277 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2278 AssertRC(rc2);
2279 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2280 }
2281 }
2282
2283 LogFlow(("GVMMR0SchedWakeUpEx: returns %Rrc\n", rc));
2284 return rc;
2285}
2286
2287
2288/**
2289 * Wakes up the halted EMT thread so it can service a pending request.
2290 *
2291 * @returns VBox status code.
2292 * @retval VINF_SUCCESS if successfully woken up.
2293 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2294 *
2295 * @param pGVM The global (ring-0) VM structure.
2296 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2297 * @thread Any but EMT(idCpu).
2298 */
2299GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, VMCPUID idCpu)
2300{
2301 return GVMMR0SchedWakeUpEx(pGVM, idCpu, true /* fTakeUsedLock */);
2302}
2303
2304
2305/**
2306 * Wakes up the halted EMT thread so it can service a pending request, without
2307 * taking the used lock.
2308 *
2309 * @returns VBox status code.
2310 * @retval VINF_SUCCESS if successfully woken up.
2311 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2312 *
2313 * @param pGVM The global (ring-0) VM structure.
2314 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2315 * @thread Any but EMT(idCpu).
2316 * @deprecated Don't use in new code if possible! Use the GVM variant.
2317 */
2318GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
2319{
2320 GVMM_CHECK_SMAP_SETUP();
2321 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2322 PGVMM pGVMM;
2323 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
2324 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2325 if (RT_SUCCESS(rc))
2326 rc = GVMMR0SchedWakeUpEx(pGVM, idCpu, false /*fTakeUsedLock*/);
2327 return rc;
2328}
2329
2330
2331/**
2332 * Worker common to GVMMR0SchedPoke and GVMMR0SchedWakeUpAndPokeCpus that pokes
2333 * the Virtual CPU if it's still busy executing guest code.
2334 *
2335 * @returns VBox status code.
2336 * @retval VINF_SUCCESS if poked successfully.
2337 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2338 *
2339 * @param pGVM The global (ring-0) VM structure.
2340 * @param pVCpu The cross context virtual CPU structure.
2341 */
2342DECLINLINE(int) gvmmR0SchedPokeOne(PGVM pGVM, PVMCPUCC pVCpu)
2343{
2344 pGVM->gvmm.s.StatsSched.cPokeCalls++;
2345
2346 RTCPUID idHostCpu = pVCpu->idHostCpu;
2347 if ( idHostCpu == NIL_RTCPUID
2348 || VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_EXEC)
2349 {
2350 pGVM->gvmm.s.StatsSched.cPokeNotBusy++;
2351 return VINF_GVM_NOT_BUSY_IN_GC;
2352 }
2353
2354 /* Note: this function is not implemented on Darwin and Linux (kernel < 2.6.19) */
2355 RTMpPokeCpu(idHostCpu);
2356 return VINF_SUCCESS;
2357}
2358
2359
2360/**
2361 * Pokes an EMT if it's still busy running guest code.
2362 *
2363 * @returns VBox status code.
2364 * @retval VINF_SUCCESS if poked successfully.
2365 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2366 *
2367 * @param pGVM The global (ring-0) VM structure.
2368 * @param idCpu The ID of the virtual CPU to poke.
2369 * @param fTakeUsedLock Take the used lock or not
2370 */
2371GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
2372{
2373 /*
2374 * Validate input and take the UsedLock.
2375 */
2376 PGVMM pGVMM;
2377 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
2378 if (RT_SUCCESS(rc))
2379 {
2380 if (idCpu < pGVM->cCpus)
2381 rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2382 else
2383 rc = VERR_INVALID_CPU_ID;
2384
2385 if (fTakeUsedLock)
2386 {
2387 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2388 AssertRC(rc2);
2389 }
2390 }
2391
2392    LogFlow(("GVMMR0SchedPokeEx: returns %Rrc\n", rc));
2393 return rc;
2394}
2395
2396
2397/**
2398 * Pokes an EMT if it's still busy running guest code.
2399 *
2400 * @returns VBox status code.
2401 * @retval VINF_SUCCESS if poked successfully.
2402 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2403 *
2404 * @param pGVM The global (ring-0) VM structure.
2405 * @param idCpu The ID of the virtual CPU to poke.
2406 */
2407GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, VMCPUID idCpu)
2408{
2409 return GVMMR0SchedPokeEx(pGVM, idCpu, true /* fTakeUsedLock */);
2410}
2411
2412
2413/**
2414 * Pokes an EMT if it's still busy running guest code, without taking the used
2415 * lock.
2416 *
2417 * @returns VBox status code.
2418 * @retval VINF_SUCCESS if poked successfully.
2419 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2420 *
2421 * @param pGVM The global (ring-0) VM structure.
2422 * @param idCpu The ID of the virtual CPU to poke.
2423 *
2424 * @deprecated Don't use in new code if possible! Use the GVM variant.
2425 */
2426GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
2427{
2428 PGVMM pGVMM;
2429 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
2430 if (RT_SUCCESS(rc))
2431 {
2432 if (idCpu < pGVM->cCpus)
2433 rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2434 else
2435 rc = VERR_INVALID_CPU_ID;
2436 }
2437 return rc;
2438}
2439
2440
2441/**
2442 * Wakes up a set of halted EMT threads so they can service pending requests.
2443 *
2444 * @returns VBox status code, no informational stuff.
2445 *
2446 * @param pGVM The global (ring-0) VM structure.
2447 * @param pSleepSet The set of sleepers to wake up.
2448 * @param pPokeSet The set of CPUs to poke.
2449 */
2450GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet)
2451{
2452 AssertPtrReturn(pSleepSet, VERR_INVALID_POINTER);
2453 AssertPtrReturn(pPokeSet, VERR_INVALID_POINTER);
2454 GVMM_CHECK_SMAP_SETUP();
2455 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2456 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
2457
2458 /*
2459 * Validate input and take the UsedLock.
2460 */
2461 PGVMM pGVMM;
2462 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /* fTakeUsedLock */);
2463 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2464 if (RT_SUCCESS(rc))
2465 {
2466 rc = VINF_SUCCESS;
2467 VMCPUID idCpu = pGVM->cCpus;
2468 while (idCpu-- > 0)
2469 {
2470            /* Don't try to poke or wake up ourselves. */
2471 if (pGVM->aCpus[idCpu].hEMT == hSelf)
2472 continue;
2473
2474 /* just ignore errors for now. */
2475 if (VMCPUSET_IS_PRESENT(pSleepSet, idCpu))
2476 {
2477 gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2478 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2479 }
2480 else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu))
2481 {
2482 gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2483 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2484 }
2485 }
2486
2487 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2488 AssertRC(rc2);
2489 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2490 }
2491
2492 LogFlow(("GVMMR0SchedWakeUpAndPokeCpus: returns %Rrc\n", rc));
2493 return rc;
2494}
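
/*
 * Editorial illustration (not part of the original source): building the two
 * CPU sets for GVMMR0SchedWakeUpAndPokeCpus.  This assumes the standard
 * VMCPUSET_EMPTY / VMCPUSET_ADD helpers from VBox/vmm/vmcpuset.h.
 *
 *      VMCPUSET SleepSet, PokeSet;
 *      VMCPUSET_EMPTY(&SleepSet);
 *      VMCPUSET_EMPTY(&PokeSet);
 *      VMCPUSET_ADD(&SleepSet, 1);   // wake vCPU 1 if it is halted
 *      VMCPUSET_ADD(&PokeSet, 2);    // poke vCPU 2 if it is executing guest code
 *      int rc = GVMMR0SchedWakeUpAndPokeCpus(pGVM, &SleepSet, &PokeSet);
 */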
2495
2496
2497/**
2498 * VMMR0 request wrapper for GVMMR0SchedWakeUpAndPokeCpus.
2499 *
2500 * @returns see GVMMR0SchedWakeUpAndPokeCpus.
2501 * @param pGVM The global (ring-0) VM structure.
2502 * @param pReq Pointer to the request packet.
2503 */
2504GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq)
2505{
2506 /*
2507 * Validate input and pass it on.
2508 */
2509 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2510 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
2511
2512 return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &pReq->SleepSet, &pReq->PokeSet);
2513}
2514
2515
2516
2517/**
2518 * Poll the schedule to see if someone else should get a chance to run.
2519 *
2520 * This is a bit hackish and will not work too well if the machine is
2521 * under heavy load from non-VM processes.
2522 *
2523 * @returns VINF_SUCCESS if not yielded.
2524 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
2525 * @param pGVM The global (ring-0) VM structure.
2526 * @param idCpu The Virtual CPU ID of the calling EMT.
2527 * @param fYield Whether to yield or not.
2528 * This is for when we're spinning in the halt loop.
2529 * @thread EMT(idCpu).
2530 */
2531GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, VMCPUID idCpu, bool fYield)
2532{
2533 /*
2534 * Validate input.
2535 */
2536 PGVMM pGVMM;
2537 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
2538 if (RT_SUCCESS(rc))
2539 {
2540 /*
2541         * We currently only implement helping with wake-ups (fYield = false), so don't
2542 * bother taking the lock if gvmmR0SchedDoWakeUps is not going to do anything.
2543 */
2544 if (!fYield && pGVMM->fDoEarlyWakeUps)
2545 {
2546 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
2547 pGVM->gvmm.s.StatsSched.cPollCalls++;
2548
2549 Assert(ASMGetFlags() & X86_EFL_IF);
2550 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
2551
2552 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
2553
2554 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2555 }
2556 /*
2557 * Not quite sure what we could do here...
2558 */
2559 else if (fYield)
2560 rc = VERR_NOT_IMPLEMENTED; /** @todo implement this... */
2561 else
2562 rc = VINF_SUCCESS;
2563 }
2564
2565    LogFlow(("GVMMR0SchedPoll: returns %Rrc\n", rc));
2566 return rc;
2567}
2568
2569
2570#ifdef GVMM_SCHED_WITH_PPT
2571/**
2572 * Timer callback for the periodic preemption timer.
2573 *
2574 * @param pTimer The timer handle.
2575 * @param pvUser Pointer to the per cpu structure.
2576 * @param iTick The current tick.
2577 */
2578static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
2579{
2580 PGVMMHOSTCPU pCpu = (PGVMMHOSTCPU)pvUser;
2581 NOREF(pTimer); NOREF(iTick);
2582
2583 /*
2584 * Termination check
2585 */
2586 if (pCpu->u32Magic != GVMMHOSTCPU_MAGIC)
2587 return;
2588
2589 /*
2590     * Do the housekeeping.
2591 */
2592 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2593
2594 if (++pCpu->Ppt.iTickHistorization >= pCpu->Ppt.cTicksHistoriziationInterval)
2595 {
2596 /*
2597 * Historicize the max frequency.
2598 */
2599 uint32_t iHzHistory = ++pCpu->Ppt.iHzHistory % RT_ELEMENTS(pCpu->Ppt.aHzHistory);
2600 pCpu->Ppt.aHzHistory[iHzHistory] = pCpu->Ppt.uDesiredHz;
2601 pCpu->Ppt.iTickHistorization = 0;
2602 pCpu->Ppt.uDesiredHz = 0;
2603
2604 /*
2605         * Check whether the current timer frequency needs adjusting.
2606 */
2607 uint32_t uHistMaxHz = 0;
2608 for (uint32_t i = 0; i < RT_ELEMENTS(pCpu->Ppt.aHzHistory); i++)
2609 if (pCpu->Ppt.aHzHistory[i] > uHistMaxHz)
2610 uHistMaxHz = pCpu->Ppt.aHzHistory[i];
2611 if (uHistMaxHz == pCpu->Ppt.uTimerHz)
2612 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2613 else if (uHistMaxHz)
2614 {
2615 /*
2616 * Reprogram it.
2617 */
2618 pCpu->Ppt.cChanges++;
2619 pCpu->Ppt.iTickHistorization = 0;
2620 pCpu->Ppt.uTimerHz = uHistMaxHz;
2621 uint32_t const cNsInterval = RT_NS_1SEC / uHistMaxHz;
2622 pCpu->Ppt.cNsInterval = cNsInterval;
2623 if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2624 pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
2625 + GVMMHOSTCPU_PPT_HIST_INTERVAL_NS / 2 - 1)
2626 / cNsInterval;
2627 else
2628 pCpu->Ppt.cTicksHistoriziationInterval = 1;
2629 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2630
2631 /*SUPR0Printf("Cpu%u: change to %u Hz / %u ns\n", pCpu->idxCpuSet, uHistMaxHz, cNsInterval);*/
2632 RTTimerChangeInterval(pTimer, cNsInterval);
2633 }
2634 else
2635 {
2636 /*
2637 * Stop it.
2638 */
2639 pCpu->Ppt.fStarted = false;
2640 pCpu->Ppt.uTimerHz = 0;
2641 pCpu->Ppt.cNsInterval = 0;
2642 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2643
2644 /*SUPR0Printf("Cpu%u: stopping (%u Hz)\n", pCpu->idxCpuSet, uHistMaxHz);*/
2645 RTTimerStop(pTimer);
2646 }
2647 }
2648 else
2649 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2650}
2651#endif /* GVMM_SCHED_WITH_PPT */
2652
2653
2654/**
2655 * Updates the periodic preemption timer for the calling CPU.
2656 *
2657 * The caller must have disabled preemption!
2658 * The caller must check that the host can do high resolution timers.
2659 *
2660 * @param pGVM The global (ring-0) VM structure.
2661 * @param idHostCpu The current host CPU id.
2662 * @param uHz The desired frequency.
2663 */
2664GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(PGVM pGVM, RTCPUID idHostCpu, uint32_t uHz)
2665{
2666 NOREF(pGVM);
2667#ifdef GVMM_SCHED_WITH_PPT
2668 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2669 Assert(RTTimerCanDoHighResolution());
2670
2671 /*
2672 * Resolve the per CPU data.
2673 */
2674 uint32_t iCpu = RTMpCpuIdToSetIndex(idHostCpu);
2675 PGVMM pGVMM = g_pGVMM;
2676 if ( !RT_VALID_PTR(pGVMM)
2677 || pGVMM->u32Magic != GVMM_MAGIC)
2678 return;
2679 AssertMsgReturnVoid(iCpu < pGVMM->cHostCpus, ("iCpu=%d cHostCpus=%d\n", iCpu, pGVMM->cHostCpus));
2680 PGVMMHOSTCPU pCpu = &pGVMM->aHostCpus[iCpu];
2681 AssertMsgReturnVoid( pCpu->u32Magic == GVMMHOSTCPU_MAGIC
2682 && pCpu->idCpu == idHostCpu,
2683                         ("u32Magic=%#x idCpu=%d idHostCpu=%d\n", pCpu->u32Magic, pCpu->idCpu, idHostCpu));
2684
2685 /*
2686 * Check whether we need to do anything about the timer.
2687     * We have to be a little bit careful since we might be racing the timer
2688 * callback here.
2689 */
2690 if (uHz > 16384)
2691 uHz = 16384; /** @todo add a query method for this! */
2692 if (RT_UNLIKELY( uHz > ASMAtomicReadU32(&pCpu->Ppt.uDesiredHz)
2693 && uHz >= pCpu->Ppt.uMinHz
2694 && !pCpu->Ppt.fStarting /* solaris paranoia */))
2695 {
2696 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2697
2698 pCpu->Ppt.uDesiredHz = uHz;
2699 uint32_t cNsInterval = 0;
2700 if (!pCpu->Ppt.fStarted)
2701 {
2702 pCpu->Ppt.cStarts++;
2703 pCpu->Ppt.fStarted = true;
2704 pCpu->Ppt.fStarting = true;
2705 pCpu->Ppt.iTickHistorization = 0;
2706 pCpu->Ppt.uTimerHz = uHz;
2707 pCpu->Ppt.cNsInterval = cNsInterval = RT_NS_1SEC / uHz;
2708 if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2709 pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
2710 + GVMMHOSTCPU_PPT_HIST_INTERVAL_NS / 2 - 1)
2711 / cNsInterval;
2712 else
2713 pCpu->Ppt.cTicksHistoriziationInterval = 1;
2714 }
2715
2716 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2717
2718 if (cNsInterval)
2719 {
2720 RTTimerChangeInterval(pCpu->Ppt.pTimer, cNsInterval);
2721 int rc = RTTimerStart(pCpu->Ppt.pTimer, cNsInterval);
2722 AssertRC(rc);
2723
2724 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2725 if (RT_FAILURE(rc))
2726 pCpu->Ppt.fStarted = false;
2727 pCpu->Ppt.fStarting = false;
2728 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2729 }
2730 }
2731#else /* !GVMM_SCHED_WITH_PPT */
2732 NOREF(idHostCpu); NOREF(uHz);
2733#endif /* !GVMM_SCHED_WITH_PPT */
2734}
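
/*
 * Editorial worked example (not part of the original source): the
 * historization tick count computed above amounts to roughly 1.5 history
 * intervals worth of timer ticks.  Assuming, purely for illustration, a
 * history interval of 20 000 000 ns and a 2000 Hz timer (cNsInterval of
 * 500 000 ns):
 *
 *      cTicksHistoriziationInterval = (20000000 + 10000000 - 1) / 500000 = 59
 *
 * i.e. the Hz history would be advanced about every 59 ticks (~29.5 ms).
 */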
2735
2736
2737/**
2738 * Retrieves the GVMM statistics visible to the caller.
2739 *
2740 * @returns VBox status code.
2741 *
2742 * @param pStats Where to put the statistics.
2743 * @param pSession The current session.
2744 * @param pGVM The GVM to obtain statistics for. Optional.
2745 */
2746GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
2747{
2748 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
2749
2750 /*
2751 * Validate input.
2752 */
2753 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
2754 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
2755 pStats->cVMs = 0; /* (crash before taking the sem...) */
2756
2757 /*
2758 * Take the lock and get the VM statistics.
2759 */
2760 PGVMM pGVMM;
2761 if (pGVM)
2762 {
2763 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
2764 if (RT_FAILURE(rc))
2765 return rc;
2766 pStats->SchedVM = pGVM->gvmm.s.StatsSched;
2767 }
2768 else
2769 {
2770 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2771 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
2772
2773 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
2774 AssertRCReturn(rc, rc);
2775 }
2776
2777 /*
2778     * Enumerate the VMs and add the statistics of those visible to the caller.
2779 */
2780 pStats->cVMs = 0;
2781 pStats->cEMTs = 0;
2782 memset(&pStats->SchedSum, 0, sizeof(pStats->SchedSum));
2783
2784 for (unsigned i = pGVMM->iUsedHead;
2785 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2786 i = pGVMM->aHandles[i].iNext)
2787 {
2788 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
2789 void *pvObj = pGVMM->aHandles[i].pvObj;
2790 if ( RT_VALID_PTR(pvObj)
2791 && RT_VALID_PTR(pOtherGVM)
2792 && pOtherGVM->u32Magic == GVM_MAGIC
2793 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
2794 {
2795 pStats->cVMs++;
2796 pStats->cEMTs += pOtherGVM->cCpus;
2797
2798 pStats->SchedSum.cHaltCalls += pOtherGVM->gvmm.s.StatsSched.cHaltCalls;
2799 pStats->SchedSum.cHaltBlocking += pOtherGVM->gvmm.s.StatsSched.cHaltBlocking;
2800 pStats->SchedSum.cHaltTimeouts += pOtherGVM->gvmm.s.StatsSched.cHaltTimeouts;
2801 pStats->SchedSum.cHaltNotBlocking += pOtherGVM->gvmm.s.StatsSched.cHaltNotBlocking;
2802 pStats->SchedSum.cHaltWakeUps += pOtherGVM->gvmm.s.StatsSched.cHaltWakeUps;
2803
2804 pStats->SchedSum.cWakeUpCalls += pOtherGVM->gvmm.s.StatsSched.cWakeUpCalls;
2805 pStats->SchedSum.cWakeUpNotHalted += pOtherGVM->gvmm.s.StatsSched.cWakeUpNotHalted;
2806 pStats->SchedSum.cWakeUpWakeUps += pOtherGVM->gvmm.s.StatsSched.cWakeUpWakeUps;
2807
2808 pStats->SchedSum.cPokeCalls += pOtherGVM->gvmm.s.StatsSched.cPokeCalls;
2809 pStats->SchedSum.cPokeNotBusy += pOtherGVM->gvmm.s.StatsSched.cPokeNotBusy;
2810
2811 pStats->SchedSum.cPollCalls += pOtherGVM->gvmm.s.StatsSched.cPollCalls;
2812 pStats->SchedSum.cPollHalts += pOtherGVM->gvmm.s.StatsSched.cPollHalts;
2813 pStats->SchedSum.cPollWakeUps += pOtherGVM->gvmm.s.StatsSched.cPollWakeUps;
2814 }
2815 }
2816
2817 /*
2818 * Copy out the per host CPU statistics.
2819 */
2820 uint32_t iDstCpu = 0;
2821 uint32_t cSrcCpus = pGVMM->cHostCpus;
2822 for (uint32_t iSrcCpu = 0; iSrcCpu < cSrcCpus; iSrcCpu++)
2823 {
2824 if (pGVMM->aHostCpus[iSrcCpu].idCpu != NIL_RTCPUID)
2825 {
2826 pStats->aHostCpus[iDstCpu].idCpu = pGVMM->aHostCpus[iSrcCpu].idCpu;
2827 pStats->aHostCpus[iDstCpu].idxCpuSet = pGVMM->aHostCpus[iSrcCpu].idxCpuSet;
2828#ifdef GVMM_SCHED_WITH_PPT
2829 pStats->aHostCpus[iDstCpu].uDesiredHz = pGVMM->aHostCpus[iSrcCpu].Ppt.uDesiredHz;
2830 pStats->aHostCpus[iDstCpu].uTimerHz = pGVMM->aHostCpus[iSrcCpu].Ppt.uTimerHz;
2831 pStats->aHostCpus[iDstCpu].cChanges = pGVMM->aHostCpus[iSrcCpu].Ppt.cChanges;
2832 pStats->aHostCpus[iDstCpu].cStarts = pGVMM->aHostCpus[iSrcCpu].Ppt.cStarts;
2833#else
2834 pStats->aHostCpus[iDstCpu].uDesiredHz = 0;
2835 pStats->aHostCpus[iDstCpu].uTimerHz = 0;
2836 pStats->aHostCpus[iDstCpu].cChanges = 0;
2837 pStats->aHostCpus[iDstCpu].cStarts = 0;
2838#endif
2839 iDstCpu++;
2840 if (iDstCpu >= RT_ELEMENTS(pStats->aHostCpus))
2841 break;
2842 }
2843 }
2844 pStats->cHostCpus = iDstCpu;
2845
2846 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2847
2848 return VINF_SUCCESS;
2849}
2850
2851
2852/**
2853 * VMMR0 request wrapper for GVMMR0QueryStatistics.
2854 *
2855 * @returns see GVMMR0QueryStatistics.
2856 * @param pGVM The global (ring-0) VM structure. Optional.
2857 * @param pReq Pointer to the request packet.
2858 * @param pSession The current session.
2859 */
2860GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
2861{
2862 /*
2863 * Validate input and pass it on.
2864 */
2865 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2866 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
2867 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
2868
2869 return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM);
2870}
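
/*
 * Editorial illustration (not part of the original source): how a caller
 * might fill in the request packet checked above.  The SUPVMMR0REQHDR_MAGIC
 * header initialisation is assumed to follow the usual VMMR0 request
 * convention.
 *
 *      GVMMQUERYSTATISTICSSREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.pSession     = pSession;
 *      // ... submit through the VMMR0 request path; on success Req.Stats
 *      //     holds the per-VM, summed and per-host-CPU statistics.
 */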
2871
2872
2873/**
2874 * Resets the specified GVMM statistics.
2875 *
2876 * @returns VBox status code.
2877 *
2878 * @param pStats    Which statistics to reset, that is, non-zero fields indicate which to reset.
2879 * @param pSession The current session.
2880 * @param pGVM The GVM to reset statistics for. Optional.
2881 */
2882GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
2883{
2884 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
2885
2886 /*
2887 * Validate input.
2888 */
2889 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
2890 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
2891
2892 /*
2893 * Take the lock and get the VM statistics.
2894 */
2895 PGVMM pGVMM;
2896 if (pGVM)
2897 {
2898 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
2899 if (RT_FAILURE(rc))
2900 return rc;
2901# define MAYBE_RESET_FIELD(field) \
2902 do { if (pStats->SchedVM. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
2903 MAYBE_RESET_FIELD(cHaltCalls);
2904 MAYBE_RESET_FIELD(cHaltBlocking);
2905 MAYBE_RESET_FIELD(cHaltTimeouts);
2906 MAYBE_RESET_FIELD(cHaltNotBlocking);
2907 MAYBE_RESET_FIELD(cHaltWakeUps);
2908 MAYBE_RESET_FIELD(cWakeUpCalls);
2909 MAYBE_RESET_FIELD(cWakeUpNotHalted);
2910 MAYBE_RESET_FIELD(cWakeUpWakeUps);
2911 MAYBE_RESET_FIELD(cPokeCalls);
2912 MAYBE_RESET_FIELD(cPokeNotBusy);
2913 MAYBE_RESET_FIELD(cPollCalls);
2914 MAYBE_RESET_FIELD(cPollHalts);
2915 MAYBE_RESET_FIELD(cPollWakeUps);
2916# undef MAYBE_RESET_FIELD
2917 }
2918 else
2919 {
2920 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2921
2922 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
2923 AssertRCReturn(rc, rc);
2924 }
2925
2926 /*
2927     * Enumerate the VMs and reset the statistics of those visible to the caller.
2928 */
2929 if (!ASMMemIsZero(&pStats->SchedSum, sizeof(pStats->SchedSum)))
2930 {
2931 for (unsigned i = pGVMM->iUsedHead;
2932 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2933 i = pGVMM->aHandles[i].iNext)
2934 {
2935 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
2936 void *pvObj = pGVMM->aHandles[i].pvObj;
2937 if ( RT_VALID_PTR(pvObj)
2938 && RT_VALID_PTR(pOtherGVM)
2939 && pOtherGVM->u32Magic == GVM_MAGIC
2940 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
2941 {
2942# define MAYBE_RESET_FIELD(field) \
2943 do { if (pStats->SchedSum. field ) { pOtherGVM->gvmm.s.StatsSched. field = 0; } } while (0)
2944 MAYBE_RESET_FIELD(cHaltCalls);
2945 MAYBE_RESET_FIELD(cHaltBlocking);
2946 MAYBE_RESET_FIELD(cHaltTimeouts);
2947 MAYBE_RESET_FIELD(cHaltNotBlocking);
2948 MAYBE_RESET_FIELD(cHaltWakeUps);
2949 MAYBE_RESET_FIELD(cWakeUpCalls);
2950 MAYBE_RESET_FIELD(cWakeUpNotHalted);
2951 MAYBE_RESET_FIELD(cWakeUpWakeUps);
2952 MAYBE_RESET_FIELD(cPokeCalls);
2953 MAYBE_RESET_FIELD(cPokeNotBusy);
2954 MAYBE_RESET_FIELD(cPollCalls);
2955 MAYBE_RESET_FIELD(cPollHalts);
2956 MAYBE_RESET_FIELD(cPollWakeUps);
2957# undef MAYBE_RESET_FIELD
2958 }
2959 }
2960 }
2961
2962 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2963
2964 return VINF_SUCCESS;
2965}
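
/*
 * Editorial illustration (not part of the original source): as documented
 * above, any non-zero field in the input selects a counter to clear.  To
 * reset only the halt call counters of every VM visible to the session:
 *
 *      GVMMSTATS Stats;
 *      RT_ZERO(Stats);
 *      Stats.SchedSum.cHaltCalls = 1;   // non-zero = reset this counter
 *      int rc = GVMMR0ResetStatistics(&Stats, pSession, NULL); // NULL = all visible VMs
 */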
2966
2967
2968/**
2969 * VMMR0 request wrapper for GVMMR0ResetStatistics.
2970 *
2971 * @returns see GVMMR0ResetStatistics.
2972 * @param pGVM The global (ring-0) VM structure. Optional.
2973 * @param pReq Pointer to the request packet.
2974 * @param pSession The current session.
2975 */
2976GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
2977{
2978 /*
2979 * Validate input and pass it on.
2980 */
2981 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2982 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
2983 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
2984
2985 return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM);
2986}
2987