VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@90883

Last change on this file since 90883 was 90862, checked in by vboxsync, 4 years ago

IPRT,SUPDrv,VMM,++: Bumped major support driver version. Added RTLogSetR0ProgramStart and make the VMM use it when configuring the ring-0 loggers. Removed pfnFlush from the parameter list of RTLogCreateEx[V]. bugref:10086

1/* $Id: VMMR0.cpp 90862 2021-08-25 00:37:59Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/stdarg.h>
62#include <iprt/string.h>
63#include <iprt/thread.h>
64#include <iprt/timer.h>
65#include <iprt/time.h>
66
67#include "dtrace/VBoxVMM.h"
68
69
70#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
71# pragma intrinsic(_AddressOfReturnAddress)
72#endif
73
74#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
75# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
76#endif
77
78
79
80/*********************************************************************************************************************************
81* Defined Constants And Macros *
82*********************************************************************************************************************************/
83/** @def VMM_CHECK_SMAP_SETUP
84 * SMAP check setup. */
85/** @def VMM_CHECK_SMAP_CHECK
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
87 * it will be logged and @a a_BadExpr is executed. */
88/** @def VMM_CHECK_SMAP_CHECK2
89 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
90 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
91 * executed. */
92#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
93# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
94# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
95 do { \
96 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
97 { \
98 RTCCUINTREG fEflCheck = ASMGetFlags(); \
99 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
100 { /* likely */ } \
101 else \
102 { \
103 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
104 a_BadExpr; \
105 } \
106 } \
107 } while (0)
108# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
109 do { \
110 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
111 { \
112 RTCCUINTREG fEflCheck = ASMGetFlags(); \
113 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
114 { /* likely */ } \
115 else if (a_pGVM) \
116 { \
117 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
118 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
119 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
120 a_BadExpr; \
121 } \
122 else \
123 { \
124 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
125 a_BadExpr; \
126 } \
127 } \
128 } while (0)
129#else
130# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
131# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
132# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
133#endif
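/* Usage sketch (illustrative only, not part of the upstream file; the function
   and helper names are hypothetical): this mirrors how ModuleInit() and
   vmmR0InitVM() below employ the SMAP check macros defined above.

       static int someRing0EntryPoint(PGVM pGVM)
       {
           VMM_CHECK_SMAP_SETUP();                           // reads SUPR0GetKernelFeatures() once
           VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
           int rc = doSomeRing0Work(pGVM);                   // hypothetical work
           VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); // also records the assertion in the VM
           return rc;
       }
 */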
134
135
136/*********************************************************************************************************************************
137* Internal Functions *
138*********************************************************************************************************************************/
139RT_C_DECLS_BEGIN
140#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
141extern uint64_t __udivdi3(uint64_t, uint64_t);
142extern uint64_t __umoddi3(uint64_t, uint64_t);
143#endif
144RT_C_DECLS_END
145static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, bool fRelease);
146static FNRTLOGFLUSH vmmR0LogFlush;
147static FNRTLOGFLUSH vmmR0LogRelFlush;
148
149
150/*********************************************************************************************************************************
151* Global Variables *
152*********************************************************************************************************************************/
153/** Drag in necessary library bits.
154 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
155struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
156{
157 { (PFNRT)RTCrc32 },
158 { (PFNRT)RTOnce },
159#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
160 { (PFNRT)__udivdi3 },
161 { (PFNRT)__umoddi3 },
162#endif
163 { NULL }
164};
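/* Note (descriptive comment, not upstream text): taking the address of each
   function above forces the linker to keep those IPRT symbols in VMMR0.r0, so
   the VBoxDD*R0.r0 modules that link against us can resolve them at load time. */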
165
166#ifdef RT_OS_SOLARIS
167/* Dependency information for the native solaris loader. */
168extern "C" { char _depends_on[] = "vboxdrv"; }
169#endif
170
171
172/**
173 * Initialize the module.
174 * This is called when we're first loaded.
175 *
176 * @returns 0 on success.
177 * @returns VBox status on failure.
178 * @param hMod Image handle for use in APIs.
179 */
180DECLEXPORT(int) ModuleInit(void *hMod)
181{
182 VMM_CHECK_SMAP_SETUP();
183 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
184
185#ifdef VBOX_WITH_DTRACE_R0
186 /*
187 * The first thing to do is register the static tracepoints.
188 * (Deregistration is automatic.)
189 */
190 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
191 if (RT_FAILURE(rc2))
192 return rc2;
193#endif
194 LogFlow(("ModuleInit:\n"));
195
196#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
197 /*
198 * Display the CMOS debug code.
199 */
200 ASMOutU8(0x72, 0x03);
201 uint8_t bDebugCode = ASMInU8(0x73);
202 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
203 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
204#endif
205
206 /*
207 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
208 */
209 int rc = vmmInitFormatTypes();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = GVMMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = GMMR0Init();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221 rc = HMR0Init();
222 if (RT_SUCCESS(rc))
223 {
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225
226 PDMR0Init(hMod);
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228
229 rc = PGMRegisterStringFormatTypes();
230 if (RT_SUCCESS(rc))
231 {
232 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
233#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
234 rc = PGMR0DynMapInit();
235#endif
236 if (RT_SUCCESS(rc))
237 {
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = IntNetR0Init();
240 if (RT_SUCCESS(rc))
241 {
242#ifdef VBOX_WITH_PCI_PASSTHROUGH
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = PciRawR0Init();
245#endif
246 if (RT_SUCCESS(rc))
247 {
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = CPUMR0ModuleInit();
250 if (RT_SUCCESS(rc))
251 {
252#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
253 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
254 rc = vmmR0TripleFaultHackInit();
255 if (RT_SUCCESS(rc))
256#endif
257 {
258 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
259 if (RT_SUCCESS(rc))
260 {
261 LogFlow(("ModuleInit: returns success\n"));
262 return VINF_SUCCESS;
263 }
264 }
265
266 /*
267 * Bail out.
268 */
269#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
270 vmmR0TripleFaultHackTerm();
271#endif
272 }
273 else
274 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
275#ifdef VBOX_WITH_PCI_PASSTHROUGH
276 PciRawR0Term();
277#endif
278 }
279 else
280 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
281 IntNetR0Term();
282 }
283 else
284 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
285#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
286 PGMR0DynMapTerm();
287#endif
288 }
289 else
290 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
291 PGMDeregisterStringFormatTypes();
292 }
293 else
294 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
295 HMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
299 GMMR0Term();
300 }
301 else
302 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
303 GVMMR0Term();
304 }
305 else
306 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
307 vmmTermFormatTypes();
308 }
309 else
310 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
311
312 LogFlow(("ModuleInit: failed %Rrc\n", rc));
313 return rc;
314}
315
316
317/**
318 * Terminate the module.
319 * This is called when we're finally unloaded.
320 *
321 * @param hMod Image handle for use in APIs.
322 */
323DECLEXPORT(void) ModuleTerm(void *hMod)
324{
325 NOREF(hMod);
326 LogFlow(("ModuleTerm:\n"));
327
328 /*
329 * Terminate the CPUM module (Local APIC cleanup).
330 */
331 CPUMR0ModuleTerm();
332
333 /*
334 * Terminate the internal network service.
335 */
336 IntNetR0Term();
337
338 /*
339 * PGM (Darwin), HM and PciRaw global cleanup.
340 */
341#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
342 PGMR0DynMapTerm();
343#endif
344#ifdef VBOX_WITH_PCI_PASSTHROUGH
345 PciRawR0Term();
346#endif
347 PGMDeregisterStringFormatTypes();
348 HMR0Term();
349#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
350 vmmR0TripleFaultHackTerm();
351#endif
352
353 /*
354 * Destroy the GMM and GVMM instances.
355 */
356 GMMR0Term();
357 GVMMR0Term();
358
359 vmmTermFormatTypes();
360
361 LogFlow(("ModuleTerm: returns\n"));
362}
363
364
365/**
366 * Initializes VMM specific members when the GVM structure is created.
367 *
368 * @param pGVM The global (ring-0) VM structure.
369 */
370VMMR0_INT_DECL(void) VMMR0InitPerVMData(PGVM pGVM)
371{
372 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
373 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
374 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
375 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
376 pGVM->vmmr0.s.fCalledInitVm = false;
377
378 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
379 {
380 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
381 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
382 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
383 pGVCpu->vmmr0.s.fInHmContext = false;
384 pGVCpu->vmmr0.s.pPreemptState = NULL;
385 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
386 }
387}
388
389
390/**
391 * Helper for vmmR0InitLoggers
392 */
393static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
394 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3, uint64_t nsProgramStart)
395{
396 pR0Log->BufDesc.u32Magic = RTLOGBUFFERDESC_MAGIC;
397 pR0Log->BufDesc.uReserved = 0;
398 pR0Log->BufDesc.cbBuf = cbBuf;
399 pR0Log->BufDesc.offBuf = 0;
400 pR0Log->BufDesc.pchBuf = pchBuf;
401 pR0Log->BufDesc.pAux = &pShared->AuxDesc;
402
403 pShared->AuxDesc.fFlushedIndicator = false;
404 pShared->AuxDesc.afPadding[0] = 0;
405 pShared->AuxDesc.afPadding[1] = 0;
406 pShared->AuxDesc.afPadding[2] = 0;
407 pShared->AuxDesc.offBuf = 0;
408 pShared->pchBufR3 = pchBufR3;
409 pShared->cbBuf = cbBuf;
410
411 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
412 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
413 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
414 1 /*cBufDescs*/, &pR0Log->BufDesc, RTLOGDEST_DUMMY,
415 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
416 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
417 if (RT_SUCCESS(rc))
418 {
419 PRTLOGGER pLogger = pR0Log->pLogger;
420 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
421 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
422 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
423
424 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
425 if (RT_SUCCESS(rc))
426 {
427 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
428 RTLogSetR0ProgramStart(pLogger, nsProgramStart);
429 return VINF_SUCCESS;
430 }
431
432 RTLogDestroy(pLogger);
433 }
434 pR0Log->pLogger = NULL;
435 return rc;
436}
437
438
439/**
440 * Initializes one type of loggers for each EMT.
441 */
442static int vmmR0InitLoggers(PGVM pGVM, bool fRelease, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
443{
444 /* Allocate buffers first. */
445 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus, false /*fExecutable*/);
446 if (RT_SUCCESS(rc))
447 {
448 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
449 if (RT_SUCCESS(rc))
450 {
451 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
452 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
453
454 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
455 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
456
457 /* Initialize the per-CPU loggers. */
458 for (uint32_t i = 0; i < pGVM->cCpus; i++)
459 {
460 PGVMCPU pGVCpu = &pGVM->aCpus[i];
461 PVMMR0PERVCPULOGGER pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger;
462 PVMMR3CPULOGGER pShared = fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger;
463 rc = vmmR0InitLoggerOne(pGVCpu, fRelease, pR0Log, pShared, cbBuf, pchBuf + i * cbBuf, pchBufR3 + i * cbBuf,
464 pGVM->vmm.s.nsProgramStart);
465 if (RT_FAILURE(rc))
466 {
467 pR0Log->pLogger = NULL;
468 pShared->pchBufR3 = NIL_RTR3PTR;
469 while (i-- > 0)
470 {
471 pGVCpu = &pGVM->aCpus[i];
472 pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger;
473 pShared = fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger;
474 RTLogDestroy(pR0Log->pLogger);
475 pR0Log->pLogger = NULL;
476 pShared->pchBufR3 = NIL_RTR3PTR;
477 }
478 break;
479 }
480 }
481 if (RT_SUCCESS(rc))
482 return VINF_SUCCESS;
483
484 /* Bail out. */
485 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
486 *phMapObj = NIL_RTR0MEMOBJ;
487 }
488 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
489 *phMemObj = NIL_RTR0MEMOBJ;
490 }
491 return rc;
492}
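/* Layout sketch (descriptive comment, not upstream text): vmmR0InitLoggers()
   backs all loggers of one type (debug or release) with a single ring-0
   allocation of cCpus * cbBuf bytes, which is also mapped read-only into the
   ring-3 process so ring-3 can read the logged bytes:

       ring-0: pchBuf   -> [ EMT0 slice | EMT1 slice | ... | EMT(n-1) slice ]
       ring-3: pchBufR3 -> the same bytes, read-only

   EMT i gets the slice at offset i * cbBuf; vmmR0InitLoggerOne() then wires
   that slice up as the logger's single buffer descriptor and publishes cbBuf
   and pchBufR3 through the shared VMMR3CPULOGGER structure.
 */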
493
494
495/**
496 * Initiates the R0 driver for a particular VM instance.
497 *
498 * @returns VBox status code.
499 *
500 * @param pGVM The global (ring-0) VM structure.
501 * @param uSvnRev The SVN revision of the ring-3 part.
502 * @param uBuildType Build type indicator.
503 * @thread EMT(0)
504 */
505static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
506{
507 VMM_CHECK_SMAP_SETUP();
508 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
509
510 /*
511 * Match the SVN revisions and build type.
512 */
513 if (uSvnRev != VMMGetSvnRev())
514 {
515 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
516 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
517 return VERR_VMM_R0_VERSION_MISMATCH;
518 }
519 if (uBuildType != vmmGetBuildType())
520 {
521 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
522 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
523 return VERR_VMM_R0_VERSION_MISMATCH;
524 }
525
526 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
527 if (RT_FAILURE(rc))
528 return rc;
529
530 /* Don't allow this to be called more than once. */
531 if (!pGVM->vmmr0.s.fCalledInitVm)
532 pGVM->vmmr0.s.fCalledInitVm = true;
533 else
534 return VERR_ALREADY_INITIALIZED;
535
536 /*
537 * Create the ring-0 release loggers.
538 */
539 rc = vmmR0InitLoggers(pGVM, true /*fRelease*/, _8K, &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
540 if (RT_FAILURE(rc))
541 return rc;
542
543#ifdef LOG_ENABLED
544 /*
545 * Create debug loggers.
546 */
547 rc = vmmR0InitLoggers(pGVM, false /*fRelease*/, _64K, &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
548 if (RT_FAILURE(rc))
549 return rc;
550
551 /*
552 * Register the EMT R0 logger instance for VCPU 0.
553 */
554 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
555 if (pVCpu->vmmr0.s.Logger.pLogger)
556 {
557# if 0 /* testing of the logger. */
558 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
559 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
560 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
561 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
562
563 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
564 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
565 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
566 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
567
568 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
569 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
570 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
571 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
572
573 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
574 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
575 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
576 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
577 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
578 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
579
580 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
581 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
582
583 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
584 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
585 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
586# endif
587# ifdef VBOX_WITH_R0_LOGGING
588 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.Logger.pLogger, pGVM->pSession));
589 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
590 pVCpu->vmmr0.s.Logger.fRegistered = true;
591# endif
592 }
593#endif /* LOG_ENABLED */
594
595 /*
596 * Check if the host supports high resolution timers or not.
597 */
598 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
599 && !RTTimerCanDoHighResolution())
600 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
601
602 /*
603 * Initialize the per VM data for GVMM and GMM.
604 */
605 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
606 rc = GVMMR0InitVM(pGVM);
607 if (RT_SUCCESS(rc))
608 {
609 /*
610 * Init HM, CPUM and PGM (Darwin only).
611 */
612 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
613 rc = HMR0InitVM(pGVM);
614 if (RT_SUCCESS(rc))
615 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
616 if (RT_SUCCESS(rc))
617 {
618 rc = CPUMR0InitVM(pGVM);
619 if (RT_SUCCESS(rc))
620 {
621 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
622 rc = PGMR0InitVM(pGVM);
623 if (RT_SUCCESS(rc))
624 {
625 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
626 rc = EMR0InitVM(pGVM);
627 if (RT_SUCCESS(rc))
628 {
629 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
630#ifdef VBOX_WITH_PCI_PASSTHROUGH
631 rc = PciRawR0InitVM(pGVM);
632#endif
633 if (RT_SUCCESS(rc))
634 {
635 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
636 rc = GIMR0InitVM(pGVM);
637 if (RT_SUCCESS(rc))
638 {
639 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
640 if (RT_SUCCESS(rc))
641 {
642 GVMMR0DoneInitVM(pGVM);
643
644 /*
645 * Collect a bit of info for the VM release log.
646 */
647 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
648 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
649
650 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
651 return rc;
652 }
653
654 /* Bail out. */
655 GIMR0TermVM(pGVM);
656 }
657#ifdef VBOX_WITH_PCI_PASSTHROUGH
658 PciRawR0TermVM(pGVM);
659#endif
660 }
661 }
662 }
663 }
664 HMR0TermVM(pGVM);
665 }
666 }
667
668 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
669 return rc;
670}
671
672
673/**
674 * Does EMT specific VM initialization.
675 *
676 * @returns VBox status code.
677 * @param pGVM The ring-0 VM structure.
678 * @param idCpu The EMT that's calling.
679 */
680static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
681{
682 /* Paranoia (caller checked these already). */
683 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
684 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
685
686#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
687 /*
688 * Registration of ring 0 loggers.
689 */
690 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
691 if ( pVCpu->vmmr0.s.Logger.pLogger
692 && !pVCpu->vmmr0.s.Logger.fRegistered)
693 {
694 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
695 pVCpu->vmmr0.s.Logger.fRegistered = true;
696 }
697#endif
698
699 return VINF_SUCCESS;
700}
701
702
703
704/**
705 * Terminates the R0 bits for a particular VM instance.
706 *
707 * This is normally called by ring-3 as part of the VM termination process, but
708 * may alternatively be called during the support driver session cleanup when
709 * the VM object is destroyed (see GVMM).
710 *
711 * @returns VBox status code.
712 *
713 * @param pGVM The global (ring-0) VM structure.
714 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
715 * thread.
716 * @thread EMT(0) or session cleanup thread.
717 */
718VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
719{
720 /*
721 * Check EMT(0) claim if we're called from userland.
722 */
723 if (idCpu != NIL_VMCPUID)
724 {
725 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
726 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
727 if (RT_FAILURE(rc))
728 return rc;
729 }
730
731#ifdef VBOX_WITH_PCI_PASSTHROUGH
732 PciRawR0TermVM(pGVM);
733#endif
734
735 /*
736 * Tell GVMM what we're up to and check that we only do this once.
737 */
738 if (GVMMR0DoingTermVM(pGVM))
739 {
740 GIMR0TermVM(pGVM);
741
742 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
743 * here to make sure we don't leak any shared pages if we crash... */
744#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
745 PGMR0DynMapTermVM(pGVM);
746#endif
747 HMR0TermVM(pGVM);
748 }
749
750 /*
751 * Deregister the logger for this EMT.
752 */
753 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
754 return VINF_SUCCESS;
755}
756
757
758/**
759 * This is called at the end of gvmmR0CleanupVM().
760 *
761 * @param pGVM The global (ring-0) VM structure.
762 */
763VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
764{
765 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
766 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
767 {
768 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
769
770 /** @todo Can we busy wait here for all thread-context hooks to be
771 * deregistered before releasing (destroying) it? Only until we find a
772 * solution for not deregistering hooks every time we're leaving HMR0
773 * context. */
774 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
775
776 /* Destroy the release logger. */
777 RTLogDestroy(pGVCpu->vmmr0.s.RelLogger.pLogger);
778 pGVCpu->vmmr0.s.RelLogger.pLogger = NULL;
779 pGVCpu->vmm.s.RelLogger.pchBufR3 = NIL_RTR3PTR;
780
781 /* Destroy the regular logger. */
782 RTLogDestroy(pGVCpu->vmmr0.s.Logger.pLogger);
783 pGVCpu->vmmr0.s.Logger.pLogger = NULL;
784 pGVCpu->vmm.s.Logger.pchBufR3 = NIL_RTR3PTR;
785 }
786
787 /*
788 * Free logger buffer memory.
789 */
790 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
791 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
792 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
793 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
794
795 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
796 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
797 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
798 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
799}
800
801
802/**
803 * An interrupt or unhalt force flag is set, deal with it.
804 *
805 * @returns VINF_SUCCESS (or VINF_EM_HALT).
806 * @param pVCpu The cross context virtual CPU structure.
807 * @param uMWait Result from EMMonitorWaitIsActive().
808 * @param enmInterruptibility Guest CPU interruptibility level.
809 */
810static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
811{
812 Assert(!TRPMHasTrap(pVCpu));
813 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
814 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
815
816 /*
817 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
818 */
819 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
820 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
821 {
822 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
823 {
824 uint8_t u8Interrupt = 0;
825 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
826 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
827 if (RT_SUCCESS(rc))
828 {
829 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
830
831 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
832 AssertRCSuccess(rc);
833 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
834 return rc;
835 }
836 }
837 }
838 /*
839 * SMI is not implemented yet, at least not here.
840 */
841 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
842 {
843 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
844 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
845 return VINF_EM_HALT;
846 }
847 /*
848 * NMI.
849 */
850 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
851 {
852 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
853 {
854 /** @todo later. */
855 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
856 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
857 return VINF_EM_HALT;
858 }
859 }
860 /*
861 * Nested-guest virtual interrupt.
862 */
863 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
864 {
865 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
866 {
867 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
868 * here before injecting the virtual interrupt. See emR3ForcedActions
869 * for details. */
870 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
871 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
872 return VINF_EM_HALT;
873 }
874 }
875
876 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
877 {
878 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
879 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
880 return VINF_SUCCESS;
881 }
882 if (uMWait > 1)
883 {
884 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
885 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
886 return VINF_SUCCESS;
887 }
888
889 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
890 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
891 return VINF_EM_HALT;
892}
893
894
895/**
896 * This does one round of vmR3HaltGlobal1Halt().
897 *
898 * The rationale here is that we'll reduce latency in interrupt situations if we
899 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
900 * MWAIT), but do one round of blocking here instead and hope the interrupt is
901 * raised in the meanwhile.
902 *
903 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
904 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
905 * ring-0 call (unless we're too close to a timer event). When the interrupt
906 * wakes us up, we'll return from ring-0 and EM will by instinct do a
907 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
908 * back to VMMR0EntryFast().
909 *
910 * @returns VINF_SUCCESS or VINF_EM_HALT.
911 * @param pGVM The ring-0 VM structure.
912 * @param pGVCpu The ring-0 virtual CPU structure.
913 *
914 * @todo r=bird: All the blocking/waiting and EMT management should move out of
915 * the VM module, probably to VMM. Then this would be more weird wrt
916 * parameters and statistics.
917 */
918static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
919{
920 /*
921 * Do spin stat historization.
922 */
923 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
924 { /* likely */ }
925 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
926 {
927 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
928 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
929 }
930 else
931 {
932 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
933 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
934 }
935
936 /*
937 * Flags that make us go to ring-3.
938 */
939 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
940 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
941 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
942 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
943 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
944 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
945 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
946 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
947
948 /*
949 * Check preconditions.
950 */
951 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
952 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
953 if ( pGVCpu->vmm.s.fMayHaltInRing0
954 && !TRPMHasTrap(pGVCpu)
955 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
956 || uMWait > 1))
957 {
958 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
959 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
960 {
961 /*
962 * Interrupts pending already?
963 */
964 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
965 APICUpdatePendingInterrupts(pGVCpu);
966
967 /*
968 * Flags that wake us up from the halted state.
969 */
970 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
971 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
972
973 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
974 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
975 ASMNopPause();
976
977 /*
978 * Check out how long till the next timer event.
979 */
980 uint64_t u64Delta;
981 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
982
983 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
984 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
985 {
986 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
987 APICUpdatePendingInterrupts(pGVCpu);
988
989 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
990 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
991
992 /*
993 * Wait if there is enough time to the next timer event.
994 */
995 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
996 {
997 /* If there are few other CPU cores around, we will procrastinate a
998 little before going to sleep, hoping for some device raising an
999 interrupt or similar. Though, the best thing here would be to
1000 dynamically adjust the spin count according to its usefulness or
1001 something... */
1002 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
1003 && RTMpGetOnlineCount() >= 4)
1004 {
1005 /** @todo Figure out how we can skip this if it hasn't helped recently...
1006 * @bugref{9172#c12} */
1007 uint32_t cSpinLoops = 42;
1008 while (cSpinLoops-- > 0)
1009 {
1010 ASMNopPause();
1011 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
1012 APICUpdatePendingInterrupts(pGVCpu);
1013 ASMNopPause();
1014 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
1015 {
1016 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
1017 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1018 return VINF_EM_HALT;
1019 }
1020 ASMNopPause();
1021 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
1022 {
1023 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
1024 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1025 return VINF_EM_HALT;
1026 }
1027 ASMNopPause();
1028 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
1029 {
1030 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
1031 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1032 }
1033 ASMNopPause();
1034 }
1035 }
1036
1037 /*
1038 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
1039 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
1040 * After changing the state we must recheck the force flags of course.
1041 */
1042 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
1043 {
1044 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
1045 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
1046 {
1047 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
1048 APICUpdatePendingInterrupts(pGVCpu);
1049
1050 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
1051 {
1052 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1053 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1054 }
1055
1056 /* Okay, block! */
1057 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
1058 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
1059 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
1060 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
1061 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
1062
1063 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1064 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
1065 if ( rc == VINF_SUCCESS
1066 || rc == VERR_INTERRUPTED)
1067 {
1068 /* Keep some stats like ring-3 does. */
1069 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
1070 if (cNsOverslept > 50000)
1071 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
1072 else if (cNsOverslept < -50000)
1073 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
1074 else
1075 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
1076
1077 /*
1078 * Recheck whether we can resume execution or have to go to ring-3.
1079 */
1080 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
1081 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
1082 {
1083 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
1084 APICUpdatePendingInterrupts(pGVCpu);
1085 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
1086 {
1087 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
1088 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1089 }
1090 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
1091 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
1092 }
1093 else
1094 {
1095 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
1096 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
1097 }
1098 }
1099 else
1100 {
1101 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1102 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
1103 }
1104 }
1105 else
1106 {
1107 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1108 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1109 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
1110 }
1111 }
1112 else
1113 {
1114 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1115 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
1116 }
1117 }
1118 else
1119 {
1120 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
1121 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
1122 }
1123 }
1124 else
1125 {
1126 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1127 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
1128 }
1129 }
1130 else
1131 {
1132 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1133 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
1134 }
1135 }
1136 else
1137 {
1138 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1139 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
1140 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
1141 }
1142
1143 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1144 return VINF_EM_HALT;
1145}
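/* Flow summary (descriptive comment added for orientation, not upstream text):
   vmmR0DoHalt() above roughly does the following:
     1. historize halt statistics (cR0HaltsSucceeded vs cR0HaltsToRing3),
     2. bail to ring-3 unless fMayHaltInRing0 is set, no trap is pending, and
        the guest is interruptible (or MWAIT is active with uMWait > 1),
     3. check the VM/VMCPU force flags and any already-pending interrupts,
     4. ask TMTimerPollGIP() how long until the next timer event; if the delta
        is below cNsSpinBlockThreshold, go to ring-3 instead,
     5. optionally spin a few rounds, re-checking force flags and interrupts,
     6. switch to VMCPUSTATE_STARTED_HALTED, block in GVMMR0SchedHalt() until
        u64GipTime, then recheck and either resume (VINF_SUCCESS) or return
        VINF_EM_HALT so ring-3 handles the halt.
 */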
1146
1147
1148/**
1149 * VMM ring-0 thread-context callback.
1150 *
1151 * This does common HM state updating and calls the HM-specific thread-context
1152 * callback.
1153 *
1154 * This is used together with RTThreadCtxHookCreate() on platforms which
1155 * support it, and directly from VMMR0EmtPrepareForBlocking() and
1156 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
1157 *
1158 * @param enmEvent The thread-context event.
1159 * @param pvUser Opaque pointer to the VMCPU.
1160 *
1161 * @thread EMT(pvUser)
1162 */
1163static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
1164{
1165 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
1166
1167 switch (enmEvent)
1168 {
1169 case RTTHREADCTXEVENT_IN:
1170 {
1171 /*
1172 * Linux may call us with preemption enabled (really!) but technically we
1173 * cannot get preempted here, otherwise we end up in an infinite recursion
1174 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
1175 * ad infinitum). Let's just disable preemption for now...
1176 */
1177 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
1178 * preemption after doing the callout (one or two functions up the
1179 * call chain). */
1180 /** @todo r=ramshankar: See @bugref{5313#c30}. */
1181 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1182 RTThreadPreemptDisable(&ParanoidPreemptState);
1183
1184 /* We need to update the VCPU <-> host CPU mapping. */
1185 RTCPUID idHostCpu;
1186 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1187 pVCpu->iHostCpuSet = iHostCpuSet;
1188 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1189
1190 /* In the very unlikely event that the GIP delta for the CPU we're
1191 rescheduled onto needs calculating, try to force a return to ring-3.
1192 We unfortunately cannot do the measurements right here. */
1193 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1194 { /* likely */ }
1195 else
1196 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1197
1198 /* Invoke the HM-specific thread-context callback. */
1199 HMR0ThreadCtxCallback(enmEvent, pvUser);
1200
1201 /* Restore preemption. */
1202 RTThreadPreemptRestore(&ParanoidPreemptState);
1203 break;
1204 }
1205
1206 case RTTHREADCTXEVENT_OUT:
1207 {
1208 /* Invoke the HM-specific thread-context callback. */
1209 HMR0ThreadCtxCallback(enmEvent, pvUser);
1210
1211 /*
1212 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1213 * have the same host CPU associated with it.
1214 */
1215 pVCpu->iHostCpuSet = UINT32_MAX;
1216 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1217 break;
1218 }
1219
1220 default:
1221 /* Invoke the HM-specific thread-context callback. */
1222 HMR0ThreadCtxCallback(enmEvent, pvUser);
1223 break;
1224 }
1225}
1226
1227
1228/**
1229 * Creates thread switching hook for the current EMT thread.
1230 *
1231 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1232 * platform does not implement switcher hooks, no hooks will be created and the
1233 * member set to NIL_RTTHREADCTXHOOK.
1234 *
1235 * @returns VBox status code.
1236 * @param pVCpu The cross context virtual CPU structure.
1237 * @thread EMT(pVCpu)
1238 */
1239VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1240{
1241 VMCPU_ASSERT_EMT(pVCpu);
1242 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1243
1244#if 1 /* To disable this stuff change to zero. */
1245 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1246 if (RT_SUCCESS(rc))
1247 {
1248 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1249 return rc;
1250 }
1251#else
1252 RT_NOREF(vmmR0ThreadCtxCallback);
1253 int rc = VERR_NOT_SUPPORTED;
1254#endif
1255
1256 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1257 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1258 if (rc == VERR_NOT_SUPPORTED)
1259 return VINF_SUCCESS;
1260
1261 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1262 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1263}
1264
1265
1266/**
1267 * Destroys the thread switching hook for the specified VCPU.
1268 *
1269 * @param pVCpu The cross context virtual CPU structure.
1270 * @remarks Can be called from any thread.
1271 */
1272VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1273{
1274 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1275 AssertRC(rc);
1276 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1277}
1278
1279
1280/**
1281 * Disables the thread switching hook for this VCPU (if we got one).
1282 *
1283 * @param pVCpu The cross context virtual CPU structure.
1284 * @thread EMT(pVCpu)
1285 *
1286 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1287 * this call. This means you have to be careful with what you do!
1288 */
1289VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1290{
1291 /*
1292 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1293 * @bugref{7726#c19} explains the need for this trick:
1294 *
1295 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1296 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1297 * longjmp & normal return to ring-3, which opens a window where we may be
1298 * rescheduled without changing GVMCPUID::idHostCpu and cause confusion if
1299 * the CPU starts executing a different EMT. Both functions first disable
1300 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1301 * an opening for getting preempted.
1302 */
1303 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1304 * all the time. */
1305
1306 /*
1307 * Disable the context hook, if we got one.
1308 */
1309 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1310 {
1311 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1312 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1313 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1314 AssertRC(rc);
1315 }
1316}
1317
1318
1319/**
1320 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1321 *
1322 * @returns true if registered, false otherwise.
1323 * @param pVCpu The cross context virtual CPU structure.
1324 */
1325DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1326{
1327 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1328}
1329
1330
1331/**
1332 * Whether thread-context hooks are registered for this VCPU.
1333 *
1334 * @returns true if registered, false otherwise.
1335 * @param pVCpu The cross context virtual CPU structure.
1336 */
1337VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1338{
1339 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1340}
1341
1342
1343/**
1344 * Returns the ring-0 release logger instance.
1345 *
1346 * @returns Pointer to release logger, NULL if not configured.
1347 * @param pVCpu The cross context virtual CPU structure of the caller.
1348 * @thread EMT(pVCpu)
1349 */
1350VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1351{
1352 return pVCpu->vmmr0.s.RelLogger.pLogger;
1353}
1354
1355
1356#ifdef VBOX_WITH_STATISTICS
1357/**
1358 * Record return code statistics.
1359 * @param pVM The cross context VM structure.
1360 * @param pVCpu The cross context virtual CPU structure.
1361 * @param rc The status code.
1362 */
1363static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1364{
1365 /*
1366 * Collect statistics.
1367 */
1368 switch (rc)
1369 {
1370 case VINF_SUCCESS:
1371 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1372 break;
1373 case VINF_EM_RAW_INTERRUPT:
1374 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1375 break;
1376 case VINF_EM_RAW_INTERRUPT_HYPER:
1377 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1378 break;
1379 case VINF_EM_RAW_GUEST_TRAP:
1380 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1381 break;
1382 case VINF_EM_RAW_RING_SWITCH:
1383 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1384 break;
1385 case VINF_EM_RAW_RING_SWITCH_INT:
1386 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1387 break;
1388 case VINF_EM_RAW_STALE_SELECTOR:
1389 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1390 break;
1391 case VINF_EM_RAW_IRET_TRAP:
1392 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1393 break;
1394 case VINF_IOM_R3_IOPORT_READ:
1395 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1396 break;
1397 case VINF_IOM_R3_IOPORT_WRITE:
1398 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1399 break;
1400 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1401 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1402 break;
1403 case VINF_IOM_R3_MMIO_READ:
1404 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1405 break;
1406 case VINF_IOM_R3_MMIO_WRITE:
1407 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1408 break;
1409 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1410 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1411 break;
1412 case VINF_IOM_R3_MMIO_READ_WRITE:
1413 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1414 break;
1415 case VINF_PATM_HC_MMIO_PATCH_READ:
1416 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1417 break;
1418 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1419 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1420 break;
1421 case VINF_CPUM_R3_MSR_READ:
1422 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1423 break;
1424 case VINF_CPUM_R3_MSR_WRITE:
1425 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1426 break;
1427 case VINF_EM_RAW_EMULATE_INSTR:
1428 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1429 break;
1430 case VINF_PATCH_EMULATE_INSTR:
1431 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1432 break;
1433 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1434 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1435 break;
1436 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1437 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1438 break;
1439 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1440 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1441 break;
1442 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1443 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1444 break;
1445 case VINF_CSAM_PENDING_ACTION:
1446 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1447 break;
1448 case VINF_PGM_SYNC_CR3:
1449 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1450 break;
1451 case VINF_PATM_PATCH_INT3:
1452 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1453 break;
1454 case VINF_PATM_PATCH_TRAP_PF:
1455 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1456 break;
1457 case VINF_PATM_PATCH_TRAP_GP:
1458 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1459 break;
1460 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1461 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1462 break;
1463 case VINF_EM_RESCHEDULE_REM:
1464 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1465 break;
1466 case VINF_EM_RAW_TO_R3:
1467 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1468 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1469 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1470 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1471 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1472 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1473 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1474 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1475 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1476 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1477 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1478 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1479 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1480 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1481 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1482 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1483 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1484 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1485 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1486 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1487 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1488 else
1489 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1490 break;
1491
1492 case VINF_EM_RAW_TIMER_PENDING:
1493 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1494 break;
1495 case VINF_EM_RAW_INTERRUPT_PENDING:
1496 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1497 break;
1498 case VINF_VMM_CALL_HOST:
1499 switch (pVCpu->vmm.s.enmCallRing3Operation)
1500 {
1501 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1502 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1503 break;
1504 case VMMCALLRING3_PDM_LOCK:
1505 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1506 break;
1507 case VMMCALLRING3_PGM_POOL_GROW:
1508 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1509 break;
1510 case VMMCALLRING3_PGM_LOCK:
1511 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1512 break;
1513 case VMMCALLRING3_PGM_MAP_CHUNK:
1514 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1515 break;
1516 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1517 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1518 break;
1519 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1520 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1521 break;
1522 case VMMCALLRING3_VM_SET_ERROR:
1523 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1524 break;
1525 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1526 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1527 break;
1528 case VMMCALLRING3_VM_R0_ASSERTION:
1529 default:
1530 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1531 break;
1532 }
1533 break;
1534 case VINF_PATM_DUPLICATE_FUNCTION:
1535 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1536 break;
1537 case VINF_PGM_CHANGE_MODE:
1538 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1539 break;
1540 case VINF_PGM_POOL_FLUSH_PENDING:
1541 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1542 break;
1543 case VINF_EM_PENDING_REQUEST:
1544 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1545 break;
1546 case VINF_EM_HM_PATCH_TPR_INSTR:
1547 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1548 break;
1549 default:
1550 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1551 break;
1552 }
1553}
1554#endif /* VBOX_WITH_STATISTICS */
1555
1556
1557/**
1558 * The Ring 0 entry point, called by the fast-ioctl path.
1559 *
1560 * @param pGVM The global (ring-0) VM structure.
1561 * @param pVMIgnored The cross context VM structure. The return code is
1562 * stored in pVM->vmm.s.iLastGZRc.
1563 * @param idCpu The Virtual CPU ID of the calling EMT.
1564 * @param enmOperation Which operation to execute.
1565 * @remarks Assume called with interrupts _enabled_.
1566 */
1567VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1568{
1569 RT_NOREF(pVMIgnored);
1570
1571 /*
1572 * Validation.
1573 */
1574 if ( idCpu < pGVM->cCpus
1575 && pGVM->cCpus == pGVM->cCpusUnsafe)
1576 { /*likely*/ }
1577 else
1578 {
1579 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1580 return;
1581 }
1582
1583 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1584 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1585 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1586 && pGVCpu->hNativeThreadR0 == hNativeThread))
1587 { /* likely */ }
1588 else
1589 {
1590 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1591 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1592 return;
1593 }
1594
1595 /*
1596 * SMAP fun.
1597 */
1598 VMM_CHECK_SMAP_SETUP();
1599 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1600
1601 /*
1602 * Perform requested operation.
1603 */
1604 switch (enmOperation)
1605 {
1606 /*
1607 * Run guest code using the available hardware acceleration technology.
1608 */
1609 case VMMR0_DO_HM_RUN:
1610 {
1611 for (;;) /* hlt loop */
1612 {
1613 /*
1614 * Disable preemption.
1615 */
1616 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1617 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1618 RTThreadPreemptDisable(&PreemptState);
1619 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1620
1621 /*
1622 * Get the host CPU identifiers, make sure they are valid and that
1623 * we've got a TSC delta for the CPU.
1624 */
1625 RTCPUID idHostCpu;
1626 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1627 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1628 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1629 {
1630 pGVCpu->iHostCpuSet = iHostCpuSet;
1631 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1632
1633 /*
1634 * Update the periodic preemption timer if it's active.
1635 */
1636 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1637 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1638 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1639
1640#ifdef VMM_R0_TOUCH_FPU
1641 /*
1642 * Make sure we've got the FPU state loaded so we don't need to clear
1643 * CR0.TS and get out of sync with the host kernel when loading the guest
1644 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1645 */
1646 CPUMR0TouchHostFpu();
1647#endif
1648 int rc;
1649 bool fPreemptRestored = false;
1650 if (!HMR0SuspendPending())
1651 {
1652 /*
1653 * Enable the context switching hook.
1654 */
1655 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1656 {
1657 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1658 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1659 }
1660
1661 /*
1662 * Enter HM context.
1663 */
1664 rc = HMR0Enter(pGVCpu);
1665 if (RT_SUCCESS(rc))
1666 {
1667 pGVCpu->vmmr0.s.fInHmContext = true;
1668 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1669
1670 /*
1671 * When preemption hooks are in place, enable preemption now that
1672 * we're in HM context.
1673 */
1674 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1675 {
1676 fPreemptRestored = true;
1677 pGVCpu->vmmr0.s.pPreemptState = NULL;
1678 RTThreadPreemptRestore(&PreemptState);
1679 }
1680
1681 /*
1682 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1683 */
1684 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1685 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1686 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1687
1688 /*
1689                         * Assert sanity on the way out.  Using manual assertion code here as normal
1690                         * assertions would panic the host since we're outside the setjmp/longjmp zone.
1691 */
1692 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1693 && RT_SUCCESS_NP(rc)
1694 && rc != VINF_VMM_CALL_HOST ))
1695 {
1696 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1697 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1698 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1699 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1700 }
1701#if 0
1702 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1703 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1704 {
1705 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1706 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1707 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1708 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1709 }
1710#endif
1711
1712 pGVCpu->vmmr0.s.fInHmContext = false;
1713 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1714 }
1715 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1716
1717 /*
1718 * Invalidate the host CPU identifiers before we disable the context
1719 * hook / restore preemption.
1720 */
1721 pGVCpu->iHostCpuSet = UINT32_MAX;
1722 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1723
1724 /*
1725 * Disable context hooks. Due to unresolved cleanup issues, we
1726 * cannot leave the hooks enabled when we return to ring-3.
1727 *
1728 * Note! At the moment HM may also have disabled the hook
1729 * when we get here, but the IPRT API handles that.
1730 */
1731 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1732 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1733 }
1734 /*
1735 * The system is about to go into suspend mode; go back to ring 3.
1736 */
1737 else
1738 {
1739 pGVCpu->iHostCpuSet = UINT32_MAX;
1740 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1741 rc = VINF_EM_RAW_INTERRUPT;
1742 }
1743
1744 /** @todo When HM stops messing with the context hook state, we'll disable
1745 * preemption again before the RTThreadCtxHookDisable call. */
1746 if (!fPreemptRestored)
1747 {
1748 pGVCpu->vmmr0.s.pPreemptState = NULL;
1749 RTThreadPreemptRestore(&PreemptState);
1750 }
1751
1752 pGVCpu->vmm.s.iLastGZRc = rc;
1753
1754 /* Fire dtrace probe and collect statistics. */
1755 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1756#ifdef VBOX_WITH_STATISTICS
1757 vmmR0RecordRC(pGVM, pGVCpu, rc);
1758#endif
1759 /*
1760                     * If this is a halt, try to handle it here before going back to ring-3.
1761 */
1762 if (rc != VINF_EM_HALT)
1763 { /* we're not in a hurry for a HLT, so prefer this path */ }
1764 else
1765 {
1766 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1767 if (rc == VINF_SUCCESS)
1768 {
1769 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1770 continue;
1771 }
1772 pGVCpu->vmm.s.cR0HaltsToRing3++;
1773 }
1774 }
1775 /*
1776 * Invalid CPU set index or TSC delta in need of measuring.
1777 */
1778 else
1779 {
1780 pGVCpu->vmmr0.s.pPreemptState = NULL;
1781 pGVCpu->iHostCpuSet = UINT32_MAX;
1782 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1783 RTThreadPreemptRestore(&PreemptState);
1784 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1785 {
1786 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1787 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1788 0 /*default cTries*/);
1789 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1790 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1791 else
1792 pGVCpu->vmm.s.iLastGZRc = rc;
1793 }
1794 else
1795 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1796 }
1797 break;
1798
1799 } /* halt loop. */
1800 break;
1801 }
1802
1803#ifdef VBOX_WITH_NEM_R0
1804# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1805 case VMMR0_DO_NEM_RUN:
1806 {
1807 /*
1808 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1809 */
1810 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1811# ifdef VBOXSTRICTRC_STRICT_ENABLED
1812 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1813# else
1814 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1815# endif
1816 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1817 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1818
1819 pGVCpu->vmm.s.iLastGZRc = rc;
1820
1821 /*
1822 * Fire dtrace probe and collect statistics.
1823 */
1824 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1825# ifdef VBOX_WITH_STATISTICS
1826 vmmR0RecordRC(pGVM, pGVCpu, rc);
1827# endif
1828 break;
1829 }
1830# endif
1831#endif
1832
1833 /*
1834 * For profiling.
1835 */
1836 case VMMR0_DO_NOP:
1837 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1838 break;
1839
1840 /*
1841 * Shouldn't happen.
1842 */
1843 default:
1844 AssertMsgFailed(("%#x\n", enmOperation));
1845 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1846 break;
1847 }
1848 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1849}
1850
1851
1852/**
1853 * Validates a session or VM session argument.
1854 *
1855 * @returns true / false accordingly.
1856 * @param pGVM The global (ring-0) VM structure.
1857 * @param pClaimedSession The session claim to validate.
1858 * @param pSession The session argument.
1859 */
1860DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1861{
1862 /* This must be set! */
1863 if (!pSession)
1864 return false;
1865
1866 /* Only one out of the two. */
1867 if (pGVM && pClaimedSession)
1868 return false;
1869 if (pGVM)
1870 pClaimedSession = pGVM->pSession;
1871 return pClaimedSession == pSession;
1872}
1873
1874
1875/**
1876 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1877 * called through a longjmp so we can exit safely on failure.
1878 *
1879 * @returns VBox status code.
1880 * @param pGVM The global (ring-0) VM structure.
1881 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pGVM
1882 *                          is NULL, and may be NIL_VMCPUID if it isn't.
1883 * @param enmOperation Which operation to execute.
1884 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1885 * The support driver validates this if it's present.
1886 * @param u64Arg Some simple constant argument.
1887 * @param pSession The session of the caller.
1888 *
1889 * @remarks Assume called with interrupts _enabled_.
1890 */
1891DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1892 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1893{
1894 /*
1895 * Validate pGVM and idCpu for consistency and validity.
1896 */
1897 if (pGVM != NULL)
1898 {
1899 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1900 { /* likely */ }
1901 else
1902 {
1903 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1904 return VERR_INVALID_POINTER;
1905 }
1906
1907 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1908 { /* likely */ }
1909 else
1910 {
1911 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1912 return VERR_INVALID_PARAMETER;
1913 }
1914
1915 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1916 && pGVM->enmVMState <= VMSTATE_TERMINATED
1917 && pGVM->pSession == pSession
1918 && pGVM->pSelf == pGVM))
1919 { /* likely */ }
1920 else
1921 {
1922 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1923 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1924 return VERR_INVALID_POINTER;
1925 }
1926 }
1927 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1928 { /* likely */ }
1929 else
1930 {
1931 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1932 return VERR_INVALID_PARAMETER;
1933 }
1934
1935 /*
1936 * SMAP fun.
1937 */
1938 VMM_CHECK_SMAP_SETUP();
1939 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1940
1941 /*
1942 * Process the request.
1943 */
1944 int rc;
1945 switch (enmOperation)
1946 {
1947 /*
1948 * GVM requests
1949 */
1950 case VMMR0_DO_GVMM_CREATE_VM:
1951 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1952 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1953 else
1954 rc = VERR_INVALID_PARAMETER;
1955 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1956 break;
1957
1958 case VMMR0_DO_GVMM_DESTROY_VM:
1959 if (pReqHdr == NULL && u64Arg == 0)
1960 rc = GVMMR0DestroyVM(pGVM);
1961 else
1962 rc = VERR_INVALID_PARAMETER;
1963 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1964 break;
1965
1966 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1967 if (pGVM != NULL)
1968 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1969 else
1970 rc = VERR_INVALID_PARAMETER;
1971 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1972 break;
1973
1974 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1975 if (pGVM != NULL)
1976 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1977 else
1978 rc = VERR_INVALID_PARAMETER;
1979 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1980 break;
1981
1982 case VMMR0_DO_GVMM_SCHED_HALT:
1983 if (pReqHdr)
1984 return VERR_INVALID_PARAMETER;
1985 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1986 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1987 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1988 break;
1989
1990 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1991 if (pReqHdr || u64Arg)
1992 return VERR_INVALID_PARAMETER;
1993 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1994 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1995 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1996 break;
1997
1998 case VMMR0_DO_GVMM_SCHED_POKE:
1999 if (pReqHdr || u64Arg)
2000 return VERR_INVALID_PARAMETER;
2001 rc = GVMMR0SchedPoke(pGVM, idCpu);
2002 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2003 break;
2004
2005 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
2006 if (u64Arg)
2007 return VERR_INVALID_PARAMETER;
2008 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
2009 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2010 break;
2011
2012 case VMMR0_DO_GVMM_SCHED_POLL:
2013 if (pReqHdr || u64Arg > 1)
2014 return VERR_INVALID_PARAMETER;
2015 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
2016 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2017 break;
2018
2019 case VMMR0_DO_GVMM_QUERY_STATISTICS:
2020 if (u64Arg)
2021 return VERR_INVALID_PARAMETER;
2022 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
2023 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2024 break;
2025
2026 case VMMR0_DO_GVMM_RESET_STATISTICS:
2027 if (u64Arg)
2028 return VERR_INVALID_PARAMETER;
2029 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
2030 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2031 break;
2032
2033 /*
2034 * Initialize the R0 part of a VM instance.
2035 */
2036 case VMMR0_DO_VMMR0_INIT:
2037 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
2038 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2039 break;
2040
2041 /*
2042 * Does EMT specific ring-0 init.
2043 */
2044 case VMMR0_DO_VMMR0_INIT_EMT:
2045 rc = vmmR0InitVMEmt(pGVM, idCpu);
2046 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2047 break;
2048
2049 /*
2050 * Terminate the R0 part of a VM instance.
2051 */
2052 case VMMR0_DO_VMMR0_TERM:
2053 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
2054 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2055 break;
2056
2057 /*
2058 * Update release or debug logger instances.
2059 */
2060 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
2061 if (idCpu == NIL_VMCPUID)
2062 return VERR_INVALID_CPU_ID;
2063 if (u64Arg <= 1 && pReqHdr != NULL)
2064 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, u64Arg != 0);
2065 else
2066 return VERR_INVALID_PARAMETER;
2067 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2068 break;
2069
2070 /*
2071 * Attempt to enable hm mode and check the current setting.
2072 */
2073 case VMMR0_DO_HM_ENABLE:
2074 rc = HMR0EnableAllCpus(pGVM);
2075 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2076 break;
2077
2078 /*
2079 * Setup the hardware accelerated session.
2080 */
2081 case VMMR0_DO_HM_SETUP_VM:
2082 rc = HMR0SetupVM(pGVM);
2083 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2084 break;
2085
2086 /*
2087 * PGM wrappers.
2088 */
2089 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
2090 if (idCpu == NIL_VMCPUID)
2091 return VERR_INVALID_CPU_ID;
2092 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
2093 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2094 break;
2095
2096 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
2097 if (idCpu == NIL_VMCPUID)
2098 return VERR_INVALID_CPU_ID;
2099 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
2100 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2101 break;
2102
2103 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2104 if (idCpu == NIL_VMCPUID)
2105 return VERR_INVALID_CPU_ID;
2106 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
2107 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2108 break;
2109
2110 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
2111 if (idCpu != 0)
2112 return VERR_INVALID_CPU_ID;
2113 rc = PGMR0PhysSetupIoMmu(pGVM);
2114 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2115 break;
2116
2117 case VMMR0_DO_PGM_POOL_GROW:
2118 if (idCpu == NIL_VMCPUID)
2119 return VERR_INVALID_CPU_ID;
2120 rc = PGMR0PoolGrow(pGVM);
2121 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2122 break;
2123
2124 /*
2125 * GMM wrappers.
2126 */
2127 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2128 if (u64Arg)
2129 return VERR_INVALID_PARAMETER;
2130 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
2131 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2132 break;
2133
2134 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2135 if (u64Arg)
2136 return VERR_INVALID_PARAMETER;
2137 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
2138 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2139 break;
2140
2141 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2142 if (u64Arg)
2143 return VERR_INVALID_PARAMETER;
2144 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
2145 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2146 break;
2147
2148 case VMMR0_DO_GMM_FREE_PAGES:
2149 if (u64Arg)
2150 return VERR_INVALID_PARAMETER;
2151 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
2152 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2153 break;
2154
2155 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
2156 if (u64Arg)
2157 return VERR_INVALID_PARAMETER;
2158 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
2159 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2160 break;
2161
2162 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
2163 if (u64Arg)
2164 return VERR_INVALID_PARAMETER;
2165 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
2166 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2167 break;
2168
2169 case VMMR0_DO_GMM_QUERY_MEM_STATS:
2170 if (idCpu == NIL_VMCPUID)
2171 return VERR_INVALID_CPU_ID;
2172 if (u64Arg)
2173 return VERR_INVALID_PARAMETER;
2174 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
2175 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2176 break;
2177
2178 case VMMR0_DO_GMM_BALLOONED_PAGES:
2179 if (u64Arg)
2180 return VERR_INVALID_PARAMETER;
2181 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
2182 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2183 break;
2184
2185 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2186 if (u64Arg)
2187 return VERR_INVALID_PARAMETER;
2188 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2189 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2190 break;
2191
2192 case VMMR0_DO_GMM_SEED_CHUNK:
2193 if (pReqHdr)
2194 return VERR_INVALID_PARAMETER;
2195 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
2196 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2197 break;
2198
2199 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2200 if (idCpu == NIL_VMCPUID)
2201 return VERR_INVALID_CPU_ID;
2202 if (u64Arg)
2203 return VERR_INVALID_PARAMETER;
2204 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2205 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2206 break;
2207
2208 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2209 if (idCpu == NIL_VMCPUID)
2210 return VERR_INVALID_CPU_ID;
2211 if (u64Arg)
2212 return VERR_INVALID_PARAMETER;
2213 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2214 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2215 break;
2216
2217 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2218 if (idCpu == NIL_VMCPUID)
2219 return VERR_INVALID_CPU_ID;
2220 if ( u64Arg
2221 || pReqHdr)
2222 return VERR_INVALID_PARAMETER;
2223 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2224 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2225 break;
2226
2227#ifdef VBOX_WITH_PAGE_SHARING
2228 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2229 {
2230 if (idCpu == NIL_VMCPUID)
2231 return VERR_INVALID_CPU_ID;
2232 if ( u64Arg
2233 || pReqHdr)
2234 return VERR_INVALID_PARAMETER;
2235 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2236 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2237 break;
2238 }
2239#endif
2240
2241#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2242 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2243 if (u64Arg)
2244 return VERR_INVALID_PARAMETER;
2245 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2246 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2247 break;
2248#endif
2249
2250 case VMMR0_DO_GMM_QUERY_STATISTICS:
2251 if (u64Arg)
2252 return VERR_INVALID_PARAMETER;
2253 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2254 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2255 break;
2256
2257 case VMMR0_DO_GMM_RESET_STATISTICS:
2258 if (u64Arg)
2259 return VERR_INVALID_PARAMETER;
2260 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2261 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2262 break;
2263
2264 /*
2265 * A quick GCFGM mock-up.
2266 */
2267 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2268 case VMMR0_DO_GCFGM_SET_VALUE:
2269 case VMMR0_DO_GCFGM_QUERY_VALUE:
2270 {
2271 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2272 return VERR_INVALID_PARAMETER;
2273 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2274 if (pReq->Hdr.cbReq != sizeof(*pReq))
2275 return VERR_INVALID_PARAMETER;
2276 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2277 {
2278 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2279 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2280 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2281 }
2282 else
2283 {
2284 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2285 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2286 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2287 }
2288 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2289 break;
2290 }
2291
2292 /*
2293 * PDM Wrappers.
2294 */
2295 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2296 {
2297 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2298 return VERR_INVALID_PARAMETER;
2299 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2300 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2301 break;
2302 }
2303
2304 case VMMR0_DO_PDM_DEVICE_CREATE:
2305 {
2306 if (!pReqHdr || u64Arg || idCpu != 0)
2307 return VERR_INVALID_PARAMETER;
2308 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2309 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2310 break;
2311 }
2312
2313 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2314 {
2315 if (!pReqHdr || u64Arg)
2316 return VERR_INVALID_PARAMETER;
2317 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2318 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2319 break;
2320 }
2321
2322        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2323 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2324 {
2325 if (!pReqHdr || u64Arg || idCpu != 0)
2326 return VERR_INVALID_PARAMETER;
2327 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2328 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2329 break;
2330 }
2331
2332 /*
2333 * Requests to the internal networking service.
2334 */
2335 case VMMR0_DO_INTNET_OPEN:
2336 {
2337 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2338 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2339 return VERR_INVALID_PARAMETER;
2340 rc = IntNetR0OpenReq(pSession, pReq);
2341 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2342 break;
2343 }
2344
2345 case VMMR0_DO_INTNET_IF_CLOSE:
2346 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2347 return VERR_INVALID_PARAMETER;
2348 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2349 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2350 break;
2351
2352
2353 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2354 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2355 return VERR_INVALID_PARAMETER;
2356 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2357 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2358 break;
2359
2360 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2361 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2362 return VERR_INVALID_PARAMETER;
2363 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2364 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2365 break;
2366
2367 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2368 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2369 return VERR_INVALID_PARAMETER;
2370 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2371 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2372 break;
2373
2374 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2375 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2376 return VERR_INVALID_PARAMETER;
2377 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2378 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2379 break;
2380
2381 case VMMR0_DO_INTNET_IF_SEND:
2382 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2383 return VERR_INVALID_PARAMETER;
2384 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2385 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2386 break;
2387
2388 case VMMR0_DO_INTNET_IF_WAIT:
2389 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2390 return VERR_INVALID_PARAMETER;
2391 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2392 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2393 break;
2394
2395 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2396 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2397 return VERR_INVALID_PARAMETER;
2398 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2399 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2400 break;
2401
2402#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2403 /*
2404 * Requests to host PCI driver service.
2405 */
2406 case VMMR0_DO_PCIRAW_REQ:
2407 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2408 return VERR_INVALID_PARAMETER;
2409 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2410 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2411 break;
2412#endif
2413
2414 /*
2415 * NEM requests.
2416 */
2417#ifdef VBOX_WITH_NEM_R0
2418# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2419 case VMMR0_DO_NEM_INIT_VM:
2420 if (u64Arg || pReqHdr || idCpu != 0)
2421 return VERR_INVALID_PARAMETER;
2422 rc = NEMR0InitVM(pGVM);
2423 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2424 break;
2425
2426 case VMMR0_DO_NEM_INIT_VM_PART_2:
2427 if (u64Arg || pReqHdr || idCpu != 0)
2428 return VERR_INVALID_PARAMETER;
2429 rc = NEMR0InitVMPart2(pGVM);
2430 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2431 break;
2432
2433 case VMMR0_DO_NEM_MAP_PAGES:
2434 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2435 return VERR_INVALID_PARAMETER;
2436 rc = NEMR0MapPages(pGVM, idCpu);
2437 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2438 break;
2439
2440 case VMMR0_DO_NEM_UNMAP_PAGES:
2441 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2442 return VERR_INVALID_PARAMETER;
2443 rc = NEMR0UnmapPages(pGVM, idCpu);
2444 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2445 break;
2446
2447 case VMMR0_DO_NEM_EXPORT_STATE:
2448 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2449 return VERR_INVALID_PARAMETER;
2450 rc = NEMR0ExportState(pGVM, idCpu);
2451 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2452 break;
2453
2454 case VMMR0_DO_NEM_IMPORT_STATE:
2455 if (pReqHdr || idCpu == NIL_VMCPUID)
2456 return VERR_INVALID_PARAMETER;
2457 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2458 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2459 break;
2460
2461 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2462 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2463 return VERR_INVALID_PARAMETER;
2464 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2465 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2466 break;
2467
2468 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2469 if (pReqHdr || idCpu == NIL_VMCPUID)
2470 return VERR_INVALID_PARAMETER;
2471 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2472 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2473 break;
2474
2475 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2476 if (u64Arg || pReqHdr)
2477 return VERR_INVALID_PARAMETER;
2478 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2479 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2480 break;
2481
2482# if 1 && defined(DEBUG_bird)
2483 case VMMR0_DO_NEM_EXPERIMENT:
2484 if (pReqHdr)
2485 return VERR_INVALID_PARAMETER;
2486 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2487 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2488 break;
2489# endif
2490# endif
2491#endif
2492
2493 /*
2494 * IOM requests.
2495 */
2496 case VMMR0_DO_IOM_GROW_IO_PORTS:
2497 {
2498 if (pReqHdr || idCpu != 0)
2499 return VERR_INVALID_PARAMETER;
2500 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2501 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2502 break;
2503 }
2504
2505 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2506 {
2507 if (pReqHdr || idCpu != 0)
2508 return VERR_INVALID_PARAMETER;
2509 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2510 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2511 break;
2512 }
2513
2514 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2515 {
2516 if (pReqHdr || idCpu != 0)
2517 return VERR_INVALID_PARAMETER;
2518 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2519 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2520 break;
2521 }
2522
2523 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2524 {
2525 if (pReqHdr || idCpu != 0)
2526 return VERR_INVALID_PARAMETER;
2527 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2528 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2529 break;
2530 }
2531
2532 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2533 {
2534 if (pReqHdr || idCpu != 0)
2535 return VERR_INVALID_PARAMETER;
2536 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2537 if (RT_SUCCESS(rc))
2538 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2539 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2540 break;
2541 }
2542
2543 /*
2544 * DBGF requests.
2545 */
2546#ifdef VBOX_WITH_DBGF_TRACING
2547 case VMMR0_DO_DBGF_TRACER_CREATE:
2548 {
2549 if (!pReqHdr || u64Arg || idCpu != 0)
2550 return VERR_INVALID_PARAMETER;
2551 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2552 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2553 break;
2554 }
2555
2556 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2557 {
2558 if (!pReqHdr || u64Arg)
2559 return VERR_INVALID_PARAMETER;
2560# if 0 /** @todo */
2561 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2562# else
2563 rc = VERR_NOT_IMPLEMENTED;
2564# endif
2565 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2566 break;
2567 }
2568#endif
2569
2570 case VMMR0_DO_DBGF_BP_INIT:
2571 {
2572 if (!pReqHdr || u64Arg || idCpu != 0)
2573 return VERR_INVALID_PARAMETER;
2574 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2575 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2576 break;
2577 }
2578
2579 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2580 {
2581 if (!pReqHdr || u64Arg || idCpu != 0)
2582 return VERR_INVALID_PARAMETER;
2583 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2584 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2585 break;
2586 }
2587
2588 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2589 {
2590 if (!pReqHdr || u64Arg || idCpu != 0)
2591 return VERR_INVALID_PARAMETER;
2592 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2593 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2594 break;
2595 }
2596
2597 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2598 {
2599 if (!pReqHdr || u64Arg || idCpu != 0)
2600 return VERR_INVALID_PARAMETER;
2601 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2602 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2603 break;
2604 }
2605
2606 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2607 {
2608 if (!pReqHdr || u64Arg || idCpu != 0)
2609 return VERR_INVALID_PARAMETER;
2610 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2611 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2612 break;
2613 }
2614
2615
2616 /*
2617 * TM requests.
2618 */
2619 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2620 {
2621 if (pReqHdr || idCpu == NIL_VMCPUID)
2622 return VERR_INVALID_PARAMETER;
2623 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2624 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2625 break;
2626 }
2627
2628 /*
2629 * For profiling.
2630 */
2631 case VMMR0_DO_NOP:
2632 case VMMR0_DO_SLOW_NOP:
2633 return VINF_SUCCESS;
2634
2635 /*
2636 * For testing Ring-0 APIs invoked in this environment.
2637 */
2638 case VMMR0_DO_TESTS:
2639 /** @todo make new test */
2640 return VINF_SUCCESS;
2641
2642 default:
2643 /*
2644             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2645             * than -1, which the interrupt gate glue code might return.
2646 */
2647 Log(("operation %#x is not supported\n", enmOperation));
2648 return VERR_NOT_SUPPORTED;
2649 }
2650 return rc;
2651}
2652
2653
2654/**
2655 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2656 *
2657 * @returns VBox status code.
2658 * @param pvArgs The argument package
2659 */
2660static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2661{
2662 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2663 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2664 pGVCpu->vmmr0.s.idCpu,
2665 pGVCpu->vmmr0.s.enmOperation,
2666 pGVCpu->vmmr0.s.pReq,
2667 pGVCpu->vmmr0.s.u64Arg,
2668 pGVCpu->vmmr0.s.pSession);
2669}
2670
2671
2672/**
2673 * The Ring 0 entry point, called by the support library (SUP).
2674 *
2675 * @returns VBox status code.
2676 * @param pGVM The global (ring-0) VM structure.
2677 * @param pVM The cross context VM structure.
2678 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2679 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2680 * @param enmOperation Which operation to execute.
2681 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2682 * @param u64Arg Some simple constant argument.
2683 * @param pSession The session of the caller.
2684 * @remarks Assume called with interrupts _enabled_.
2685 */
2686VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2687 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2688{
2689 /*
2690 * Requests that should only happen on the EMT thread will be
2691 * wrapped in a setjmp so we can assert without causing trouble.
2692 */
2693 if ( pVM != NULL
2694 && pGVM != NULL
2695 && pVM == pGVM /** @todo drop pVM or pGVM */
2696 && idCpu < pGVM->cCpus
2697 && pGVM->pSession == pSession
2698 && pGVM->pSelf == pVM)
2699 {
2700 switch (enmOperation)
2701 {
2702 /* These might/will be called before VMMR3Init. */
2703 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2704 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2705 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2706 case VMMR0_DO_GMM_FREE_PAGES:
2707 case VMMR0_DO_GMM_BALLOONED_PAGES:
2708 /* On the mac we might not have a valid jmp buf, so check these as well. */
2709 case VMMR0_DO_VMMR0_INIT:
2710 case VMMR0_DO_VMMR0_TERM:
2711
2712 case VMMR0_DO_PDM_DEVICE_CREATE:
2713 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2714 case VMMR0_DO_IOM_GROW_IO_PORTS:
2715 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2716 case VMMR0_DO_DBGF_BP_INIT:
2717 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2718 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2719 {
2720 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2721 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2722 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2723 && pGVCpu->hNativeThreadR0 == hNativeThread))
2724 {
2725 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2726 break;
2727
2728 pGVCpu->vmmr0.s.pGVM = pGVM;
2729 pGVCpu->vmmr0.s.idCpu = idCpu;
2730 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2731 pGVCpu->vmmr0.s.pReq = pReq;
2732 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2733 pGVCpu->vmmr0.s.pSession = pSession;
2734 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2735 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2736 }
2737 return VERR_VM_THREAD_NOT_EMT;
2738 }
2739
2740 default:
2741 case VMMR0_DO_PGM_POOL_GROW:
2742 break;
2743 }
2744 }
2745 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2746}
2747
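/*
 * Illustrative sketch (not part of the original file): how a ring-3 caller
 * might package a request for one of the operations dispatched above, using
 * VMMR0_DO_GCFGM_QUERY_VALUE as the example.  For this operation the worker
 * requires pGVM == NULL, idCpu == NIL_VMCPUID, u64Arg == 0 and
 * Hdr.cbReq == sizeof(GCFGMVALUEREQ).  The value name is hypothetical and the
 * SUPR3CallVMMR0Ex usage is an assumption about the ring-3 side, not something
 * taken from this file.
 *
 *      GCFGMVALUEREQ Req;
 *      RT_ZERO(Req);
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.pSession     = pSession;
 *      RTStrCopy(Req.szName, sizeof(Req.szName), "SomeValue");
 *      int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE, 0, &Req.Hdr);
 *      if (RT_SUCCESS(rc))
 *          LogRel(("SomeValue=%#RX64\n", Req.u64Value));
 */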
2748
2749/**
2750 * Checks whether we've armed the ring-0 long jump machinery.
2751 *
2752 * @returns @c true / @c false
2753 * @param pVCpu The cross context virtual CPU structure.
2754 * @thread EMT
2755 * @sa VMMIsLongJumpArmed
2756 */
2757VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2758{
2759#ifdef RT_ARCH_X86
2760 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2761 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2762#else
2763 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2764 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2765#endif
2766}
2767
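/*
 * Illustrative sketch (not from the original source): ring-0 code that may
 * need to drop back to ring-3 typically guards the call with this check,
 * mirroring the inline jump-buffer checks done by the logger-flush and
 * assertion code further down in this file.
 *
 *      if (VMMR0IsLongJumpArmed(pVCpu))
 *          rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
 */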
2768
2769/**
2770 * Checks whether we've done a ring-3 long jump.
2771 *
2772 * @returns @c true / @c false
2773 * @param pVCpu The cross context virtual CPU structure.
2774 * @thread EMT
2775 */
2776VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2777{
2778 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2779}
2780
2781
2782/**
2783 * Locking helper that deals with HM context and checks if the thread can block.
2784 *
2785 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2786 * VERR_VMM_CANNOT_BLOCK if not able to block.
2787 * @param pVCpu The cross context virtual CPU structure of the calling
2788 * thread.
2789 * @param rcBusy What to return in case of a blocking problem. Will IPE
2790 * if VINF_SUCCESS and we cannot block.
2791 * @param pszCaller The caller (for logging problems).
2792 * @param pvLock The lock address (for logging problems).
2793 * @param pCtx Where to return context info for the resume call.
2794 * @thread EMT(pVCpu)
2795 */
2796VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2797 PVMMR0EMTBLOCKCTX pCtx)
2798{
2799 const char *pszMsg;
2800
2801 /*
2802 * Check that we are allowed to block.
2803 */
2804 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2805 {
2806 /*
2807         * Are we in HM context and w/o a context hook? If so, work the context hook.
2808 */
2809 if (pVCpu->idHostCpu != NIL_RTCPUID)
2810 {
2811 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2812 Assert(pVCpu->vmmr0.s.fInHmContext);
2813
2814 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2815 {
2816 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2817 if (pVCpu->vmmr0.s.pPreemptState)
2818 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2819
2820 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2821 pCtx->fWasInHmContext = true;
2822 return VINF_SUCCESS;
2823 }
2824 }
2825
2826 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2827 {
2828 /*
2829 * Not in HM context or we've got hooks, so just check that preemption
2830 * is enabled.
2831 */
2832 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2833 {
2834 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2835 pCtx->fWasInHmContext = false;
2836 return VINF_SUCCESS;
2837 }
2838 pszMsg = "Preemption is disabled!";
2839 }
2840 else
2841 pszMsg = "Preemption state w/o HM state!";
2842 }
2843 else
2844 pszMsg = "Ring-3 calls are disabled!";
2845
2846 static uint32_t volatile s_cWarnings = 0;
2847 if (++s_cWarnings < 50)
2848         SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%d\n", pszMsg, pvLock, pszCaller, rcBusy);
2849 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2850 pCtx->fWasInHmContext = false;
2851 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2852}
2853
2854
2855/**
2856 * Counterpart to VMMR0EmtPrepareToBlock.
2857 *
2858 * @param pVCpu The cross context virtual CPU structure of the calling
2859 * thread.
2860 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2861 * @thread EMT(pVCpu)
2862 */
2863VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2864{
2865 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2866 if (pCtx->fWasInHmContext)
2867 {
2868 if (pVCpu->vmmr0.s.pPreemptState)
2869 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2870
2871 pCtx->fWasInHmContext = false;
2872 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2873 }
2874 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2875}
2876
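/*
 * Illustrative usage sketch (not part of the original file): a ring-0 caller
 * that has to wait brackets the blocking call with the prepare/resume pair
 * above.  The event handle, timeout and busy status code are hypothetical.
 *
 *      VMMR0EMTBLOCKCTX Ctx;
 *      int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, __FUNCTION__, (void *)hEvent, &Ctx);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          rc = RTSemEventWait(hEvent, cMsTimeout);
 *          VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *      }
 *
 * On failure rc is the rcBusy value passed in (or VERR_VMM_CANNOT_BLOCK) and
 * the caller must not block.
 */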
2877
2878/**
2879 * Updates the EMT loggers for the VM.
2880 *
2881 * @returns VBox status code.
2882 * @param pGVM The global (ring-0) VM structure.
2883 * @param idCpu The ID of the calling EMT.
2884 * @param pReq The request data.
2885 * @param fRelease Which logger set to update.
2886 * @thread EMT(idCpu)
2887 */
2888static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, bool fRelease)
2889{
2890 /*
2891 * Check sanity. First we require EMT to be calling us.
2892 */
2893 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2894 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2895
2896 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2897 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2898 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2899
2900 /*
2901 * Adjust flags.
2902 */
2903 /* Always buffered: */
2904 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2905     /* These don't make sense at present: */
2906 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2907 /* We've traditionally skipped the group restrictions. */
2908 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2909
2910 /*
2911 * Do the updating.
2912 */
2913 int rc = VINF_SUCCESS;
2914 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2915 {
2916 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2917 PRTLOGGER pLogger = fRelease ? pGVCpu->vmmr0.s.RelLogger.pLogger : pGVCpu->vmmr0.s.Logger.pLogger;
2918 if (pLogger)
2919 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2920 }
2921
2922 return rc;
2923}
2924
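/*
 * Illustrative note (not from the original source): the request above is
 * variable sized, so a hypothetical ring-3 caller sizes the header to match
 * the checks at the top of vmmR0UpdateLoggers:
 *
 *      pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]);
 */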
2925
2926/**
2927 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
2928 */
2929static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, bool fRelease)
2930{
2931 RT_NOREF(pBufDesc, fRelease);
2932
2933 /*
2934 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
2935      * (This code is a bit paranoid.)
2936 */
2937 if (RT_VALID_PTR(pLogger))
2938 {
2939 if ( pLogger->u32Magic == RTLOGGER_MAGIC
2940 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
2941 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
2942 {
2943 if (!(pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED))
2944 {
2945 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
2946 if ( RT_VALID_PTR(pGVCpu)
2947 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
2948 {
2949 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2950 if ( hNativeSelf == pGVCpu->hEMT
2951 && RT_VALID_PTR(pGVCpu->pGVM))
2952 {
2953 /*
2954 * Check that the jump buffer is armed.
2955 */
2956#ifdef RT_ARCH_X86
2957 if ( pGVCpu->vmm.s.CallRing3JmpBufR0.eip != 0
2958 && !pGVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2959#else
2960 if ( pGVCpu->vmm.s.CallRing3JmpBufR0.rip != 0
2961 && !pGVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2962#endif
2963 {
2964 VMMRZCallRing3(pGVCpu->pGVM, pGVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, fRelease);
2965 }
2966#ifdef DEBUG
2967 else SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2968#endif
2969 }
2970#ifdef DEBUG
2971 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
2972 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf);
2973#endif
2974
2975 }
2976#ifdef DEBUG
2977 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu);
2978#endif
2979 }
2980 /* else: quiet */
2981 }
2982#ifdef DEBUG
2983 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
2984 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3);
2985#endif
2986 }
2987#ifdef DEBUG
2988 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2989#endif
2990 return true;
2991}
2992
2993
2994/**
2995 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
2996 */
2997static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
2998{
2999 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, true /*fRelease*/);
3000}
3001
3002
3003/**
3004 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3005 */
3006static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3007{
3008#ifdef LOG_ENABLED
3009 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, false /*fRelease*/);
3010#else
3011 RT_NOREF(pLogger, pBufDesc);
3012 return true;
3013#endif
3014}
3015
3016#ifdef LOG_ENABLED
3017
3018/**
3019 * Disables flushing of the ring-0 debug log.
3020 *
3021 * @param pVCpu The cross context virtual CPU structure.
3022 */
3023VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
3024{
3025 pVCpu->vmmr0.s.fLogFlushingDisabled = true;
3026 if (pVCpu->vmmr0.s.Logger.pLogger)
3027 pVCpu->vmmr0.s.Logger.pLogger->u32UserValue1 |= VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3028 if (pVCpu->vmmr0.s.RelLogger.pLogger)
3029 pVCpu->vmmr0.s.RelLogger.pLogger->u32UserValue1 |= VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3030}
3031
3032
3033/**
3034 * Enables flushing of the ring-0 debug log.
3035 *
3036 * @param pVCpu The cross context virtual CPU structure.
3037 */
3038VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
3039{
3040 pVCpu->vmmr0.s.fLogFlushingDisabled = false;
3041 if (pVCpu->vmmr0.s.Logger.pLogger)
3042 pVCpu->vmmr0.s.Logger.pLogger->u32UserValue1 &= ~VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3043 if (pVCpu->vmmr0.s.RelLogger.pLogger)
3044 pVCpu->vmmr0.s.RelLogger.pLogger->u32UserValue1 &= ~VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3045}
3046
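/*
 * Illustrative usage sketch (not part of the original file): code running in
 * a section where a 'call' back to ring-3 is not permitted brackets itself
 * with the pair above so the flush callbacks stay quiet in the meantime.
 *
 *      VMMR0LogFlushDisable(pVCpu);
 *      ... work during which no ring-3 call may be made ...
 *      VMMR0LogFlushEnable(pVCpu);
 */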
3047
3048/**
3049 * Checks if log flushing is disabled or not.
3050 *
3051 * @param pVCpu The cross context virtual CPU structure.
3052 */
3053VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
3054{
3055 return pVCpu->vmmr0.s.fLogFlushingDisabled;
3056}
3057
3058#endif /* LOG_ENABLED */
3059
3060/*
3061 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3062 */
3063DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3064{
3065#ifdef LOG_ENABLED
3066 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3067 if (pGVCpu)
3068 {
3069 PRTLOGGER pLogger = pGVCpu->vmmr0.s.Logger.pLogger;
3070 if (RT_VALID_PTR(pLogger))
3071 {
3072 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3073 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3074 {
3075 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3076 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3077 return NULL;
3078 }
3079 }
3080 }
3081#endif
3082 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3083}
3084
3085
3086/*
3087 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3088 */
3089DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3090{
3091 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3092 if (pGVCpu)
3093 {
3094 PRTLOGGER pLogger = pGVCpu->vmmr0.s.RelLogger.pLogger;
3095 if (RT_VALID_PTR(pLogger))
3096 {
3097 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3098 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3099 {
3100 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3101 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3102 return NULL;
3103 }
3104 }
3105 }
3106 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3107}
3108
3109
3110/*
3111 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3112 *
3113 * @returns true if the breakpoint should be hit, false if it should be ignored.
3114 */
3115DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3116{
3117#if 0
3118 return true;
3119#else
3120 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3121 if (pVM)
3122 {
3123 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3124
3125 if (pVCpu)
3126 {
3127# ifdef RT_ARCH_X86
3128 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
3129 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3130# else
3131 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
3132 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3133# endif
3134 {
3135 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
3136 return RT_FAILURE_NP(rc);
3137 }
3138 }
3139 }
3140# ifdef RT_OS_LINUX
3141 return true;
3142# else
3143 return false;
3144# endif
3145#endif
3146}
3147
3148
3149/*
3150 * Override this so we can push it up to ring-3.
3151 */
3152DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3153{
3154 /*
3155 * To the log.
3156 */
3157 LogAlways(("\n!!R0-Assertion Failed!!\n"
3158 "Expression: %s\n"
3159 "Location : %s(%d) %s\n",
3160 pszExpr, pszFile, uLine, pszFunction));
3161
3162 /*
3163 * To the global VMM buffer.
3164 */
3165 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3166 if (pVM)
3167 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3168 "\n!!R0-Assertion Failed!!\n"
3169 "Expression: %.*s\n"
3170 "Location : %s(%d) %s\n",
3171 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3172 pszFile, uLine, pszFunction);
3173
3174 /*
3175 * Continue the normal way.
3176 */
3177 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3178}
3179
3180
3181/**
3182 * Callback for RTLogFormatV which writes to the ring-3 log port.
3183 * See PFNLOGOUTPUT() for details.
3184 */
3185static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3186{
3187 for (size_t i = 0; i < cbChars; i++)
3188 {
3189 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3190 }
3191
3192 NOREF(pv);
3193 return cbChars;
3194}
3195
3196
3197/*
3198 * Override this so we can push it up to ring-3.
3199 */
3200DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3201{
3202 va_list vaCopy;
3203
3204 /*
3205 * Push the message to the loggers.
3206 */
3207 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3208 if (pLog)
3209 {
3210 va_copy(vaCopy, va);
3211 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3212 va_end(vaCopy);
3213 }
3214 pLog = RTLogRelGetDefaultInstance();
3215 if (pLog)
3216 {
3217 va_copy(vaCopy, va);
3218 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3219 va_end(vaCopy);
3220 }
3221
3222 /*
3223 * Push it to the global VMM buffer.
3224 */
3225 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3226 if (pVM)
3227 {
3228 va_copy(vaCopy, va);
3229 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3230 va_end(vaCopy);
3231 }
3232
3233 /*
3234 * Continue the normal way.
3235 */
3236 RTAssertMsg2V(pszFormat, va);
3237}
3238