VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@90920

Last change on this file since 90920 was 90897, checked in by vboxsync, 4 years ago

VMM: Initialize the ring-0 loggers as early as possible, so we can update their settings before doing vmmR0InitVM and similar where we'd like to have working log output. bugref:10086

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 119.5 KB
1/* $Id: VMMR0.cpp 90897 2021-08-25 20:00:41Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/stdarg.h>
62#include <iprt/string.h>
63#include <iprt/thread.h>
64#include <iprt/timer.h>
65#include <iprt/time.h>
66
67#include "dtrace/VBoxVMM.h"
68
69
70#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
71# pragma intrinsic(_AddressOfReturnAddress)
72#endif
73
74#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
75# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
76#endif
77
78
79
80/*********************************************************************************************************************************
81* Defined Constants And Macros *
82*********************************************************************************************************************************/
83/** @def VMM_CHECK_SMAP_SETUP
84 * SMAP check setup. */
85/** @def VMM_CHECK_SMAP_CHECK
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
87 * it will be logged and @a a_BadExpr is executed. */
88/** @def VMM_CHECK_SMAP_CHECK2
89 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
90 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
91 * executed. */
92#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
93# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
94# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
95 do { \
96 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
97 { \
98 RTCCUINTREG fEflCheck = ASMGetFlags(); \
99 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
100 { /* likely */ } \
101 else \
102 { \
103 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
104 a_BadExpr; \
105 } \
106 } \
107 } while (0)
108# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
109 do { \
110 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
111 { \
112 RTCCUINTREG fEflCheck = ASMGetFlags(); \
113 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
114 { /* likely */ } \
115 else if (a_pGVM) \
116 { \
117 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
118 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
119 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
120 a_BadExpr; \
121 } \
122 else \
123 { \
124 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
125 a_BadExpr; \
126 } \
127 } \
128 } while (0)
129#else
130# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
131# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
132# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
133#endif
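
/*
 * Typical usage pattern for the SMAP checks above (a sketch based on the call
 * sites later in this file; SomeR0Call is just a placeholder name):
 *
 *      VMM_CHECK_SMAP_SETUP();
 *      VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *      int rc = SomeR0Call();
 *      VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
 *
 * I.e. declare the kernel-feature flags once at the top of a ring-0 entry
 * point, then re-verify EFLAGS.AC after every call that might have cleared it.
 */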
134
135
136/*********************************************************************************************************************************
137* Internal Functions *
138*********************************************************************************************************************************/
139RT_C_DECLS_BEGIN
140#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
141extern uint64_t __udivdi3(uint64_t, uint64_t);
142extern uint64_t __umoddi3(uint64_t, uint64_t);
143#endif
144RT_C_DECLS_END
145static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, bool fRelease);
146static FNRTLOGFLUSH vmmR0LogFlush;
147static FNRTLOGFLUSH vmmR0LogRelFlush;
148
149
150/*********************************************************************************************************************************
151* Global Variables *
152*********************************************************************************************************************************/
153/** Drag in necessary library bits.
154 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
155struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
156{
157 { (PFNRT)RTCrc32 },
158 { (PFNRT)RTOnce },
159#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
160 { (PFNRT)__udivdi3 },
161 { (PFNRT)__umoddi3 },
162#endif
163 { NULL }
164};
165
166#ifdef RT_OS_SOLARIS
167/* Dependency information for the native solaris loader. */
168extern "C" { char _depends_on[] = "vboxdrv"; }
169#endif
170
171
172/**
173 * Initialize the module.
174 * This is called when we're first loaded.
175 *
176 * @returns 0 on success.
177 * @returns VBox status on failure.
178 * @param hMod Image handle for use in APIs.
179 */
180DECLEXPORT(int) ModuleInit(void *hMod)
181{
182 VMM_CHECK_SMAP_SETUP();
183 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
184
185#ifdef VBOX_WITH_DTRACE_R0
186 /*
187 * The first thing to do is register the static tracepoints.
188 * (Deregistration is automatic.)
189 */
190 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
191 if (RT_FAILURE(rc2))
192 return rc2;
193#endif
194 LogFlow(("ModuleInit:\n"));
195
196#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
197 /*
198 * Display the CMOS debug code.
199 */
200 ASMOutU8(0x72, 0x03);
201 uint8_t bDebugCode = ASMInU8(0x73);
202 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
203 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
204#endif
205
206 /*
207 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
208 */
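 /* Note: the nested ifs below tear each successfully initialized component
    down again in reverse order if a later step fails, so nothing is left
    initialized on failure. */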
209 int rc = vmmInitFormatTypes();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = GVMMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = GMMR0Init();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221 rc = HMR0Init();
222 if (RT_SUCCESS(rc))
223 {
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225
226 PDMR0Init(hMod);
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228
229 rc = PGMRegisterStringFormatTypes();
230 if (RT_SUCCESS(rc))
231 {
232 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
233#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
234 rc = PGMR0DynMapInit();
235#endif
236 if (RT_SUCCESS(rc))
237 {
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = IntNetR0Init();
240 if (RT_SUCCESS(rc))
241 {
242#ifdef VBOX_WITH_PCI_PASSTHROUGH
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = PciRawR0Init();
245#endif
246 if (RT_SUCCESS(rc))
247 {
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = CPUMR0ModuleInit();
250 if (RT_SUCCESS(rc))
251 {
252#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
253 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
254 rc = vmmR0TripleFaultHackInit();
255 if (RT_SUCCESS(rc))
256#endif
257 {
258 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
259 if (RT_SUCCESS(rc))
260 {
261 LogFlow(("ModuleInit: returns success\n"));
262 return VINF_SUCCESS;
263 }
264 }
265
266 /*
267 * Bail out.
268 */
269#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
270 vmmR0TripleFaultHackTerm();
271#endif
272 }
273 else
274 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
275#ifdef VBOX_WITH_PCI_PASSTHROUGH
276 PciRawR0Term();
277#endif
278 }
279 else
280 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
281 IntNetR0Term();
282 }
283 else
284 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
285#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
286 PGMR0DynMapTerm();
287#endif
288 }
289 else
290 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
291 PGMDeregisterStringFormatTypes();
292 }
293 else
294 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
295 HMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
299 GMMR0Term();
300 }
301 else
302 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
303 GVMMR0Term();
304 }
305 else
306 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
307 vmmTermFormatTypes();
308 }
309 else
310 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
311
312 LogFlow(("ModuleInit: failed %Rrc\n", rc));
313 return rc;
314}
315
316
317/**
318 * Terminate the module.
319 * This is called when we're finally unloaded.
320 *
321 * @param hMod Image handle for use in APIs.
322 */
323DECLEXPORT(void) ModuleTerm(void *hMod)
324{
325 NOREF(hMod);
326 LogFlow(("ModuleTerm:\n"));
327
328 /*
329 * Terminate the CPUM module (Local APIC cleanup).
330 */
331 CPUMR0ModuleTerm();
332
333 /*
334 * Terminate the internal network service.
335 */
336 IntNetR0Term();
337
338 /*
339 * PGM (Darwin), HM and PciRaw global cleanup.
340 */
341#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
342 PGMR0DynMapTerm();
343#endif
344#ifdef VBOX_WITH_PCI_PASSTHROUGH
345 PciRawR0Term();
346#endif
347 PGMDeregisterStringFormatTypes();
348 HMR0Term();
349#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
350 vmmR0TripleFaultHackTerm();
351#endif
352
353 /*
354 * Destroy the GMM and GVMM instances.
355 */
356 GMMR0Term();
357 GVMMR0Term();
358
359 vmmTermFormatTypes();
360
361 LogFlow(("ModuleTerm: returns\n"));
362}
363
364
365/**
366 * Helper for vmmR0InitLoggers
367 */
368static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
369 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
370{
371 pR0Log->BufDesc.u32Magic = RTLOGBUFFERDESC_MAGIC;
372 pR0Log->BufDesc.uReserved = 0;
373 pR0Log->BufDesc.cbBuf = cbBuf;
374 pR0Log->BufDesc.offBuf = 0;
375 pR0Log->BufDesc.pchBuf = pchBuf;
376 pR0Log->BufDesc.pAux = &pShared->AuxDesc;
377
378 pShared->AuxDesc.fFlushedIndicator = false;
379 pShared->AuxDesc.afPadding[0] = 0;
380 pShared->AuxDesc.afPadding[1] = 0;
381 pShared->AuxDesc.afPadding[2] = 0;
382 pShared->AuxDesc.offBuf = 0;
383 pShared->pchBufR3 = pchBufR3;
384 pShared->cbBuf = cbBuf;
385
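 /* Create the ring-0 logger instance itself.  It writes into the single
    buffer described above (RTLOGDEST_DUMMY, i.e. no file or console output),
    and the flush callback installed just below (vmmR0LogFlush /
    vmmR0LogRelFlush) is what handles getting the buffered text flushed. */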
386 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
387 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
388 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
389 1 /*cBufDescs*/, &pR0Log->BufDesc, RTLOGDEST_DUMMY,
390 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
391 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
392 if (RT_SUCCESS(rc))
393 {
394 PRTLOGGER pLogger = pR0Log->pLogger;
395 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
396 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
397 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
398
399 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
400 if (RT_SUCCESS(rc))
401 {
402 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
403 return VINF_SUCCESS;
404 }
405
406 RTLogDestroy(pLogger);
407 }
408 pR0Log->pLogger = NULL;
409 return rc;
410}
411
412
413/**
414 * Initializes one type of logger for each EMT.
415 */
416static int vmmR0InitLoggers(PGVM pGVM, bool fRelease, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
417{
418 /* Allocate buffers first. */
419 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus, false /*fExecutable*/);
420 if (RT_SUCCESS(rc))
421 {
422 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
423 if (RT_SUCCESS(rc))
424 {
425 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
426 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
427
428 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
429 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
430
431 /* Initialize the per-CPU loggers. */
432 for (uint32_t i = 0; i < pGVM->cCpus; i++)
433 {
434 PGVMCPU pGVCpu = &pGVM->aCpus[i];
435 PVMMR0PERVCPULOGGER pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger;
436 PVMMR3CPULOGGER pShared = fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger;
437 rc = vmmR0InitLoggerOne(pGVCpu, fRelease, pR0Log, pShared, cbBuf, pchBuf + i * cbBuf, pchBufR3 + i * cbBuf);
438 if (RT_FAILURE(rc))
439 {
440 pR0Log->pLogger = NULL;
441 pShared->pchBufR3 = NIL_RTR3PTR;
442 while (i-- > 0)
443 {
444 pGVCpu = &pGVM->aCpus[i];
445 pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger;
446 pShared = fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger;
447 RTLogDestroy(pR0Log->pLogger);
448 pR0Log->pLogger = NULL;
449 pShared->pchBufR3 = NIL_RTR3PTR;
450 }
451 break;
452 }
453 }
454 if (RT_SUCCESS(rc))
455 return VINF_SUCCESS;
456
457 /* Bail out. */
458 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
459 *phMapObj = NIL_RTR0MEMOBJ;
460 }
461 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
462 *phMemObj = NIL_RTR0MEMOBJ;
463 }
464 return rc;
465}
466
467
468/**
469 * Initializes VMM specific members when the GVM structure is created,
470 * allocating loggers and stuff.
471 *
472 * The loggers are allocated here so that we can update their settings before
473 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
474 *
475 * @returns VBox status code.
476 * @param pGVM The global (ring-0) VM structure.
477 */
478VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
479{
480 /*
481 * Initialize all members first.
482 */
483 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
484 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
485 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
486 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
487 pGVM->vmmr0.s.fCalledInitVm = false;
488
489 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
490 {
491 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
492 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
493 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
494 pGVCpu->vmmr0.s.fInHmContext = false;
495 pGVCpu->vmmr0.s.pPreemptState = NULL;
496 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
497 }
498
499 /*
500 * Create the ring-0 release loggers.
501 */
502 int rc = vmmR0InitLoggers(pGVM, true /*fRelease*/, _8K,
503 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
504
505#ifdef LOG_ENABLED
506 /*
507 * Create debug loggers.
508 */
509 if (RT_SUCCESS(rc))
510 rc = vmmR0InitLoggers(pGVM, false /*fRelease*/, _64K, &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
511#endif
512
513 return rc;
514}
515
516
517/**
518 * Initiates the R0 driver for a particular VM instance.
519 *
520 * @returns VBox status code.
521 *
522 * @param pGVM The global (ring-0) VM structure.
523 * @param uSvnRev The SVN revision of the ring-3 part.
524 * @param uBuildType Build type indicator.
525 * @thread EMT(0)
526 */
527static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
528{
529 VMM_CHECK_SMAP_SETUP();
530 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
531
532 /*
533 * Match the SVN revisions and build type.
534 */
535 if (uSvnRev != VMMGetSvnRev())
536 {
537 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
538 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
539 return VERR_VMM_R0_VERSION_MISMATCH;
540 }
541 if (uBuildType != vmmGetBuildType())
542 {
543 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
544 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
545 return VERR_VMM_R0_VERSION_MISMATCH;
546 }
547
548 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
549 if (RT_FAILURE(rc))
550 return rc;
551
552 /* Don't allow this to be called more than once. */
553 if (!pGVM->vmmr0.s.fCalledInitVm)
554 pGVM->vmmr0.s.fCalledInitVm = true;
555 else
556 return VERR_ALREADY_INITIALIZED;
557
558#ifdef LOG_ENABLED
559
560 /*
561 * Register the EMT R0 logger instance for VCPU 0.
562 */
563 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
564 if (pVCpu->vmmr0.s.Logger.pLogger)
565 {
566# if 0 /* testing of the logger. */
567 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
568 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
569 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
570 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
571
572 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
573 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
574 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
575 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
576
577 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
578 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
579 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
580 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
581
582 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
583 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
584 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
585 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
586 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
587 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
588
589 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
590 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
591
592 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
593 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
594 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
595# endif
596# ifdef VBOX_WITH_R0_LOGGING
597 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.Logger.pLogger, pGVM->pSession));
598 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
599 pVCpu->vmmr0.s.Logger.fRegistered = true;
600# endif
601 }
602#endif /* LOG_ENABLED */
603
604 /*
605 * Check if the host supports high resolution timers or not.
606 */
607 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
608 && !RTTimerCanDoHighResolution())
609 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
610
611 /*
612 * Initialize the per VM data for GVMM and GMM.
613 */
614 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
615 rc = GVMMR0InitVM(pGVM);
616 if (RT_SUCCESS(rc))
617 {
618 /*
619 * Init HM, CPUM and PGM (Darwin only).
620 */
621 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
622 rc = HMR0InitVM(pGVM);
623 if (RT_SUCCESS(rc))
624 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
625 if (RT_SUCCESS(rc))
626 {
627 rc = CPUMR0InitVM(pGVM);
628 if (RT_SUCCESS(rc))
629 {
630 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
631 rc = PGMR0InitVM(pGVM);
632 if (RT_SUCCESS(rc))
633 {
634 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
635 rc = EMR0InitVM(pGVM);
636 if (RT_SUCCESS(rc))
637 {
638 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
639#ifdef VBOX_WITH_PCI_PASSTHROUGH
640 rc = PciRawR0InitVM(pGVM);
641#endif
642 if (RT_SUCCESS(rc))
643 {
644 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
645 rc = GIMR0InitVM(pGVM);
646 if (RT_SUCCESS(rc))
647 {
648 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
649 if (RT_SUCCESS(rc))
650 {
651 GVMMR0DoneInitVM(pGVM);
652
653 /*
654 * Collect a bit of info for the VM release log.
655 */
656 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
657 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
658
659 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
660 return rc;
661 }
662
663 /* Bail out. */
664 GIMR0TermVM(pGVM);
665 }
666#ifdef VBOX_WITH_PCI_PASSTHROUGH
667 PciRawR0TermVM(pGVM);
668#endif
669 }
670 }
671 }
672 }
673 HMR0TermVM(pGVM);
674 }
675 }
676
677 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
678 return rc;
679}
680
681
682/**
683 * Does EMT specific VM initialization.
684 *
685 * @returns VBox status code.
686 * @param pGVM The ring-0 VM structure.
687 * @param idCpu The EMT that's calling.
688 */
689static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
690{
691 /* Paranoia (caller checked these already). */
692 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
693 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
694
695#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
696 /*
697 * Registration of ring 0 loggers.
698 */
699 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
700 if ( pVCpu->vmmr0.s.Logger.pLogger
701 && !pVCpu->vmmr0.s.Logger.fRegistered)
702 {
703 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
704 pVCpu->vmmr0.s.Logger.fRegistered = true;
705 }
706#endif
707
708 return VINF_SUCCESS;
709}
710
711
712
713/**
714 * Terminates the R0 bits for a particular VM instance.
715 *
716 * This is normally called by ring-3 as part of the VM termination process, but
717 * may alternatively be called during the support driver session cleanup when
718 * the VM object is destroyed (see GVMM).
719 *
720 * @returns VBox status code.
721 *
722 * @param pGVM The global (ring-0) VM structure.
723 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
724 * thread.
725 * @thread EMT(0) or session clean up thread.
726 */
727VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
728{
729 /*
730 * Check EMT(0) claim if we're called from userland.
731 */
732 if (idCpu != NIL_VMCPUID)
733 {
734 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
735 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
736 if (RT_FAILURE(rc))
737 return rc;
738 }
739
740#ifdef VBOX_WITH_PCI_PASSTHROUGH
741 PciRawR0TermVM(pGVM);
742#endif
743
744 /*
745 * Tell GVMM what we're up to and check that we only do this once.
746 */
747 if (GVMMR0DoingTermVM(pGVM))
748 {
749 GIMR0TermVM(pGVM);
750
751 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
752 * here to make sure we don't leak any shared pages if we crash... */
753#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
754 PGMR0DynMapTermVM(pGVM);
755#endif
756 HMR0TermVM(pGVM);
757 }
758
759 /*
760 * Deregister the logger for this EMT.
761 */
762 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
763 return VINF_SUCCESS;
764}
765
766
767/**
768 * This is called at the end of gvmmR0CleanupVM().
769 *
770 * @param pGVM The global (ring-0) VM structure.
771 */
772VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
773{
774 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
775 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
776 {
777 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
778
779 /** @todo Can we busy wait here for all thread-context hooks to be
780 * deregistered before releasing (destroying) it? Only until we find a
781 * solution for not deregistering hooks every time we're leaving HMR0
782 * context. */
783 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
784
785 /* Destroy the release logger. */
786 RTLogDestroy(pGVCpu->vmmr0.s.RelLogger.pLogger);
787 pGVCpu->vmmr0.s.RelLogger.pLogger = NULL;
788 pGVCpu->vmm.s.RelLogger.pchBufR3 = NIL_RTR3PTR;
789
790 /* Destroy the regular logger. */
791 RTLogDestroy(pGVCpu->vmmr0.s.Logger.pLogger);
792 pGVCpu->vmmr0.s.Logger.pLogger = NULL;
793 pGVCpu->vmm.s.Logger.pchBufR3 = NIL_RTR3PTR;
794 }
795
796 /*
797 * Free logger buffer memory.
798 */
799 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
800 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
801 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
802 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
803
804 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
805 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
806 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
807 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
808}
809
810
811/**
812 * An interrupt or unhalt force flag is set, deal with it.
813 *
814 * @returns VINF_SUCCESS (or VINF_EM_HALT).
815 * @param pVCpu The cross context virtual CPU structure.
816 * @param uMWait Result from EMMonitorWaitIsActive().
817 * @param enmInterruptibility Guest CPU interruptibility level.
818 */
819static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
820{
821 Assert(!TRPMHasTrap(pVCpu));
822 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
823 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
824
825 /*
826 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
827 */
828 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
829 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
830 {
831 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
832 {
833 uint8_t u8Interrupt = 0;
834 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
835 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
836 if (RT_SUCCESS(rc))
837 {
838 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
839
840 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
841 AssertRCSuccess(rc);
842 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
843 return rc;
844 }
845 }
846 }
847 /*
848 * SMI is not implemented yet, at least not here.
849 */
850 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
851 {
852 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
853 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
854 return VINF_EM_HALT;
855 }
856 /*
857 * NMI.
858 */
859 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
860 {
861 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
862 {
863 /** @todo later. */
864 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
865 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
866 return VINF_EM_HALT;
867 }
868 }
869 /*
870 * Nested-guest virtual interrupt.
871 */
872 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
873 {
874 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
875 {
876 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
877 * here before injecting the virtual interrupt. See emR3ForcedActions
878 * for details. */
879 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
880 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
881 return VINF_EM_HALT;
882 }
883 }
884
885 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
886 {
887 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
888 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
889 return VINF_SUCCESS;
890 }
891 if (uMWait > 1)
892 {
893 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
894 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
895 return VINF_SUCCESS;
896 }
897
898 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
899 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
900 return VINF_EM_HALT;
901}
902
903
904/**
905 * This does one round of vmR3HaltGlobal1Halt().
906 *
907 * The rationale here is that we'll reduce latency in interrupt situations if we
908 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
909 * MWAIT), but do one round of blocking here instead and hope the interrupt is
910 * raised in the meanwhile.
911 *
912 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
913 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
914 * ring-0 call (unless we're too close to a timer event). When the interrupt
915 * wakes us up, we'll return from ring-0 and EM will by instinct do a
916 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
917 * back to VMMR0EntryFast().
918 *
919 * @returns VINF_SUCCESS or VINF_EM_HALT.
920 * @param pGVM The ring-0 VM structure.
921 * @param pGVCpu The ring-0 virtual CPU structure.
922 *
923 * @todo r=bird: All the blocking/waiting and EMT management should move out of
924 * the VM module, probably to VMM. Then this would be more weird wrt
925 * parameters and statistics.
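 *
 * In rough outline the function: checks that ring-0 halting is allowed and no
 * force flags demand a trip to ring-3, updates pending APIC interrupts, polls
 * TMTimerPollGIP for the next timer deadline, optionally spins a little while
 * plenty of host CPUs are online, and finally blocks in GVMMR0SchedHalt until
 * the GIP deadline, re-checking the wake-up flags (interrupt/NMI/SMI/UNHALT)
 * via vmmR0DoHaltInterrupt at every step.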
926 */
927static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
928{
929 /*
930 * Do spin stat historization.
931 */
932 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
933 { /* likely */ }
934 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
935 {
936 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
937 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
938 }
939 else
940 {
941 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
942 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
943 }
944
945 /*
946 * Flags that make us go to ring-3.
947 */
948 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
949 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
950 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
951 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
952 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
953 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
954 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
955 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
956
957 /*
958 * Check preconditions.
959 */
960 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
961 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
962 if ( pGVCpu->vmm.s.fMayHaltInRing0
963 && !TRPMHasTrap(pGVCpu)
964 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
965 || uMWait > 1))
966 {
967 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
968 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
969 {
970 /*
971 * Interrupts pending already?
972 */
973 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
974 APICUpdatePendingInterrupts(pGVCpu);
975
976 /*
977 * Flags that wake up from the halted state.
978 */
979 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
980 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
981
982 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
983 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
984 ASMNopPause();
985
986 /*
987 * Check out how long till the next timer event.
988 */
989 uint64_t u64Delta;
990 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
991
992 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
993 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
994 {
995 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
996 APICUpdatePendingInterrupts(pGVCpu);
997
998 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
999 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1000
1001 /*
1002 * Wait if there is enough time to the next timer event.
1003 */
1004 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
1005 {
1006 /* If there are few other CPU cores around, we will procrastinate a
1007 little before going to sleep, hoping for some device raising an
1008 interrupt or similar. Though, the best thing here would be to
1009 dynamically adjust the spin count according to its usefulness or
1010 something... */
1011 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
1012 && RTMpGetOnlineCount() >= 4)
1013 {
1014 /** @todo Figure out how we can skip this if it hasn't helped recently...
1015 * @bugref{9172#c12} */
1016 uint32_t cSpinLoops = 42;
1017 while (cSpinLoops-- > 0)
1018 {
1019 ASMNopPause();
1020 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
1021 APICUpdatePendingInterrupts(pGVCpu);
1022 ASMNopPause();
1023 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
1024 {
1025 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
1026 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1027 return VINF_EM_HALT;
1028 }
1029 ASMNopPause();
1030 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
1031 {
1032 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
1033 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1034 return VINF_EM_HALT;
1035 }
1036 ASMNopPause();
1037 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
1038 {
1039 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
1040 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1041 }
1042 ASMNopPause();
1043 }
1044 }
1045
1046 /*
1047 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
1048 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
1049 * After changing the state we must recheck the force flags of course.
1050 */
1051 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
1052 {
1053 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
1054 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
1055 {
1056 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
1057 APICUpdatePendingInterrupts(pGVCpu);
1058
1059 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
1060 {
1061 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1062 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1063 }
1064
1065 /* Okay, block! */
1066 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
1067 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
1068 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
1069 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
1070 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
1071
1072 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1073 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
1074 if ( rc == VINF_SUCCESS
1075 || rc == VERR_INTERRUPTED)
1076 {
1077 /* Keep some stats like ring-3 does. */
1078 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
1079 if (cNsOverslept > 50000)
1080 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
1081 else if (cNsOverslept < -50000)
1082 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
1083 else
1084 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
1085
1086 /*
1087 * Recheck whether we can resume execution or have to go to ring-3.
1088 */
1089 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
1090 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
1091 {
1092 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
1093 APICUpdatePendingInterrupts(pGVCpu);
1094 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
1095 {
1096 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
1097 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
1098 }
1099 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
1100 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
1101 }
1102 else
1103 {
1104 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
1105 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
1106 }
1107 }
1108 else
1109 {
1110 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1111 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
1112 }
1113 }
1114 else
1115 {
1116 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1117 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1118 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
1119 }
1120 }
1121 else
1122 {
1123 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1124 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
1125 }
1126 }
1127 else
1128 {
1129 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
1130 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
1131 }
1132 }
1133 else
1134 {
1135 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1136 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
1137 }
1138 }
1139 else
1140 {
1141 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1142 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
1143 }
1144 }
1145 else
1146 {
1147 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1148 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
1149 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
1150 }
1151
1152 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1153 return VINF_EM_HALT;
1154}
1155
1156
1157/**
1158 * VMM ring-0 thread-context callback.
1159 *
1160 * This does common HM state updating and calls the HM-specific thread-context
1161 * callback.
1162 *
1163 * This is used together with RTThreadCtxHookCreate() on platforms which
1164 * support it, and directly from VMMR0EmtPrepareForBlocking() and
1165 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
1166 *
1167 * @param enmEvent The thread-context event.
1168 * @param pvUser Opaque pointer to the VMCPU.
1169 *
1170 * @thread EMT(pvUser)
1171 */
1172static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
1173{
1174 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
1175
1176 switch (enmEvent)
1177 {
1178 case RTTHREADCTXEVENT_IN:
1179 {
1180 /*
1181 * Linux may call us with preemption enabled (really!) but technically we
1182 * cannot get preempted here, otherwise we end up in an infinite recursion
1183 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
1184 * ad infinitum). Let's just disable preemption for now...
1185 */
1186 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
1187 * preemption after doing the callout (one or two functions up the
1188 * call chain). */
1189 /** @todo r=ramshankar: See @bugref{5313#c30}. */
1190 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1191 RTThreadPreemptDisable(&ParanoidPreemptState);
1192
1193 /* We need to update the VCPU <-> host CPU mapping. */
1194 RTCPUID idHostCpu;
1195 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1196 pVCpu->iHostCpuSet = iHostCpuSet;
1197 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1198
1199 /* In the very unlikely event that the GIP delta for the CPU we're
1200 rescheduled onto needs calculating, try to force a return to ring-3.
1201 We unfortunately cannot do the measurements right here. */
1202 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1203 { /* likely */ }
1204 else
1205 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1206
1207 /* Invoke the HM-specific thread-context callback. */
1208 HMR0ThreadCtxCallback(enmEvent, pvUser);
1209
1210 /* Restore preemption. */
1211 RTThreadPreemptRestore(&ParanoidPreemptState);
1212 break;
1213 }
1214
1215 case RTTHREADCTXEVENT_OUT:
1216 {
1217 /* Invoke the HM-specific thread-context callback. */
1218 HMR0ThreadCtxCallback(enmEvent, pvUser);
1219
1220 /*
1221 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1222 * have the same host CPU associated with it.
1223 */
1224 pVCpu->iHostCpuSet = UINT32_MAX;
1225 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1226 break;
1227 }
1228
1229 default:
1230 /* Invoke the HM-specific thread-context callback. */
1231 HMR0ThreadCtxCallback(enmEvent, pvUser);
1232 break;
1233 }
1234}
1235
1236
1237/**
1238 * Creates thread switching hook for the current EMT thread.
1239 *
1240 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1241 * platform does not implement switcher hooks, no hooks will be created and the
1242 * member set to NIL_RTTHREADCTXHOOK.
1243 *
1244 * @returns VBox status code.
1245 * @param pVCpu The cross context virtual CPU structure.
1246 * @thread EMT(pVCpu)
1247 */
1248VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1249{
1250 VMCPU_ASSERT_EMT(pVCpu);
1251 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1252
1253#if 1 /* To disable this stuff change to zero. */
1254 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1255 if (RT_SUCCESS(rc))
1256 {
1257 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1258 return rc;
1259 }
1260#else
1261 RT_NOREF(vmmR0ThreadCtxCallback);
1262 int rc = VERR_NOT_SUPPORTED;
1263#endif
1264
1265 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1266 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1267 if (rc == VERR_NOT_SUPPORTED)
1268 return VINF_SUCCESS;
1269
1270 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1271 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1272}
1273
1274
1275/**
1276 * Destroys the thread switching hook for the specified VCPU.
1277 *
1278 * @param pVCpu The cross context virtual CPU structure.
1279 * @remarks Can be called from any thread.
1280 */
1281VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1282{
1283 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1284 AssertRC(rc);
1285 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1286}
1287
1288
1289/**
1290 * Disables the thread switching hook for this VCPU (if we got one).
1291 *
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @thread EMT(pVCpu)
1294 *
1295 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1296 * this call. This means you have to be careful with what you do!
1297 */
1298VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1299{
1300 /*
1301 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1302 * @bugref{7726#c19} explains the need for this trick:
1303 *
1304 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1305 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1306 * longjmp & normal return to ring-3, which opens a window where we may be
1307 * rescheduled without changing GVMCPUID::idHostCpu, causing confusion if
1308 * the CPU starts executing a different EMT. Both functions first disable
1309 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1310 * an opening for getting preempted.
1311 */
1312 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1313 * all the time. */
1314
1315 /*
1316 * Disable the context hook, if we got one.
1317 */
1318 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1319 {
1320 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1321 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1322 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1323 AssertRC(rc);
1324 }
1325}
1326
1327
1328/**
1329 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1330 *
1331 * @returns true if registered, false otherwise.
1332 * @param pVCpu The cross context virtual CPU structure.
1333 */
1334DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1335{
1336 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1337}
1338
1339
1340/**
1341 * Whether thread-context hooks are registered for this VCPU.
1342 *
1343 * @returns true if registered, false otherwise.
1344 * @param pVCpu The cross context virtual CPU structure.
1345 */
1346VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1347{
1348 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1349}
1350
1351
1352/**
1353 * Returns the ring-0 release logger instance.
1354 *
1355 * @returns Pointer to release logger, NULL if not configured.
1356 * @param pVCpu The cross context virtual CPU structure of the caller.
1357 * @thread EMT(pVCpu)
1358 */
1359VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1360{
1361 return pVCpu->vmmr0.s.RelLogger.pLogger;
1362}
1363
1364
1365#ifdef VBOX_WITH_STATISTICS
1366/**
1367 * Record return code statistics
1368 * @param pVM The cross context VM structure.
1369 * @param pVCpu The cross context virtual CPU structure.
1370 * @param rc The status code.
1371 */
1372static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1373{
1374 /*
1375 * Collect statistics.
1376 */
1377 switch (rc)
1378 {
1379 case VINF_SUCCESS:
1380 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1381 break;
1382 case VINF_EM_RAW_INTERRUPT:
1383 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1384 break;
1385 case VINF_EM_RAW_INTERRUPT_HYPER:
1386 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1387 break;
1388 case VINF_EM_RAW_GUEST_TRAP:
1389 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1390 break;
1391 case VINF_EM_RAW_RING_SWITCH:
1392 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1393 break;
1394 case VINF_EM_RAW_RING_SWITCH_INT:
1395 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1396 break;
1397 case VINF_EM_RAW_STALE_SELECTOR:
1398 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1399 break;
1400 case VINF_EM_RAW_IRET_TRAP:
1401 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1402 break;
1403 case VINF_IOM_R3_IOPORT_READ:
1404 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1405 break;
1406 case VINF_IOM_R3_IOPORT_WRITE:
1407 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1408 break;
1409 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1410 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1411 break;
1412 case VINF_IOM_R3_MMIO_READ:
1413 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1414 break;
1415 case VINF_IOM_R3_MMIO_WRITE:
1416 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1417 break;
1418 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1419 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1420 break;
1421 case VINF_IOM_R3_MMIO_READ_WRITE:
1422 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1423 break;
1424 case VINF_PATM_HC_MMIO_PATCH_READ:
1425 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1426 break;
1427 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1428 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1429 break;
1430 case VINF_CPUM_R3_MSR_READ:
1431 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1432 break;
1433 case VINF_CPUM_R3_MSR_WRITE:
1434 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1435 break;
1436 case VINF_EM_RAW_EMULATE_INSTR:
1437 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1438 break;
1439 case VINF_PATCH_EMULATE_INSTR:
1440 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1441 break;
1442 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1443 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1444 break;
1445 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1446 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1447 break;
1448 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1449 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1450 break;
1451 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1452 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1453 break;
1454 case VINF_CSAM_PENDING_ACTION:
1455 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1456 break;
1457 case VINF_PGM_SYNC_CR3:
1458 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1459 break;
1460 case VINF_PATM_PATCH_INT3:
1461 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1462 break;
1463 case VINF_PATM_PATCH_TRAP_PF:
1464 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1465 break;
1466 case VINF_PATM_PATCH_TRAP_GP:
1467 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1468 break;
1469 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1470 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1471 break;
1472 case VINF_EM_RESCHEDULE_REM:
1473 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1474 break;
1475 case VINF_EM_RAW_TO_R3:
1476 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1477 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1478 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1479 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1480 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1481 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1482 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1483 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1484 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1485 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1486 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1487 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1488 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1489 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1490 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1491 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1492 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1493 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1494 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1495 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1496 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1497 else
1498 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1499 break;
1500
1501 case VINF_EM_RAW_TIMER_PENDING:
1502 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1503 break;
1504 case VINF_EM_RAW_INTERRUPT_PENDING:
1505 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1506 break;
1507 case VINF_VMM_CALL_HOST:
1508 switch (pVCpu->vmm.s.enmCallRing3Operation)
1509 {
1510 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1511 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1512 break;
1513 case VMMCALLRING3_PDM_LOCK:
1514 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1515 break;
1516 case VMMCALLRING3_PGM_POOL_GROW:
1517 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1518 break;
1519 case VMMCALLRING3_PGM_LOCK:
1520 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1521 break;
1522 case VMMCALLRING3_PGM_MAP_CHUNK:
1523 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1524 break;
1525 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1526 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1527 break;
1528 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1529 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1530 break;
1531 case VMMCALLRING3_VM_SET_ERROR:
1532 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1533 break;
1534 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1535 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1536 break;
1537 case VMMCALLRING3_VM_R0_ASSERTION:
1538 default:
1539 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1540 break;
1541 }
1542 break;
1543 case VINF_PATM_DUPLICATE_FUNCTION:
1544 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1545 break;
1546 case VINF_PGM_CHANGE_MODE:
1547 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1548 break;
1549 case VINF_PGM_POOL_FLUSH_PENDING:
1550 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1551 break;
1552 case VINF_EM_PENDING_REQUEST:
1553 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1554 break;
1555 case VINF_EM_HM_PATCH_TPR_INSTR:
1556 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1557 break;
1558 default:
1559 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1560 break;
1561 }
1562}
1563#endif /* VBOX_WITH_STATISTICS */
1564
1565
1566/**
1567 * The Ring 0 entry point, called by the fast-ioctl path.
1568 *
1569 * @param pGVM The global (ring-0) VM structure.
1570 * @param pVMIgnored The cross context VM structure. The return code is
1571 * stored in pVM->vmm.s.iLastGZRc.
1572 * @param idCpu The Virtual CPU ID of the calling EMT.
1573 * @param enmOperation Which operation to execute.
1574 * @remarks Assume called with interrupts _enabled_.
1575 */
1576VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1577{
1578 RT_NOREF(pVMIgnored);
1579
1580 /*
1581 * Validation.
1582 */
1583 if ( idCpu < pGVM->cCpus
1584 && pGVM->cCpus == pGVM->cCpusUnsafe)
1585 { /*likely*/ }
1586 else
1587 {
1588 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1589 return;
1590 }
1591
1592 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1593 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1594 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1595 && pGVCpu->hNativeThreadR0 == hNativeThread))
1596 { /* likely */ }
1597 else
1598 {
1599 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1600 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1601 return;
1602 }
1603
1604 /*
1605 * SMAP fun.
1606 */
1607 VMM_CHECK_SMAP_SETUP();
1608 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1609
1610 /*
1611 * Perform requested operation.
1612 */
1613 switch (enmOperation)
1614 {
1615 /*
1616 * Run guest code using the available hardware acceleration technology.
1617 */
1618 case VMMR0_DO_HM_RUN:
1619 {
1620 for (;;) /* hlt loop */
1621 {
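 /* Each iteration: disable preemption, bind the EMT to the current host
    CPU, enter HM context, run guest code via the setjmp wrapper, and
    either loop again on a HLT that was serviced in ring-0 or leave the
    loop and return to ring-3 with the status stored in iLastGZRc. */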
1622 /*
1623 * Disable preemption.
1624 */
1625 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1626 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1627 RTThreadPreemptDisable(&PreemptState);
1628 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1629
1630 /*
1631 * Get the host CPU identifiers, make sure they are valid and that
1632 * we've got a TSC delta for the CPU.
1633 */
1634 RTCPUID idHostCpu;
1635 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1636 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1637 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1638 {
1639 pGVCpu->iHostCpuSet = iHostCpuSet;
1640 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1641
1642 /*
1643 * Update the periodic preemption timer if it's active.
1644 */
1645 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1646 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1647 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1648
1649#ifdef VMM_R0_TOUCH_FPU
1650 /*
1651 * Make sure we've got the FPU state loaded so we don't need to clear
1652 * CR0.TS and get out of sync with the host kernel when loading the guest
1653 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1654 */
1655 CPUMR0TouchHostFpu();
1656#endif
1657 int rc;
1658 bool fPreemptRestored = false;
1659 if (!HMR0SuspendPending())
1660 {
1661 /*
1662 * Enable the context switching hook.
1663 */
1664 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1665 {
1666 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1667 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1668 }
1669
1670 /*
1671 * Enter HM context.
1672 */
1673 rc = HMR0Enter(pGVCpu);
1674 if (RT_SUCCESS(rc))
1675 {
1676 pGVCpu->vmmr0.s.fInHmContext = true;
1677 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1678
1679 /*
1680 * When preemption hooks are in place, enable preemption now that
1681 * we're in HM context.
1682 */
1683 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1684 {
1685 fPreemptRestored = true;
1686 pGVCpu->vmmr0.s.pPreemptState = NULL;
1687 RTThreadPreemptRestore(&PreemptState);
1688 }
1689
1690 /*
1691 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1692 */
1693 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1694 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1695 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1696
1697 /*
1698                         * Assert sanity on the way out. Using manual assertion code here as normal
1699 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1700 */
1701 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1702 && RT_SUCCESS_NP(rc)
1703 && rc != VINF_VMM_CALL_HOST ))
1704 {
1705 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1706 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1707 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1708 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1709 }
1710#if 0
1711 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1712 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1713 {
1714 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1715 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1716 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1717 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1718 }
1719#endif
1720
1721 pGVCpu->vmmr0.s.fInHmContext = false;
1722 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1723 }
1724 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1725
1726 /*
1727 * Invalidate the host CPU identifiers before we disable the context
1728 * hook / restore preemption.
1729 */
1730 pGVCpu->iHostCpuSet = UINT32_MAX;
1731 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1732
1733 /*
1734 * Disable context hooks. Due to unresolved cleanup issues, we
1735 * cannot leave the hooks enabled when we return to ring-3.
1736 *
1737 * Note! At the moment HM may also have disabled the hook
1738 * when we get here, but the IPRT API handles that.
1739 */
1740 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1741 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1742 }
1743 /*
1744 * The system is about to go into suspend mode; go back to ring 3.
1745 */
1746 else
1747 {
1748 pGVCpu->iHostCpuSet = UINT32_MAX;
1749 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1750 rc = VINF_EM_RAW_INTERRUPT;
1751 }
1752
1753 /** @todo When HM stops messing with the context hook state, we'll disable
1754 * preemption again before the RTThreadCtxHookDisable call. */
1755 if (!fPreemptRestored)
1756 {
1757 pGVCpu->vmmr0.s.pPreemptState = NULL;
1758 RTThreadPreemptRestore(&PreemptState);
1759 }
1760
1761 pGVCpu->vmm.s.iLastGZRc = rc;
1762
1763 /* Fire dtrace probe and collect statistics. */
1764 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1765#ifdef VBOX_WITH_STATISTICS
1766 vmmR0RecordRC(pGVM, pGVCpu, rc);
1767#endif
1768 /*
1769 * If this is a halt.
1770 */
1771 if (rc != VINF_EM_HALT)
1772 { /* we're not in a hurry for a HLT, so prefer this path */ }
1773 else
1774 {
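                /* Try to service the halt right here in ring-0 (vmmR0DoHalt).  On success we skip
                   the trip to ring-3 and simply go another lap in the hlt loop above. */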
1775 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1776 if (rc == VINF_SUCCESS)
1777 {
1778 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1779 continue;
1780 }
1781 pGVCpu->vmm.s.cR0HaltsToRing3++;
1782 }
1783 }
1784 /*
1785 * Invalid CPU set index or TSC delta in need of measuring.
1786 */
1787 else
1788 {
1789 pGVCpu->vmmr0.s.pPreemptState = NULL;
1790 pGVCpu->iHostCpuSet = UINT32_MAX;
1791 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1792 RTThreadPreemptRestore(&PreemptState);
1793 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1794 {
1795 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1796 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1797 0 /*default cTries*/);
1798 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1799 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1800 else
1801 pGVCpu->vmm.s.iLastGZRc = rc;
1802 }
1803 else
1804 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1805 }
1806 break;
1807
1808 } /* halt loop. */
1809 break;
1810 }
1811
1812#ifdef VBOX_WITH_NEM_R0
1813# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1814 case VMMR0_DO_NEM_RUN:
1815 {
1816 /*
1817 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1818 */
1819 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1820# ifdef VBOXSTRICTRC_STRICT_ENABLED
1821 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1822# else
1823 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1824# endif
1825 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1826 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1827
1828 pGVCpu->vmm.s.iLastGZRc = rc;
1829
1830 /*
1831 * Fire dtrace probe and collect statistics.
1832 */
1833 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1834# ifdef VBOX_WITH_STATISTICS
1835 vmmR0RecordRC(pGVM, pGVCpu, rc);
1836# endif
1837 break;
1838 }
1839# endif
1840#endif
1841
1842 /*
1843 * For profiling.
1844 */
1845 case VMMR0_DO_NOP:
1846 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1847 break;
1848
1849 /*
1850 * Shouldn't happen.
1851 */
1852 default:
1853 AssertMsgFailed(("%#x\n", enmOperation));
1854 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1855 break;
1856 }
1857 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1858}
1859
1860
1861/**
1862 * Validates a session or VM session argument.
1863 *
1864 * @returns true / false accordingly.
1865 * @param pGVM The global (ring-0) VM structure.
1866 * @param pClaimedSession The session claim to validate.
1867 * @param pSession The session argument.
1868 */
1869DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1870{
1871 /* This must be set! */
1872 if (!pSession)
1873 return false;
1874
1875 /* Only one out of the two. */
1876 if (pGVM && pClaimedSession)
1877 return false;
1878 if (pGVM)
1879 pClaimedSession = pGVM->pSession;
1880 return pClaimedSession == pSession;
1881}
1882
1883
1884/**
1885 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1886 * called through a longjmp so we can exit safely on failure.
1887 *
1888 * @returns VBox status code.
1889 * @param pGVM The global (ring-0) VM structure.
1890 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pGVM
1891 * is NULL, and may be NIL_VMCPUID if it isn't.
1892 * @param enmOperation Which operation to execute.
1893 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1894 * The support driver validates this if it's present.
1895 * @param u64Arg Some simple constant argument.
1896 * @param pSession The session of the caller.
1897 *
1898 * @remarks Assume called with interrupts _enabled_.
1899 */
1900DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1901 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1902{
1903 /*
1904 * Validate pGVM and idCpu for consistency and validity.
1905 */
1906 if (pGVM != NULL)
1907 {
1908 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1909 { /* likely */ }
1910 else
1911 {
1912 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1913 return VERR_INVALID_POINTER;
1914 }
1915
1916 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1917 { /* likely */ }
1918 else
1919 {
1920 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1921 return VERR_INVALID_PARAMETER;
1922 }
1923
1924 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1925 && pGVM->enmVMState <= VMSTATE_TERMINATED
1926 && pGVM->pSession == pSession
1927 && pGVM->pSelf == pGVM))
1928 { /* likely */ }
1929 else
1930 {
1931 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1932 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1933 return VERR_INVALID_POINTER;
1934 }
1935 }
1936 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1937 { /* likely */ }
1938 else
1939 {
1940 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1941 return VERR_INVALID_PARAMETER;
1942 }
1943
1944 /*
1945 * SMAP fun.
1946 */
1947 VMM_CHECK_SMAP_SETUP();
1948 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1949
1950 /*
1951 * Process the request.
1952 */
1953 int rc;
1954 switch (enmOperation)
1955 {
1956 /*
1957 * GVM requests
1958 */
1959 case VMMR0_DO_GVMM_CREATE_VM:
1960 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1961 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1962 else
1963 rc = VERR_INVALID_PARAMETER;
1964 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1965 break;
1966
1967 case VMMR0_DO_GVMM_DESTROY_VM:
1968 if (pReqHdr == NULL && u64Arg == 0)
1969 rc = GVMMR0DestroyVM(pGVM);
1970 else
1971 rc = VERR_INVALID_PARAMETER;
1972 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1973 break;
1974
1975 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1976 if (pGVM != NULL)
1977 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1978 else
1979 rc = VERR_INVALID_PARAMETER;
1980 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1981 break;
1982
1983 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1984 if (pGVM != NULL)
1985 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1986 else
1987 rc = VERR_INVALID_PARAMETER;
1988 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1989 break;
1990
1991 case VMMR0_DO_GVMM_SCHED_HALT:
1992 if (pReqHdr)
1993 return VERR_INVALID_PARAMETER;
1994 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
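            /* Note: u64Arg is forwarded as the halt expiry time (a GIP timestamp) to GVMMR0SchedHaltReq. */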
1995 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1996 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1997 break;
1998
1999 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
2000 if (pReqHdr || u64Arg)
2001 return VERR_INVALID_PARAMETER;
2002 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2003 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
2004 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2005 break;
2006
2007 case VMMR0_DO_GVMM_SCHED_POKE:
2008 if (pReqHdr || u64Arg)
2009 return VERR_INVALID_PARAMETER;
2010 rc = GVMMR0SchedPoke(pGVM, idCpu);
2011 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2012 break;
2013
2014 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
2015 if (u64Arg)
2016 return VERR_INVALID_PARAMETER;
2017 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
2018 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2019 break;
2020
2021 case VMMR0_DO_GVMM_SCHED_POLL:
2022 if (pReqHdr || u64Arg > 1)
2023 return VERR_INVALID_PARAMETER;
2024 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
2025 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2026 break;
2027
2028 case VMMR0_DO_GVMM_QUERY_STATISTICS:
2029 if (u64Arg)
2030 return VERR_INVALID_PARAMETER;
2031 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
2032 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2033 break;
2034
2035 case VMMR0_DO_GVMM_RESET_STATISTICS:
2036 if (u64Arg)
2037 return VERR_INVALID_PARAMETER;
2038 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
2039 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2040 break;
2041
2042 /*
2043 * Initialize the R0 part of a VM instance.
2044 */
2045 case VMMR0_DO_VMMR0_INIT:
2046 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
2047 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2048 break;
2049
2050 /*
2051 * Does EMT specific ring-0 init.
2052 */
2053 case VMMR0_DO_VMMR0_INIT_EMT:
2054 rc = vmmR0InitVMEmt(pGVM, idCpu);
2055 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2056 break;
2057
2058 /*
2059 * Terminate the R0 part of a VM instance.
2060 */
2061 case VMMR0_DO_VMMR0_TERM:
2062 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2064 break;
2065
2066 /*
2067 * Update release or debug logger instances.
2068 */
2069 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
2070 if (idCpu == NIL_VMCPUID)
2071 return VERR_INVALID_CPU_ID;
2072 if (u64Arg <= 1 && pReqHdr != NULL)
2073 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, u64Arg != 0);
2074 else
2075 return VERR_INVALID_PARAMETER;
2076 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2077 break;
2078
2079 /*
2080 * Attempt to enable hm mode and check the current setting.
2081 */
2082 case VMMR0_DO_HM_ENABLE:
2083 rc = HMR0EnableAllCpus(pGVM);
2084 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2085 break;
2086
2087 /*
2088 * Setup the hardware accelerated session.
2089 */
2090 case VMMR0_DO_HM_SETUP_VM:
2091 rc = HMR0SetupVM(pGVM);
2092 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2093 break;
2094
2095 /*
2096 * PGM wrappers.
2097 */
2098 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
2099 if (idCpu == NIL_VMCPUID)
2100 return VERR_INVALID_CPU_ID;
2101 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
2102 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2103 break;
2104
2105 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
2106 if (idCpu == NIL_VMCPUID)
2107 return VERR_INVALID_CPU_ID;
2108 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
2109 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2110 break;
2111
2112 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2113 if (idCpu == NIL_VMCPUID)
2114 return VERR_INVALID_CPU_ID;
2115 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
2116 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2117 break;
2118
2119 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
2120 if (idCpu != 0)
2121 return VERR_INVALID_CPU_ID;
2122 rc = PGMR0PhysSetupIoMmu(pGVM);
2123 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2124 break;
2125
2126 case VMMR0_DO_PGM_POOL_GROW:
2127 if (idCpu == NIL_VMCPUID)
2128 return VERR_INVALID_CPU_ID;
2129 rc = PGMR0PoolGrow(pGVM);
2130 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2131 break;
2132
2133 /*
2134 * GMM wrappers.
2135 */
2136 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2137 if (u64Arg)
2138 return VERR_INVALID_PARAMETER;
2139 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
2140 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2141 break;
2142
2143 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2144 if (u64Arg)
2145 return VERR_INVALID_PARAMETER;
2146 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
2147 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2148 break;
2149
2150 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2151 if (u64Arg)
2152 return VERR_INVALID_PARAMETER;
2153 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
2154 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2155 break;
2156
2157 case VMMR0_DO_GMM_FREE_PAGES:
2158 if (u64Arg)
2159 return VERR_INVALID_PARAMETER;
2160 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
2161 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2162 break;
2163
2164 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
2165 if (u64Arg)
2166 return VERR_INVALID_PARAMETER;
2167 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
2168 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2169 break;
2170
2171 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
2172 if (u64Arg)
2173 return VERR_INVALID_PARAMETER;
2174 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
2175 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2176 break;
2177
2178 case VMMR0_DO_GMM_QUERY_MEM_STATS:
2179 if (idCpu == NIL_VMCPUID)
2180 return VERR_INVALID_CPU_ID;
2181 if (u64Arg)
2182 return VERR_INVALID_PARAMETER;
2183 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
2184 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2185 break;
2186
2187 case VMMR0_DO_GMM_BALLOONED_PAGES:
2188 if (u64Arg)
2189 return VERR_INVALID_PARAMETER;
2190 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
2191 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2192 break;
2193
2194 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2195 if (u64Arg)
2196 return VERR_INVALID_PARAMETER;
2197 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2198 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2199 break;
2200
2201 case VMMR0_DO_GMM_SEED_CHUNK:
2202 if (pReqHdr)
2203 return VERR_INVALID_PARAMETER;
2204 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
2205 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2206 break;
2207
2208 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2209 if (idCpu == NIL_VMCPUID)
2210 return VERR_INVALID_CPU_ID;
2211 if (u64Arg)
2212 return VERR_INVALID_PARAMETER;
2213 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2214 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2215 break;
2216
2217 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2218 if (idCpu == NIL_VMCPUID)
2219 return VERR_INVALID_CPU_ID;
2220 if (u64Arg)
2221 return VERR_INVALID_PARAMETER;
2222 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2223 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2224 break;
2225
2226 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2227 if (idCpu == NIL_VMCPUID)
2228 return VERR_INVALID_CPU_ID;
2229 if ( u64Arg
2230 || pReqHdr)
2231 return VERR_INVALID_PARAMETER;
2232 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2233 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2234 break;
2235
2236#ifdef VBOX_WITH_PAGE_SHARING
2237 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2238 {
2239 if (idCpu == NIL_VMCPUID)
2240 return VERR_INVALID_CPU_ID;
2241 if ( u64Arg
2242 || pReqHdr)
2243 return VERR_INVALID_PARAMETER;
2244 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2245 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2246 break;
2247 }
2248#endif
2249
2250#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2251 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2252 if (u64Arg)
2253 return VERR_INVALID_PARAMETER;
2254 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2255 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2256 break;
2257#endif
2258
2259 case VMMR0_DO_GMM_QUERY_STATISTICS:
2260 if (u64Arg)
2261 return VERR_INVALID_PARAMETER;
2262 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2263 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2264 break;
2265
2266 case VMMR0_DO_GMM_RESET_STATISTICS:
2267 if (u64Arg)
2268 return VERR_INVALID_PARAMETER;
2269 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2270 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2271 break;
2272
2273 /*
2274 * A quick GCFGM mock-up.
2275 */
2276 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2277 case VMMR0_DO_GCFGM_SET_VALUE:
2278 case VMMR0_DO_GCFGM_QUERY_VALUE:
2279 {
2280 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2281 return VERR_INVALID_PARAMETER;
2282 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2283 if (pReq->Hdr.cbReq != sizeof(*pReq))
2284 return VERR_INVALID_PARAMETER;
2285 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2286 {
2287 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2288 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2289 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2290 }
2291 else
2292 {
2293 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2294 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2295 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2296 }
2297 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2298 break;
2299 }
2300
2301 /*
2302 * PDM Wrappers.
2303 */
2304 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2305 {
2306 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2307 return VERR_INVALID_PARAMETER;
2308 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2309 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2310 break;
2311 }
2312
2313 case VMMR0_DO_PDM_DEVICE_CREATE:
2314 {
2315 if (!pReqHdr || u64Arg || idCpu != 0)
2316 return VERR_INVALID_PARAMETER;
2317 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2318 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2319 break;
2320 }
2321
2322 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2323 {
2324 if (!pReqHdr || u64Arg)
2325 return VERR_INVALID_PARAMETER;
2326 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2327 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2328 break;
2329 }
2330
2331    /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2332 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2333 {
2334 if (!pReqHdr || u64Arg || idCpu != 0)
2335 return VERR_INVALID_PARAMETER;
2336 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2337 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2338 break;
2339 }
2340
2341 /*
2342 * Requests to the internal networking service.
2343 */
2344 case VMMR0_DO_INTNET_OPEN:
2345 {
2346 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2347 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2348 return VERR_INVALID_PARAMETER;
2349 rc = IntNetR0OpenReq(pSession, pReq);
2350 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2351 break;
2352 }
2353
2354 case VMMR0_DO_INTNET_IF_CLOSE:
2355 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2356 return VERR_INVALID_PARAMETER;
2357 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2358 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2359 break;
2360
2361
2362 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2363 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2364 return VERR_INVALID_PARAMETER;
2365 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2366 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2367 break;
2368
2369 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2370 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2371 return VERR_INVALID_PARAMETER;
2372 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2373 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2374 break;
2375
2376 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2377 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2378 return VERR_INVALID_PARAMETER;
2379 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2380 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2381 break;
2382
2383 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2384 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2385 return VERR_INVALID_PARAMETER;
2386 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2387 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2388 break;
2389
2390 case VMMR0_DO_INTNET_IF_SEND:
2391 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2392 return VERR_INVALID_PARAMETER;
2393 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2394 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2395 break;
2396
2397 case VMMR0_DO_INTNET_IF_WAIT:
2398 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2399 return VERR_INVALID_PARAMETER;
2400 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2401 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2402 break;
2403
2404 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2405 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2406 return VERR_INVALID_PARAMETER;
2407 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2408 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2409 break;
2410
2411#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2412 /*
2413 * Requests to host PCI driver service.
2414 */
2415 case VMMR0_DO_PCIRAW_REQ:
2416 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2417 return VERR_INVALID_PARAMETER;
2418 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2419 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2420 break;
2421#endif
2422
2423 /*
2424 * NEM requests.
2425 */
2426#ifdef VBOX_WITH_NEM_R0
2427# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2428 case VMMR0_DO_NEM_INIT_VM:
2429 if (u64Arg || pReqHdr || idCpu != 0)
2430 return VERR_INVALID_PARAMETER;
2431 rc = NEMR0InitVM(pGVM);
2432 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2433 break;
2434
2435 case VMMR0_DO_NEM_INIT_VM_PART_2:
2436 if (u64Arg || pReqHdr || idCpu != 0)
2437 return VERR_INVALID_PARAMETER;
2438 rc = NEMR0InitVMPart2(pGVM);
2439 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2440 break;
2441
2442 case VMMR0_DO_NEM_MAP_PAGES:
2443 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2444 return VERR_INVALID_PARAMETER;
2445 rc = NEMR0MapPages(pGVM, idCpu);
2446 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2447 break;
2448
2449 case VMMR0_DO_NEM_UNMAP_PAGES:
2450 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2451 return VERR_INVALID_PARAMETER;
2452 rc = NEMR0UnmapPages(pGVM, idCpu);
2453 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2454 break;
2455
2456 case VMMR0_DO_NEM_EXPORT_STATE:
2457 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2458 return VERR_INVALID_PARAMETER;
2459 rc = NEMR0ExportState(pGVM, idCpu);
2460 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2461 break;
2462
2463 case VMMR0_DO_NEM_IMPORT_STATE:
2464 if (pReqHdr || idCpu == NIL_VMCPUID)
2465 return VERR_INVALID_PARAMETER;
2466 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2467 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2468 break;
2469
2470 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2471 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2472 return VERR_INVALID_PARAMETER;
2473 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2475 break;
2476
2477 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2478 if (pReqHdr || idCpu == NIL_VMCPUID)
2479 return VERR_INVALID_PARAMETER;
2480 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2481 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2482 break;
2483
2484 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2485 if (u64Arg || pReqHdr)
2486 return VERR_INVALID_PARAMETER;
2487 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2488 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2489 break;
2490
2491# if 1 && defined(DEBUG_bird)
2492 case VMMR0_DO_NEM_EXPERIMENT:
2493 if (pReqHdr)
2494 return VERR_INVALID_PARAMETER;
2495 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2496 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2497 break;
2498# endif
2499# endif
2500#endif
2501
2502 /*
2503 * IOM requests.
2504 */
2505 case VMMR0_DO_IOM_GROW_IO_PORTS:
2506 {
2507 if (pReqHdr || idCpu != 0)
2508 return VERR_INVALID_PARAMETER;
2509 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2510 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2511 break;
2512 }
2513
2514 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2515 {
2516 if (pReqHdr || idCpu != 0)
2517 return VERR_INVALID_PARAMETER;
2518 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2519 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2520 break;
2521 }
2522
2523 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2524 {
2525 if (pReqHdr || idCpu != 0)
2526 return VERR_INVALID_PARAMETER;
2527 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2528 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2529 break;
2530 }
2531
2532 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2533 {
2534 if (pReqHdr || idCpu != 0)
2535 return VERR_INVALID_PARAMETER;
2536 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2537 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2538 break;
2539 }
2540
2541 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2542 {
2543 if (pReqHdr || idCpu != 0)
2544 return VERR_INVALID_PARAMETER;
2545 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2546 if (RT_SUCCESS(rc))
2547 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2548 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2549 break;
2550 }
2551
2552 /*
2553 * DBGF requests.
2554 */
2555#ifdef VBOX_WITH_DBGF_TRACING
2556 case VMMR0_DO_DBGF_TRACER_CREATE:
2557 {
2558 if (!pReqHdr || u64Arg || idCpu != 0)
2559 return VERR_INVALID_PARAMETER;
2560 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2561 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2562 break;
2563 }
2564
2565 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2566 {
2567 if (!pReqHdr || u64Arg)
2568 return VERR_INVALID_PARAMETER;
2569# if 0 /** @todo */
2570 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2571# else
2572 rc = VERR_NOT_IMPLEMENTED;
2573# endif
2574 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2575 break;
2576 }
2577#endif
2578
2579 case VMMR0_DO_DBGF_BP_INIT:
2580 {
2581 if (!pReqHdr || u64Arg || idCpu != 0)
2582 return VERR_INVALID_PARAMETER;
2583 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2584 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2585 break;
2586 }
2587
2588 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2589 {
2590 if (!pReqHdr || u64Arg || idCpu != 0)
2591 return VERR_INVALID_PARAMETER;
2592 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2593 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2594 break;
2595 }
2596
2597 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2598 {
2599 if (!pReqHdr || u64Arg || idCpu != 0)
2600 return VERR_INVALID_PARAMETER;
2601 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2602 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2603 break;
2604 }
2605
2606 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2607 {
2608 if (!pReqHdr || u64Arg || idCpu != 0)
2609 return VERR_INVALID_PARAMETER;
2610 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2611 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2612 break;
2613 }
2614
2615 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2616 {
2617 if (!pReqHdr || u64Arg || idCpu != 0)
2618 return VERR_INVALID_PARAMETER;
2619 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2620 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2621 break;
2622 }
2623
2624
2625 /*
2626 * TM requests.
2627 */
2628 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2629 {
2630 if (pReqHdr || idCpu == NIL_VMCPUID)
2631 return VERR_INVALID_PARAMETER;
2632 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2633 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2634 break;
2635 }
2636
2637 /*
2638 * For profiling.
2639 */
2640 case VMMR0_DO_NOP:
2641 case VMMR0_DO_SLOW_NOP:
2642 return VINF_SUCCESS;
2643
2644 /*
2645 * For testing Ring-0 APIs invoked in this environment.
2646 */
2647 case VMMR0_DO_TESTS:
2648 /** @todo make new test */
2649 return VINF_SUCCESS;
2650
2651 default:
2652 /*
2653             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2654             * than -1, which the interrupt gate glue code might return.
2655 */
2656 Log(("operation %#x is not supported\n", enmOperation));
2657 return VERR_NOT_SUPPORTED;
2658 }
2659 return rc;
2660}
2661
2662
2663/**
2664 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2665 *
2666 * @returns VBox status code.
2667 * @param pvArgs The argument package
2668 */
2669static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2670{
2671 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2672 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2673 pGVCpu->vmmr0.s.idCpu,
2674 pGVCpu->vmmr0.s.enmOperation,
2675 pGVCpu->vmmr0.s.pReq,
2676 pGVCpu->vmmr0.s.u64Arg,
2677 pGVCpu->vmmr0.s.pSession);
2678}
2679
2680
2681/**
2682 * The Ring 0 entry point, called by the support library (SUP).
2683 *
2684 * @returns VBox status code.
2685 * @param pGVM The global (ring-0) VM structure.
2686 * @param pVM The cross context VM structure.
2687 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2688 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2689 * @param enmOperation Which operation to execute.
2690 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2691 * @param u64Arg Some simple constant argument.
2692 * @param pSession The session of the caller.
2693 * @remarks Assume called with interrupts _enabled_.
2694 */
2695VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2696 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2697{
2698 /*
2699 * Requests that should only happen on the EMT thread will be
2700 * wrapped in a setjmp so we can assert without causing trouble.
2701 */
2702 if ( pVM != NULL
2703 && pGVM != NULL
2704 && pVM == pGVM /** @todo drop pVM or pGVM */
2705 && idCpu < pGVM->cCpus
2706 && pGVM->pSession == pSession
2707 && pGVM->pSelf == pVM)
2708 {
2709 switch (enmOperation)
2710 {
2711 /* These might/will be called before VMMR3Init. */
2712 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2713 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2714 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2715 case VMMR0_DO_GMM_FREE_PAGES:
2716 case VMMR0_DO_GMM_BALLOONED_PAGES:
2717 /* On the mac we might not have a valid jmp buf, so check these as well. */
2718 case VMMR0_DO_VMMR0_INIT:
2719 case VMMR0_DO_VMMR0_TERM:
2720
2721 case VMMR0_DO_PDM_DEVICE_CREATE:
2722 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2723 case VMMR0_DO_IOM_GROW_IO_PORTS:
2724 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2725 case VMMR0_DO_DBGF_BP_INIT:
2726 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2727 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2728 {
2729 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2730 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2731 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2732 && pGVCpu->hNativeThreadR0 == hNativeThread))
2733 {
2734 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2735 break;
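                        /* (No ring-0 jump buffer stack yet, so drop out of the switch and
                            call the worker directly below, without the setjmp wrapping.) */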
2736
2737 pGVCpu->vmmr0.s.pGVM = pGVM;
2738 pGVCpu->vmmr0.s.idCpu = idCpu;
2739 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2740 pGVCpu->vmmr0.s.pReq = pReq;
2741 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2742 pGVCpu->vmmr0.s.pSession = pSession;
2743 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2744 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2745 }
2746 return VERR_VM_THREAD_NOT_EMT;
2747 }
2748
2749 default:
2750 case VMMR0_DO_PGM_POOL_GROW:
2751 break;
2752 }
2753 }
2754 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2755}
2756
2757
2758/**
2759 * Checks whether we've armed the ring-0 long jump machinery.
2760 *
2761 * @returns @c true / @c false
2762 * @param pVCpu The cross context virtual CPU structure.
2763 * @thread EMT
2764 * @sa VMMIsLongJumpArmed
2765 */
2766VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2767{
2768#ifdef RT_ARCH_X86
2769 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2770 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2771#else
2772 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2773 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2774#endif
2775}
2776
2777
2778/**
2779 * Checks whether we've done a ring-3 long jump.
2780 *
2781 * @returns @c true / @c false
2782 * @param pVCpu The cross context virtual CPU structure.
2783 * @thread EMT
2784 */
2785VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2786{
2787 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2788}
2789
2790
2791/**
2792 * Locking helper that deals with HM context and checks if the thread can block.
2793 *
2794 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2795 * VERR_VMM_CANNOT_BLOCK if not able to block.
2796 * @param pVCpu The cross context virtual CPU structure of the calling
2797 * thread.
2798 * @param rcBusy What to return in case of a blocking problem. Will IPE
2799 * if VINF_SUCCESS and we cannot block.
2800 * @param pszCaller The caller (for logging problems).
2801 * @param pvLock The lock address (for logging problems).
2802 * @param pCtx Where to return context info for the resume call.
2803 * @thread EMT(pVCpu)
2804 */
2805VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2806 PVMMR0EMTBLOCKCTX pCtx)
2807{
2808 const char *pszMsg;
2809
2810 /*
2811 * Check that we are allowed to block.
2812 */
2813 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2814 {
2815 /*
2816         * Are we in HM context and w/o a context hook? If so, work the context hook.
2817 */
2818 if (pVCpu->idHostCpu != NIL_RTCPUID)
2819 {
2820 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2821 Assert(pVCpu->vmmr0.s.fInHmContext);
2822
2823 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2824 {
2825 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2826 if (pVCpu->vmmr0.s.pPreemptState)
2827 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
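                /* At this point the OUT context-hook callback has run and any recorded preemption
                   state has been restored, so the caller can block.  VMMR0EmtResumeAfterBlocking
                   reverses this (hence fWasInHmContext = true below). */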
2828
2829 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2830 pCtx->fWasInHmContext = true;
2831 return VINF_SUCCESS;
2832 }
2833 }
2834
2835 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2836 {
2837 /*
2838 * Not in HM context or we've got hooks, so just check that preemption
2839 * is enabled.
2840 */
2841 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2842 {
2843 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2844 pCtx->fWasInHmContext = false;
2845 return VINF_SUCCESS;
2846 }
2847 pszMsg = "Preemption is disabled!";
2848 }
2849 else
2850 pszMsg = "Preemption state w/o HM state!";
2851 }
2852 else
2853 pszMsg = "Ring-3 calls are disabled!";
2854
2855 static uint32_t volatile s_cWarnings = 0;
2856 if (++s_cWarnings < 50)
2857 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2858 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2859 pCtx->fWasInHmContext = false;
2860 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2861}
2862
2863
2864/**
2865 * Counterpart to VMMR0EmtPrepareToBlock.
2866 *
2867 * @param pVCpu The cross context virtual CPU structure of the calling
2868 * thread.
2869 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2870 * @thread EMT(pVCpu)
2871 */
2872VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2873{
2874 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2875 if (pCtx->fWasInHmContext)
2876 {
2877 if (pVCpu->vmmr0.s.pPreemptState)
2878 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2879
2880 pCtx->fWasInHmContext = false;
2881 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2882 }
2883 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2884}
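
/*
 * Illustrative sketch (not code from this file) of how the two blocking helpers
 * above are meant to be paired around a wait; the event handle and the rcBusy
 * value used here are made up for the example:
 *
 * @code
 *     VMMR0EMTBLOCKCTX Ctx;
 *     int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, __FUNCTION__, (void *)hEvent, &Ctx);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         rc = RTSemEventWait(hEvent, RT_INDEFINITE_WAIT);
 *         VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *     }
 *     else: rc is rcBusy (VERR_SEM_BUSY here) or VERR_VMM_CANNOT_BLOCK - do not block.
 * @endcode
 */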
2885
2886
2887/**
2888 * Updates the EMT loggers for the VM.
2889 *
2890 * @returns VBox status code.
2891 * @param pGVM The global (ring-0) VM structure.
2892 * @param idCpu The ID of the calling EMT.
2893 * @param pReq The request data.
2894 * @param fRelease Which logger set to update.
2895 * @thread EMT(idCpu)
2896 */
2897static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, bool fRelease)
2898{
2899 /*
2900 * Check sanity. First we require EMT to be calling us.
2901 */
2902 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2903 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2904
2905 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2906 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2907 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2908
2909 /*
2910 * Adjust flags.
2911 */
2912 /* Always buffered: */
2913 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2914    /* These don't make sense at present: */
2915 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2916 /* We've traditionally skipped the group restrictions. */
2917 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2918
2919 /*
2920 * Do the updating.
2921 */
2922 int rc = VINF_SUCCESS;
2923 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2924 {
2925 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2926 PRTLOGGER pLogger = fRelease ? pGVCpu->vmmr0.s.RelLogger.pLogger : pGVCpu->vmmr0.s.Logger.pLogger;
2927 if (pLogger)
2928 {
2929 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2930 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2931 }
2932 }
2933
2934 return rc;
2935}
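
/*
 * Illustrative sketch (hypothetical, not actual ring-3 code) of a request that
 * would pass the validation in vmmR0UpdateLoggers above; only the fields the
 * worker checks are shown and the SUPVMMR0REQHDR is assumed to be prepared by
 * the usual ring-3 request plumbing:
 *
 * @code
 *     uint32_t const         cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
 *     PVMMR0UPDATELOGGERSREQ pReq  = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZ(cbReq);
 *     pReq->Hdr.cbReq    = cbReq;
 *     pReq->fFlags       = fFlags;        // RTLOGFLAGS_* mask
 *     pReq->uGroupCrc32  = uGroupCrc32;
 *     pReq->cGroups      = cGroups;       // must be < _8K
 *     memcpy(pReq->afGroups, pafGroups, cGroups * sizeof(pReq->afGroups[0]));
 * @endcode
 */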
2936
2937
2938/**
2939 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
2940 */
2941static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, bool fRelease)
2942{
2943 RT_NOREF(pBufDesc, fRelease);
2944
2945 /*
2946 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
2947     * (This code is a bit paranoid.)
2948 */
2949 if (RT_VALID_PTR(pLogger))
2950 {
2951 if ( pLogger->u32Magic == RTLOGGER_MAGIC
2952 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
2953 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
2954 {
2955 if (!(pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED))
2956 {
2957 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
2958 if ( RT_VALID_PTR(pGVCpu)
2959 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
2960 {
2961 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2962 if ( hNativeSelf == pGVCpu->hEMT
2963 && RT_VALID_PTR(pGVCpu->pGVM))
2964 {
2965 /*
2966 * Check that the jump buffer is armed.
2967 */
2968#ifdef RT_ARCH_X86
2969 if ( pGVCpu->vmm.s.CallRing3JmpBufR0.eip != 0
2970 && !pGVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2971#else
2972 if ( pGVCpu->vmm.s.CallRing3JmpBufR0.rip != 0
2973 && !pGVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2974#endif
2975 {
2976 VMMRZCallRing3(pGVCpu->pGVM, pGVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, fRelease);
2977 }
2978#ifdef DEBUG
2979 else SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2980#endif
2981 }
2982#ifdef DEBUG
2983 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
2984 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf);
2985#endif
2986
2987 }
2988#ifdef DEBUG
2989 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu);
2990#endif
2991 }
2992 /* else: quiet */
2993 }
2994#ifdef DEBUG
2995 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
2996 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3);
2997#endif
2998 }
2999#ifdef DEBUG
3000 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
3001#endif
3002 return true;
3003}
3004
3005
3006/**
3007 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3008 */
3009static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3010{
3011 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, true /*fRelease*/);
3012}
3013
3014
3015/**
3016 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3017 */
3018static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3019{
3020#ifdef LOG_ENABLED
3021 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, false /*fRelease*/);
3022#else
3023 RT_NOREF(pLogger, pBufDesc);
3024 return true;
3025#endif
3026}
3027
3028#ifdef LOG_ENABLED
3029
3030/**
3031 * Disables flushing of the ring-0 debug log.
3032 *
3033 * @param pVCpu The cross context virtual CPU structure.
3034 */
3035VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
3036{
3037 pVCpu->vmmr0.s.fLogFlushingDisabled = true;
3038 if (pVCpu->vmmr0.s.Logger.pLogger)
3039 pVCpu->vmmr0.s.Logger.pLogger->u32UserValue1 |= VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3040 if (pVCpu->vmmr0.s.RelLogger.pLogger)
3041 pVCpu->vmmr0.s.RelLogger.pLogger->u32UserValue1 |= VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3042}
3043
3044
3045/**
3046 * Enables flushing of the ring-0 debug log.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 */
3050VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
3051{
3052 pVCpu->vmmr0.s.fLogFlushingDisabled = false;
3053 if (pVCpu->vmmr0.s.Logger.pLogger)
3054 pVCpu->vmmr0.s.Logger.pLogger->u32UserValue1 &= ~VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3055 if (pVCpu->vmmr0.s.RelLogger.pLogger)
3056 pVCpu->vmmr0.s.RelLogger.pLogger->u32UserValue1 &= ~VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED;
3057}
3058
3059
3060/**
3061 * Checks if log flushing is disabled or not.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
3066{
3067 return pVCpu->vmmr0.s.fLogFlushingDisabled;
3068}
3069
3070#endif /* LOG_ENABLED */
3071
3072/*
3073 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3074 */
3075DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3076{
3077#ifdef LOG_ENABLED
3078 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3079 if (pGVCpu)
3080 {
3081 PRTLOGGER pLogger = pGVCpu->vmmr0.s.Logger.pLogger;
3082 if (RT_VALID_PTR(pLogger))
3083 {
3084 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3085 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3086 {
3087 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3088 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3089 return NULL;
3090 }
3091 }
3092 }
3093#endif
3094 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3095}
3096
3097
3098/*
3099 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3100 */
3101DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3102{
3103 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3104 if (pGVCpu)
3105 {
3106 PRTLOGGER pLogger = pGVCpu->vmmr0.s.RelLogger.pLogger;
3107 if (RT_VALID_PTR(pLogger))
3108 {
3109 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3110 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3111 {
3112 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3113 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3114 return NULL;
3115 }
3116 }
3117 }
3118 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3119}
3120
3121
3122/*
3123 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3124 *
3125 * @returns true if the breakpoint should be hit, false if it should be ignored.
3126 */
3127DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3128{
3129#if 0
3130 return true;
3131#else
3132 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3133 if (pVM)
3134 {
3135 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3136
3137 if (pVCpu)
3138 {
3139# ifdef RT_ARCH_X86
3140 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
3141 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3142# else
3143 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
3144 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3145# endif
3146 {
3147 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
3148 return RT_FAILURE_NP(rc);
3149 }
3150 }
3151 }
3152# ifdef RT_OS_LINUX
3153 return true;
3154# else
3155 return false;
3156# endif
3157#endif
3158}
3159
3160
3161/*
3162 * Override this so we can push it up to ring-3.
3163 */
3164DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3165{
3166 /*
3167 * To the log.
3168 */
3169 LogAlways(("\n!!R0-Assertion Failed!!\n"
3170 "Expression: %s\n"
3171 "Location : %s(%d) %s\n",
3172 pszExpr, pszFile, uLine, pszFunction));
3173
3174 /*
3175 * To the global VMM buffer.
3176 */
3177 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3178 if (pVM)
3179 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3180 "\n!!R0-Assertion Failed!!\n"
3181 "Expression: %.*s\n"
3182 "Location : %s(%d) %s\n",
3183 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3184 pszFile, uLine, pszFunction);
3185
3186 /*
3187 * Continue the normal way.
3188 */
3189 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3190}
3191
3192
3193/**
3194 * Callback for RTLogFormatV which writes to the ring-3 log port.
3195 * See PFNLOGOUTPUT() for details.
3196 */
3197static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3198{
3199 for (size_t i = 0; i < cbChars; i++)
3200 {
3201 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3202 }
3203
3204 NOREF(pv);
3205 return cbChars;
3206}
3207
3208
3209/*
3210 * Override this so we can push it up to ring-3.
3211 */
3212DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3213{
3214 va_list vaCopy;
3215
3216 /*
3217 * Push the message to the loggers.
3218 */
3219 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3220 if (pLog)
3221 {
3222 va_copy(vaCopy, va);
3223 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3224 va_end(vaCopy);
3225 }
3226 pLog = RTLogRelGetDefaultInstance();
3227 if (pLog)
3228 {
3229 va_copy(vaCopy, va);
3230 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3231 va_end(vaCopy);
3232 }
3233
3234 /*
3235 * Push it to the global VMM buffer.
3236 */
3237 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3238 if (pVM)
3239 {
3240 va_copy(vaCopy, va);
3241 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3242 va_end(vaCopy);
3243 }
3244
3245 /*
3246 * Continue the normal way.
3247 */
3248 RTAssertMsg2V(pszFormat, va);
3249}
3250