VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@90975

Last change on this file since 90975 was 90975, checked in by vboxsync, 4 years ago

VMM: Ensure proper log flush order by going to ring-0 to wait for the flusher thread to complete. bugref:10086

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 142.1 KB
1/* $Id: VMMR0.cpp 90975 2021-08-28 23:35:23Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/semaphore.h>
62#include <iprt/spinlock.h>
63#include <iprt/stdarg.h>
64#include <iprt/string.h>
65#include <iprt/thread.h>
66#include <iprt/timer.h>
67#include <iprt/time.h>
68
69#include "dtrace/VBoxVMM.h"
70
71
72#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
73# pragma intrinsic(_AddressOfReturnAddress)
74#endif
75
76#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
77# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
78#endif
79
80
81
82/*********************************************************************************************************************************
83* Defined Constants And Macros *
84*********************************************************************************************************************************/
85/** @def VMM_CHECK_SMAP_SETUP
86 * SMAP check setup. */
87/** @def VMM_CHECK_SMAP_CHECK
88 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
89 * it will be logged and @a a_BadExpr is executed. */
90/** @def VMM_CHECK_SMAP_CHECK2
91 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
92 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
93 * executed. */
94#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
95# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
96# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
97 do { \
98 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
99 { \
100 RTCCUINTREG fEflCheck = ASMGetFlags(); \
101 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
102 { /* likely */ } \
103 else \
104 { \
105 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
106 a_BadExpr; \
107 } \
108 } \
109 } while (0)
110# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
111 do { \
112 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
113 { \
114 RTCCUINTREG fEflCheck = ASMGetFlags(); \
115 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
116 { /* likely */ } \
117 else if (a_pGVM) \
118 { \
119 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
120 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
121 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
122 a_BadExpr; \
123 } \
124 else \
125 { \
126 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
127 a_BadExpr; \
128 } \
129 } \
130 } while (0)
131#else
132# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
133# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
134# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
135#endif
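/* Illustrative usage sketch of the SMAP check macros above (compile-guarded;
   the function name is hypothetical).  VMM_CHECK_SMAP_SETUP() captures the
   kernel feature flags once per function, and each CHECK/CHECK2 then verifies
   that EFLAGS.AC is still set, executing the bad-case expression if it isn't: */
#if 0
static int vmmR0SmapUsageSketch(PGVM pGVM)
{
    VMM_CHECK_SMAP_SETUP();                                          /* uint32_t const fKernelFeatures = SUPR0GetKernelFeatures() */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);         /* logs and bails if AC is clear */
    /* ... work that might inadvertently clear EFLAGS.AC ... */
    VMM_CHECK_SMAP_CHECK2(pGVM, return VERR_VMM_SMAP_BUT_AC_CLEAR);  /* additionally records it in the VM's assertion buffer */
    return VINF_SUCCESS;
}
#endif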
136
137
138/*********************************************************************************************************************************
139* Internal Functions *
140*********************************************************************************************************************************/
141RT_C_DECLS_BEGIN
142#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
143extern uint64_t __udivdi3(uint64_t, uint64_t);
144extern uint64_t __umoddi3(uint64_t, uint64_t);
145#endif
146RT_C_DECLS_END
147static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
148static int vmmR0LogFlusher(PGVM pGVM);
149static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
150static int vmmR0InitLoggers(PGVM pGVM);
151static void vmmR0CleanupLoggers(PGVM pGVM);
152
153
154/*********************************************************************************************************************************
155* Global Variables *
156*********************************************************************************************************************************/
157/** Drag in necessary library bits.
158 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
159struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
160{
161 { (PFNRT)RTCrc32 },
162 { (PFNRT)RTOnce },
163#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
164 { (PFNRT)__udivdi3 },
165 { (PFNRT)__umoddi3 },
166#endif
167 { NULL }
168};
169
170#ifdef RT_OS_SOLARIS
171/* Dependency information for the native solaris loader. */
172extern "C" { char _depends_on[] = "vboxdrv"; }
173#endif
174
175
176/**
177 * Initialize the module.
178 * This is called when we're first loaded.
179 *
180 * @returns 0 on success.
181 * @returns VBox status on failure.
182 * @param hMod Image handle for use in APIs.
183 */
184DECLEXPORT(int) ModuleInit(void *hMod)
185{
186 VMM_CHECK_SMAP_SETUP();
187 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
188
189#ifdef VBOX_WITH_DTRACE_R0
190 /*
191 * The first thing to do is register the static tracepoints.
192 * (Deregistration is automatic.)
193 */
194 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
195 if (RT_FAILURE(rc2))
196 return rc2;
197#endif
198 LogFlow(("ModuleInit:\n"));
199
200#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
201 /*
202 * Display the CMOS debug code.
203 */
204 ASMOutU8(0x72, 0x03);
205 uint8_t bDebugCode = ASMInU8(0x73);
206 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
207 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
208#endif
209
210 /*
211 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
212 */
213 int rc = vmmInitFormatTypes();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = GVMMR0Init();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221 rc = GMMR0Init();
222 if (RT_SUCCESS(rc))
223 {
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = HMR0Init();
226 if (RT_SUCCESS(rc))
227 {
228 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
229
230 PDMR0Init(hMod);
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232
233 rc = PGMRegisterStringFormatTypes();
234 if (RT_SUCCESS(rc))
235 {
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
238 rc = PGMR0DynMapInit();
239#endif
240 if (RT_SUCCESS(rc))
241 {
242 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
243 rc = IntNetR0Init();
244 if (RT_SUCCESS(rc))
245 {
246#ifdef VBOX_WITH_PCI_PASSTHROUGH
247 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
248 rc = PciRawR0Init();
249#endif
250 if (RT_SUCCESS(rc))
251 {
252 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
253 rc = CPUMR0ModuleInit();
254 if (RT_SUCCESS(rc))
255 {
256#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
257 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
258 rc = vmmR0TripleFaultHackInit();
259 if (RT_SUCCESS(rc))
260#endif
261 {
262 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
263 if (RT_SUCCESS(rc))
264 {
265 LogFlow(("ModuleInit: returns success\n"));
266 return VINF_SUCCESS;
267 }
268 }
269
270 /*
271 * Bail out.
272 */
273#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
274 vmmR0TripleFaultHackTerm();
275#endif
276 }
277 else
278 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
279#ifdef VBOX_WITH_PCI_PASSTHROUGH
280 PciRawR0Term();
281#endif
282 }
283 else
284 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
285 IntNetR0Term();
286 }
287 else
288 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
289#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
290 PGMR0DynMapTerm();
291#endif
292 }
293 else
294 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
295 PGMDeregisterStringFormatTypes();
296 }
297 else
298 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
299 HMR0Term();
300 }
301 else
302 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
303 GMMR0Term();
304 }
305 else
306 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
307 GVMMR0Term();
308 }
309 else
310 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
311 vmmTermFormatTypes();
312 }
313 else
314 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
315
316 LogFlow(("ModuleInit: failed %Rrc\n", rc));
317 return rc;
318}
319
320
321/**
322 * Terminate the module.
323 * This is called when we're finally unloaded.
324 *
325 * @param hMod Image handle for use in APIs.
326 */
327DECLEXPORT(void) ModuleTerm(void *hMod)
328{
329 NOREF(hMod);
330 LogFlow(("ModuleTerm:\n"));
331
332 /*
333 * Terminate the CPUM module (Local APIC cleanup).
334 */
335 CPUMR0ModuleTerm();
336
337 /*
338 * Terminate the internal network service.
339 */
340 IntNetR0Term();
341
342 /*
343 * PGM (Darwin), HM and PciRaw global cleanup.
344 */
345#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
346 PGMR0DynMapTerm();
347#endif
348#ifdef VBOX_WITH_PCI_PASSTHROUGH
349 PciRawR0Term();
350#endif
351 PGMDeregisterStringFormatTypes();
352 HMR0Term();
353#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
354 vmmR0TripleFaultHackTerm();
355#endif
356
357 /*
358 * Destroy the GMM and GVMM instances.
359 */
360 GMMR0Term();
361 GVMMR0Term();
362
363 vmmTermFormatTypes();
364
365 LogFlow(("ModuleTerm: returns\n"));
366}
367
368
369/**
370 * Initializes VMM specific members when the GVM structure is created,
371 * allocating loggers and other resources.
372 *
373 * The loggers are allocated here so that we can update their settings before
374 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
375 *
376 * @returns VBox status code.
377 * @param pGVM The global (ring-0) VM structure.
378 */
379VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
380{
381 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
382
383 /*
384 * Initialize all members first.
385 */
386 pGVM->vmmr0.s.fCalledInitVm = false;
387 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
388 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
389 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
390 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
391 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
392 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
393 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
394 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
395 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
396 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
397
398 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
399 {
400 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
401 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
402 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
403 pGVCpu->vmmr0.s.pPreemptState = NULL;
404 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
405 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
406 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
407 }
408
409 /*
410 * Create the loggers.
411 */
412 return vmmR0InitLoggers(pGVM);
413}
414
415
416/**
417 * Initiates the R0 driver for a particular VM instance.
418 *
419 * @returns VBox status code.
420 *
421 * @param pGVM The global (ring-0) VM structure.
422 * @param uSvnRev The SVN revision of the ring-3 part.
423 * @param uBuildType Build type indicator.
424 * @thread EMT(0)
425 */
426static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
427{
428 VMM_CHECK_SMAP_SETUP();
429 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
430
431 /*
432 * Match the SVN revisions and build type.
433 */
434 if (uSvnRev != VMMGetSvnRev())
435 {
436 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
437 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
438 return VERR_VMM_R0_VERSION_MISMATCH;
439 }
440 if (uBuildType != vmmGetBuildType())
441 {
442 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
443 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
444 return VERR_VMM_R0_VERSION_MISMATCH;
445 }
446
447 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
448 if (RT_FAILURE(rc))
449 return rc;
450
451 /* Don't allow this to be called more than once. */
452 if (!pGVM->vmmr0.s.fCalledInitVm)
453 pGVM->vmmr0.s.fCalledInitVm = true;
454 else
455 return VERR_ALREADY_INITIALIZED;
456
457#ifdef LOG_ENABLED
458
459 /*
460 * Register the EMT R0 logger instance for VCPU 0.
461 */
462 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
463 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
464 {
465# if 0 /* testing of the logger. */
466 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
467 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
468 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
469 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
470
471 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
472 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
473 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
474 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
475
476 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
477 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
478 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
479 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
480
481 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
482 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
483 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
484 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
485 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
486 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
487
488 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
489 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
490
491 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
492 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
493 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
494# endif
495# ifdef VBOX_WITH_R0_LOGGING
496 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
497 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
498 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
499# endif
500 }
501#endif /* LOG_ENABLED */
502
503 /*
504 * Check if the host supports high resolution timers or not.
505 */
506 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
507 && !RTTimerCanDoHighResolution())
508 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
509
510 /*
511 * Initialize the per VM data for GVMM and GMM.
512 */
513 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
514 rc = GVMMR0InitVM(pGVM);
515 if (RT_SUCCESS(rc))
516 {
517 /*
518 * Init HM, CPUM and PGM (Darwin only).
519 */
520 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
521 rc = HMR0InitVM(pGVM);
522 if (RT_SUCCESS(rc))
523 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
524 if (RT_SUCCESS(rc))
525 {
526 rc = CPUMR0InitVM(pGVM);
527 if (RT_SUCCESS(rc))
528 {
529 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
530 rc = PGMR0InitVM(pGVM);
531 if (RT_SUCCESS(rc))
532 {
533 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
534 rc = EMR0InitVM(pGVM);
535 if (RT_SUCCESS(rc))
536 {
537 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
538#ifdef VBOX_WITH_PCI_PASSTHROUGH
539 rc = PciRawR0InitVM(pGVM);
540#endif
541 if (RT_SUCCESS(rc))
542 {
543 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
544 rc = GIMR0InitVM(pGVM);
545 if (RT_SUCCESS(rc))
546 {
547 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
548 if (RT_SUCCESS(rc))
549 {
550 GVMMR0DoneInitVM(pGVM);
551
552 /*
553 * Collect a bit of info for the VM release log.
554 */
555 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
556 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
557
558 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
559 return rc;
560 }
561
562 /* bail out */
563 GIMR0TermVM(pGVM);
564 }
565#ifdef VBOX_WITH_PCI_PASSTHROUGH
566 PciRawR0TermVM(pGVM);
567#endif
568 }
569 }
570 }
571 }
572 HMR0TermVM(pGVM);
573 }
574 }
575
576 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
577 return rc;
578}
579
580
581/**
582 * Does EMT specific VM initialization.
583 *
584 * @returns VBox status code.
585 * @param pGVM The ring-0 VM structure.
586 * @param idCpu The EMT that's calling.
587 */
588static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
589{
590 /* Paranoia (caller checked these already). */
591 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
592 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
593
594#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
595 /*
596 * Registration of ring 0 loggers.
597 */
598 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
599 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
600 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
601 {
602 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
603 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
604 }
605#endif
606
607 return VINF_SUCCESS;
608}
609
610
611
612/**
613 * Terminates the R0 bits for a particular VM instance.
614 *
615 * This is normally called by ring-3 as part of the VM termination process, but
616 * may alternatively be called during the support driver session cleanup when
617 * the VM object is destroyed (see GVMM).
618 *
619 * @returns VBox status code.
620 *
621 * @param pGVM The global (ring-0) VM structure.
622 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
623 * thread.
624 * @thread EMT(0) or session clean up thread.
625 */
626VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
627{
628 /*
629 * Check EMT(0) claim if we're called from userland.
630 */
631 if (idCpu != NIL_VMCPUID)
632 {
633 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
634 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
635 if (RT_FAILURE(rc))
636 return rc;
637 }
638
639#ifdef VBOX_WITH_PCI_PASSTHROUGH
640 PciRawR0TermVM(pGVM);
641#endif
642
643 /*
644 * Tell GVMM what we're up to and check that we only do this once.
645 */
646 if (GVMMR0DoingTermVM(pGVM))
647 {
648 GIMR0TermVM(pGVM);
649
650 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
651 * here to make sure we don't leak any shared pages if we crash... */
652#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
653 PGMR0DynMapTermVM(pGVM);
654#endif
655 HMR0TermVM(pGVM);
656 }
657
658 /*
659 * Deregister the logger for this EMT.
660 */
661 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
662
663 /*
664 * Start log flusher thread termination.
665 */
666 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
667 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
668 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
669
670 return VINF_SUCCESS;
671}
672
673
674/**
675 * This is called at the end of gvmmR0CleanupVM().
676 *
677 * @param pGVM The global (ring-0) VM structure.
678 */
679VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
680{
681 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
682 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
683 {
684 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
685
686 /** @todo Can we busy wait here for all thread-context hooks to be
687 * deregistered before releasing (destroying) it? Only until we find a
688 * solution for not deregistering hooks every time we're leaving HMR0
689 * context. */
690 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
691 }
692
693 vmmR0CleanupLoggers(pGVM);
694}
695
696
697/**
698 * An interrupt or unhalt force flag is set, deal with it.
699 *
700 * @returns VINF_SUCCESS (or VINF_EM_HALT).
701 * @param pVCpu The cross context virtual CPU structure.
702 * @param uMWait Result from EMMonitorWaitIsActive().
703 * @param enmInterruptibility Guest CPU interruptbility level.
704 */
705static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
706{
707 Assert(!TRPMHasTrap(pVCpu));
708 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
709 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
710
711 /*
712 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
713 */
714 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
715 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
716 {
717 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
718 {
719 uint8_t u8Interrupt = 0;
720 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
721 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
722 if (RT_SUCCESS(rc))
723 {
724 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
725
726 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
727 AssertRCSuccess(rc);
728 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
729 return rc;
730 }
731 }
732 }
733 /*
734 * SMI is not implemented yet, at least not here.
735 */
736 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
737 {
738 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
739 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
740 return VINF_EM_HALT;
741 }
742 /*
743 * NMI.
744 */
745 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
746 {
747 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
748 {
749 /** @todo later. */
750 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
751 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
752 return VINF_EM_HALT;
753 }
754 }
755 /*
756 * Nested-guest virtual interrupt.
757 */
758 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
759 {
760 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
761 {
762 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
763 * here before injecting the virtual interrupt. See emR3ForcedActions
764 * for details. */
765 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
766 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
767 return VINF_EM_HALT;
768 }
769 }
770
771 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
772 {
773 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
774 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
775 return VINF_SUCCESS;
776 }
777 if (uMWait > 1)
778 {
779 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
780 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
781 return VINF_SUCCESS;
782 }
783
784 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
785 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
786 return VINF_EM_HALT;
787}
788
789
790/**
791 * This does one round of vmR3HaltGlobal1Halt().
792 *
793 * The rationale here is that we'll reduce latency in interrupt situations if we
794 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
795 * MWAIT), but do one round of blocking here instead and hope the interrupt is
796 * raised in the meanwhile.
797 *
798 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
799 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
800 * ring-0 call (unless we're too close to a timer event). When the interrupt
801 * wakes us up, we'll return from ring-0 and EM will by instinct do a
802 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
803 * back to VMMR0EntryFast().
804 *
805 * @returns VINF_SUCCESS or VINF_EM_HALT.
806 * @param pGVM The ring-0 VM structure.
807 * @param pGVCpu The ring-0 virtual CPU structure.
808 *
809 * @todo r=bird: All the blocking/waiting and EMT management should move out of
810 * the VM module, probably to VMM. Then this would be more weird wrt
811 * parameters and statistics.
812 */
813static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
814{
815 /*
816 * Do spin stat historization.
817 */
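 /* Every 256th halt (when the low byte of cR0Halts wraps to zero) the two
    counters are reset to a small bias reflecting which outcome dominated
    recently, so the spin/block heuristics below follow current behaviour
    rather than the entire history. */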
818 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
819 { /* likely */ }
820 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
821 {
822 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
823 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
824 }
825 else
826 {
827 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
828 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
829 }
830
831 /*
832 * Flags that makes us go to ring-3.
833 */
834 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
835 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
836 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
837 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
838 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
839 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
840 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
841 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
842
843 /*
844 * Check preconditions.
845 */
846 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
847 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
848 if ( pGVCpu->vmm.s.fMayHaltInRing0
849 && !TRPMHasTrap(pGVCpu)
850 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
851 || uMWait > 1))
852 {
853 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
854 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
855 {
856 /*
857 * Interrupts pending already?
858 */
859 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
860 APICUpdatePendingInterrupts(pGVCpu);
861
862 /*
863 * Flags that wake up from the halted state.
864 */
865 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
866 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
867
868 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
869 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
870 ASMNopPause();
871
872 /*
873 * Check out how long till the next timer event.
874 */
875 uint64_t u64Delta;
876 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
877
878 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
879 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
880 {
881 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
882 APICUpdatePendingInterrupts(pGVCpu);
883
884 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
885 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
886
887 /*
888 * Wait if there is enough time to the next timer event.
889 */
890 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
891 {
892 /* If there are few other CPU cores around, we will procrastinate a
893 little before going to sleep, hoping for some device raising an
894 interrupt or similar. Though, the best thing here would be to
895 dynamically adjust the spin count according to its usefulness or
896 something... */
897 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
898 && RTMpGetOnlineCount() >= 4)
899 {
900 /** @todo Figure out how we can skip this if it hasn't helped recently...
901 * @bugref{9172#c12} */
902 uint32_t cSpinLoops = 42;
903 while (cSpinLoops-- > 0)
904 {
905 ASMNopPause();
906 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
907 APICUpdatePendingInterrupts(pGVCpu);
908 ASMNopPause();
909 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
910 {
911 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
912 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
913 return VINF_EM_HALT;
914 }
915 ASMNopPause();
916 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
917 {
918 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
919 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
920 return VINF_EM_HALT;
921 }
922 ASMNopPause();
923 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
924 {
925 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
926 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
927 }
928 ASMNopPause();
929 }
930 }
931
932 /*
933 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
934 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
935 * After changing the state we must recheck the force flags of course.
936 */
937 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
938 {
939 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
940 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
941 {
942 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
943 APICUpdatePendingInterrupts(pGVCpu);
944
945 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
946 {
947 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
948 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
949 }
950
951 /* Okay, block! */
952 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
953 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
954 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
955 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
956 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
957
958 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
959 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
960 if ( rc == VINF_SUCCESS
961 || rc == VERR_INTERRUPTED)
962 {
963 /* Keep some stats like ring-3 does. */
964 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
965 if (cNsOverslept > 50000)
966 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
967 else if (cNsOverslept < -50000)
968 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
969 else
970 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
971
972 /*
973 * Recheck whether we can resume execution or have to go to ring-3.
974 */
975 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
976 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
977 {
978 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
979 APICUpdatePendingInterrupts(pGVCpu);
980 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
981 {
982 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
983 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
984 }
985 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
986 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
987 }
988 else
989 {
990 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
991 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
992 }
993 }
994 else
995 {
996 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
997 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
998 }
999 }
1000 else
1001 {
1002 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
1003 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1004 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
1005 }
1006 }
1007 else
1008 {
1009 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1010 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
1011 }
1012 }
1013 else
1014 {
1015 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
1016 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
1017 }
1018 }
1019 else
1020 {
1021 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1022 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
1023 }
1024 }
1025 else
1026 {
1027 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
1028 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
1029 }
1030 }
1031 else
1032 {
1033 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
1034 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
1035 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
1036 }
1037
1038 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
1039 return VINF_EM_HALT;
1040}
1041
1042
1043/**
1044 * VMM ring-0 thread-context callback.
1045 *
1046 * This does common HM state updating and calls the HM-specific thread-context
1047 * callback.
1048 *
1049 * This is used together with RTThreadCtxHookCreate() on platforms which
1050 * support it, and directly from VMMR0EmtPrepareForBlocking() and
1051 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
1052 *
1053 * @param enmEvent The thread-context event.
1054 * @param pvUser Opaque pointer to the VMCPU.
1055 *
1056 * @thread EMT(pvUser)
1057 */
1058static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
1059{
1060 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
1061
1062 switch (enmEvent)
1063 {
1064 case RTTHREADCTXEVENT_IN:
1065 {
1066 /*
1067 * Linux may call us with preemption enabled (really!) but technically we
1068 * cannot get preempted here, otherwise we end up in an infinite recursion
1069 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
1070 * ad infinitum). Let's just disable preemption for now...
1071 */
1072 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
1073 * preemption after doing the callout (one or two functions up the
1074 * call chain). */
1075 /** @todo r=ramshankar: See @bugref{5313#c30}. */
1076 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1077 RTThreadPreemptDisable(&ParanoidPreemptState);
1078
1079 /* We need to update the VCPU <-> host CPU mapping. */
1080 RTCPUID idHostCpu;
1081 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1082 pVCpu->iHostCpuSet = iHostCpuSet;
1083 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1084
1085 /* In the very unlikely event that the GIP delta for the CPU we're
1086 rescheduled to needs calculating, try to force a return to ring-3.
1087 We unfortunately cannot do the measurements right here. */
1088 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1089 { /* likely */ }
1090 else
1091 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1092
1093 /* Invoke the HM-specific thread-context callback. */
1094 HMR0ThreadCtxCallback(enmEvent, pvUser);
1095
1096 /* Restore preemption. */
1097 RTThreadPreemptRestore(&ParanoidPreemptState);
1098 break;
1099 }
1100
1101 case RTTHREADCTXEVENT_OUT:
1102 {
1103 /* Invoke the HM-specific thread-context callback. */
1104 HMR0ThreadCtxCallback(enmEvent, pvUser);
1105
1106 /*
1107 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1108 * have the same host CPU associated with it.
1109 */
1110 pVCpu->iHostCpuSet = UINT32_MAX;
1111 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1112 break;
1113 }
1114
1115 default:
1116 /* Invoke the HM-specific thread-context callback. */
1117 HMR0ThreadCtxCallback(enmEvent, pvUser);
1118 break;
1119 }
1120}
1121
1122
1123/**
1124 * Creates thread switching hook for the current EMT thread.
1125 *
1126 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1127 * platform does not implement switcher hooks, no hooks will be created and the
1128 * member is set to NIL_RTTHREADCTXHOOK.
1129 *
1130 * @returns VBox status code.
1131 * @param pVCpu The cross context virtual CPU structure.
1132 * @thread EMT(pVCpu)
1133 */
1134VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1135{
1136 VMCPU_ASSERT_EMT(pVCpu);
1137 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1138
1139#if 1 /* To disable this stuff change to zero. */
1140 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1141 if (RT_SUCCESS(rc))
1142 {
1143 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1144 return rc;
1145 }
1146#else
1147 RT_NOREF(vmmR0ThreadCtxCallback);
1148 int rc = VERR_NOT_SUPPORTED;
1149#endif
1150
1151 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1152 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1153 if (rc == VERR_NOT_SUPPORTED)
1154 return VINF_SUCCESS;
1155
1156 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1157 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1158}
1159
1160
1161/**
1162 * Destroys the thread switching hook for the specified VCPU.
1163 *
1164 * @param pVCpu The cross context virtual CPU structure.
1165 * @remarks Can be called from any thread.
1166 */
1167VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1168{
1169 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1170 AssertRC(rc);
1171 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1172}
1173
1174
1175/**
1176 * Disables the thread switching hook for this VCPU (if we got one).
1177 *
1178 * @param pVCpu The cross context virtual CPU structure.
1179 * @thread EMT(pVCpu)
1180 *
1181 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1182 * this call. This means you have to be careful with what you do!
1183 */
1184VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1185{
1186 /*
1187 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1188 * @bugref{7726#c19} explains the need for this trick:
1189 *
1190 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1191 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1192 * longjmp & normal return to ring-3, which opens a window where we may be
1193 * rescheduled without changing GVMCPUID::idHostCpu and cause confusion if
1194 * the CPU starts executing a different EMT. Both functions first disable
1195 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1196 * an opening for getting preempted.
1197 */
1198 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1199 * all the time. */
1200
1201 /*
1202 * Disable the context hook, if we got one.
1203 */
1204 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1205 {
1206 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1207 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1208 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1209 AssertRC(rc);
1210 }
1211}
1212
1213
1214/**
1215 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1216 *
1217 * @returns true if registered, false otherwise.
1218 * @param pVCpu The cross context virtual CPU structure.
1219 */
1220DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1221{
1222 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1223}
1224
1225
1226/**
1227 * Whether thread-context hooks are registered for this VCPU.
1228 *
1229 * @returns true if registered, false otherwise.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 */
1232VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1233{
1234 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1235}
1236
1237
1238/**
1239 * Returns the ring-0 release logger instance.
1240 *
1241 * @returns Pointer to release logger, NULL if not configured.
1242 * @param pVCpu The cross context virtual CPU structure of the caller.
1243 * @thread EMT(pVCpu)
1244 */
1245VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1246{
1247 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1248}
1249
1250
1251#ifdef VBOX_WITH_STATISTICS
1252/**
1253 * Record return code statistics
1254 * @param pVM The cross context VM structure.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 * @param rc The status code.
1257 */
1258static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1259{
1260 /*
1261 * Collect statistics.
1262 */
1263 switch (rc)
1264 {
1265 case VINF_SUCCESS:
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1267 break;
1268 case VINF_EM_RAW_INTERRUPT:
1269 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1270 break;
1271 case VINF_EM_RAW_INTERRUPT_HYPER:
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1273 break;
1274 case VINF_EM_RAW_GUEST_TRAP:
1275 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1276 break;
1277 case VINF_EM_RAW_RING_SWITCH:
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1279 break;
1280 case VINF_EM_RAW_RING_SWITCH_INT:
1281 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1282 break;
1283 case VINF_EM_RAW_STALE_SELECTOR:
1284 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1285 break;
1286 case VINF_EM_RAW_IRET_TRAP:
1287 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1288 break;
1289 case VINF_IOM_R3_IOPORT_READ:
1290 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1291 break;
1292 case VINF_IOM_R3_IOPORT_WRITE:
1293 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1294 break;
1295 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1296 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1297 break;
1298 case VINF_IOM_R3_MMIO_READ:
1299 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1300 break;
1301 case VINF_IOM_R3_MMIO_WRITE:
1302 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1303 break;
1304 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1305 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1306 break;
1307 case VINF_IOM_R3_MMIO_READ_WRITE:
1308 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1309 break;
1310 case VINF_PATM_HC_MMIO_PATCH_READ:
1311 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1312 break;
1313 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1314 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1315 break;
1316 case VINF_CPUM_R3_MSR_READ:
1317 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1318 break;
1319 case VINF_CPUM_R3_MSR_WRITE:
1320 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1321 break;
1322 case VINF_EM_RAW_EMULATE_INSTR:
1323 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1324 break;
1325 case VINF_PATCH_EMULATE_INSTR:
1326 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1327 break;
1328 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1329 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1330 break;
1331 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1332 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1333 break;
1334 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1335 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1336 break;
1337 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1338 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1339 break;
1340 case VINF_CSAM_PENDING_ACTION:
1341 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1342 break;
1343 case VINF_PGM_SYNC_CR3:
1344 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1345 break;
1346 case VINF_PATM_PATCH_INT3:
1347 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1348 break;
1349 case VINF_PATM_PATCH_TRAP_PF:
1350 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1351 break;
1352 case VINF_PATM_PATCH_TRAP_GP:
1353 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1354 break;
1355 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1356 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1357 break;
1358 case VINF_EM_RESCHEDULE_REM:
1359 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1360 break;
1361 case VINF_EM_RAW_TO_R3:
1362 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1363 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1364 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1365 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1366 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1367 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1368 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1369 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1370 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1371 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1372 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1373 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1374 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1375 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1376 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1377 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1378 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1379 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1380 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1381 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1382 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1383 else
1384 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1385 break;
1386
1387 case VINF_EM_RAW_TIMER_PENDING:
1388 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1389 break;
1390 case VINF_EM_RAW_INTERRUPT_PENDING:
1391 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1392 break;
1393 case VINF_VMM_CALL_HOST:
1394 switch (pVCpu->vmm.s.enmCallRing3Operation)
1395 {
1396 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1397 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1398 break;
1399 case VMMCALLRING3_PDM_LOCK:
1400 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1401 break;
1402 case VMMCALLRING3_PGM_POOL_GROW:
1403 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1404 break;
1405 case VMMCALLRING3_PGM_LOCK:
1406 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1407 break;
1408 case VMMCALLRING3_PGM_MAP_CHUNK:
1409 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1410 break;
1411 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1412 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1413 break;
1414 case VMMCALLRING3_VM_SET_ERROR:
1415 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1416 break;
1417 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1418 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1419 break;
1420 case VMMCALLRING3_VM_R0_ASSERTION:
1421 default:
1422 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1423 break;
1424 }
1425 break;
1426 case VINF_PATM_DUPLICATE_FUNCTION:
1427 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1428 break;
1429 case VINF_PGM_CHANGE_MODE:
1430 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1431 break;
1432 case VINF_PGM_POOL_FLUSH_PENDING:
1433 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1434 break;
1435 case VINF_EM_PENDING_REQUEST:
1436 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1437 break;
1438 case VINF_EM_HM_PATCH_TPR_INSTR:
1439 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1440 break;
1441 default:
1442 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1443 break;
1444 }
1445}
1446#endif /* VBOX_WITH_STATISTICS */
1447
1448
1449/**
1450 * The Ring 0 entry point, called by the fast-ioctl path.
1451 *
1452 * @param pGVM The global (ring-0) VM structure.
1453 * @param pVMIgnored The cross context VM structure. The return code is
1454 * stored in pVM->vmm.s.iLastGZRc.
1455 * @param idCpu The Virtual CPU ID of the calling EMT.
1456 * @param enmOperation Which operation to execute.
1457 * @remarks Assume called with interrupts _enabled_.
1458 */
1459VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1460{
1461 RT_NOREF(pVMIgnored);
1462
1463 /*
1464 * Validation.
1465 */
1466 if ( idCpu < pGVM->cCpus
1467 && pGVM->cCpus == pGVM->cCpusUnsafe)
1468 { /*likely*/ }
1469 else
1470 {
1471 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1472 return;
1473 }
1474
1475 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1476 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1477 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1478 && pGVCpu->hNativeThreadR0 == hNativeThread))
1479 { /* likely */ }
1480 else
1481 {
1482 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1483 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1484 return;
1485 }
1486
1487 /*
1488 * SMAP fun.
1489 */
1490 VMM_CHECK_SMAP_SETUP();
1491 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1492
1493 /*
1494 * Perform requested operation.
1495 */
1496 switch (enmOperation)
1497 {
1498 /*
1499 * Run guest code using the available hardware acceleration technology.
1500 */
1501 case VMMR0_DO_HM_RUN:
1502 {
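 /* Outline of the loop below: disable ring-3 calls and preemption, make
    sure the current host CPU has a valid TSC delta, enter HM and run guest
    code via the setjmp machinery, then undo it all again.  On VINF_EM_HALT
    we attempt to service the halt right here in ring-0 (vmmR0DoHalt) and
    only return to ring-3 if that does not pan out. */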
1503 for (;;) /* hlt loop */
1504 {
1505 /*
1506 * Disable ring-3 calls & blocking till we've successfully entered HM.
1507 * Otherwise we sometimes end up blocking at the final Log4 statement
1508 * in VMXR0Enter, while still in a somewhat in-between state.
1509 */
1510 VMMRZCallRing3Disable(pGVCpu);
1511
1512 /*
1513 * Disable preemption.
1514 */
1515 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1516 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1517 RTThreadPreemptDisable(&PreemptState);
1518 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1519
1520 /*
1521 * Get the host CPU identifiers, make sure they are valid and that
1522 * we've got a TSC delta for the CPU.
1523 */
1524 RTCPUID idHostCpu;
1525 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1526 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1527 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1528 {
1529 pGVCpu->iHostCpuSet = iHostCpuSet;
1530 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1531
1532 /*
1533 * Update the periodic preemption timer if it's active.
1534 */
1535 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1536 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1537 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1538
1539#ifdef VMM_R0_TOUCH_FPU
1540 /*
1541 * Make sure we've got the FPU state loaded so we don't need to clear
1542 * CR0.TS and get out of sync with the host kernel when loading the guest
1543 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1544 */
1545 CPUMR0TouchHostFpu();
1546#endif
1547 int rc;
1548 bool fPreemptRestored = false;
1549 if (!HMR0SuspendPending())
1550 {
1551 /*
1552 * Enable the context switching hook.
1553 */
1554 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1555 {
1556 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1557 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1558 }
1559
1560 /*
1561 * Enter HM context.
1562 */
1563 rc = HMR0Enter(pGVCpu);
1564 if (RT_SUCCESS(rc))
1565 {
1566 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1567
1568 /*
1569 * When preemption hooks are in place, enable preemption now that
1570 * we're in HM context.
1571 */
1572 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1573 {
1574 fPreemptRestored = true;
1575 pGVCpu->vmmr0.s.pPreemptState = NULL;
1576 RTThreadPreemptRestore(&PreemptState);
1577 }
1578 VMMRZCallRing3Enable(pGVCpu);
1579
1580 /*
1581 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1582 */
1583 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1584 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1585 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1586
1587 /*
1588 * Assert sanity on the way out. Using manual assertion code here as normal
1589 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1590 */
1591 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1592 && RT_SUCCESS_NP(rc)
1593 && rc != VINF_VMM_CALL_HOST ))
1594 {
1595 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1596 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1597 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1598 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1599 }
1600#if 0
1601 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1602 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1603 {
1604 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1605 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1606 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1607 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1608 }
1609#endif
1610
1611 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1612 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1613 }
1614 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1615
1616 /*
1617 * Invalidate the host CPU identifiers before we disable the context
1618 * hook / restore preemption.
1619 */
1620 pGVCpu->iHostCpuSet = UINT32_MAX;
1621 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1622
1623 /*
1624 * Disable context hooks. Due to unresolved cleanup issues, we
1625 * cannot leave the hooks enabled when we return to ring-3.
1626 *
1627 * Note! At the moment HM may also have disabled the hook
1628 * when we get here, but the IPRT API handles that.
1629 */
1630 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1631 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1632 }
1633 /*
1634 * The system is about to go into suspend mode; go back to ring 3.
1635 */
1636 else
1637 {
1638 pGVCpu->iHostCpuSet = UINT32_MAX;
1639 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1640 rc = VINF_EM_RAW_INTERRUPT;
1641 }
1642
1643 /** @todo When HM stops messing with the context hook state, we'll disable
1644 * preemption again before the RTThreadCtxHookDisable call. */
1645 if (!fPreemptRestored)
1646 {
1647 pGVCpu->vmmr0.s.pPreemptState = NULL;
1648 RTThreadPreemptRestore(&PreemptState);
1649 }
1650
1651 pGVCpu->vmm.s.iLastGZRc = rc;
1652
1653 /* Fire dtrace probe and collect statistics. */
1654 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1655#ifdef VBOX_WITH_STATISTICS
1656 vmmR0RecordRC(pGVM, pGVCpu, rc);
1657#endif
1658 VMMRZCallRing3Enable(pGVCpu);
1659
1660 /*
1661 * If this is a halt.
1662 */
1663 if (rc != VINF_EM_HALT)
1664 { /* we're not in a hurry for a HLT, so prefer this path */ }
1665 else
1666 {
1667 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1668 if (rc == VINF_SUCCESS)
1669 {
1670 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1671 continue;
1672 }
1673 pGVCpu->vmm.s.cR0HaltsToRing3++;
1674 }
1675 }
1676 /*
1677 * Invalid CPU set index or TSC delta in need of measuring.
1678 */
1679 else
1680 {
1681 pGVCpu->vmmr0.s.pPreemptState = NULL;
1682 pGVCpu->iHostCpuSet = UINT32_MAX;
1683 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1684 RTThreadPreemptRestore(&PreemptState);
1685
1686 VMMRZCallRing3Enable(pGVCpu);
1687
1688 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1689 {
1690 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1691 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1692 0 /*default cTries*/);
1693 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1694 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1695 else
1696 pGVCpu->vmm.s.iLastGZRc = rc;
1697 }
1698 else
1699 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1700 }
1701 break;
1702 } /* halt loop. */
1703 break;
1704 }
1705
1706#ifdef VBOX_WITH_NEM_R0
1707# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1708 case VMMR0_DO_NEM_RUN:
1709 {
1710 /*
1711 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1712 */
1713 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1714# ifdef VBOXSTRICTRC_STRICT_ENABLED
1715 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1716# else
1717 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1718# endif
1719 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1720 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1721
1722 pGVCpu->vmm.s.iLastGZRc = rc;
1723
1724 /*
1725 * Fire dtrace probe and collect statistics.
1726 */
1727 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1728# ifdef VBOX_WITH_STATISTICS
1729 vmmR0RecordRC(pGVM, pGVCpu, rc);
1730# endif
1731 break;
1732 }
1733# endif
1734#endif
1735
1736 /*
1737 * For profiling.
1738 */
1739 case VMMR0_DO_NOP:
1740 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1741 break;
1742
1743 /*
1744 * Shouldn't happen.
1745 */
1746 default:
1747 AssertMsgFailed(("%#x\n", enmOperation));
1748 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1749 break;
1750 }
1751 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1752}
1753
1754
1755/**
1756 * Validates a session or VM session argument.
1757 *
1758 * @returns true / false accordingly.
1759 * @param pGVM The global (ring-0) VM structure.
1760 * @param pClaimedSession The session claim to validate.
1761 * @param pSession The session argument.
1762 */
1763DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1764{
1765 /* This must be set! */
1766 if (!pSession)
1767 return false;
1768
1769 /* Only one out of the two. */
1770 if (pGVM && pClaimedSession)
1771 return false;
1772 if (pGVM)
1773 pClaimedSession = pGVM->pSession;
1774 return pClaimedSession == pSession;
1775}
1776
1777
1778/**
1779 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1780 * called thru a longjmp so we can exit safely on failure.
1781 *
1782 * @returns VBox status code.
1783 * @param pGVM The global (ring-0) VM structure.
1784 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pGVM
1785 * is NULL, and may be NIL_VMCPUID if it isn't.
1786 * @param enmOperation Which operation to execute.
1787 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1788 * The support driver validates this if it's present.
1789 * @param u64Arg Some simple constant argument.
1790 * @param pSession The session of the caller.
1791 *
1792 * @remarks Assume called with interrupts _enabled_.
1793 */
1794DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1795 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1796{
1797 /*
1798 * Validate pGVM and idCpu for consistency and validity.
1799 */
1800 if (pGVM != NULL)
1801 {
1802 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1803 { /* likely */ }
1804 else
1805 {
1806 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1807 return VERR_INVALID_POINTER;
1808 }
1809
1810 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1811 { /* likely */ }
1812 else
1813 {
1814 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1815 return VERR_INVALID_PARAMETER;
1816 }
1817
1818 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1819 && pGVM->enmVMState <= VMSTATE_TERMINATED
1820 && pGVM->pSession == pSession
1821 && pGVM->pSelf == pGVM))
1822 { /* likely */ }
1823 else
1824 {
1825 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1826 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1827 return VERR_INVALID_POINTER;
1828 }
1829 }
1830 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1831 { /* likely */ }
1832 else
1833 {
1834 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1835 return VERR_INVALID_PARAMETER;
1836 }
1837
1838 /*
1839 * SMAP fun.
1840 */
1841 VMM_CHECK_SMAP_SETUP();
1842 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1843
1844 /*
1845 * Process the request.
1846 */
1847 int rc;
1848 switch (enmOperation)
1849 {
1850 /*
1851 * GVM requests
1852 */
1853 case VMMR0_DO_GVMM_CREATE_VM:
1854 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1855 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1856 else
1857 rc = VERR_INVALID_PARAMETER;
1858 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1859 break;
1860
1861 case VMMR0_DO_GVMM_DESTROY_VM:
1862 if (pReqHdr == NULL && u64Arg == 0)
1863 rc = GVMMR0DestroyVM(pGVM);
1864 else
1865 rc = VERR_INVALID_PARAMETER;
1866 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1867 break;
1868
1869 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1870 if (pGVM != NULL)
1871 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1872 else
1873 rc = VERR_INVALID_PARAMETER;
1874 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1875 break;
1876
1877 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1878 if (pGVM != NULL)
1879 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1880 else
1881 rc = VERR_INVALID_PARAMETER;
1882 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1883 break;
1884
1885 case VMMR0_DO_GVMM_SCHED_HALT:
1886 if (pReqHdr)
1887 return VERR_INVALID_PARAMETER;
1888 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1889 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1890 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1894 if (pReqHdr || u64Arg)
1895 return VERR_INVALID_PARAMETER;
1896 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1897 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1898 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1899 break;
1900
1901 case VMMR0_DO_GVMM_SCHED_POKE:
1902 if (pReqHdr || u64Arg)
1903 return VERR_INVALID_PARAMETER;
1904 rc = GVMMR0SchedPoke(pGVM, idCpu);
1905 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1906 break;
1907
1908 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1909 if (u64Arg)
1910 return VERR_INVALID_PARAMETER;
1911 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1912 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1913 break;
1914
1915 case VMMR0_DO_GVMM_SCHED_POLL:
1916 if (pReqHdr || u64Arg > 1)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1919 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1920 break;
1921
1922 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1923 if (u64Arg)
1924 return VERR_INVALID_PARAMETER;
1925 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1926 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1927 break;
1928
1929 case VMMR0_DO_GVMM_RESET_STATISTICS:
1930 if (u64Arg)
1931 return VERR_INVALID_PARAMETER;
1932 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1933 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1934 break;
1935
1936 /*
1937 * Initialize the R0 part of a VM instance.
1938 */
1939 case VMMR0_DO_VMMR0_INIT:
1940 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1941 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1942 break;
1943
1944 /*
1945 * Does EMT specific ring-0 init.
1946 */
1947 case VMMR0_DO_VMMR0_INIT_EMT:
1948 rc = vmmR0InitVMEmt(pGVM, idCpu);
1949 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1950 break;
1951
1952 /*
1953 * Terminate the R0 part of a VM instance.
1954 */
1955 case VMMR0_DO_VMMR0_TERM:
1956 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1957 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1958 break;
1959
1960 /*
1961 * Update release or debug logger instances.
1962 */
1963 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1964 if (idCpu == NIL_VMCPUID)
1965 return VERR_INVALID_CPU_ID;
1966 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1967 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1968 else
1969 return VERR_INVALID_PARAMETER;
1970 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1971 break;
1972
1973 /*
1974 * Log flusher thread.
1975 */
1976 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1977 if (idCpu != NIL_VMCPUID)
1978 return VERR_INVALID_CPU_ID;
1979 if (pReqHdr == NULL)
1980 rc = vmmR0LogFlusher(pGVM);
1981 else
1982 return VERR_INVALID_PARAMETER;
1983 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1984 break;
1985
1986 /*
1987 * Wait for the flush to finish with all the buffers for the given logger.
1988 */
1989 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1990 if (idCpu == NIL_VMCPUID)
1991 return VERR_INVALID_CPU_ID;
1992 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1993 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1994 else
1995 return VERR_INVALID_PARAMETER;
1996 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1997 break;
1998
1999 /*
2000 * Attempt to enable hm mode and check the current setting.
2001 */
2002 case VMMR0_DO_HM_ENABLE:
2003 rc = HMR0EnableAllCpus(pGVM);
2004 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2005 break;
2006
2007 /*
2008 * Setup the hardware accelerated session.
2009 */
2010 case VMMR0_DO_HM_SETUP_VM:
2011 rc = HMR0SetupVM(pGVM);
2012 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2013 break;
2014
2015 /*
2016 * PGM wrappers.
2017 */
2018 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
2019 if (idCpu == NIL_VMCPUID)
2020 return VERR_INVALID_CPU_ID;
2021 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
2022 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2023 break;
2024
2025 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
2026 if (idCpu == NIL_VMCPUID)
2027 return VERR_INVALID_CPU_ID;
2028 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
2029 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2030 break;
2031
2032 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2033 if (idCpu == NIL_VMCPUID)
2034 return VERR_INVALID_CPU_ID;
2035 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
2036 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2037 break;
2038
2039 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
2040 if (idCpu != 0)
2041 return VERR_INVALID_CPU_ID;
2042 rc = PGMR0PhysSetupIoMmu(pGVM);
2043 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2044 break;
2045
2046 case VMMR0_DO_PGM_POOL_GROW:
2047 if (idCpu == NIL_VMCPUID)
2048 return VERR_INVALID_CPU_ID;
2049 rc = PGMR0PoolGrow(pGVM);
2050 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2051 break;
2052
2053 /*
2054 * GMM wrappers.
2055 */
2056 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2057 if (u64Arg)
2058 return VERR_INVALID_PARAMETER;
2059 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
2060 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2061 break;
2062
2063 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2064 if (u64Arg)
2065 return VERR_INVALID_PARAMETER;
2066 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
2067 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2068 break;
2069
2070 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2071 if (u64Arg)
2072 return VERR_INVALID_PARAMETER;
2073 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
2074 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2075 break;
2076
2077 case VMMR0_DO_GMM_FREE_PAGES:
2078 if (u64Arg)
2079 return VERR_INVALID_PARAMETER;
2080 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
2081 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2082 break;
2083
2084 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
2085 if (u64Arg)
2086 return VERR_INVALID_PARAMETER;
2087 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
2088 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2089 break;
2090
2091 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
2092 if (u64Arg)
2093 return VERR_INVALID_PARAMETER;
2094 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
2095 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2096 break;
2097
2098 case VMMR0_DO_GMM_QUERY_MEM_STATS:
2099 if (idCpu == NIL_VMCPUID)
2100 return VERR_INVALID_CPU_ID;
2101 if (u64Arg)
2102 return VERR_INVALID_PARAMETER;
2103 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
2104 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2105 break;
2106
2107 case VMMR0_DO_GMM_BALLOONED_PAGES:
2108 if (u64Arg)
2109 return VERR_INVALID_PARAMETER;
2110 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
2111 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2112 break;
2113
2114 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2115 if (u64Arg)
2116 return VERR_INVALID_PARAMETER;
2117 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2118 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2119 break;
2120
2121 case VMMR0_DO_GMM_SEED_CHUNK:
2122 if (pReqHdr)
2123 return VERR_INVALID_PARAMETER;
2124 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
2125 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2126 break;
2127
2128 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2129 if (idCpu == NIL_VMCPUID)
2130 return VERR_INVALID_CPU_ID;
2131 if (u64Arg)
2132 return VERR_INVALID_PARAMETER;
2133 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2134 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2135 break;
2136
2137 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2138 if (idCpu == NIL_VMCPUID)
2139 return VERR_INVALID_CPU_ID;
2140 if (u64Arg)
2141 return VERR_INVALID_PARAMETER;
2142 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2144 break;
2145
2146 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2147 if (idCpu == NIL_VMCPUID)
2148 return VERR_INVALID_CPU_ID;
2149 if ( u64Arg
2150 || pReqHdr)
2151 return VERR_INVALID_PARAMETER;
2152 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2153 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2154 break;
2155
2156#ifdef VBOX_WITH_PAGE_SHARING
2157 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2158 {
2159 if (idCpu == NIL_VMCPUID)
2160 return VERR_INVALID_CPU_ID;
2161 if ( u64Arg
2162 || pReqHdr)
2163 return VERR_INVALID_PARAMETER;
2164 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2165 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2166 break;
2167 }
2168#endif
2169
2170#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2171 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2172 if (u64Arg)
2173 return VERR_INVALID_PARAMETER;
2174 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2175 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2176 break;
2177#endif
2178
2179 case VMMR0_DO_GMM_QUERY_STATISTICS:
2180 if (u64Arg)
2181 return VERR_INVALID_PARAMETER;
2182 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2183 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2184 break;
2185
2186 case VMMR0_DO_GMM_RESET_STATISTICS:
2187 if (u64Arg)
2188 return VERR_INVALID_PARAMETER;
2189 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2190 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2191 break;
2192
2193 /*
2194 * A quick GCFGM mock-up.
2195 */
2196 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2197 case VMMR0_DO_GCFGM_SET_VALUE:
2198 case VMMR0_DO_GCFGM_QUERY_VALUE:
2199 {
2200 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2201 return VERR_INVALID_PARAMETER;
2202 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2203 if (pReq->Hdr.cbReq != sizeof(*pReq))
2204 return VERR_INVALID_PARAMETER;
2205 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2206 {
2207 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2208 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2209 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2210 }
2211 else
2212 {
2213 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2214 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2215 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2216 }
2217 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2218 break;
2219 }
2220
2221 /*
2222 * PDM Wrappers.
2223 */
2224 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2225 {
2226 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2227 return VERR_INVALID_PARAMETER;
2228 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2229 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2230 break;
2231 }
2232
2233 case VMMR0_DO_PDM_DEVICE_CREATE:
2234 {
2235 if (!pReqHdr || u64Arg || idCpu != 0)
2236 return VERR_INVALID_PARAMETER;
2237 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2238 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2239 break;
2240 }
2241
2242 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2243 {
2244 if (!pReqHdr || u64Arg)
2245 return VERR_INVALID_PARAMETER;
2246 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2247 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2248 break;
2249 }
2250
2251        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2252 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2253 {
2254 if (!pReqHdr || u64Arg || idCpu != 0)
2255 return VERR_INVALID_PARAMETER;
2256 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2257 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2258 break;
2259 }
2260
2261 /*
2262 * Requests to the internal networking service.
2263 */
2264 case VMMR0_DO_INTNET_OPEN:
2265 {
2266 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2267 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2268 return VERR_INVALID_PARAMETER;
2269 rc = IntNetR0OpenReq(pSession, pReq);
2270 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2271 break;
2272 }
2273
2274 case VMMR0_DO_INTNET_IF_CLOSE:
2275 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2276 return VERR_INVALID_PARAMETER;
2277 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2278 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2279 break;
2280
2281
2282 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2283 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2284 return VERR_INVALID_PARAMETER;
2285 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2286 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2287 break;
2288
2289 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2290 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2291 return VERR_INVALID_PARAMETER;
2292 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2293 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2294 break;
2295
2296 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2297 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2298 return VERR_INVALID_PARAMETER;
2299 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2300 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2301 break;
2302
2303 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2304 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2305 return VERR_INVALID_PARAMETER;
2306 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2307 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2308 break;
2309
2310 case VMMR0_DO_INTNET_IF_SEND:
2311 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2312 return VERR_INVALID_PARAMETER;
2313 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2314 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2315 break;
2316
2317 case VMMR0_DO_INTNET_IF_WAIT:
2318 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2319 return VERR_INVALID_PARAMETER;
2320 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2321 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2322 break;
2323
2324 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2325 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2326 return VERR_INVALID_PARAMETER;
2327 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2328 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2329 break;
2330
2331#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2332 /*
2333 * Requests to host PCI driver service.
2334 */
2335 case VMMR0_DO_PCIRAW_REQ:
2336 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2337 return VERR_INVALID_PARAMETER;
2338 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2339 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2340 break;
2341#endif
2342
2343 /*
2344 * NEM requests.
2345 */
2346#ifdef VBOX_WITH_NEM_R0
2347# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2348 case VMMR0_DO_NEM_INIT_VM:
2349 if (u64Arg || pReqHdr || idCpu != 0)
2350 return VERR_INVALID_PARAMETER;
2351 rc = NEMR0InitVM(pGVM);
2352 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2353 break;
2354
2355 case VMMR0_DO_NEM_INIT_VM_PART_2:
2356 if (u64Arg || pReqHdr || idCpu != 0)
2357 return VERR_INVALID_PARAMETER;
2358 rc = NEMR0InitVMPart2(pGVM);
2359 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2360 break;
2361
2362 case VMMR0_DO_NEM_MAP_PAGES:
2363 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2364 return VERR_INVALID_PARAMETER;
2365 rc = NEMR0MapPages(pGVM, idCpu);
2366 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2367 break;
2368
2369 case VMMR0_DO_NEM_UNMAP_PAGES:
2370 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2371 return VERR_INVALID_PARAMETER;
2372 rc = NEMR0UnmapPages(pGVM, idCpu);
2373 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2374 break;
2375
2376 case VMMR0_DO_NEM_EXPORT_STATE:
2377 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2378 return VERR_INVALID_PARAMETER;
2379 rc = NEMR0ExportState(pGVM, idCpu);
2380 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2381 break;
2382
2383 case VMMR0_DO_NEM_IMPORT_STATE:
2384 if (pReqHdr || idCpu == NIL_VMCPUID)
2385 return VERR_INVALID_PARAMETER;
2386 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2387 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2388 break;
2389
2390 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2391 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2392 return VERR_INVALID_PARAMETER;
2393 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2394 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2395 break;
2396
2397 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2398 if (pReqHdr || idCpu == NIL_VMCPUID)
2399 return VERR_INVALID_PARAMETER;
2400 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2401 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2402 break;
2403
2404 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2405 if (u64Arg || pReqHdr)
2406 return VERR_INVALID_PARAMETER;
2407 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2408 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2409 break;
2410
2411# if 1 && defined(DEBUG_bird)
2412 case VMMR0_DO_NEM_EXPERIMENT:
2413 if (pReqHdr)
2414 return VERR_INVALID_PARAMETER;
2415 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2416 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2417 break;
2418# endif
2419# endif
2420#endif
2421
2422 /*
2423 * IOM requests.
2424 */
2425 case VMMR0_DO_IOM_GROW_IO_PORTS:
2426 {
2427 if (pReqHdr || idCpu != 0)
2428 return VERR_INVALID_PARAMETER;
2429 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2430 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2431 break;
2432 }
2433
2434 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2435 {
2436 if (pReqHdr || idCpu != 0)
2437 return VERR_INVALID_PARAMETER;
2438 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2439 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2440 break;
2441 }
2442
2443 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2444 {
2445 if (pReqHdr || idCpu != 0)
2446 return VERR_INVALID_PARAMETER;
2447 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2448 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2449 break;
2450 }
2451
2452 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2453 {
2454 if (pReqHdr || idCpu != 0)
2455 return VERR_INVALID_PARAMETER;
2456 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2457 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2458 break;
2459 }
2460
2461 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2462 {
2463 if (pReqHdr || idCpu != 0)
2464 return VERR_INVALID_PARAMETER;
2465 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2466 if (RT_SUCCESS(rc))
2467 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2468 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2469 break;
2470 }
2471
2472 /*
2473 * DBGF requests.
2474 */
2475#ifdef VBOX_WITH_DBGF_TRACING
2476 case VMMR0_DO_DBGF_TRACER_CREATE:
2477 {
2478 if (!pReqHdr || u64Arg || idCpu != 0)
2479 return VERR_INVALID_PARAMETER;
2480 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2481 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2482 break;
2483 }
2484
2485 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2486 {
2487 if (!pReqHdr || u64Arg)
2488 return VERR_INVALID_PARAMETER;
2489# if 0 /** @todo */
2490 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2491# else
2492 rc = VERR_NOT_IMPLEMENTED;
2493# endif
2494 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2495 break;
2496 }
2497#endif
2498
2499 case VMMR0_DO_DBGF_BP_INIT:
2500 {
2501 if (!pReqHdr || u64Arg || idCpu != 0)
2502 return VERR_INVALID_PARAMETER;
2503 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2504 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2505 break;
2506 }
2507
2508 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2509 {
2510 if (!pReqHdr || u64Arg || idCpu != 0)
2511 return VERR_INVALID_PARAMETER;
2512 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2513 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2514 break;
2515 }
2516
2517 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2518 {
2519 if (!pReqHdr || u64Arg || idCpu != 0)
2520 return VERR_INVALID_PARAMETER;
2521 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2522 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2523 break;
2524 }
2525
2526 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2527 {
2528 if (!pReqHdr || u64Arg || idCpu != 0)
2529 return VERR_INVALID_PARAMETER;
2530 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2531 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2532 break;
2533 }
2534
2535 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2536 {
2537 if (!pReqHdr || u64Arg || idCpu != 0)
2538 return VERR_INVALID_PARAMETER;
2539 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2540 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2541 break;
2542 }
2543
2544
2545 /*
2546 * TM requests.
2547 */
2548 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2549 {
2550 if (pReqHdr || idCpu == NIL_VMCPUID)
2551 return VERR_INVALID_PARAMETER;
2552 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2553 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2554 break;
2555 }
2556
2557 /*
2558 * For profiling.
2559 */
2560 case VMMR0_DO_NOP:
2561 case VMMR0_DO_SLOW_NOP:
2562 return VINF_SUCCESS;
2563
2564 /*
2565 * For testing Ring-0 APIs invoked in this environment.
2566 */
2567 case VMMR0_DO_TESTS:
2568 /** @todo make new test */
2569 return VINF_SUCCESS;
2570
2571 default:
2572 /*
2573             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2574             * than -1, which the interrupt gate glue code might return.
2575 */
2576 Log(("operation %#x is not supported\n", enmOperation));
2577 return VERR_NOT_SUPPORTED;
2578 }
2579 return rc;
2580}
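
/*
 * Illustrative sketch, not part of the original source: the argument
 * validation pattern a new VMMR0_DO_XXX case in vmmR0EntryExWorker would
 * follow.  VMMR0_DO_EXAMPLE_OP and ExampleR0DoSomething are hypothetical
 * names used purely for illustration.
 */
#if 0
        case VMMR0_DO_EXAMPLE_OP:
            if (idCpu == NIL_VMCPUID)           /* per-VCPU operation, so an EMT must be specified */
                return VERR_INVALID_CPU_ID;
            if (u64Arg || pReqHdr)              /* reject arguments the operation doesn't take */
                return VERR_INVALID_PARAMETER;
            rc = ExampleR0DoSomething(pGVM, idCpu);
            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
            break;
#endif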
2581
2582
2583/**
2584 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2585 *
2586 * @returns VBox status code.
2587 * @param pvArgs The argument package.
2588 */
2589static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2590{
2591 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2592 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2593 pGVCpu->vmmr0.s.idCpu,
2594 pGVCpu->vmmr0.s.enmOperation,
2595 pGVCpu->vmmr0.s.pReq,
2596 pGVCpu->vmmr0.s.u64Arg,
2597 pGVCpu->vmmr0.s.pSession);
2598}
2599
2600
2601/**
2602 * The Ring 0 entry point, called by the support library (SUP).
2603 *
2604 * @returns VBox status code.
2605 * @param pGVM The global (ring-0) VM structure.
2606 * @param pVM The cross context VM structure.
2607 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2608 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2609 * @param enmOperation Which operation to execute.
2610 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2611 * @param u64Arg Some simple constant argument.
2612 * @param pSession The session of the caller.
2613 * @remarks Assume called with interrupts _enabled_.
2614 */
2615VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2616 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2617{
2618 /*
2619 * Requests that should only happen on the EMT thread will be
2620 * wrapped in a setjmp so we can assert without causing trouble.
2621 */
2622 if ( pVM != NULL
2623 && pGVM != NULL
2624 && pVM == pGVM /** @todo drop pVM or pGVM */
2625 && idCpu < pGVM->cCpus
2626 && pGVM->pSession == pSession
2627 && pGVM->pSelf == pVM)
2628 {
2629 switch (enmOperation)
2630 {
2631 /* These might/will be called before VMMR3Init. */
2632 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2633 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2634 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2635 case VMMR0_DO_GMM_FREE_PAGES:
2636 case VMMR0_DO_GMM_BALLOONED_PAGES:
2637            /* On the Mac we might not have a valid jmp buf, so check these as well. */
2638 case VMMR0_DO_VMMR0_INIT:
2639 case VMMR0_DO_VMMR0_TERM:
2640
2641 case VMMR0_DO_PDM_DEVICE_CREATE:
2642 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2643 case VMMR0_DO_IOM_GROW_IO_PORTS:
2644 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2645 case VMMR0_DO_DBGF_BP_INIT:
2646 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2647 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2648 {
2649 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2650 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2651 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2652 && pGVCpu->hNativeThreadR0 == hNativeThread))
2653 {
2654 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2655 break;
2656
2657 pGVCpu->vmmr0.s.pGVM = pGVM;
2658 pGVCpu->vmmr0.s.idCpu = idCpu;
2659 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2660 pGVCpu->vmmr0.s.pReq = pReq;
2661 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2662 pGVCpu->vmmr0.s.pSession = pSession;
2663 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2664 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2665 }
2666 return VERR_VM_THREAD_NOT_EMT;
2667 }
2668
2669 default:
2670 case VMMR0_DO_PGM_POOL_GROW:
2671 break;
2672 }
2673 }
2674 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2675}
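
/*
 * Illustrative sketch, not part of the original source: how ring-3 code on
 * the EMT typically reaches VMMR0EntryEx through the support library.  The
 * SUPR3CallVMMR0Ex call shape, pVMR0 and u64ExpireGipTime are assumptions
 * made for illustration only.
 */
#if 0
    /* Ask ring-0 to halt this vCPU until the given GIP timestamp expires. */
    int rc = SUPR3CallVMMR0Ex(pVMR0, idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64ExpireGipTime, NULL /*pReqHdr*/);
#endif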
2676
2677
2678/*********************************************************************************************************************************
2679* EMT Blocking *
2680*********************************************************************************************************************************/
2681
2682/**
2683 * Checks whether we've armed the ring-0 long jump machinery.
2684 *
2685 * @returns @c true / @c false
2686 * @param pVCpu The cross context virtual CPU structure.
2687 * @thread EMT
2688 * @sa VMMIsLongJumpArmed
2689 */
2690VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2691{
2692#ifdef RT_ARCH_X86
2693 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2694 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2695#else
2696 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2697 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2698#endif
2699}
2700
2701
2702/**
2703 * Checks whether we've done a ring-3 long jump.
2704 *
2705 * @returns @c true / @c false
2706 * @param pVCpu The cross context virtual CPU structure.
2707 * @thread EMT
2708 */
2709VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2710{
2711 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2712}
2713
2714
2715/**
2716 * Locking helper that deals with HM context and checks if the thread can block.
2717 *
2718 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2719 * VERR_VMM_CANNOT_BLOCK if not able to block.
2720 * @param pVCpu The cross context virtual CPU structure of the calling
2721 * thread.
2722 * @param rcBusy What to return in case of a blocking problem. Will IPE
2723 * if VINF_SUCCESS and we cannot block.
2724 * @param pszCaller The caller (for logging problems).
2725 * @param pvLock The lock address (for logging problems).
2726 * @param pCtx Where to return context info for the resume call.
2727 * @thread EMT(pVCpu)
2728 */
2729VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2730 PVMMR0EMTBLOCKCTX pCtx)
2731{
2732 const char *pszMsg;
2733
2734 /*
2735 * Check that we are allowed to block.
2736 */
2737 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2738 {
2739 /*
2740         * Are we in HM context and w/o a context hook? If so, work the context hook.
2741 */
2742 if (pVCpu->idHostCpu != NIL_RTCPUID)
2743 {
2744 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2745
2746 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2747 {
2748 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2749 if (pVCpu->vmmr0.s.pPreemptState)
2750 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2751
2752 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2753 pCtx->fWasInHmContext = true;
2754 return VINF_SUCCESS;
2755 }
2756 }
2757
2758 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2759 {
2760 /*
2761 * Not in HM context or we've got hooks, so just check that preemption
2762 * is enabled.
2763 */
2764 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2765 {
2766 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2767 pCtx->fWasInHmContext = false;
2768 return VINF_SUCCESS;
2769 }
2770 pszMsg = "Preemption is disabled!";
2771 }
2772 else
2773 pszMsg = "Preemption state w/o HM state!";
2774 }
2775 else
2776 pszMsg = "Ring-3 calls are disabled!";
2777
2778 static uint32_t volatile s_cWarnings = 0;
2779 if (++s_cWarnings < 50)
2780 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2781 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2782 pCtx->fWasInHmContext = false;
2783 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2784}
2785
2786
2787/**
2788 * Counterpart to VMMR0EmtPrepareToBlock.
2789 *
2790 * @param pVCpu The cross context virtual CPU structure of the calling
2791 * thread.
2792 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2793 * @thread EMT(pVCpu)
2794 */
2795VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2796{
2797 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2798 if (pCtx->fWasInHmContext)
2799 {
2800 if (pVCpu->vmmr0.s.pPreemptState)
2801 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2802
2803 pCtx->fWasInHmContext = false;
2804 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2805 }
2806 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2807}
2808
2809/** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
2810 * @{ */
2811/** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */
2812#define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED RT_BIT_32(0)
2813/** @} */
2814
2815/**
2816 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock.
2817 *
2818 * @returns VBox status code.
2819 * @retval VERR_THREAD_IS_TERMINATING if the calling thread is terminating.
2820 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2821 * @a cMsTimeout or to maximum wait values.
2822 *
2823 * @param pGVCpu The ring-0 virtual CPU structure.
2824 * @param fFlags VMMR0EMTWAIT_F_XXX.
2825 * @param hEvent The event to wait on.
2826 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2827 */
2828VMMR0DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2829{
2830 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2831
2832 /*
2833 * Note! Similar code is found in the PDM critical sections too.
2834 */
2835 uint64_t const nsStart = RTTimeNanoTS();
2836 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2837 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2838 uint32_t cMsMaxOne = RT_MS_5SEC;
2839 bool fNonInterruptible = false;
2840 for (;;)
2841 {
2842 /* Wait. */
2843 int rcWait = !fNonInterruptible
2844 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2845 : RTSemEventWait(hEvent, cMsMaxOne);
2846 if (RT_SUCCESS(rcWait))
2847 return rcWait;
2848
2849 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2850 {
2851 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2852
2853 /*
2854 * Check the thread termination status.
2855 */
2856 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2857 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2858 ("rcTerm=%Rrc\n", rcTerm));
2859 if ( rcTerm == VERR_NOT_SUPPORTED
2860 && !fNonInterruptible
2861 && cNsMaxTotal > RT_NS_1MIN)
2862 cNsMaxTotal = RT_NS_1MIN;
2863
2864 /* We return immediately if it looks like the thread is terminating. */
2865 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2866 return VERR_THREAD_IS_TERMINATING;
2867
2868 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2869 specified, otherwise we'll just return it. */
2870 if (rcWait == VERR_INTERRUPTED)
2871 {
2872 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2873 return VERR_INTERRUPTED;
2874 if (!fNonInterruptible)
2875 {
2876 /* First time: Adjust down the wait parameters and make sure we get at least
2877 one non-interruptible wait before timing out. */
2878 fNonInterruptible = true;
2879 cMsMaxOne = 32;
2880 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2881 if (cNsLeft > RT_NS_10SEC)
2882 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2883 continue;
2884 }
2885 }
2886
2887 /* Check for timeout. */
2888 if (cNsElapsed > cNsMaxTotal)
2889 return VERR_TIMEOUT;
2890 }
2891 else
2892 return rcWait;
2893 }
2894 /* not reached */
2895}
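
/*
 * Illustrative sketch, not part of the original source: the intended pairing
 * of VMMR0EmtPrepareToBlock, VMMR0EmtWaitEventInner and
 * VMMR0EmtResumeAfterBlocking around a ring-0 event wait, mirroring what the
 * logger flushing code further down does.  The function name, event handle
 * and rcBusy value are made-up placeholders.
 */
#if 0
static int vmmR0ExampleWaitOnEvent(PGVMCPU pGVCpu, RTSEMEVENT hEventExample)
{
    VMMR0EMTBLOCKCTX Ctx;
    int rc = VMMR0EmtPrepareToBlock(pGVCpu, VERR_SEM_BUSY /*rcBusy*/, "vmmR0ExampleWaitOnEvent",
                                    hEventExample, &Ctx);
    if (RT_SUCCESS(rc))
    {
        /* Safe to block now: the HM context has been left and preemption restored if necessary. */
        rc = VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
                                    hEventExample, RT_INDEFINITE_WAIT);

        /* Re-establish the HM context / preemption state before returning to the caller. */
        VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
    }
    return rc;
}
#endif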
2896
2897
2898/*********************************************************************************************************************************
2899* Logging. *
2900*********************************************************************************************************************************/
2901
2902/**
2903 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2904 *
2905 * @returns VBox status code.
2906 * @param pGVM The global (ring-0) VM structure.
2907 * @param idCpu The ID of the calling EMT.
2908 * @param pReq The request data.
2909 * @param idxLogger Which logger set to update.
2910 * @thread EMT(idCpu)
2911 */
2912static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2913{
2914 /*
2915 * Check sanity. First we require EMT to be calling us.
2916 */
2917 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2918 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2919
2920 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2921 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2922 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2923
2924 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2925
2926 /*
2927 * Adjust flags.
2928 */
2929 /* Always buffered: */
2930 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2931    /* These don't make sense at present: */
2932 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2933 /* We've traditionally skipped the group restrictions. */
2934 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2935
2936 /*
2937 * Do the updating.
2938 */
2939 int rc = VINF_SUCCESS;
2940 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2941 {
2942 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2943 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2944 if (pLogger)
2945 {
2946 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2947 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2948 }
2949 }
2950
2951 return rc;
2952}
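
/*
 * Illustrative sketch, not part of the original source: how the variable
 * length VMMR0UPDATELOGGERSREQ request is sized so that it passes the checks
 * in vmmR0UpdateLoggers above.  cGroupsExample is a made-up placeholder.
 */
#if 0
    uint32_t const cGroupsExample = 32;
    uint32_t const cbReq          = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroupsExample]);
    /* The caller sets pReq->Hdr.cbReq = cbReq and pReq->cGroups = cGroupsExample,
       otherwise the request is rejected with VERR_INVALID_PARAMETER. */
#endif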
2953
2954
2955/**
2956 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2957 *
2958 * The job info is copied into VMM::LogFlusherItem.
2959 *
2960 * @returns VBox status code.
2961 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2962 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2963 * @param pGVM The global (ring-0) VM structure.
2964 * @thread The log flusher thread (first caller automatically becomes the log
2965 * flusher).
2966 */
2967static int vmmR0LogFlusher(PGVM pGVM)
2968{
2969 /*
2970 * Check that this really is the flusher thread.
2971 */
2972 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2973 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2974 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2975 { /* likely */ }
2976 else
2977 {
2978 /* The first caller becomes the flusher thread. */
2979 bool fOk;
2980 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2981 if (!fOk)
2982 return VERR_NOT_OWNER;
2983 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2984 }
2985
2986 /*
2987 * Acknowledge flush, waking up waiting EMT.
2988 */
2989 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2990
2991 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2992 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2993 if ( idxTail != idxHead
2994 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2995 {
2996 /* Pop the head off the ring buffer. */
2997 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2998 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2999 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
3000
3001 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
3002 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3003
3004 /* Validate content. */
3005 if ( idCpu < pGVM->cCpus
3006 && idxLogger < VMMLOGGER_IDX_MAX
3007 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
3008 {
3009 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3010 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3011 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3012
3013 /*
3014 * Accounting.
3015 */
3016 uint32_t cFlushing = pR0Log->cFlushing - 1;
3017 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
3018 { /*likely*/ }
3019 else
3020 cFlushing = 0;
3021 pR0Log->cFlushing = cFlushing;
3022 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
3023
3024 /*
3025 * Wake up the EMT if it's waiting.
3026 */
3027 if (!pR0Log->fEmtWaiting)
3028 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3029 else
3030 {
3031 pR0Log->fEmtWaiting = false;
3032 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3033
3034 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
3035 if (RT_FAILURE(rc))
3036 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
3037 idxHead, idCpu, idxLogger, idxBuffer, rc));
3038 }
3039 }
3040 else
3041 {
3042 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3043 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
3044 }
3045
3046 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3047 }
3048
3049 /*
3050 * The wait loop.
3051 */
3052 int rc;
3053 for (;;)
3054 {
3055 /*
3056 * Work pending?
3057 */
3058 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3059 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3060 if (idxTail != idxHead)
3061 {
3062 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
3063 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
3064
3065 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3066 return VINF_SUCCESS;
3067 }
3068
3069 /*
3070         * Nothing to do, so check for termination and go to sleep.
3071 */
3072 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
3073 { /* likely */ }
3074 else
3075 {
3076 rc = VERR_OBJECT_DESTROYED;
3077 break;
3078 }
3079
3080 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
3081 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3082
3083 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
3084
3085 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3086 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
3087
3088 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
3089 { /* likely */ }
3090 else if (rc == VERR_INTERRUPTED)
3091 {
3092 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3093 return rc;
3094 }
3095 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
3096 break;
3097 else
3098 {
3099 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
3100 break;
3101 }
3102 }
3103
3104 /*
3105 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
3106 */
3107 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
3108 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
3109
3110 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3111 return rc;
3112}
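
/*
 * Illustrative sketch, not part of the original source: the ring buffer
 * arithmetic shared by vmmR0LogFlusher and vmmR0LoggerFlushInner.  Head and
 * tail indexes are kept within the ring by modulo arithmetic; the ring is
 * empty when they are equal and full when advancing the tail would make it
 * collide with the head.
 */
#if 0
    uint32_t const idxHead    = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
    uint32_t const idxTail    = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
    uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
    bool const fEmpty = idxTail    == idxHead;  /* nothing queued for the flusher thread */
    bool const fFull  = idxNewTail == idxHead;  /* producer cannot queue, see vmmR0LoggerFlushInner */
#endif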
3113
3114
3115/**
3116 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
3117 * buffers for logger @a idxLogger.
3118 *
3119 * @returns VBox status code.
3120 * @param pGVM The global (ring-0) VM structure.
3121 * @param idCpu The ID of the calling EMT.
3122 * @param idxLogger Which logger to wait on.
3123 * @thread EMT(idCpu)
3124 */
3125static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
3126{
3127 /*
3128 * Check sanity. First we require EMT to be calling us.
3129 */
3130 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
3131 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3132 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
3133 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
3134 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3135
3136 /*
3137 * Do the waiting.
3138 */
3139 uint64_t const nsStart = RTTimeNanoTS();
3140 int rc = VINF_SUCCESS;
3141
3142 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3143 uint32_t cFlushing = pR0Log->cFlushing;
3144 while (cFlushing > 0)
3145 {
3146 pR0Log->fEmtWaiting = true;
3147 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3148
3149 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
3150
3151 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3152 pR0Log->fEmtWaiting = false;
3153 if (RT_SUCCESS(rc))
3154 {
3155            /* Read the new count and make sure it decreased before looping. That
3156               way we can guarantee that we won't wait for more than 5 min * the number of buffers. */
3157 uint32_t const cPrevFlushing = cFlushing;
3158 cFlushing = pR0Log->cFlushing;
3159 if (cFlushing < cPrevFlushing)
3160 continue;
3161 rc = VERR_INTERNAL_ERROR_3;
3162 }
3163 break;
3164 }
3165 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3166 return rc;
3167}
3168
3169
3170/**
3171 * Inner worker for vmmR0LoggerFlushCommon.
3172 */
3173static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
3174{
3175 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3176 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3177
3178 /*
3179 * Figure out what we need to do and whether we can.
3180 */
3181 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
3182#if VMMLOGGER_BUFFER_COUNT >= 2
3183 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
3184 {
3185 if (RTSemEventIsSignalSafe())
3186 enmAction = kJustSignal;
3187 else if (VMMRZCallRing3IsEnabled(pGVCpu))
3188 enmAction = kPrepAndSignal;
3189 else
3190 {
3191 /** @todo This is a bit simplistic. We could introduce a FF to signal the
3192 * thread or similar. */
3193 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3194# if defined(RT_OS_LINUX)
3195 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
3196# endif
3197 pShared->cbDropped += cbToFlush;
3198 return true;
3199 }
3200 }
3201 else
3202#endif
3203 if (VMMRZCallRing3IsEnabled(pGVCpu))
3204 enmAction = kPrepSignalAndWait;
3205 else
3206 {
3207 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3208# if defined(RT_OS_LINUX)
3209 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
3210# endif
3211 pShared->cbDropped += cbToFlush;
3212 return true;
3213 }
3214
3215 /*
3216 * Prepare for blocking if necessary.
3217 */
3218 VMMR0EMTBLOCKCTX Ctx;
3219 if (enmAction != kJustSignal)
3220 {
3221 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
3222 if (RT_SUCCESS(rc))
3223 { /* likely */ }
3224 else
3225 {
3226 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3227 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3228 return false;
3229 }
3230 }
3231
3232 /*
3233 * Queue the flush job.
3234 */
3235 bool fFlushedBuffer;
3236 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3237 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3238 {
3239 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3240 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3241 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3242 if (idxNewTail != idxHead)
3243 {
3244 /* Queue it. */
3245 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3246 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3247 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3248 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3249 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3250
3251 /* Update the number of buffers currently being flushed. */
3252 uint32_t cFlushing = pR0Log->cFlushing;
3253 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3254 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3255
3256 /* We must wait if all buffers are currently being flushed. */
3257 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3258 pR0Log->fEmtWaiting = fEmtWaiting;
3259
3260 /* Stats. */
3261 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3262 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3263
3264 /* Signal the worker thread. */
3265 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3266 {
3267 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3268 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3269 }
3270 else
3271 {
3272 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3273 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3274 }
3275
3276 /*
3277 * Wait for a buffer to finish flushing.
3278 *
3279 * Note! Lazy bird is ignoring the status code here. The result is
3280             * that we might end up with an extra event signalling, so the
3281             * next time we need to wait we won't, and may end up with some
3282             * log corruption. However, it's too much hassle right now for
3283             * a scenario which would most likely end the process rather
3284             * than cause log corruption.
3285 */
3286 if (fEmtWaiting)
3287 {
3288 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3289 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3290 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3291 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3292 }
3293
3294 /*
3295             * We always switch buffers if we have more than one.
3296 */
3297#if VMMLOGGER_BUFFER_COUNT == 1
3298 fFlushedBuffer = true;
3299#else
3300 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3301 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3302 fFlushedBuffer = false;
3303#endif
3304 }
3305 else
3306 {
3307 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3308 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3309 fFlushedBuffer = true;
3310 }
3311 }
3312 else
3313 {
3314 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3315 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3316 fFlushedBuffer = true;
3317 }
3318
3319 /*
3320 * Restore the HM context.
3321 */
3322 if (enmAction != kJustSignal)
3323 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3324
3325 return fFlushedBuffer;
3326}
3327
3328
3329/**
3330 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3331 */
3332static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3333{
3334 /*
3335 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3336     * (This code is a bit paranoid.)
3337 */
3338 if (RT_VALID_PTR(pLogger))
3339 {
3340 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3341 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3342 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3343 {
3344 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3345 if ( RT_VALID_PTR(pGVCpu)
3346 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
3347 {
3348 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3349 PGVM const pGVM = pGVCpu->pGVM;
3350 if ( hNativeSelf == pGVCpu->hEMT
3351 && RT_VALID_PTR(pGVM))
3352 {
3353 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3354 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3355 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3356 {
3357 /*
3358 * Make sure we don't recurse forever here should something in the
3359 * following code trigger logging or an assertion. Do the rest in
3360                         * an inner worker to avoid hitting the right margin too hard.
3361 */
3362 if (!pR0Log->fFlushing)
3363 {
3364 pR0Log->fFlushing = true;
3365 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3366 pR0Log->fFlushing = false;
3367 return fFlushed;
3368 }
3369
3370 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3371 }
3372 else
3373 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3374 }
3375 else
3376 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3377 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3378 }
3379 else
3380 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3381 }
3382 else
3383 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3384 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3385 }
3386 else
3387 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3388 return true;
3389}
3390
3391
3392/**
3393 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3394 */
3395static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3396{
3397 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3398}
3399
3400
3401/**
3402 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3403 */
3404static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3405{
3406#ifdef LOG_ENABLED
3407 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3408#else
3409 RT_NOREF(pLogger, pBufDesc);
3410 return true;
3411#endif
3412}
3413
3414
3415/*
3416 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3417 */
3418DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3419{
3420#ifdef LOG_ENABLED
3421 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3422 if (pGVCpu)
3423 {
3424 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3425 if (RT_VALID_PTR(pLogger))
3426 {
3427 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3428 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3429 {
3430 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3431 {
3432 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3433 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3434 return NULL;
3435 }
3436
3437 /*
3438 * When we're flushing we _must_ return NULL here to suppress any
3439 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3440 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3441 * which will reset the buffer content before we even get to queue
3442 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3443 * is enabled.)
3444 */
3445 return NULL;
3446 }
3447 }
3448 }
3449#endif
3450 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3451}
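
Returning NULL from the instance getter is what actually silences the Log* statements while a flush is pending or flushing is disabled: call sites obtain the logger instance first and treat NULL as logging-off. A small sketch of that suppression idea, with hypothetical demo* names standing in for the per-EMT state:

#include <stddef.h>
#include <stdbool.h>

typedef struct DEMOLOGGER { int dummy; } DEMOLOGGER;

typedef struct DEMOCPUSTATE
{
    DEMOLOGGER *pLogger;
    bool        fFlushing;          /* a flush is queued, the buffer must not be touched */
    bool        fFlushingDisabled;  /* e.g. while waiting for the flusher thread */
} DEMOCPUSTATE;

static DEMOLOGGER *demoGetLoggerInstance(DEMOCPUSTATE *pCpu)
{
    if (pCpu->pLogger && !pCpu->fFlushing && !pCpu->fFlushingDisabled)
        return pCpu->pLogger;   /* normal case: hand out the per-CPU logger */
    return NULL;                /* suppress: callers skip output when they get NULL */
}

int main(void)
{
    DEMOLOGGER   Logger;
    DEMOCPUSTATE Cpu = { &Logger, false, false };
    DEMOLOGGER  *pNormal = demoGetLoggerInstance(&Cpu);        /* non-NULL: logging allowed */
    Cpu.fFlushing = true;
    DEMOLOGGER  *pSuppressed = demoGetLoggerInstance(&Cpu);    /* NULL: logging suppressed */
    return (pNormal != NULL && pSuppressed == NULL) ? 0 : 1;
}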
3452
3453
3454/*
3455 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3456 */
3457DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3458{
3459 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3460 if (pGVCpu)
3461 {
3462 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3463 if (RT_VALID_PTR(pLogger))
3464 {
3465 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3466 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3467 {
3468 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3469 {
3470 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3471 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3472 return NULL;
3473 }
3474 }
3475 }
3476 }
3477 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3478}
3479
3480
3481/**
3482 * Helper for vmmR0InitLoggerSet that creates and configures one ring-0 logger.
3483 */
3484static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3485 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3486{
3487 /*
3488 * Create and configure the logger.
3489 */
3490 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3491 {
3492 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3493 pR0Log->aBufDescs[i].uReserved = 0;
3494 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3495 pR0Log->aBufDescs[i].offBuf = 0;
3496 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3497 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3498
3499 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3500 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3501 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3502 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3503 pShared->aBufs[i].AuxDesc.offBuf = 0;
3504 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3505 }
3506 pShared->cbBuf = cbBuf;
3507
3508 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3509 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3510 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3511 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3512 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3513 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3514 if (RT_SUCCESS(rc))
3515 {
3516 PRTLOGGER pLogger = pR0Log->pLogger;
3517 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3518 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3519 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3520
3521 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3522 if (RT_SUCCESS(rc))
3523 {
3524 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3525
3526 /*
3527 * Create the event semaphore the EMT waits on while flushing is in progress.
3528 */
3529 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3530 if (RT_SUCCESS(rc))
3531 return VINF_SUCCESS;
3532 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3533 }
3534 RTLogDestroy(pLogger);
3535 }
3536 pR0Log->pLogger = NULL;
3537 return rc;
3538}
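
The function above follows the usual ring-0 init shape: each resource is acquired inside the success branch of the previous one, and every failure path releases exactly what was already acquired before handing back the status code. A compact sketch of that unwinding shape, with malloc/free standing in for the real logger and semaphore creation calls (demo* names are illustrative):

#include <stdlib.h>

typedef struct DEMOCTX
{
    void *pLogger;
    void *pEvent;
} DEMOCTX;

static int demoInitOne(DEMOCTX *pCtx)
{
    pCtx->pLogger = malloc(64);                 /* stands in for creating the logger */
    if (pCtx->pLogger)
    {
        pCtx->pEvent = malloc(16);              /* stands in for creating the event semaphore */
        if (pCtx->pEvent)
            return 0;                           /* success */
        free(pCtx->pLogger);                    /* undo step 1 when step 2 fails */
    }
    pCtx->pLogger = NULL;
    pCtx->pEvent  = NULL;
    return -1;                                  /* error status */
}

int main(void)
{
    DEMOCTX Ctx = { NULL, NULL };
    int rc = demoInitOne(&Ctx);
    free(Ctx.pEvent);
    free(Ctx.pLogger);
    return rc ? 1 : 0;
}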
3539
3540
3541/**
3542 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3543 */
3544static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3545{
3546 RTLogDestroy(pR0Log->pLogger);
3547 pR0Log->pLogger = NULL;
3548
3549 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3550 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3551
3552 RTSemEventDestroy(pR0Log->hEventFlushWait);
3553 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3554}
3555
3556
3557/**
3558 * Initializes one type of logger for each EMT.
3559 */
3560static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3561{
3562 /* Allocate buffers first. */
3563 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3564 if (RT_SUCCESS(rc))
3565 {
3566 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3567 if (RT_SUCCESS(rc))
3568 {
3569 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3570 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3571
3572 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3573 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3574
3575 /* Initialize the per-CPU loggers. */
3576 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3577 {
3578 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3579 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3580 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3581 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3582 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3583 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3584 if (RT_FAILURE(rc))
3585 {
3586 vmmR0TermLoggerOne(pR0Log, pShared);
3587 while (i-- > 0)
3588 {
3589 pGVCpu = &pGVM->aCpus[i];
3590 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3591 }
3592 break;
3593 }
3594 }
3595 if (RT_SUCCESS(rc))
3596 return VINF_SUCCESS;
3597
3598 /* Bail out. */
3599 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3600 *phMapObj = NIL_RTR0MEMOBJ;
3601 }
3602 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3603 *phMemObj = NIL_RTR0MEMOBJ;
3604 }
3605 return rc;
3606}
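
All buffers for one logger type live in a single contiguous allocation of cbBuf * cCpus * VMMLOGGER_BUFFER_COUNT bytes: CPU i gets the slice starting at i * cbBuf * VMMLOGGER_BUFFER_COUNT, and buffer j within that slice starts another j * cbBuf in. A tiny sketch of the offset arithmetic (demoBufOffset is illustrative, not a VMM helper):

#include <stddef.h>
#include <stdio.h>

static size_t demoBufOffset(size_t cbBuf, size_t cBufsPerCpu, size_t iCpu, size_t iBuf)
{
    /* CPU i's slice starts at i * cbBuf * cBufsPerCpu; buffer j sits cbBuf * j into it. */
    return iCpu * cbBuf * cBufsPerCpu + iBuf * cbBuf;
}

int main(void)
{
    size_t const cbBuf = 4096, cBufs = 2;   /* e.g. the _4K release-logger buffers */
    printf("CPU 1, buffer 1 -> offset %zu\n", demoBufOffset(cbBuf, cBufs, 1, 1)); /* 12288 */
    return 0;
}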
3607
3608
3609/**
3610 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3611 *
3612 * @returns VBox status code.
3613 * @param pGVM The global (ring-0) VM structure.
3614 */
3615static int vmmR0InitLoggers(PGVM pGVM)
3616{
3617 /*
3618 * Invalidate the ring buffer (not really necessary).
3619 */
3620 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3621 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3622
3623 /*
3624 * Create the spinlock and flusher event semaphore.
3625 */
3626 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3627 if (RT_SUCCESS(rc))
3628 {
3629 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3630 if (RT_SUCCESS(rc))
3631 {
3632 /*
3633 * Create the ring-0 release loggers.
3634 */
3635 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3636 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3637#ifdef LOG_ENABLED
3638 if (RT_SUCCESS(rc))
3639 {
3640 /*
3641 * Create debug loggers.
3642 */
3643 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3644 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3645 }
3646#endif
3647 }
3648 }
3649 return rc;
3650}
3651
3652
3653/**
3654 * Counterpart to vmmR0InitLoggers that cleans up all the logging related stuff.
3655 *
3656 * @param pGVM The global (ring-0) VM structure.
3657 */
3658static void vmmR0CleanupLoggers(PGVM pGVM)
3659{
3660 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3661 {
3662 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3663 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3664 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3665 }
3666
3667 /*
3668 * Free logger buffer memory.
3669 */
3670 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3671 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3672 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3673 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3674
3675 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3676 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3677 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3678 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3679
3680 /*
3681 * Free log flusher related stuff.
3682 */
3683 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3684 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3685 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3686 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3687}
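
Teardown runs in reverse order of acquisition: the per-CPU loggers that reference the shared buffers go first, then each ring-3 mapping object, then the backing memory object, and finally the flusher spinlock and event semaphore. A schematic sketch of the buffer part of that ordering, with plain malloc/free standing in for the IPRT objects (demo* names are hypothetical):

#include <stdlib.h>

typedef struct DEMOVM
{
    void *apLoggers[2];   /* per-CPU logger instances using the shared buffer */
    void *pMapping;       /* stands in for the ring-3 mapping object */
    void *pBacking;       /* stands in for the ring-0 backing allocation */
} DEMOVM;

static void demoCleanup(DEMOVM *pVM)
{
    for (size_t i = 0; i < 2; i++)
    {
        free(pVM->apLoggers[i]);   /* 1) destroy the loggers referencing the buffer */
        pVM->apLoggers[i] = NULL;
    }
    free(pVM->pMapping);           /* 2) drop the mapping of the buffer */
    pVM->pMapping = NULL;
    free(pVM->pBacking);           /* 3) finally free the buffer memory itself */
    pVM->pBacking = NULL;
}

int main(void)
{
    DEMOVM VM;
    for (size_t i = 0; i < 2; i++)
        VM.apLoggers[i] = malloc(32);
    VM.pMapping = malloc(16);
    VM.pBacking = malloc(4096);
    demoCleanup(&VM);
    return 0;
}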
3688
3689
3690/*********************************************************************************************************************************
3691* Assertions *
3692*********************************************************************************************************************************/
3693
3694/*
3695 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3696 *
3697 * @returns true if the breakpoint should be hit, false if it should be ignored.
3698 */
3699DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3700{
3701#if 0
3702 return true;
3703#else
3704 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3705 if (pVM)
3706 {
3707 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3708
3709 if (pVCpu)
3710 {
3711# ifdef RT_ARCH_X86
3712 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
3713 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3714# else
3715 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
3716 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3717# endif
3718 {
3719 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
3720 return RT_FAILURE_NP(rc);
3721 }
3722 }
3723 }
3724# ifdef RT_OS_LINUX
3725 return true;
3726# else
3727 return false;
3728# endif
3729#endif
3730}
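
The rip/eip test above boils down to "is the longjmp buffer armed": only when an earlier frame has set up the jump buffer, and the EMT is not already in a ring-3 call, is it safe to unwind back instead of hitting the breakpoint. A plain setjmp/longjmp sketch of that armed-buffer pattern, with hypothetical demo* names:

#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static jmp_buf g_JmpBuf;
static bool    g_fArmed = false;   /* plays the role of the rip/eip != 0 check above */

static bool demoShouldPanic(void)
{
    if (g_fArmed)
    {
        longjmp(g_JmpBuf, 1);      /* hand the problem back to the outer frame */
        /* not reached */
    }
    return true;                   /* nothing to jump back to: hit the breakpoint */
}

int main(void)
{
    if (setjmp(g_JmpBuf) == 0)
    {
        g_fArmed = true;           /* arm before entering the guarded region */
        demoShouldPanic();         /* simulated assertion inside the region */
    }
    else
        printf("assertion routed back to the outer frame\n");
    g_fArmed = false;
    return 0;
}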
3731
3732
3733/*
3734 * Override this so we can push it up to ring-3.
3735 */
3736DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3737{
3738 /*
3739 * To host kernel log/whatever.
3740 */
3741 SUPR0Printf("!!R0-Assertion Failed!!\n"
3742 "Expression: %s\n"
3743 "Location : %s(%d) %s\n",
3744 pszExpr, pszFile, uLine, pszFunction);
3745
3746 /*
3747 * To the log.
3748 */
3749 LogAlways(("\n!!R0-Assertion Failed!!\n"
3750 "Expression: %s\n"
3751 "Location : %s(%d) %s\n",
3752 pszExpr, pszFile, uLine, pszFunction));
3753
3754 /*
3755 * To the global VMM buffer.
3756 */
3757 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3758 if (pVM)
3759 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3760 "\n!!R0-Assertion Failed!!\n"
3761 "Expression: %.*s\n"
3762 "Location : %s(%d) %s\n",
3763 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3764 pszFile, uLine, pszFunction);
3765
3766 /*
3767 * Continue the normal way.
3768 */
3769 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3770}
3771
3772
3773/**
3774 * Callback for RTLogFormatV which writes to the ring-3 log port.
3775 * See PFNLOGOUTPUT() for details.
3776 */
3777static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3778{
3779 for (size_t i = 0; i < cbChars; i++)
3780 {
3781 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3782 }
3783
3784 NOREF(pv);
3785 return cbChars;
3786}
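
rtLogOutput is a character-sink callback: the formatter renders the message and hands the characters to the sink in chunks, and the sink reports how many it consumed. A self-contained sketch of that callback shape in plain C (demoSinkToStderr and demoEmit are illustrative, not IPRT APIs):

#include <stdio.h>
#include <string.h>

typedef size_t (*DEMOSINK)(void *pvUser, const char *pach, size_t cch);

static size_t demoSinkToStderr(void *pvUser, const char *pach, size_t cch)
{
    (void)pvUser;
    fwrite(pach, 1, cch, stderr);   /* forward every chunk to the host-side log */
    return cch;                     /* report everything as consumed */
}

static void demoEmit(DEMOSINK pfnSink, void *pvUser, const char *psz)
{
    pfnSink(pvUser, psz, strlen(psz));  /* a real formatter would chunk formatted output */
}

int main(void)
{
    demoEmit(demoSinkToStderr, NULL, "assertion text goes here\n");
    return 0;
}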
3787
3788
3789/*
3790 * Override this so we can push it up to ring-3.
3791 */
3792DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3793{
3794 va_list vaCopy;
3795
3796 /*
3797 * Push the message to the loggers.
3798 */
3799 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3800 if (pLog)
3801 {
3802 va_copy(vaCopy, va);
3803 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3804 va_end(vaCopy);
3805 }
3806 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3807 if (pLog)
3808 {
3809 va_copy(vaCopy, va);
3810 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3811 va_end(vaCopy);
3812 }
3813
3814 /*
3815 * Push it to the global VMM buffer.
3816 */
3817 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3818 if (pVM)
3819 {
3820 va_copy(vaCopy, va);
3821 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3822 va_end(vaCopy);
3823 }
3824
3825 /*
3826 * Continue the normal way.
3827 */
3828 RTAssertMsg2V(pszFormat, va);
3829}
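
The repeated va_copy/va_end pairs are required because a va_list may only be traversed once: each consumer of the format arguments (release log, debug log, the VMM assertion buffer) needs its own copy. A small sketch of the same pattern with two ordinary vfprintf consumers:

#include <stdarg.h>
#include <stdio.h>

static void demoBroadcastV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    va_copy(vaCopy, va);                 /* first consumer gets its own copy */
    vfprintf(stderr, pszFormat, vaCopy);
    va_end(vaCopy);

    va_copy(vaCopy, va);                 /* second consumer gets a fresh copy */
    vfprintf(stdout, pszFormat, vaCopy);
    va_end(vaCopy);
}

static void demoBroadcast(const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    demoBroadcastV(pszFormat, va);
    va_end(va);
}

int main(void)
{
    demoBroadcast("assertion %d failed in %s\n", 42, "demo.c");
    return 0;
}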
3830