VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@91497

Last change on this file since 91497 was 91271, checked in by vboxsync, 3 years ago

VMM: bugref:10092 Moved the PAE PDPTEs out of PGM into CPUMCTX.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 131.5 KB
1/* $Id: VMMR0.cpp 91271 2021-09-16 07:42:37Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/semaphore.h>
62#include <iprt/spinlock.h>
63#include <iprt/stdarg.h>
64#include <iprt/string.h>
65#include <iprt/thread.h>
66#include <iprt/timer.h>
67#include <iprt/time.h>
68
69#include "dtrace/VBoxVMM.h"
70
71
72#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
73# pragma intrinsic(_AddressOfReturnAddress)
74#endif
75
76#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
77# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84RT_C_DECLS_BEGIN
85#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
86extern uint64_t __udivdi3(uint64_t, uint64_t);
87extern uint64_t __umoddi3(uint64_t, uint64_t);
88#endif
89RT_C_DECLS_END
90static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
91static int vmmR0LogFlusher(PGVM pGVM);
92static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
93static int vmmR0InitLoggers(PGVM pGVM);
94static void vmmR0CleanupLoggers(PGVM pGVM);
95
96
97/*********************************************************************************************************************************
98* Global Variables *
99*********************************************************************************************************************************/
100/** Drag in necessary library bits.
101 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
102struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
103{
104 { (PFNRT)RTCrc32 },
105 { (PFNRT)RTOnce },
106#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
107 { (PFNRT)__udivdi3 },
108 { (PFNRT)__umoddi3 },
109#endif
110 { NULL }
111};
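/*
 * Note: the table above exists only to create references to these symbols so
 * that they are kept in VMMR0.r0 for VBoxDD*R0.r0 to link against; it is not
 * meant to be consumed at runtime.
 */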
112
113#ifdef RT_OS_SOLARIS
114/* Dependency information for the native solaris loader. */
115extern "C" { char _depends_on[] = "vboxdrv"; }
116#endif
117
118
119/**
120 * Initialize the module.
121 * This is called when we're first loaded.
122 *
123 * @returns 0 on success.
124 * @returns VBox status on failure.
125 * @param hMod Image handle for use in APIs.
126 */
127DECLEXPORT(int) ModuleInit(void *hMod)
128{
129#ifdef VBOX_WITH_DTRACE_R0
130 /*
131 * The first thing to do is register the static tracepoints.
132 * (Deregistration is automatic.)
133 */
134 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
135 if (RT_FAILURE(rc2))
136 return rc2;
137#endif
138 LogFlow(("ModuleInit:\n"));
139
140#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
141 /*
142 * Display the CMOS debug code.
143 */
144 ASMOutU8(0x72, 0x03);
145 uint8_t bDebugCode = ASMInU8(0x73);
146 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
147 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
148#endif
149
150 /*
151 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
152 */
153 int rc = vmmInitFormatTypes();
154 if (RT_SUCCESS(rc))
155 {
156 rc = GVMMR0Init();
157 if (RT_SUCCESS(rc))
158 {
159 rc = GMMR0Init();
160 if (RT_SUCCESS(rc))
161 {
162 rc = HMR0Init();
163 if (RT_SUCCESS(rc))
164 {
165 PDMR0Init(hMod);
166
167 rc = PGMRegisterStringFormatTypes();
168 if (RT_SUCCESS(rc))
169 {
170 rc = IntNetR0Init();
171 if (RT_SUCCESS(rc))
172 {
173#ifdef VBOX_WITH_PCI_PASSTHROUGH
174 rc = PciRawR0Init();
175#endif
176 if (RT_SUCCESS(rc))
177 {
178 rc = CPUMR0ModuleInit();
179 if (RT_SUCCESS(rc))
180 {
181#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
182 rc = vmmR0TripleFaultHackInit();
183 if (RT_SUCCESS(rc))
184#endif
185 {
186 if (RT_SUCCESS(rc))
187 {
188 LogFlow(("ModuleInit: returns success\n"));
189 return VINF_SUCCESS;
190 }
191 }
192
193 /*
194 * Bail out.
195 */
196#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
197 vmmR0TripleFaultHackTerm();
198#endif
199 }
200 else
201 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
202#ifdef VBOX_WITH_PCI_PASSTHROUGH
203 PciRawR0Term();
204#endif
205 }
206 else
207 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
208 IntNetR0Term();
209 }
210 else
211 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
212 PGMDeregisterStringFormatTypes();
213 }
214 else
215 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
216 HMR0Term();
217 }
218 else
219 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
220 GMMR0Term();
221 }
222 else
223 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
224 GVMMR0Term();
225 }
226 else
227 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
228 vmmTermFormatTypes();
229 }
230 else
231 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
232
233 LogFlow(("ModuleInit: failed %Rrc\n", rc));
234 return rc;
235}
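/*
 * Note on the pattern above: ModuleInit() brings the subsystems up in a fixed
 * order and, on failure, unwinds the ones already initialized in reverse order
 * by falling through the else/cleanup branches. A minimal sketch of the same
 * idea with hypothetical helpers (illustrative only, not part of the VMM API):
 *
 * @code
 *     int rc = SubsysAInit();
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = SubsysBInit();
 *         if (RT_SUCCESS(rc))
 *             return VINF_SUCCESS;        // everything is up
 *         SubsysATerm();                  // unwind in reverse order
 *     }
 *     return rc;                          // first failure wins
 * @endcode
 */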
236
237
238/**
239 * Terminate the module.
240 * This is called when we're finally unloaded.
241 *
242 * @param hMod Image handle for use in APIs.
243 */
244DECLEXPORT(void) ModuleTerm(void *hMod)
245{
246 NOREF(hMod);
247 LogFlow(("ModuleTerm:\n"));
248
249 /*
250 * Terminate the CPUM module (Local APIC cleanup).
251 */
252 CPUMR0ModuleTerm();
253
254 /*
255 * Terminate the internal network service.
256 */
257 IntNetR0Term();
258
259 /*
260 * PGM (Darwin), HM and PciRaw global cleanup.
261 */
262#ifdef VBOX_WITH_PCI_PASSTHROUGH
263 PciRawR0Term();
264#endif
265 PGMDeregisterStringFormatTypes();
266 HMR0Term();
267#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
268 vmmR0TripleFaultHackTerm();
269#endif
270
271 /*
272 * Destroy the GMM and GVMM instances.
273 */
274 GMMR0Term();
275 GVMMR0Term();
276
277 vmmTermFormatTypes();
278
279 LogFlow(("ModuleTerm: returns\n"));
280}
281
282
283/**
284 * Initializes VMM specific members when the GVM structure is created,
285 * allocating loggers and stuff.
286 *
287 * The loggers are allocated here so that we can update their settings before
288 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
289 *
290 * @returns VBox status code.
291 * @param pGVM The global (ring-0) VM structure.
292 */
293VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
294{
295 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
296
297 /*
298 * Initialize all members first.
299 */
300 pGVM->vmmr0.s.fCalledInitVm = false;
301 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
302 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
303 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
304 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
305 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
306 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
307 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
308 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
309 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
310 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
311
312 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
313 {
314 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
315 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
316 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
317 pGVCpu->vmmr0.s.pPreemptState = NULL;
318 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
319 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
320 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
321 }
322
323 /*
324 * Create the loggers.
325 */
326 return vmmR0InitLoggers(pGVM);
327}
328
329
330/**
331 * Initiates the R0 driver for a particular VM instance.
332 *
333 * @returns VBox status code.
334 *
335 * @param pGVM The global (ring-0) VM structure.
336 * @param uSvnRev The SVN revision of the ring-3 part.
337 * @param uBuildType Build type indicator.
338 * @thread EMT(0)
339 */
340static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
341{
342 /*
343 * Match the SVN revisions and build type.
344 */
345 if (uSvnRev != VMMGetSvnRev())
346 {
347 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
348 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
349 return VERR_VMM_R0_VERSION_MISMATCH;
350 }
351 if (uBuildType != vmmGetBuildType())
352 {
353 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
354 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
355 return VERR_VMM_R0_VERSION_MISMATCH;
356 }
357
358 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
359 if (RT_FAILURE(rc))
360 return rc;
361
362 /* Don't allow this to be called more than once. */
363 if (!pGVM->vmmr0.s.fCalledInitVm)
364 pGVM->vmmr0.s.fCalledInitVm = true;
365 else
366 return VERR_ALREADY_INITIALIZED;
367
368#ifdef LOG_ENABLED
369
370 /*
371 * Register the EMT R0 logger instance for VCPU 0.
372 */
373 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
374 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
375 {
376# if 0 /* testing of the logger. */
377 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
378 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
379 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
380 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
381
382 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
383 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
384 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
385 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
386
387 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
388 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
389 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
390 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
391
392 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
393 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
394 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
395 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
396 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
397 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
398
399 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
400 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
401
402 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
403 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
404 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
405# endif
406# ifdef VBOX_WITH_R0_LOGGING
407 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
408 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
409 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
410# endif
411 }
412#endif /* LOG_ENABLED */
413
414 /*
415 * Check if the host supports high resolution timers or not.
416 */
417 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
418 && !RTTimerCanDoHighResolution())
419 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
420
421 /*
422 * Initialize the per VM data for GVMM and GMM.
423 */
424 rc = GVMMR0InitVM(pGVM);
425 if (RT_SUCCESS(rc))
426 {
427 /*
428 * Init HM, CPUM and PGM (Darwin only).
429 */
430 rc = HMR0InitVM(pGVM);
431 if (RT_SUCCESS(rc))
432 {
433 rc = CPUMR0InitVM(pGVM);
434 if (RT_SUCCESS(rc))
435 {
436 rc = PGMR0InitVM(pGVM);
437 if (RT_SUCCESS(rc))
438 {
439 rc = EMR0InitVM(pGVM);
440 if (RT_SUCCESS(rc))
441 {
442#ifdef VBOX_WITH_PCI_PASSTHROUGH
443 rc = PciRawR0InitVM(pGVM);
444#endif
445 if (RT_SUCCESS(rc))
446 {
447 rc = GIMR0InitVM(pGVM);
448 if (RT_SUCCESS(rc))
449 {
450 GVMMR0DoneInitVM(pGVM);
451
452 /*
453 * Collect a bit of info for the VM release log.
454 */
455 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
456 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
457 return rc;
458
459 /* bail out */
460 //GIMR0TermVM(pGVM);
461 }
462#ifdef VBOX_WITH_PCI_PASSTHROUGH
463 PciRawR0TermVM(pGVM);
464#endif
465 }
466 }
467 }
468 }
469 HMR0TermVM(pGVM);
470 }
471 }
472
473 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
474 return rc;
475}
476
477
478/**
479 * Does EMT specific VM initialization.
480 *
481 * @returns VBox status code.
482 * @param pGVM The ring-0 VM structure.
483 * @param idCpu The EMT that's calling.
484 */
485static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
486{
487 /* Paranoia (caller checked these already). */
488 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
489 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
490
491#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
492 /*
493 * Registration of ring 0 loggers.
494 */
495 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
496 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
497 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
498 {
499 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
500 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
501 }
502#endif
503
504 return VINF_SUCCESS;
505}
506
507
508
509/**
510 * Terminates the R0 bits for a particular VM instance.
511 *
512 * This is normally called by ring-3 as part of the VM termination process, but
513 * may alternatively be called during the support driver session cleanup when
514 * the VM object is destroyed (see GVMM).
515 *
516 * @returns VBox status code.
517 *
518 * @param pGVM The global (ring-0) VM structure.
519 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
520 * thread.
521 * @thread EMT(0) or session clean up thread.
522 */
523VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
524{
525 /*
526 * Check EMT(0) claim if we're called from userland.
527 */
528 if (idCpu != NIL_VMCPUID)
529 {
530 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
531 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
532 if (RT_FAILURE(rc))
533 return rc;
534 }
535
536#ifdef VBOX_WITH_PCI_PASSTHROUGH
537 PciRawR0TermVM(pGVM);
538#endif
539
540 /*
541 * Tell GVMM what we're up to and check that we only do this once.
542 */
543 if (GVMMR0DoingTermVM(pGVM))
544 {
545 GIMR0TermVM(pGVM);
546
547 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
548 * here to make sure we don't leak any shared pages if we crash... */
549 HMR0TermVM(pGVM);
550 }
551
552 /*
553 * Deregister the logger for this EMT.
554 */
555 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
556
557 /*
558 * Start log flusher thread termination.
559 */
560 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
561 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
562 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
563
564 return VINF_SUCCESS;
565}
566
567
568/**
569 * This is called at the end of gvmmR0CleanupVM().
570 *
571 * @param pGVM The global (ring-0) VM structure.
572 */
573VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
574{
575 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
576 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
577 {
578 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
579
580 /** @todo Can we busy wait here for all thread-context hooks to be
581 * deregistered before releasing (destroying) it? Only until we find a
582 * solution for not deregistering hooks every time we're leaving HMR0
583 * context. */
584 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
585 }
586
587 vmmR0CleanupLoggers(pGVM);
588}
589
590
591/**
592 * An interrupt or unhalt force flag is set, deal with it.
593 *
594 * @returns VINF_SUCCESS (or VINF_EM_HALT).
595 * @param pVCpu The cross context virtual CPU structure.
596 * @param uMWait Result from EMMonitorWaitIsActive().
597 * @param enmInterruptibility Guest CPU interruptibility level.
598 */
599static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
600{
601 Assert(!TRPMHasTrap(pVCpu));
602 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
603 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
604
605 /*
606 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
607 */
608 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
609 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
610 {
611 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
612 {
613 uint8_t u8Interrupt = 0;
614 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
615 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
616 if (RT_SUCCESS(rc))
617 {
618 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
619
620 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
621 AssertRCSuccess(rc);
622 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
623 return rc;
624 }
625 }
626 }
627 /*
628 * SMI is not implemented yet, at least not here.
629 */
630 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
631 {
632 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
633 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
634 return VINF_EM_HALT;
635 }
636 /*
637 * NMI.
638 */
639 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
640 {
641 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
642 {
643 /** @todo later. */
644 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
645 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
646 return VINF_EM_HALT;
647 }
648 }
649 /*
650 * Nested-guest virtual interrupt.
651 */
652 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
653 {
654 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
655 {
656 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
657 * here before injecting the virtual interrupt. See emR3ForcedActions
658 * for details. */
659 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
660 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
661 return VINF_EM_HALT;
662 }
663 }
664
665 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
666 {
667 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
668 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
669 return VINF_SUCCESS;
670 }
671 if (uMWait > 1)
672 {
673 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
674 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
675 return VINF_SUCCESS;
676 }
677
678 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
680 return VINF_EM_HALT;
681}
682
683
684/**
685 * This does one round of vmR3HaltGlobal1Halt().
686 *
687 * The rationale here is that we'll reduce latency in interrupt situations if we
688 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
689 * MWAIT), but do one round of blocking here instead and hope the interrupt is
690 * raised in the meanwhile.
691 *
692 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
693 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
694 * ring-0 call (unless we're too close to a timer event). When the interrupt
695 * wakes us up, we'll return from ring-0 and EM will by instinct do a
696 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
697 * back to VMMR0EntryFast().
698 *
699 * @returns VINF_SUCCESS or VINF_EM_HALT.
700 * @param pGVM The ring-0 VM structure.
701 * @param pGVCpu The ring-0 virtual CPU structure.
702 *
703 * @todo r=bird: All the blocking/waiting and EMT management should move out of
704 * the VM module, probably to VMM. Then this would be more weird wrt
705 * parameters and statistics.
706 */
707static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
708{
709 /*
710 * Do spin stat historization.
711 */
712 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
713 { /* likely */ }
714 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
715 {
716 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
717 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
718 }
719 else
720 {
721 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
722 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
723 }
724
725 /*
726 * Flags that make us go to ring-3.
727 */
728 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
729 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
730 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
731 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
732 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
733 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
734 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
735 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
736
737 /*
738 * Check preconditions.
739 */
740 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
741 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
742 if ( pGVCpu->vmm.s.fMayHaltInRing0
743 && !TRPMHasTrap(pGVCpu)
744 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
745 || uMWait > 1))
746 {
747 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
748 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
749 {
750 /*
751 * Interrupts pending already?
752 */
753 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
754 APICUpdatePendingInterrupts(pGVCpu);
755
756 /*
757 * Flags that wake up from the halted state.
758 */
759 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
760 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
761
762 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
763 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
764 ASMNopPause();
765
766 /*
767 * Check out how long till the next timer event.
768 */
769 uint64_t u64Delta;
770 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
771
772 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
773 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
774 {
775 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
776 APICUpdatePendingInterrupts(pGVCpu);
777
778 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
779 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
780
781 /*
782 * Wait if there is enough time to the next timer event.
783 */
784 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
785 {
786 /* If there are few other CPU cores around, we will procrastinate a
787 little before going to sleep, hoping for some device raising an
788 interrupt or similar. Though, the best thing here would be to
789 dynamically adjust the spin count according to its usefulness or
790 something... */
791 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
792 && RTMpGetOnlineCount() >= 4)
793 {
794 /** @todo Figure out how we can skip this if it hasn't helped recently...
795 * @bugref{9172#c12} */
796 uint32_t cSpinLoops = 42;
797 while (cSpinLoops-- > 0)
798 {
799 ASMNopPause();
800 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
801 APICUpdatePendingInterrupts(pGVCpu);
802 ASMNopPause();
803 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
804 {
805 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
806 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
807 return VINF_EM_HALT;
808 }
809 ASMNopPause();
810 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
811 {
812 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
813 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
814 return VINF_EM_HALT;
815 }
816 ASMNopPause();
817 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
818 {
819 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
820 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
821 }
822 ASMNopPause();
823 }
824 }
825
826 /*
827 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
828 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
829 * After changing the state we must recheck the force flags of course.
830 */
831 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
832 {
833 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
834 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
835 {
836 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
837 APICUpdatePendingInterrupts(pGVCpu);
838
839 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
840 {
841 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
842 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
843 }
844
845 /* Okay, block! */
846 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
847 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
848 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
849 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
850 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
851
852 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
853 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
854 if ( rc == VINF_SUCCESS
855 || rc == VERR_INTERRUPTED)
856 {
857 /* Keep some stats like ring-3 does. */
858 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
859 if (cNsOverslept > 50000)
860 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
861 else if (cNsOverslept < -50000)
862 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
863 else
864 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
865
866 /*
867 * Recheck whether we can resume execution or have to go to ring-3.
868 */
869 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
870 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
871 {
872 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
873 APICUpdatePendingInterrupts(pGVCpu);
874 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
875 {
876 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
877 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
878 }
879 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
880 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
881 }
882 else
883 {
884 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
885 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
886 }
887 }
888 else
889 {
890 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
891 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
892 }
893 }
894 else
895 {
896 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
897 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
898 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
899 }
900 }
901 else
902 {
903 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
904 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
905 }
906 }
907 else
908 {
909 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
910 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
911 }
912 }
913 else
914 {
915 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
916 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
917 }
918 }
919 else
920 {
921 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
922 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
923 }
924 }
925 else
926 {
927 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
928 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
929 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
930 }
931
932 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
933 return VINF_EM_HALT;
934}
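/*
 * Rough shape of the ring-0 halt decision above (summary of the code, no new
 * behaviour implied):
 *
 *   1. Check fMayHaltInRing0, pending traps and guest interruptibility; bail
 *      out to ring-3 (VINF_EM_HALT) otherwise.
 *   2. Check the VM/VMCPU force flags; if any are set, go to ring-3.
 *   3. Poll the timers (TMTimerPollGIP) and only block if the next event is
 *      further away than cNsSpinBlockThreshold.
 *   4. Optionally spin a little (ASMNopPause loop), hoping an interrupt shows up.
 *   5. Switch to VMCPUSTATE_STARTED_HALTED, recheck the force flags, then block
 *      in GVMMR0SchedHalt until the GIP deadline or a wake-up/poke.
 *   6. On wake-up, either resume execution (VINF_SUCCESS) or return VINF_EM_HALT.
 */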
935
936
937/**
938 * VMM ring-0 thread-context callback.
939 *
940 * This does common HM state updating and calls the HM-specific thread-context
941 * callback.
942 *
943 * This is used together with RTThreadCtxHookCreate() on platforms which
944 * support it, and directly from VMMR0EmtPrepareForBlocking() and
945 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
946 *
947 * @param enmEvent The thread-context event.
948 * @param pvUser Opaque pointer to the VMCPU.
949 *
950 * @thread EMT(pvUser)
951 */
952static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
953{
954 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
955
956 switch (enmEvent)
957 {
958 case RTTHREADCTXEVENT_IN:
959 {
960 /*
961 * Linux may call us with preemption enabled (really!) but technically we
962 * cannot get preempted here, otherwise we end up in an infinite recursion
963 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
964 * ad infinitum). Let's just disable preemption for now...
965 */
966 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
967 * preemption after doing the callout (one or two functions up the
968 * call chain). */
969 /** @todo r=ramshankar: See @bugref{5313#c30}. */
970 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
971 RTThreadPreemptDisable(&ParanoidPreemptState);
972
973 /* We need to update the VCPU <-> host CPU mapping. */
974 RTCPUID idHostCpu;
975 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
976 pVCpu->iHostCpuSet = iHostCpuSet;
977 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
978
979 /* In the very unlikely event that the GIP delta for the CPU we're
980 rescheduled onto needs calculating, try to force a return to ring-3.
981 We unfortunately cannot do the measurements right here. */
982 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
983 { /* likely */ }
984 else
985 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
986
987 /* Invoke the HM-specific thread-context callback. */
988 HMR0ThreadCtxCallback(enmEvent, pvUser);
989
990 /* Restore preemption. */
991 RTThreadPreemptRestore(&ParanoidPreemptState);
992 break;
993 }
994
995 case RTTHREADCTXEVENT_OUT:
996 {
997 /* Invoke the HM-specific thread-context callback. */
998 HMR0ThreadCtxCallback(enmEvent, pvUser);
999
1000 /*
1001 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1002 * have the same host CPU associated with them.
1003 */
1004 pVCpu->iHostCpuSet = UINT32_MAX;
1005 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1006 break;
1007 }
1008
1009 default:
1010 /* Invoke the HM-specific thread-context callback. */
1011 HMR0ThreadCtxCallback(enmEvent, pvUser);
1012 break;
1013 }
1014}
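/*
 * For reference: RTTHREADCTXEVENT_IN is delivered when this EMT is scheduled
 * back onto a host CPU and RTTHREADCTXEVENT_OUT when it is about to be
 * switched away, which is why the VCPU <-> host CPU mapping is established in
 * the IN case and torn down in the OUT case above.
 */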
1015
1016
1017/**
1018 * Creates thread switching hook for the current EMT thread.
1019 *
1020 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1021 * platform does not implement switcher hooks, no hooks will be created and the
1022 * member is set to NIL_RTTHREADCTXHOOK.
1023 *
1024 * @returns VBox status code.
1025 * @param pVCpu The cross context virtual CPU structure.
1026 * @thread EMT(pVCpu)
1027 */
1028VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1029{
1030 VMCPU_ASSERT_EMT(pVCpu);
1031 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1032
1033#if 1 /* To disable this stuff change to zero. */
1034 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1035 if (RT_SUCCESS(rc))
1036 {
1037 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1038 return rc;
1039 }
1040#else
1041 RT_NOREF(vmmR0ThreadCtxCallback);
1042 int rc = VERR_NOT_SUPPORTED;
1043#endif
1044
1045 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1046 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1047 if (rc == VERR_NOT_SUPPORTED)
1048 return VINF_SUCCESS;
1049
1050 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1051 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1052}
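/*
 * Typical lifecycle of the context hook, as used elsewhere in this file (the
 * actual call sites are in GVMM, VMMR0EntryFast and VMMR0CleanupVM):
 *
 * @code
 *     VMMR0ThreadCtxHookCreateForEmt(pVCpu);             // at EMT registration
 *     ...
 *     RTThreadCtxHookEnable(pVCpu->vmmr0.s.hCtxHook);    // before entering HM context
 *     ...                                                // run guest code
 *     RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);   // before returning to ring-3
 *     ...
 *     VMMR0ThreadCtxHookDestroyForEmt(pVCpu);            // at VM cleanup
 * @endcode
 */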
1053
1054
1055/**
1056 * Destroys the thread switching hook for the specified VCPU.
1057 *
1058 * @param pVCpu The cross context virtual CPU structure.
1059 * @remarks Can be called from any thread.
1060 */
1061VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1062{
1063 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1064 AssertRC(rc);
1065 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1066}
1067
1068
1069/**
1070 * Disables the thread switching hook for this VCPU (if we got one).
1071 *
1072 * @param pVCpu The cross context virtual CPU structure.
1073 * @thread EMT(pVCpu)
1074 *
1075 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1076 * this call. This means you have to be careful with what you do!
1077 */
1078VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1079{
1080 /*
1081 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1082 * @bugref{7726#c19} explains the need for this trick:
1083 *
1084 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1085 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1086 * longjmp & normal return to ring-3, which opens a window where we may be
1087 * rescheduled without changing GVMCPUID::idHostCpu and cause confusion if
1088 * the CPU starts executing a different EMT. Both functions first disable
1089 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1090 * an opening for getting preempted.
1091 */
1092 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1093 * all the time. */
1094
1095 /*
1096 * Disable the context hook, if we got one.
1097 */
1098 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1099 {
1100 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1101 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1102 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1103 AssertRC(rc);
1104 }
1105}
1106
1107
1108/**
1109 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1110 *
1111 * @returns true if registered, false otherwise.
1112 * @param pVCpu The cross context virtual CPU structure.
1113 */
1114DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1115{
1116 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1117}
1118
1119
1120/**
1121 * Whether thread-context hooks are registered for this VCPU.
1122 *
1123 * @returns true if registered, false otherwise.
1124 * @param pVCpu The cross context virtual CPU structure.
1125 */
1126VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1127{
1128 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1129}
1130
1131
1132/**
1133 * Returns the ring-0 release logger instance.
1134 *
1135 * @returns Pointer to release logger, NULL if not configured.
1136 * @param pVCpu The cross context virtual CPU structure of the caller.
1137 * @thread EMT(pVCpu)
1138 */
1139VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1140{
1141 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1142}
1143
1144
1145#ifdef VBOX_WITH_STATISTICS
1146/**
1147 * Record return code statistics
1148 * @param pVM The cross context VM structure.
1149 * @param pVCpu The cross context virtual CPU structure.
1150 * @param rc The status code.
1151 */
1152static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1153{
1154 /*
1155 * Collect statistics.
1156 */
1157 switch (rc)
1158 {
1159 case VINF_SUCCESS:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1161 break;
1162 case VINF_EM_RAW_INTERRUPT:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1164 break;
1165 case VINF_EM_RAW_INTERRUPT_HYPER:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1167 break;
1168 case VINF_EM_RAW_GUEST_TRAP:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1170 break;
1171 case VINF_EM_RAW_RING_SWITCH:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1173 break;
1174 case VINF_EM_RAW_RING_SWITCH_INT:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1176 break;
1177 case VINF_EM_RAW_STALE_SELECTOR:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1179 break;
1180 case VINF_EM_RAW_IRET_TRAP:
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1182 break;
1183 case VINF_IOM_R3_IOPORT_READ:
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1185 break;
1186 case VINF_IOM_R3_IOPORT_WRITE:
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1188 break;
1189 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1191 break;
1192 case VINF_IOM_R3_MMIO_READ:
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1194 break;
1195 case VINF_IOM_R3_MMIO_WRITE:
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1197 break;
1198 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1200 break;
1201 case VINF_IOM_R3_MMIO_READ_WRITE:
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1203 break;
1204 case VINF_PATM_HC_MMIO_PATCH_READ:
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1206 break;
1207 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1209 break;
1210 case VINF_CPUM_R3_MSR_READ:
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1212 break;
1213 case VINF_CPUM_R3_MSR_WRITE:
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1215 break;
1216 case VINF_EM_RAW_EMULATE_INSTR:
1217 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1218 break;
1219 case VINF_PATCH_EMULATE_INSTR:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1221 break;
1222 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1224 break;
1225 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1227 break;
1228 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1230 break;
1231 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1233 break;
1234 case VINF_CSAM_PENDING_ACTION:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1236 break;
1237 case VINF_PGM_SYNC_CR3:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1239 break;
1240 case VINF_PATM_PATCH_INT3:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1242 break;
1243 case VINF_PATM_PATCH_TRAP_PF:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1245 break;
1246 case VINF_PATM_PATCH_TRAP_GP:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1248 break;
1249 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1251 break;
1252 case VINF_EM_RESCHEDULE_REM:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1254 break;
1255 case VINF_EM_RAW_TO_R3:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1257 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1259 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1261 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1262 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1263 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1265 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1267 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1269 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1271 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1273 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1275 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1277 else
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1279 break;
1280
1281 case VINF_EM_RAW_TIMER_PENDING:
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1283 break;
1284 case VINF_EM_RAW_INTERRUPT_PENDING:
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1286 break;
1287 case VINF_VMM_CALL_HOST:
1288 switch (pVCpu->vmm.s.enmCallRing3Operation)
1289 {
1290 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1291 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1292 break;
1293 case VMMCALLRING3_VM_R0_ASSERTION:
1294 default:
1295 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1296 break;
1297 }
1298 break;
1299 case VINF_PATM_DUPLICATE_FUNCTION:
1300 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1301 break;
1302 case VINF_PGM_CHANGE_MODE:
1303 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1304 break;
1305 case VINF_PGM_POOL_FLUSH_PENDING:
1306 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1307 break;
1308 case VINF_EM_PENDING_REQUEST:
1309 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1310 break;
1311 case VINF_EM_HM_PATCH_TPR_INSTR:
1312 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1313 break;
1314 default:
1315 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1316 break;
1317 }
1318}
1319#endif /* VBOX_WITH_STATISTICS */
1320
1321
1322/**
1323 * The Ring 0 entry point, called by the fast-ioctl path.
1324 *
1325 * @param pGVM The global (ring-0) VM structure.
1326 * @param pVMIgnored The cross context VM structure. The return code is
1327 * stored in pVM->vmm.s.iLastGZRc.
1328 * @param idCpu The Virtual CPU ID of the calling EMT.
1329 * @param enmOperation Which operation to execute.
1330 * @remarks Assume called with interrupts _enabled_.
1331 */
1332VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1333{
1334 RT_NOREF(pVMIgnored);
1335
1336 /*
1337 * Validation.
1338 */
1339 if ( idCpu < pGVM->cCpus
1340 && pGVM->cCpus == pGVM->cCpusUnsafe)
1341 { /*likely*/ }
1342 else
1343 {
1344 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1345 return;
1346 }
1347
1348 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1349 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1350 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1351 && pGVCpu->hNativeThreadR0 == hNativeThread))
1352 { /* likely */ }
1353 else
1354 {
1355 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1356 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1357 return;
1358 }
1359
1360 /*
1361 * Perform requested operation.
1362 */
1363 switch (enmOperation)
1364 {
1365 /*
1366 * Run guest code using the available hardware acceleration technology.
1367 */
1368 case VMMR0_DO_HM_RUN:
1369 {
1370 for (;;) /* hlt loop */
1371 {
1372 /*
1373 * Disable ring-3 calls & blocking till we've successfully entered HM.
1374 * Otherwise we sometimes end up blocking at the final Log4 statement
1375 * in VMXR0Enter, while still in a somewhat in-between state.
1376 */
1377 VMMRZCallRing3Disable(pGVCpu);
1378
1379 /*
1380 * Disable preemption.
1381 */
1382 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1383 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1384 RTThreadPreemptDisable(&PreemptState);
1385 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1386
1387 /*
1388 * Get the host CPU identifiers, make sure they are valid and that
1389 * we've got a TSC delta for the CPU.
1390 */
1391 RTCPUID idHostCpu;
1392 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1393 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1394 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1395 {
1396 pGVCpu->iHostCpuSet = iHostCpuSet;
1397 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1398
1399 /*
1400 * Update the periodic preemption timer if it's active.
1401 */
1402 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1403 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1404
1405#ifdef VMM_R0_TOUCH_FPU
1406 /*
1407 * Make sure we've got the FPU state loaded so we don't need to clear
1408 * CR0.TS and get out of sync with the host kernel when loading the guest
1409 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1410 */
1411 CPUMR0TouchHostFpu();
1412#endif
1413 int rc;
1414 bool fPreemptRestored = false;
1415 if (!HMR0SuspendPending())
1416 {
1417 /*
1418 * Enable the context switching hook.
1419 */
1420 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1421 {
1422 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1423 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1424 }
1425
1426 /*
1427 * Enter HM context.
1428 */
1429 rc = HMR0Enter(pGVCpu);
1430 if (RT_SUCCESS(rc))
1431 {
1432 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1433
1434 /*
1435 * When preemption hooks are in place, enable preemption now that
1436 * we're in HM context.
1437 */
1438 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1439 {
1440 fPreemptRestored = true;
1441 pGVCpu->vmmr0.s.pPreemptState = NULL;
1442 RTThreadPreemptRestore(&PreemptState);
1443 }
1444 VMMRZCallRing3Enable(pGVCpu);
1445
1446 /*
1447 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1448 */
1449 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1450
1451 /*
1452 * Assert sanity on the way out. Using manual assertion code here, as normal
1453 * assertions would panic the host since we're outside the setjmp/longjmp zone.
1454 */
1455 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1456 && RT_SUCCESS_NP(rc)
1457 && rc != VINF_VMM_CALL_HOST ))
1458 {
1459 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1460 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1461 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1462 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1463 }
1464#if 0
1465 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1466 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1467 {
1468 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1469 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1470 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1471 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1472 }
1473#endif
1474
1475 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1476 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1477 }
1478 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1479
1480 /*
1481 * Invalidate the host CPU identifiers before we disable the context
1482 * hook / restore preemption.
1483 */
1484 pGVCpu->iHostCpuSet = UINT32_MAX;
1485 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1486
1487 /*
1488 * Disable context hooks. Due to unresolved cleanup issues, we
1489 * cannot leave the hooks enabled when we return to ring-3.
1490 *
1491 * Note! At the moment HM may also have disabled the hook
1492 * when we get here, but the IPRT API handles that.
1493 */
1494 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1495 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1496 }
1497 /*
1498 * The system is about to go into suspend mode; go back to ring 3.
1499 */
1500 else
1501 {
1502 pGVCpu->iHostCpuSet = UINT32_MAX;
1503 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1504 rc = VINF_EM_RAW_INTERRUPT;
1505 }
1506
1507 /** @todo When HM stops messing with the context hook state, we'll disable
1508 * preemption again before the RTThreadCtxHookDisable call. */
1509 if (!fPreemptRestored)
1510 {
1511 pGVCpu->vmmr0.s.pPreemptState = NULL;
1512 RTThreadPreemptRestore(&PreemptState);
1513 }
1514
1515 pGVCpu->vmm.s.iLastGZRc = rc;
1516
1517 /* Fire dtrace probe and collect statistics. */
1518 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1519#ifdef VBOX_WITH_STATISTICS
1520 vmmR0RecordRC(pGVM, pGVCpu, rc);
1521#endif
1522 VMMRZCallRing3Enable(pGVCpu);
1523
1524 /*
1525 * If this is a halt.
1526 */
1527 if (rc != VINF_EM_HALT)
1528 { /* we're not in a hurry for a HLT, so prefer this path */ }
1529 else
1530 {
1531 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1532 if (rc == VINF_SUCCESS)
1533 {
1534 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1535 continue;
1536 }
1537 pGVCpu->vmm.s.cR0HaltsToRing3++;
1538 }
1539 }
1540 /*
1541 * Invalid CPU set index or TSC delta in need of measuring.
1542 */
1543 else
1544 {
1545 pGVCpu->vmmr0.s.pPreemptState = NULL;
1546 pGVCpu->iHostCpuSet = UINT32_MAX;
1547 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1548 RTThreadPreemptRestore(&PreemptState);
1549
1550 VMMRZCallRing3Enable(pGVCpu);
1551
1552 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1553 {
1554 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1555 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1556 0 /*default cTries*/);
1557 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1558 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1559 else
1560 pGVCpu->vmm.s.iLastGZRc = rc;
1561 }
1562 else
1563 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1564 }
1565 break;
1566 } /* halt loop. */
1567 break;
1568 }
1569
1570#ifdef VBOX_WITH_NEM_R0
1571# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1572 case VMMR0_DO_NEM_RUN:
1573 {
1574 /*
1575 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1576 */
1577# ifdef VBOXSTRICTRC_STRICT_ENABLED
1578 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1579# else
1580 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1581# endif
1582 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1583
1584 pGVCpu->vmm.s.iLastGZRc = rc;
1585
1586 /*
1587 * Fire dtrace probe and collect statistics.
1588 */
1589 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1590# ifdef VBOX_WITH_STATISTICS
1591 vmmR0RecordRC(pGVM, pGVCpu, rc);
1592# endif
1593 break;
1594 }
1595# endif
1596#endif
1597
1598 /*
1599 * For profiling.
1600 */
1601 case VMMR0_DO_NOP:
1602 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1603 break;
1604
1605 /*
1606 * Shouldn't happen.
1607 */
1608 default:
1609 AssertMsgFailed(("%#x\n", enmOperation));
1610 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1611 break;
1612 }
1613}
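/*
 * The fast path above is reached from ring-3 via the support driver's fast
 * ioctl; the real status code ends up in pGVCpu->vmm.s.iLastGZRc rather than
 * being returned directly. A rough sketch of the ring-3 side (the actual call
 * site lives in VMMR3/SUP; helper and member names are shown from memory and
 * may differ between versions):
 *
 * @code
 *     int rc = SUPR3CallVMMR0Fast(pVM->pVMR0ForCall, VMMR0_DO_HM_RUN, pVCpu->idCpu);
 *     if (RT_LIKELY(rc == VINF_SUCCESS))
 *         rc = pVCpu->vmm.s.iLastGZRc;   // the status produced in ring-0
 * @endcode
 */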
1614
1615
1616/**
1617 * Validates a session or VM session argument.
1618 *
1619 * @returns true / false accordingly.
1620 * @param pGVM The global (ring-0) VM structure.
1621 * @param pClaimedSession The session claim to validate.
1622 * @param pSession The session argument.
1623 */
1624DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1625{
1626 /* This must be set! */
1627 if (!pSession)
1628 return false;
1629
1630 /* Only one out of the two. */
1631 if (pGVM && pClaimedSession)
1632 return false;
1633 if (pGVM)
1634 pClaimedSession = pGVM->pSession;
1635 return pClaimedSession == pSession;
1636}
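/*
 * Usage sketch: request handlers that carry a session pointer in the request
 * packet validate it against the caller's session like this (hypothetical
 * request type, matching the pattern of the GVMM/GMM request cases below):
 *
 * @code
 *     PMYREQ pReq = (PMYREQ)pReqHdr;                         // hypothetical request
 *     if (!vmmR0IsValidSession(pGVM, pReq->pSession, pSession))
 *         return VERR_PERMISSION_DENIED;
 * @endcode
 */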
1637
1638
1639/**
1640 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1641 * called through a longjmp so we can exit safely on failure.
1642 *
1643 * @returns VBox status code.
1644 * @param pGVM The global (ring-0) VM structure.
1645 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1646 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1647 * @param enmOperation Which operation to execute.
1648 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1649 * The support driver validates this if it's present.
1650 * @param u64Arg Some simple constant argument.
1651 * @param pSession The session of the caller.
1652 *
1653 * @remarks Assume called with interrupts _enabled_.
1654 */
1655DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1656 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1657{
1658 /*
1659 * Validate pGVM and idCpu for consistency and validity.
1660 */
1661 if (pGVM != NULL)
1662 {
1663 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1664 { /* likely */ }
1665 else
1666 {
1667 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1668 return VERR_INVALID_POINTER;
1669 }
1670
1671 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1672 { /* likely */ }
1673 else
1674 {
1675 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1676 return VERR_INVALID_PARAMETER;
1677 }
1678
1679 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1680 && pGVM->enmVMState <= VMSTATE_TERMINATED
1681 && pGVM->pSession == pSession
1682 && pGVM->pSelf == pGVM))
1683 { /* likely */ }
1684 else
1685 {
1686 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1687 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1688 return VERR_INVALID_POINTER;
1689 }
1690 }
1691 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1692 { /* likely */ }
1693 else
1694 {
1695 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1696 return VERR_INVALID_PARAMETER;
1697 }
1698
1699 /*
1700 * Process the request.
1701 */
1702 int rc;
1703 switch (enmOperation)
1704 {
1705 /*
1706 * GVM requests
1707 */
1708 case VMMR0_DO_GVMM_CREATE_VM:
1709 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1710 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1711 else
1712 rc = VERR_INVALID_PARAMETER;
1713 break;
1714
1715 case VMMR0_DO_GVMM_DESTROY_VM:
1716 if (pReqHdr == NULL && u64Arg == 0)
1717 rc = GVMMR0DestroyVM(pGVM);
1718 else
1719 rc = VERR_INVALID_PARAMETER;
1720 break;
1721
1722 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1723 if (pGVM != NULL)
1724 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1725 else
1726 rc = VERR_INVALID_PARAMETER;
1727 break;
1728
1729 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1730 if (pGVM != NULL)
1731 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1732 else
1733 rc = VERR_INVALID_PARAMETER;
1734 break;
1735
1736 case VMMR0_DO_GVMM_SCHED_HALT:
1737 if (pReqHdr)
1738 return VERR_INVALID_PARAMETER;
1739 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1740 break;
1741
1742 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1743 if (pReqHdr || u64Arg)
1744 return VERR_INVALID_PARAMETER;
1745 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1746 break;
1747
1748 case VMMR0_DO_GVMM_SCHED_POKE:
1749 if (pReqHdr || u64Arg)
1750 return VERR_INVALID_PARAMETER;
1751 rc = GVMMR0SchedPoke(pGVM, idCpu);
1752 break;
1753
1754 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1755 if (u64Arg)
1756 return VERR_INVALID_PARAMETER;
1757 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1758 break;
1759
1760 case VMMR0_DO_GVMM_SCHED_POLL:
1761 if (pReqHdr || u64Arg > 1)
1762 return VERR_INVALID_PARAMETER;
1763 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1764 break;
1765
1766 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1767 if (u64Arg)
1768 return VERR_INVALID_PARAMETER;
1769 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1770 break;
1771
1772 case VMMR0_DO_GVMM_RESET_STATISTICS:
1773 if (u64Arg)
1774 return VERR_INVALID_PARAMETER;
1775 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1776 break;
1777
1778 /*
1779 * Initialize the R0 part of a VM instance.
1780 */
1781 case VMMR0_DO_VMMR0_INIT:
1782 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1783 break;
1784
1785 /*
1786 * Does EMT specific ring-0 init.
1787 */
1788 case VMMR0_DO_VMMR0_INIT_EMT:
1789 rc = vmmR0InitVMEmt(pGVM, idCpu);
1790 break;
1791
1792 /*
1793 * Terminate the R0 part of a VM instance.
1794 */
1795 case VMMR0_DO_VMMR0_TERM:
1796 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1797 break;
1798
1799 /*
1800 * Update release or debug logger instances.
1801 */
1802 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1803 if (idCpu == NIL_VMCPUID)
1804 return VERR_INVALID_CPU_ID;
1805 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1806 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1807 else
1808 return VERR_INVALID_PARAMETER;
1809 break;
1810
1811 /*
1812 * Log flusher thread.
1813 */
1814 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1815 if (idCpu != NIL_VMCPUID)
1816 return VERR_INVALID_CPU_ID;
1817 if (pReqHdr == NULL)
1818 rc = vmmR0LogFlusher(pGVM);
1819 else
1820 return VERR_INVALID_PARAMETER;
1821 break;
1822
1823 /*
1824 * Wait for the flush to finish with all the buffers for the given logger.
1825 */
1826 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1827 if (idCpu == NIL_VMCPUID)
1828 return VERR_INVALID_CPU_ID;
1829 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1830 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1831 else
1832 return VERR_INVALID_PARAMETER;
1833 break;
1834
1835 /*
1836 * Attempt to enable hm mode and check the current setting.
1837 */
1838 case VMMR0_DO_HM_ENABLE:
1839 rc = HMR0EnableAllCpus(pGVM);
1840 break;
1841
1842 /*
1843 * Setup the hardware accelerated session.
1844 */
1845 case VMMR0_DO_HM_SETUP_VM:
1846 rc = HMR0SetupVM(pGVM);
1847 break;
1848
1849 /*
1850 * PGM wrappers.
1851 */
1852 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1853 if (idCpu == NIL_VMCPUID)
1854 return VERR_INVALID_CPU_ID;
1855 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1856 break;
1857
1858 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1859 if (idCpu == NIL_VMCPUID)
1860 return VERR_INVALID_CPU_ID;
1861 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1862 break;
1863
1864 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1865 if (idCpu == NIL_VMCPUID)
1866 return VERR_INVALID_CPU_ID;
1867 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1868 break;
1869
1870 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1871 if (idCpu != 0)
1872 return VERR_INVALID_CPU_ID;
1873 rc = PGMR0PhysSetupIoMmu(pGVM);
1874 break;
1875
1876 case VMMR0_DO_PGM_POOL_GROW:
1877 if (idCpu == NIL_VMCPUID)
1878 return VERR_INVALID_CPU_ID;
1879 rc = PGMR0PoolGrow(pGVM, idCpu);
1880 break;
1881
1882 /*
1883 * GMM wrappers.
1884 */
1885 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1886 if (u64Arg)
1887 return VERR_INVALID_PARAMETER;
1888 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1889 break;
1890
1891 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1892 if (u64Arg)
1893 return VERR_INVALID_PARAMETER;
1894 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1895 break;
1896
1897 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1898 if (u64Arg)
1899 return VERR_INVALID_PARAMETER;
1900 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1901 break;
1902
1903 case VMMR0_DO_GMM_FREE_PAGES:
1904 if (u64Arg)
1905 return VERR_INVALID_PARAMETER;
1906 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1907 break;
1908
1909 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1910 if (u64Arg)
1911 return VERR_INVALID_PARAMETER;
1912 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1913 break;
1914
1915 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1916 if (u64Arg)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1919 break;
1920
1921 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1922 if (idCpu == NIL_VMCPUID)
1923 return VERR_INVALID_CPU_ID;
1924 if (u64Arg)
1925 return VERR_INVALID_PARAMETER;
1926 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1927 break;
1928
1929 case VMMR0_DO_GMM_BALLOONED_PAGES:
1930 if (u64Arg)
1931 return VERR_INVALID_PARAMETER;
1932 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1933 break;
1934
1935 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1936 if (u64Arg)
1937 return VERR_INVALID_PARAMETER;
1938 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1939 break;
1940
1941 case VMMR0_DO_GMM_SEED_CHUNK:
1942 if (pReqHdr)
1943 return VERR_INVALID_PARAMETER;
1944 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1945 break;
1946
1947 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1948 if (idCpu == NIL_VMCPUID)
1949 return VERR_INVALID_CPU_ID;
1950 if (u64Arg)
1951 return VERR_INVALID_PARAMETER;
1952 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1953 break;
1954
1955 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1956 if (idCpu == NIL_VMCPUID)
1957 return VERR_INVALID_CPU_ID;
1958 if (u64Arg)
1959 return VERR_INVALID_PARAMETER;
1960 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1961 break;
1962
1963 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1964 if (idCpu == NIL_VMCPUID)
1965 return VERR_INVALID_CPU_ID;
1966 if ( u64Arg
1967 || pReqHdr)
1968 return VERR_INVALID_PARAMETER;
1969 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1970 break;
1971
1972#ifdef VBOX_WITH_PAGE_SHARING
1973 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1974 {
1975 if (idCpu == NIL_VMCPUID)
1976 return VERR_INVALID_CPU_ID;
1977 if ( u64Arg
1978 || pReqHdr)
1979 return VERR_INVALID_PARAMETER;
1980 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1981 break;
1982 }
1983#endif
1984
1985#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1986 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1987 if (u64Arg)
1988 return VERR_INVALID_PARAMETER;
1989 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1990 break;
1991#endif
1992
1993 case VMMR0_DO_GMM_QUERY_STATISTICS:
1994 if (u64Arg)
1995 return VERR_INVALID_PARAMETER;
1996 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1997 break;
1998
1999 case VMMR0_DO_GMM_RESET_STATISTICS:
2000 if (u64Arg)
2001 return VERR_INVALID_PARAMETER;
2002 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2003 break;
2004
2005 /*
2006 * A quick GCFGM mock-up.
2007 */
2008 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2009 case VMMR0_DO_GCFGM_SET_VALUE:
2010 case VMMR0_DO_GCFGM_QUERY_VALUE:
2011 {
2012 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2013 return VERR_INVALID_PARAMETER;
2014 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2015 if (pReq->Hdr.cbReq != sizeof(*pReq))
2016 return VERR_INVALID_PARAMETER;
2017 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2018 {
2019 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2020 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2021 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2022 }
2023 else
2024 {
2025 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2026 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2027 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2028 }
2029 break;
2030 }
2031
2032 /*
2033 * PDM Wrappers.
2034 */
2035 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2036 {
2037 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2038 return VERR_INVALID_PARAMETER;
2039 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2040 break;
2041 }
2042
2043 case VMMR0_DO_PDM_DEVICE_CREATE:
2044 {
2045 if (!pReqHdr || u64Arg || idCpu != 0)
2046 return VERR_INVALID_PARAMETER;
2047 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2048 break;
2049 }
2050
2051 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2052 {
2053 if (!pReqHdr || u64Arg)
2054 return VERR_INVALID_PARAMETER;
2055 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2056 break;
2057 }
2058
2059        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2060 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2061 {
2062 if (!pReqHdr || u64Arg || idCpu != 0)
2063 return VERR_INVALID_PARAMETER;
2064 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2065 break;
2066 }
2067
2068 /*
2069 * Requests to the internal networking service.
2070 */
2071 case VMMR0_DO_INTNET_OPEN:
2072 {
2073 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2074 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2075 return VERR_INVALID_PARAMETER;
2076 rc = IntNetR0OpenReq(pSession, pReq);
2077 break;
2078 }
2079
2080 case VMMR0_DO_INTNET_IF_CLOSE:
2081 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2082 return VERR_INVALID_PARAMETER;
2083 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2084 break;
2085
2086
2087 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2088 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2089 return VERR_INVALID_PARAMETER;
2090 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2091 break;
2092
2093 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2094 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2095 return VERR_INVALID_PARAMETER;
2096 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2097 break;
2098
2099 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2100 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2101 return VERR_INVALID_PARAMETER;
2102 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2103 break;
2104
2105 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2106 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2107 return VERR_INVALID_PARAMETER;
2108 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2109 break;
2110
2111 case VMMR0_DO_INTNET_IF_SEND:
2112 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2113 return VERR_INVALID_PARAMETER;
2114 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2115 break;
2116
2117 case VMMR0_DO_INTNET_IF_WAIT:
2118 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2119 return VERR_INVALID_PARAMETER;
2120 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2121 break;
2122
2123 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2124 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2125 return VERR_INVALID_PARAMETER;
2126 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2127 break;
2128
2129#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2130 /*
2131 * Requests to host PCI driver service.
2132 */
2133 case VMMR0_DO_PCIRAW_REQ:
2134 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2135 return VERR_INVALID_PARAMETER;
2136 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2137 break;
2138#endif
2139
2140 /*
2141 * NEM requests.
2142 */
2143#ifdef VBOX_WITH_NEM_R0
2144# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2145 case VMMR0_DO_NEM_INIT_VM:
2146 if (u64Arg || pReqHdr || idCpu != 0)
2147 return VERR_INVALID_PARAMETER;
2148 rc = NEMR0InitVM(pGVM);
2149 break;
2150
2151 case VMMR0_DO_NEM_INIT_VM_PART_2:
2152 if (u64Arg || pReqHdr || idCpu != 0)
2153 return VERR_INVALID_PARAMETER;
2154 rc = NEMR0InitVMPart2(pGVM);
2155 break;
2156
2157 case VMMR0_DO_NEM_MAP_PAGES:
2158 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2159 return VERR_INVALID_PARAMETER;
2160 rc = NEMR0MapPages(pGVM, idCpu);
2161 break;
2162
2163 case VMMR0_DO_NEM_UNMAP_PAGES:
2164 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2165 return VERR_INVALID_PARAMETER;
2166 rc = NEMR0UnmapPages(pGVM, idCpu);
2167 break;
2168
2169 case VMMR0_DO_NEM_EXPORT_STATE:
2170 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2171 return VERR_INVALID_PARAMETER;
2172 rc = NEMR0ExportState(pGVM, idCpu);
2173 break;
2174
2175 case VMMR0_DO_NEM_IMPORT_STATE:
2176 if (pReqHdr || idCpu == NIL_VMCPUID)
2177 return VERR_INVALID_PARAMETER;
2178 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2179 break;
2180
2181 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2182 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2183 return VERR_INVALID_PARAMETER;
2184 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2185 break;
2186
2187 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2188 if (pReqHdr || idCpu == NIL_VMCPUID)
2189 return VERR_INVALID_PARAMETER;
2190 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2191 break;
2192
2193 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2194 if (u64Arg || pReqHdr)
2195 return VERR_INVALID_PARAMETER;
2196 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2197 break;
2198
2199# if 1 && defined(DEBUG_bird)
2200 case VMMR0_DO_NEM_EXPERIMENT:
2201 if (pReqHdr)
2202 return VERR_INVALID_PARAMETER;
2203 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2204 break;
2205# endif
2206# endif
2207#endif
2208
2209 /*
2210 * IOM requests.
2211 */
2212 case VMMR0_DO_IOM_GROW_IO_PORTS:
2213 {
2214 if (pReqHdr || idCpu != 0)
2215 return VERR_INVALID_PARAMETER;
2216 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2217 break;
2218 }
2219
2220 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2221 {
2222 if (pReqHdr || idCpu != 0)
2223 return VERR_INVALID_PARAMETER;
2224 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2225 break;
2226 }
2227
2228 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2229 {
2230 if (pReqHdr || idCpu != 0)
2231 return VERR_INVALID_PARAMETER;
2232 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2233 break;
2234 }
2235
2236 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2237 {
2238 if (pReqHdr || idCpu != 0)
2239 return VERR_INVALID_PARAMETER;
2240 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2241 break;
2242 }
2243
2244 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2245 {
2246 if (pReqHdr || idCpu != 0)
2247 return VERR_INVALID_PARAMETER;
2248 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2249 if (RT_SUCCESS(rc))
2250 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2251 break;
2252 }
2253
2254 /*
2255 * DBGF requests.
2256 */
2257#ifdef VBOX_WITH_DBGF_TRACING
2258 case VMMR0_DO_DBGF_TRACER_CREATE:
2259 {
2260 if (!pReqHdr || u64Arg || idCpu != 0)
2261 return VERR_INVALID_PARAMETER;
2262 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2263 break;
2264 }
2265
2266 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2267 {
2268 if (!pReqHdr || u64Arg)
2269 return VERR_INVALID_PARAMETER;
2270# if 0 /** @todo */
2271 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2272# else
2273 rc = VERR_NOT_IMPLEMENTED;
2274# endif
2275 break;
2276 }
2277#endif
2278
2279 case VMMR0_DO_DBGF_BP_INIT:
2280 {
2281 if (!pReqHdr || u64Arg || idCpu != 0)
2282 return VERR_INVALID_PARAMETER;
2283 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2284 break;
2285 }
2286
2287 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2288 {
2289 if (!pReqHdr || u64Arg || idCpu != 0)
2290 return VERR_INVALID_PARAMETER;
2291 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2292 break;
2293 }
2294
2295 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2296 {
2297 if (!pReqHdr || u64Arg || idCpu != 0)
2298 return VERR_INVALID_PARAMETER;
2299 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2300 break;
2301 }
2302
2303 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2304 {
2305 if (!pReqHdr || u64Arg || idCpu != 0)
2306 return VERR_INVALID_PARAMETER;
2307 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2308 break;
2309 }
2310
2311 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2312 {
2313 if (!pReqHdr || u64Arg || idCpu != 0)
2314 return VERR_INVALID_PARAMETER;
2315 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2316 break;
2317 }
2318
2319
2320 /*
2321 * TM requests.
2322 */
2323 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2324 {
2325 if (pReqHdr || idCpu == NIL_VMCPUID)
2326 return VERR_INVALID_PARAMETER;
2327 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2328 break;
2329 }
2330
2331 /*
2332 * For profiling.
2333 */
2334 case VMMR0_DO_NOP:
2335 case VMMR0_DO_SLOW_NOP:
2336 return VINF_SUCCESS;
2337
2338 /*
2339 * For testing Ring-0 APIs invoked in this environment.
2340 */
2341 case VMMR0_DO_TESTS:
2342 /** @todo make new test */
2343 return VINF_SUCCESS;
2344
2345 default:
2346 /*
2347             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2348             * than -1, which the interrupt gate glue code might return.
2349 */
2350 Log(("operation %#x is not supported\n", enmOperation));
2351 return VERR_NOT_SUPPORTED;
2352 }
2353 return rc;
2354}
2355
2356
2357/**
2358 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2359 *
2360 * @returns VBox status code.
2361 * @param pvArgs The argument package
2362 */
2363static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2364{
2365 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2366 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2367 pGVCpu->vmmr0.s.idCpu,
2368 pGVCpu->vmmr0.s.enmOperation,
2369 pGVCpu->vmmr0.s.pReq,
2370 pGVCpu->vmmr0.s.u64Arg,
2371 pGVCpu->vmmr0.s.pSession);
2372}
2373
2374
2375/**
2376 * The Ring 0 entry point, called by the support library (SUP).
2377 *
2378 * @returns VBox status code.
2379 * @param pGVM The global (ring-0) VM structure.
2380 * @param pVM The cross context VM structure.
2381 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2382 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2383 * @param enmOperation Which operation to execute.
2384 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2385 * @param u64Arg Some simple constant argument.
2386 * @param pSession The session of the caller.
2387 * @remarks Assume called with interrupts _enabled_.
2388 */
2389VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2390 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2391{
2392 /*
2393 * Requests that should only happen on the EMT thread will be
2394 * wrapped in a setjmp so we can assert without causing trouble.
2395 */
2396 if ( pVM != NULL
2397 && pGVM != NULL
2398 && pVM == pGVM /** @todo drop pVM or pGVM */
2399 && idCpu < pGVM->cCpus
2400 && pGVM->pSession == pSession
2401 && pGVM->pSelf == pVM)
2402 {
2403 switch (enmOperation)
2404 {
2405 /* These might/will be called before VMMR3Init. */
2406 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2407 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2408 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2409 case VMMR0_DO_GMM_FREE_PAGES:
2410 case VMMR0_DO_GMM_BALLOONED_PAGES:
2411 /* On the mac we might not have a valid jmp buf, so check these as well. */
2412 case VMMR0_DO_VMMR0_INIT:
2413 case VMMR0_DO_VMMR0_TERM:
2414
2415 case VMMR0_DO_PDM_DEVICE_CREATE:
2416 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2417 case VMMR0_DO_IOM_GROW_IO_PORTS:
2418 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2419 case VMMR0_DO_DBGF_BP_INIT:
2420 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2421 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2422 {
2423 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2424 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2425 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2426 && pGVCpu->hNativeThreadR0 == hNativeThread))
2427 {
2428 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2429 break;
2430
2431 pGVCpu->vmmr0.s.pGVM = pGVM;
2432 pGVCpu->vmmr0.s.idCpu = idCpu;
2433 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2434 pGVCpu->vmmr0.s.pReq = pReq;
2435 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2436 pGVCpu->vmmr0.s.pSession = pSession;
2437 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2438 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2439 }
2440 return VERR_VM_THREAD_NOT_EMT;
2441 }
2442
2443 default:
2444 case VMMR0_DO_PGM_POOL_GROW:
2445 break;
2446 }
2447 }
2448 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2449}
2450
2451
2452/*********************************************************************************************************************************
2453* EMT Blocking *
2454*********************************************************************************************************************************/
2455
2456/**
2457 * Checks whether we've armed the ring-0 long jump machinery.
2458 *
2459 * @returns @c true / @c false
2460 * @param pVCpu The cross context virtual CPU structure.
2461 * @thread EMT
2462 * @sa VMMIsLongJumpArmed
2463 */
2464VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2465{
2466#ifdef RT_ARCH_X86
2467 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2468 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2469#else
2470 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2471 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2472#endif
2473}
2474
2475
2476/**
2477 * Checks whether we've done a ring-3 long jump.
2478 *
2479 * @returns @c true / @c false
2480 * @param pVCpu The cross context virtual CPU structure.
2481 * @thread EMT
2482 */
2483VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2484{
2485 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2486}
2487
2488
2489/**
2490 * Locking helper that deals with HM context and checks if the thread can block.
2491 *
2492 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2493 * VERR_VMM_CANNOT_BLOCK if not able to block.
2494 * @param pVCpu The cross context virtual CPU structure of the calling
2495 * thread.
2496 * @param rcBusy What to return in case of a blocking problem. Will IPE
2497 * if VINF_SUCCESS and we cannot block.
2498 * @param pszCaller The caller (for logging problems).
2499 * @param pvLock The lock address (for logging problems).
2500 * @param pCtx Where to return context info for the resume call.
2501 * @thread EMT(pVCpu)
2502 */
2503VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2504 PVMMR0EMTBLOCKCTX pCtx)
2505{
2506 const char *pszMsg;
2507
2508 /*
2509 * Check that we are allowed to block.
2510 */
2511 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2512 {
2513 /*
2514         * Are we in HM context and w/o a context hook? If so, work the context hook.
2515 */
2516 if (pVCpu->idHostCpu != NIL_RTCPUID)
2517 {
2518 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2519
2520 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2521 {
2522 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2523 if (pVCpu->vmmr0.s.pPreemptState)
2524 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2525
2526 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2527 pCtx->fWasInHmContext = true;
2528 return VINF_SUCCESS;
2529 }
2530 }
2531
2532 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2533 {
2534 /*
2535 * Not in HM context or we've got hooks, so just check that preemption
2536 * is enabled.
2537 */
2538 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2539 {
2540 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2541 pCtx->fWasInHmContext = false;
2542 return VINF_SUCCESS;
2543 }
2544 pszMsg = "Preemption is disabled!";
2545 }
2546 else
2547 pszMsg = "Preemption state w/o HM state!";
2548 }
2549 else
2550 pszMsg = "Ring-3 calls are disabled!";
2551
2552 static uint32_t volatile s_cWarnings = 0;
2553 if (++s_cWarnings < 50)
2554 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2555 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2556 pCtx->fWasInHmContext = false;
2557 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2558}
2559
2560
2561/**
2562 * Counterpart to VMMR0EmtPrepareToBlock.
2563 *
2564 * @param pVCpu The cross context virtual CPU structure of the calling
2565 * thread.
2566 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2567 * @thread EMT(pVCpu)
2568 */
2569VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2570{
2571 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2572 if (pCtx->fWasInHmContext)
2573 {
2574 if (pVCpu->vmmr0.s.pPreemptState)
2575 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2576
2577 pCtx->fWasInHmContext = false;
2578 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2579 }
2580 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2581}
2582
2583/** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
2584 * @{ */
2585/** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */
2586#define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED RT_BIT_32(0)
2587/** @} */
2588
2589/**
2590 * Helper for waiting on an RTSEMEVENT, the caller having first done VMMR0EmtPrepareToBlock.
2591 *
2592 * @returns VBox status code.
2593 * @retval VERR_THREAD_IS_TERMINATING if the calling thread is terminating.
2594 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2595 * @a cMsTimeout or to maximum wait values.
2596 *
2597 * @param pGVCpu The ring-0 virtual CPU structure.
2598 * @param fFlags VMMR0EMTWAIT_F_XXX.
2599 * @param hEvent The event to wait on.
2600 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2601 */
2602VMMR0DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2603{
2604 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2605
2606 /*
2607 * Note! Similar code is found in the PDM critical sections too.
2608 */
2609 uint64_t const nsStart = RTTimeNanoTS();
2610 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2611 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2612 uint32_t cMsMaxOne = RT_MS_5SEC;
2613 bool fNonInterruptible = false;
2614 for (;;)
2615 {
2616 /* Wait. */
2617 int rcWait = !fNonInterruptible
2618 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2619 : RTSemEventWait(hEvent, cMsMaxOne);
2620 if (RT_SUCCESS(rcWait))
2621 return rcWait;
2622
2623 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2624 {
2625 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2626
2627 /*
2628 * Check the thread termination status.
2629 */
2630 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2631 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2632 ("rcTerm=%Rrc\n", rcTerm));
2633 if ( rcTerm == VERR_NOT_SUPPORTED
2634 && !fNonInterruptible
2635 && cNsMaxTotal > RT_NS_1MIN)
2636 cNsMaxTotal = RT_NS_1MIN;
2637
2638 /* We return immediately if it looks like the thread is terminating. */
2639 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2640 return VERR_THREAD_IS_TERMINATING;
2641
2642 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2643 specified, otherwise we'll just return it. */
2644 if (rcWait == VERR_INTERRUPTED)
2645 {
2646 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2647 return VERR_INTERRUPTED;
2648 if (!fNonInterruptible)
2649 {
2650 /* First time: Adjust down the wait parameters and make sure we get at least
2651 one non-interruptible wait before timing out. */
2652 fNonInterruptible = true;
2653 cMsMaxOne = 32;
2654 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2655 if (cNsLeft > RT_NS_10SEC)
2656 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2657 continue;
2658 }
2659 }
2660
2661 /* Check for timeout. */
2662 if (cNsElapsed > cNsMaxTotal)
2663 return VERR_TIMEOUT;
2664 }
2665 else
2666 return rcWait;
2667 }
2668 /* not reached */
2669}
2670
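/*
 * Usage sketch (illustrative; this mirrors what vmmR0LoggerFlushInner further
 * down does): an EMT that needs to block on an event semaphore while it may
 * still be in HM context brackets the wait like this:
 *
 *     VMMR0EMTBLOCKCTX Ctx;
 *     int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "caller", hEvent, &Ctx);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
 *                                     hEvent, RT_INDEFINITE_WAIT);
 *         VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
 *     }
 *
 * Here hEvent stands for whatever RTSEMEVENT the caller owns, and "caller" is
 * just the string used for diagnostics when blocking turns out not to be possible.
 */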
2671
2672/*********************************************************************************************************************************
2673* Logging. *
2674*********************************************************************************************************************************/
2675
2676/**
2677 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2678 *
2679 * @returns VBox status code.
2680 * @param pGVM The global (ring-0) VM structure.
2681 * @param idCpu The ID of the calling EMT.
2682 * @param pReq The request data.
2683 * @param idxLogger Which logger set to update.
2684 * @thread EMT(idCpu)
2685 */
2686static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2687{
2688 /*
2689 * Check sanity. First we require EMT to be calling us.
2690 */
2691 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2692 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2693
2694 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2695 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2696 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2697
2698 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2699
2700 /*
2701 * Adjust flags.
2702 */
2703 /* Always buffered: */
2704 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2705    /* These don't make sense at present: */
2706 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2707 /* We've traditionally skipped the group restrictions. */
2708 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2709
2710 /*
2711 * Do the updating.
2712 */
2713 int rc = VINF_SUCCESS;
2714 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2715 {
2716 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2717 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2718 if (pLogger)
2719 {
2720 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2721 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2722 }
2723 }
2724
2725 return rc;
2726}
2727
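/*
 * Request sizing sketch (hypothetical ring-3 side, inferred from the header
 * checks above): the request is variable sized, ending with cGroups entries
 * in afGroups, so a caller would allocate it roughly like this:
 *
 *     uint32_t const cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
 *     PVMMR0UPDATELOGGERSREQ pReq = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZ(cbReq);
 *     pReq->Hdr.cbReq = cbReq;
 *     pReq->cGroups   = cGroups;
 *     // ... fill in fFlags, uGroupCrc32 and afGroups[] before issuing
 *     // VMMR0_DO_VMMR0_UPDATE_LOGGERS with u64Arg set to the logger index.
 */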
2728
2729/**
2730 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2731 *
2732 * The job info is copied into VMM::LogFlusherItem.
2733 *
2734 * @returns VBox status code.
2735 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2736 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2737 * @param pGVM The global (ring-0) VM structure.
2738 * @thread The log flusher thread (first caller automatically becomes the log
2739 * flusher).
2740 */
2741static int vmmR0LogFlusher(PGVM pGVM)
2742{
2743 /*
2744 * Check that this really is the flusher thread.
2745 */
2746 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2747 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2748 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2749 { /* likely */ }
2750 else
2751 {
2752 /* The first caller becomes the flusher thread. */
2753 bool fOk;
2754 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2755 if (!fOk)
2756 return VERR_NOT_OWNER;
2757 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2758 }
2759
2760 /*
2761 * Acknowledge flush, waking up waiting EMT.
2762 */
2763 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2764
2765 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2766 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2767 if ( idxTail != idxHead
2768 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2769 {
2770 /* Pop the head off the ring buffer. */
2771 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2772 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2773 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2774
2775 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2776 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2777
2778 /* Validate content. */
2779 if ( idCpu < pGVM->cCpus
2780 && idxLogger < VMMLOGGER_IDX_MAX
2781 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2782 {
2783 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2784 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2785 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2786
2787 /*
2788 * Accounting.
2789 */
2790 uint32_t cFlushing = pR0Log->cFlushing - 1;
2791 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2792 { /*likely*/ }
2793 else
2794 cFlushing = 0;
2795 pR0Log->cFlushing = cFlushing;
2796 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2797
2798 /*
2799 * Wake up the EMT if it's waiting.
2800 */
2801 if (!pR0Log->fEmtWaiting)
2802 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2803 else
2804 {
2805 pR0Log->fEmtWaiting = false;
2806 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2807
2808 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2809 if (RT_FAILURE(rc))
2810 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2811 idxHead, idCpu, idxLogger, idxBuffer, rc));
2812 }
2813 }
2814 else
2815 {
2816 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2817 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2818 }
2819
2820 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2821 }
2822
2823 /*
2824 * The wait loop.
2825 */
2826 int rc;
2827 for (;;)
2828 {
2829 /*
2830 * Work pending?
2831 */
2832 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2833 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2834 if (idxTail != idxHead)
2835 {
2836 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2837 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2838
2839 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2840 return VINF_SUCCESS;
2841 }
2842
2843 /*
2844         * Nothing to do, so check for termination and go to sleep.
2845 */
2846 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2847 { /* likely */ }
2848 else
2849 {
2850 rc = VERR_OBJECT_DESTROYED;
2851 break;
2852 }
2853
2854 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2855 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2856
2857 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2858
2859 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2860 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2861
2862 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2863 { /* likely */ }
2864 else if (rc == VERR_INTERRUPTED)
2865 {
2866 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2867 return rc;
2868 }
2869 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2870 break;
2871 else
2872 {
2873 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2874 break;
2875 }
2876 }
2877
2878 /*
2879 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2880 */
2881 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2882 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2883
2884 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2885 return rc;
2886}
2887
2888
2889/**
2890 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2891 * buffers for logger @a idxLogger.
2892 *
2893 * @returns VBox status code.
2894 * @param pGVM The global (ring-0) VM structure.
2895 * @param idCpu The ID of the calling EMT.
2896 * @param idxLogger Which logger to wait on.
2897 * @thread EMT(idCpu)
2898 */
2899static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2900{
2901 /*
2902 * Check sanity. First we require EMT to be calling us.
2903 */
2904 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2905 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2906 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2907 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2908 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2909
2910 /*
2911 * Do the waiting.
2912 */
2913 int rc = VINF_SUCCESS;
2914 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2915 uint32_t cFlushing = pR0Log->cFlushing;
2916 while (cFlushing > 0)
2917 {
2918 pR0Log->fEmtWaiting = true;
2919 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2920
2921 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
2922
2923 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2924 pR0Log->fEmtWaiting = false;
2925 if (RT_SUCCESS(rc))
2926 {
2927            /* Read the new count and make sure it decreased before looping. That
2928               way we can guarantee that we will wait no more than 5 min * buffers in total. */
2929 uint32_t const cPrevFlushing = cFlushing;
2930 cFlushing = pR0Log->cFlushing;
2931 if (cFlushing < cPrevFlushing)
2932 continue;
2933 rc = VERR_INTERNAL_ERROR_3;
2934 }
2935 break;
2936 }
2937 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2938 return rc;
2939}
2940
2941
2942/**
2943 * Inner worker for vmmR0LoggerFlushCommon.
2944 */
2945static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
2946{
2947 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2948 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2949
2950 /*
2951 * Figure out what we need to do and whether we can.
2952 */
2953 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
2954#if VMMLOGGER_BUFFER_COUNT >= 2
2955 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
2956 {
2957 if (RTSemEventIsSignalSafe())
2958 enmAction = kJustSignal;
2959 else if (VMMRZCallRing3IsEnabled(pGVCpu))
2960 enmAction = kPrepAndSignal;
2961 else
2962 {
2963 /** @todo This is a bit simplistic. We could introduce a FF to signal the
2964 * thread or similar. */
2965 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2966# if defined(RT_OS_LINUX)
2967 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
2968# endif
2969 pShared->cbDropped += cbToFlush;
2970 return true;
2971 }
2972 }
2973 else
2974#endif
2975 if (VMMRZCallRing3IsEnabled(pGVCpu))
2976 enmAction = kPrepSignalAndWait;
2977 else
2978 {
2979 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2980# if defined(RT_OS_LINUX)
2981 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
2982# endif
2983 pShared->cbDropped += cbToFlush;
2984 return true;
2985 }
2986
2987 /*
2988 * Prepare for blocking if necessary.
2989 */
2990 VMMR0EMTBLOCKCTX Ctx;
2991 if (enmAction != kJustSignal)
2992 {
2993 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
2994 if (RT_SUCCESS(rc))
2995 { /* likely */ }
2996 else
2997 {
2998 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2999 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3000 return false;
3001 }
3002 }
3003
3004 /*
3005 * Queue the flush job.
3006 */
3007 bool fFlushedBuffer;
3008 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3009 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3010 {
3011 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3012 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3013 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3014 if (idxNewTail != idxHead)
3015 {
3016 /* Queue it. */
3017 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3018 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3019 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3020 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3021 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3022
3023 /* Update the number of buffers currently being flushed. */
3024 uint32_t cFlushing = pR0Log->cFlushing;
3025 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3026 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3027
3028 /* We must wait if all buffers are currently being flushed. */
3029 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3030 pR0Log->fEmtWaiting = fEmtWaiting;
3031
3032 /* Stats. */
3033 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3034 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3035
3036 /* Signal the worker thread. */
3037 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3038 {
3039 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3040 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3041 }
3042 else
3043 {
3044 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3045 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3046 }
3047
3048 /*
3049 * Wait for a buffer to finish flushing.
3050 *
3051             * Note! Lazy bird is ignoring the status code here.  The result is
3052             * that we might end up with an extra event signalling, so the
3053             * next time we need to wait we won't, ending up with some log
3054             * corruption.  However, it's too much hassle right now for
3055             * a scenario which would most likely end the process rather
3056             * than cause log corruption.
3057 */
3058 if (fEmtWaiting)
3059 {
3060 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3061 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3062 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3063 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3064 }
3065
3066 /*
3067             * We always switch buffers if we have more than one.
3068 */
3069#if VMMLOGGER_BUFFER_COUNT == 1
3070 fFlushedBuffer = true;
3071#else
3072 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3073 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3074 fFlushedBuffer = false;
3075#endif
3076 }
3077 else
3078 {
3079 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3080 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3081 fFlushedBuffer = true;
3082 }
3083 }
3084 else
3085 {
3086 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3087 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3088 fFlushedBuffer = true;
3089 }
3090
3091 /*
3092 * Restore the HM context.
3093 */
3094 if (enmAction != kJustSignal)
3095 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3096
3097 return fFlushedBuffer;
3098}
3099
3100
3101/**
3102 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3103 */
3104static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3105{
3106 /*
3107 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3108     * (This code is a bit paranoid.)
3109 */
3110 if (RT_VALID_PTR(pLogger))
3111 {
3112 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3113 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3114 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3115 {
3116 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3117 if ( RT_VALID_PTR(pGVCpu)
3118 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
3119 {
3120 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3121 PGVM const pGVM = pGVCpu->pGVM;
3122 if ( hNativeSelf == pGVCpu->hEMT
3123 && RT_VALID_PTR(pGVM))
3124 {
3125 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3126 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3127 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3128 {
3129 /*
3130 * Make sure we don't recurse forever here should something in the
3131 * following code trigger logging or an assertion. Do the rest in
3132                         * an inner worker to avoid hitting the right margin too hard.
3133 */
3134 if (!pR0Log->fFlushing)
3135 {
3136 pR0Log->fFlushing = true;
3137 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3138 pR0Log->fFlushing = false;
3139 return fFlushed;
3140 }
3141
3142 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3143 }
3144 else
3145 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3146 }
3147 else
3148 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3149 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3150 }
3151 else
3152 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3153 }
3154 else
3155 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3156 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3157 }
3158 else
3159 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3160 return true;
3161}
3162
3163
3164/**
3165 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3166 */
3167static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3168{
3169 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3170}
3171
3172
3173/**
3174 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3175 */
3176static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3177{
3178#ifdef LOG_ENABLED
3179 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3180#else
3181 RT_NOREF(pLogger, pBufDesc);
3182 return true;
3183#endif
3184}
3185
3186
3187/*
3188 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3189 */
3190DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3191{
3192#ifdef LOG_ENABLED
3193 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3194 if (pGVCpu)
3195 {
3196 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3197 if (RT_VALID_PTR(pLogger))
3198 {
3199 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3200 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3201 {
3202 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3203 {
3204 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3205 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3206 return NULL;
3207 }
3208
3209 /*
3210 * When we're flushing we _must_ return NULL here to suppress any
3211 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3212 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3213 * which will reset the buffer content before we even get to queue
3214 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3215 * is enabled.)
3216 */
3217 return NULL;
3218 }
3219 }
3220 }
3221#endif
3222 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3223}
3224
3225
3226/*
3227 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3228 */
3229DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3230{
3231 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3232 if (pGVCpu)
3233 {
3234 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3235 if (RT_VALID_PTR(pLogger))
3236 {
3237 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3238 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3239 {
3240 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3241 {
3242 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3243 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3244 return NULL;
3245 }
3246 }
3247 }
3248 }
3249 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3250}
3251
3252
3253/**
3254 * Helper for vmmR0InitLoggerSet.
3255 */
3256static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3257 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3258{
3259 /*
3260 * Create and configure the logger.
3261 */
3262 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3263 {
3264 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3265 pR0Log->aBufDescs[i].uReserved = 0;
3266 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3267 pR0Log->aBufDescs[i].offBuf = 0;
3268 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3269 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3270
3271 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3272 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3273 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3274 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3275 pShared->aBufs[i].AuxDesc.offBuf = 0;
3276 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3277 }
3278 pShared->cbBuf = cbBuf;
3279
3280 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3281 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3282 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3283 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3284 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3285 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3286 if (RT_SUCCESS(rc))
3287 {
3288 PRTLOGGER pLogger = pR0Log->pLogger;
3289 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3290 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3291 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3292
3293 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3294 if (RT_SUCCESS(rc))
3295 {
3296 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3297
3298 /*
3299 * Create the event sem the EMT waits on while flushing is happening.
3300 */
3301 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3302 if (RT_SUCCESS(rc))
3303 return VINF_SUCCESS;
3304 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3305 }
3306 RTLogDestroy(pLogger);
3307 }
3308 pR0Log->pLogger = NULL;
3309 return rc;
3310}
3311
3312
3313/**
3314 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3315 */
3316static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3317{
3318 RTLogDestroy(pR0Log->pLogger);
3319 pR0Log->pLogger = NULL;
3320
3321 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3322 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3323
3324 RTSemEventDestroy(pR0Log->hEventFlushWait);
3325 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3326}
3327
3328
3329/**
3330 * Initializes one type of loggers for each EMT.
3331 */
3332static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3333{
3334 /* Allocate buffers first. */
3335 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3336 if (RT_SUCCESS(rc))
3337 {
3338 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3339 if (RT_SUCCESS(rc))
3340 {
3341 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3342 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3343
3344 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3345 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3346
3347 /* Initialize the per-CPU loggers. */
3348 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3349 {
3350 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3351 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3352 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3353 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3354 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3355 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3356 if (RT_FAILURE(rc))
3357 {
3358 vmmR0TermLoggerOne(pR0Log, pShared);
3359 while (i-- > 0)
3360 {
3361 pGVCpu = &pGVM->aCpus[i];
3362 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3363 }
3364 break;
3365 }
3366 }
3367 if (RT_SUCCESS(rc))
3368 return VINF_SUCCESS;
3369
3370 /* Bail out. */
3371 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3372 *phMapObj = NIL_RTR0MEMOBJ;
3373 }
3374 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3375 *phMemObj = NIL_RTR0MEMOBJ;
3376 }
3377 return rc;
3378}
3379
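/*
 * Buffer layout note (derived from the code above, not a separate contract):
 * the single allocation holds cbBuf * VMMLOGGER_BUFFER_COUNT * cCpus bytes,
 * so buffer j of virtual CPU i starts at
 *
 *     pchBuf + (i * VMMLOGGER_BUFFER_COUNT + j) * cbBuf
 *
 * and the matching ring-3 address sits at the same offset from pchBufR3.
 */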
3380
3381/**
3382 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3383 *
3384 * @returns VBox status code.
3385 * @param pGVM The global (ring-0) VM structure.
3386 */
3387static int vmmR0InitLoggers(PGVM pGVM)
3388{
3389 /*
3390 * Invalidate the ring buffer (not really necessary).
3391 */
3392 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3393 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3394
3395 /*
3396 * Create the spinlock and flusher event semaphore.
3397 */
3398 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3399 if (RT_SUCCESS(rc))
3400 {
3401 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3402 if (RT_SUCCESS(rc))
3403 {
3404 /*
3405 * Create the ring-0 release loggers.
3406 */
3407 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3408 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3409#ifdef LOG_ENABLED
3410 if (RT_SUCCESS(rc))
3411 {
3412 /*
3413 * Create debug loggers.
3414 */
3415 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3416 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3417 }
3418#endif
3419 }
3420 }
3421 return rc;
3422}
3423
3424
3425/**
3426 * Counterpart to vmmR0InitLoggers that cleans up all the logging related stuff.
3427 *
3428 * @param pGVM The global (ring-0) VM structure.
3429 */
3430static void vmmR0CleanupLoggers(PGVM pGVM)
3431{
3432 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3433 {
3434 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3435 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3436 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3437 }
3438
3439 /*
3440 * Free logger buffer memory.
3441 */
3442 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3443 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3444 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3445 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3446
3447 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3448 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3449 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3450 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3451
3452 /*
3453 * Free log flusher related stuff.
3454 */
3455 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3456 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3457 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3458 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3459}
3460
3461
3462/*********************************************************************************************************************************
3463* Assertions *
3464*********************************************************************************************************************************/
3465
3466/*
3467 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3468 *
3469 * @returns true if the breakpoint should be hit, false if it should be ignored.
3470 */
3471DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3472{
3473#if 0
3474 return true;
3475#else
3476 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3477 if (pVM)
3478 {
3479 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3480
3481 if (pVCpu)
3482 {
3483# ifdef RT_ARCH_X86
3484 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
3485 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3486# else
3487 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
3488 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3489# endif
3490 {
3491 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
3492 return RT_FAILURE_NP(rc);
3493 }
3494 }
3495 }
3496# ifdef RT_OS_LINUX
3497 return true;
3498# else
3499 return false;
3500# endif
3501#endif
3502}
3503
3504
3505/*
3506 * Override this so we can push it up to ring-3.
3507 */
3508DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3509{
3510 /*
3511 * To host kernel log/whatever.
3512 */
3513 SUPR0Printf("!!R0-Assertion Failed!!\n"
3514 "Expression: %s\n"
3515 "Location : %s(%d) %s\n",
3516 pszExpr, pszFile, uLine, pszFunction);
3517
3518 /*
3519 * To the log.
3520 */
3521 LogAlways(("\n!!R0-Assertion Failed!!\n"
3522 "Expression: %s\n"
3523 "Location : %s(%d) %s\n",
3524 pszExpr, pszFile, uLine, pszFunction));
3525
3526 /*
3527 * To the global VMM buffer.
3528 */
3529 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3530 if (pVM)
3531 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3532 "\n!!R0-Assertion Failed!!\n"
3533 "Expression: %.*s\n"
3534 "Location : %s(%d) %s\n",
3535 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3536 pszFile, uLine, pszFunction);
3537
3538 /*
3539 * Continue the normal way.
3540 */
3541 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3542}
3543
3544
3545/**
3546 * Callback for RTLogFormatV which writes to the ring-3 log port.
3547 * See PFNLOGOUTPUT() for details.
3548 */
3549static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3550{
3551 for (size_t i = 0; i < cbChars; i++)
3552 {
3553 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3554 }
3555
3556 NOREF(pv);
3557 return cbChars;
3558}
3559
3560
3561/*
3562 * Override this so we can push it up to ring-3.
3563 */
3564DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3565{
3566 va_list vaCopy;
3567
3568 /*
3569 * Push the message to the loggers.
3570 */
3571 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3572 if (pLog)
3573 {
3574 va_copy(vaCopy, va);
3575 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3576 va_end(vaCopy);
3577 }
3578 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3579 if (pLog)
3580 {
3581 va_copy(vaCopy, va);
3582 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3583 va_end(vaCopy);
3584 }
3585
3586 /*
3587 * Push it to the global VMM buffer.
3588 */
3589 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3590 if (pVM)
3591 {
3592 va_copy(vaCopy, va);
3593 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3594 va_end(vaCopy);
3595 }
3596
3597 /*
3598 * Continue the normal way.
3599 */
3600 RTAssertMsg2V(pszFormat, va);
3601}
3602