VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@93610

Last change on this file since 93610 was 93609, checked in by vboxsync, 3 years ago

VMM/PDMQueue: Rewrote the queue code to not use the hyper heap and be a bit safer. Added a testcase (driverless). bugref:10093

1/* $Id: VMMR0.cpp 93609 2022-02-05 19:03:08Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/semaphore.h>
62#include <iprt/spinlock.h>
63#include <iprt/stdarg.h>
64#include <iprt/string.h>
65#include <iprt/thread.h>
66#include <iprt/timer.h>
67#include <iprt/time.h>
68
69#include "dtrace/VBoxVMM.h"
70
71
72#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
73# pragma intrinsic(_AddressOfReturnAddress)
74#endif
75
76#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
77# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84RT_C_DECLS_BEGIN
85#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
86extern uint64_t __udivdi3(uint64_t, uint64_t);
87extern uint64_t __umoddi3(uint64_t, uint64_t);
88#endif
89RT_C_DECLS_END
90static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
91static int vmmR0LogFlusher(PGVM pGVM);
92static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
93static int vmmR0InitLoggers(PGVM pGVM);
94static void vmmR0CleanupLoggers(PGVM pGVM);
95
96
97/*********************************************************************************************************************************
98* Global Variables *
99*********************************************************************************************************************************/
100/** Drag in necessary library bits.
101 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
102struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
103{
104 { (PFNRT)RTCrc32 },
105 { (PFNRT)RTOnce },
106#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
107 { (PFNRT)__udivdi3 },
108 { (PFNRT)__umoddi3 },
109#endif
110 { NULL }
111};
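/* Note: listing the symbols in a table like this forces the linker to keep
   them in VMMR0.r0 so that VBoxDD*R0.r0 can resolve them at load time.  The
   __udivdi3/__umoddi3 entries are the 64-bit division helpers that 32-bit
   compilers typically emit calls to on the x86 Solaris and FreeBSD hosts. */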
112
113#ifdef RT_OS_SOLARIS
114/* Dependency information for the native Solaris loader. */
115extern "C" { char _depends_on[] = "vboxdrv"; }
116#endif
117
118
119/**
120 * Initialize the module.
121 * This is called when we're first loaded.
122 *
123 * @returns 0 on success.
124 * @returns VBox status on failure.
125 * @param hMod Image handle for use in APIs.
126 */
127DECLEXPORT(int) ModuleInit(void *hMod)
128{
129#ifdef VBOX_WITH_DTRACE_R0
130 /*
131 * The first thing to do is register the static tracepoints.
132 * (Deregistration is automatic.)
133 */
134 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
135 if (RT_FAILURE(rc2))
136 return rc2;
137#endif
138 LogFlow(("ModuleInit:\n"));
139
140#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
141 /*
142 * Display the CMOS debug code.
143 */
144 ASMOutU8(0x72, 0x03);
145 uint8_t bDebugCode = ASMInU8(0x73);
146 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
147 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
148#endif
149
150 /*
151 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
152 */
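 /* Note: each RT_SUCCESS level below owns one component; if a later step
    fails, the already initialized components are torn down again in reverse
    order on the way out, so a failed ModuleInit leaves nothing registered. */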
153 int rc = vmmInitFormatTypes();
154 if (RT_SUCCESS(rc))
155 {
156 rc = GVMMR0Init();
157 if (RT_SUCCESS(rc))
158 {
159 rc = GMMR0Init();
160 if (RT_SUCCESS(rc))
161 {
162 rc = HMR0Init();
163 if (RT_SUCCESS(rc))
164 {
165 PDMR0Init(hMod);
166
167 rc = PGMRegisterStringFormatTypes();
168 if (RT_SUCCESS(rc))
169 {
170 rc = IntNetR0Init();
171 if (RT_SUCCESS(rc))
172 {
173#ifdef VBOX_WITH_PCI_PASSTHROUGH
174 rc = PciRawR0Init();
175#endif
176 if (RT_SUCCESS(rc))
177 {
178 rc = CPUMR0ModuleInit();
179 if (RT_SUCCESS(rc))
180 {
181#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
182 rc = vmmR0TripleFaultHackInit();
183 if (RT_SUCCESS(rc))
184#endif
185 {
186#ifdef VBOX_WITH_NEM_R0
187 rc = NEMR0Init();
188 if (RT_SUCCESS(rc))
189#endif
190 {
191 LogFlow(("ModuleInit: returns success\n"));
192 return VINF_SUCCESS;
193 }
194 }
195
196 /*
197 * Bail out.
198 */
199#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
200 vmmR0TripleFaultHackTerm();
201#endif
202 }
203 else
204 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
205#ifdef VBOX_WITH_PCI_PASSTHROUGH
206 PciRawR0Term();
207#endif
208 }
209 else
210 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
211 IntNetR0Term();
212 }
213 else
214 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
215 PGMDeregisterStringFormatTypes();
216 }
217 else
218 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
219 HMR0Term();
220 }
221 else
222 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
223 GMMR0Term();
224 }
225 else
226 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
227 GVMMR0Term();
228 }
229 else
230 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
231 vmmTermFormatTypes();
232 }
233 else
234 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
235
236 LogFlow(("ModuleInit: failed %Rrc\n", rc));
237 return rc;
238}
239
240
241/**
242 * Terminate the module.
243 * This is called when we're finally unloaded.
244 *
245 * @param hMod Image handle for use in APIs.
246 */
247DECLEXPORT(void) ModuleTerm(void *hMod)
248{
249 NOREF(hMod);
250 LogFlow(("ModuleTerm:\n"));
251
252 /*
253 * Terminate the CPUM module (Local APIC cleanup).
254 */
255 CPUMR0ModuleTerm();
256
257 /*
258 * Terminate the internal network service.
259 */
260 IntNetR0Term();
261
262 /*
263 * PGM (Darwin), HM and PciRaw global cleanup.
264 */
265#ifdef VBOX_WITH_PCI_PASSTHROUGH
266 PciRawR0Term();
267#endif
268 PGMDeregisterStringFormatTypes();
269 HMR0Term();
270#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
271 vmmR0TripleFaultHackTerm();
272#endif
273#ifdef VBOX_WITH_NEM_R0
274 NEMR0Term();
275#endif
276
277 /*
278 * Destroy the GMM and GVMM instances.
279 */
280 GMMR0Term();
281 GVMMR0Term();
282
283 vmmTermFormatTypes();
284
285 LogFlow(("ModuleTerm: returns\n"));
286}
287
288
289/**
290 * Initializes VMM specific members when the GVM structure is created,
291 * allocating loggers and stuff.
292 *
293 * The loggers are allocated here so that we can update their settings before
294 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
295 *
296 * @returns VBox status code.
297 * @param pGVM The global (ring-0) VM structure.
298 */
299VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
300{
301 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
302
303 /*
304 * Initialize all members first.
305 */
306 pGVM->vmmr0.s.fCalledInitVm = false;
307 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
308 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
309 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
310 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
311 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
312 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
313 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
314 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
315 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
316 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
317
318 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
319 {
320 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
321 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
322 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
323 pGVCpu->vmmr0.s.pPreemptState = NULL;
324 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
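 /* Point the ring-0 assertion jump buffer at the mirror/stack areas in the
    shared VMCPU data, presumably so ring-3 can inspect the captured stack
    after a ring-0 assertion (guru meditation). */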
325 pGVCpu->vmmr0.s.AssertJmpBuf.pMirrorBuf = &pGVCpu->vmm.s.AssertJmpBuf;
326 pGVCpu->vmmr0.s.AssertJmpBuf.pvStackBuf = &pGVCpu->vmm.s.abAssertStack[0];
327 pGVCpu->vmmr0.s.AssertJmpBuf.cbStackBuf = sizeof(pGVCpu->vmm.s.abAssertStack);
328
329 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
330 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
331 }
332
333 /*
334 * Create the loggers.
335 */
336 return vmmR0InitLoggers(pGVM);
337}
338
339
340/**
341 * Initiates the R0 driver for a particular VM instance.
342 *
343 * @returns VBox status code.
344 *
345 * @param pGVM The global (ring-0) VM structure.
346 * @param uSvnRev The SVN revision of the ring-3 part.
347 * @param uBuildType Build type indicator.
348 * @thread EMT(0)
349 */
350static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
351{
352 /*
353 * Match the SVN revisions and build type.
354 */
355 if (uSvnRev != VMMGetSvnRev())
356 {
357 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
358 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
359 return VERR_VMM_R0_VERSION_MISMATCH;
360 }
361 if (uBuildType != vmmGetBuildType())
362 {
363 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
364 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
365 return VERR_VMM_R0_VERSION_MISMATCH;
366 }
367
368 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
369 if (RT_FAILURE(rc))
370 return rc;
371
372 /* Don't allow this to be called more than once. */
373 if (!pGVM->vmmr0.s.fCalledInitVm)
374 pGVM->vmmr0.s.fCalledInitVm = true;
375 else
376 return VERR_ALREADY_INITIALIZED;
377
378#ifdef LOG_ENABLED
379
380 /*
381 * Register the EMT R0 logger instance for VCPU 0.
382 */
383 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
384 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
385 {
386# if 0 /* testing of the logger. */
387 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
388 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
389 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
390 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
391
392 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
393 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
394 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
395 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
396
397 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
398 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
399 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
400 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
401
402 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
403 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
404 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
405 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
406 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
407 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
408
409 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
410 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
411
412 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
413 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
414 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
415# endif
416# ifdef VBOX_WITH_R0_LOGGING
417 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
418 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
419 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
420# endif
421 }
422#endif /* LOG_ENABLED */
423
424 /*
425 * Check if the host supports high resolution timers or not.
426 */
427 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
428 && !RTTimerCanDoHighResolution())
429 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
430
431 /*
432 * Initialize the per VM data for GVMM and GMM.
433 */
434 rc = GVMMR0InitVM(pGVM);
435 if (RT_SUCCESS(rc))
436 {
437 /*
438 * Init HM, CPUM and PGM (Darwin only).
439 */
440 rc = HMR0InitVM(pGVM);
441 if (RT_SUCCESS(rc))
442 {
443 rc = CPUMR0InitVM(pGVM);
444 if (RT_SUCCESS(rc))
445 {
446 rc = PGMR0InitVM(pGVM);
447 if (RT_SUCCESS(rc))
448 {
449 rc = EMR0InitVM(pGVM);
450 if (RT_SUCCESS(rc))
451 {
452#ifdef VBOX_WITH_PCI_PASSTHROUGH
453 rc = PciRawR0InitVM(pGVM);
454#endif
455 if (RT_SUCCESS(rc))
456 {
457 rc = GIMR0InitVM(pGVM);
458 if (RT_SUCCESS(rc))
459 {
460 GVMMR0DoneInitVM(pGVM);
461
462 /*
463 * Collect a bit of info for the VM release log.
464 */
465 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
466 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
467 return rc;
468
469 /* bail out*/
470 //GIMR0TermVM(pGVM);
471 }
472#ifdef VBOX_WITH_PCI_PASSTHROUGH
473 PciRawR0TermVM(pGVM);
474#endif
475 }
476 }
477 }
478 }
479 HMR0TermVM(pGVM);
480 }
481 }
482
483 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
484 return rc;
485}
486
487
488/**
489 * Does EMT specific VM initialization.
490 *
491 * @returns VBox status code.
492 * @param pGVM The ring-0 VM structure.
493 * @param idCpu The EMT that's calling.
494 */
495static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
496{
497 /* Paranoia (caller checked these already). */
498 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
499 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
500
501#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
502 /*
503 * Registration of ring 0 loggers.
504 */
505 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
506 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
507 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
508 {
509 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
510 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
511 }
512#endif
513
514 return VINF_SUCCESS;
515}
516
517
518
519/**
520 * Terminates the R0 bits for a particular VM instance.
521 *
522 * This is normally called by ring-3 as part of the VM termination process, but
523 * may alternatively be called during the support driver session cleanup when
524 * the VM object is destroyed (see GVMM).
525 *
526 * @returns VBox status code.
527 *
528 * @param pGVM The global (ring-0) VM structure.
529 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
530 * thread.
531 * @thread EMT(0) or session clean up thread.
532 */
533VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
534{
535 /*
536 * Check EMT(0) claim if we're called from userland.
537 */
538 if (idCpu != NIL_VMCPUID)
539 {
540 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
541 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
542 if (RT_FAILURE(rc))
543 return rc;
544 }
545
546#ifdef VBOX_WITH_PCI_PASSTHROUGH
547 PciRawR0TermVM(pGVM);
548#endif
549
550 /*
551 * Tell GVMM what we're up to and check that we only do this once.
552 */
553 if (GVMMR0DoingTermVM(pGVM))
554 {
555 GIMR0TermVM(pGVM);
556
557 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
558 * here to make sure we don't leak any shared pages if we crash... */
559 HMR0TermVM(pGVM);
560 }
561
562 /*
563 * Deregister the logger for this EMT.
564 */
565 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
566
567 /*
568 * Start log flusher thread termination.
569 */
570 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
571 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
572 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
573
574 return VINF_SUCCESS;
575}
576
577
578/**
579 * This is called at the end of gvmmR0CleanupVM().
580 *
581 * @param pGVM The global (ring-0) VM structure.
582 */
583VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
584{
585 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
586 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
587 {
588 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
589
590 /** @todo Can we busy wait here for all thread-context hooks to be
591 * deregistered before releasing (destroying) it? Only until we find a
592 * solution for not deregistering hooks every time we're leaving HMR0
593 * context. */
594 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
595 }
596
597 vmmR0CleanupLoggers(pGVM);
598}
599
600
601/**
602 * An interrupt or unhalt force flag is set, deal with it.
603 *
604 * @returns VINF_SUCCESS (or VINF_EM_HALT).
605 * @param pVCpu The cross context virtual CPU structure.
606 * @param uMWait Result from EMMonitorWaitIsActive().
607 * @param enmInterruptibility Guest CPU interruptibility level.
608 */
609static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
610{
611 Assert(!TRPMHasTrap(pVCpu));
612 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
613 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
614
615 /*
616 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
617 */
618 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
619 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
620 {
621 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
622 {
623 uint8_t u8Interrupt = 0;
624 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
625 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
626 if (RT_SUCCESS(rc))
627 {
628 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
629
630 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
631 AssertRCSuccess(rc);
632 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
633 return rc;
634 }
635 }
636 }
637 /*
638 * SMI is not implemented yet, at least not here.
639 */
640 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
641 {
642 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
643 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
644 return VINF_EM_HALT;
645 }
646 /*
647 * NMI.
648 */
649 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
650 {
651 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
652 {
653 /** @todo later. */
654 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
655 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
656 return VINF_EM_HALT;
657 }
658 }
659 /*
660 * Nested-guest virtual interrupt.
661 */
662 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
663 {
664 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
665 {
666 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
667 * here before injecting the virtual interrupt. See emR3ForcedActions
668 * for details. */
669 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
670 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
671 return VINF_EM_HALT;
672 }
673 }
674
675 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
676 {
677 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
678 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
679 return VINF_SUCCESS;
680 }
681 if (uMWait > 1)
682 {
683 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
684 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
685 return VINF_SUCCESS;
686 }
687
688 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
689 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
690 return VINF_EM_HALT;
691}
692
693
694/**
695 * This does one round of vmR3HaltGlobal1Halt().
696 *
697 * The rationale here is that we'll reduce latency in interrupt situations if we
698 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
699 * MWAIT), but do one round of blocking here instead and hope the interrupt is
700 * raised in the meanwhile.
701 *
702 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
703 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
704 * ring-0 call (unless we're too close to a timer event). When the interrupt
705 * wakes us up, we'll return from ring-0 and EM will by instinct do a
706 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
707 * back to VMMR0EntryFast().
708 *
709 * @returns VINF_SUCCESS or VINF_EM_HALT.
710 * @param pGVM The ring-0 VM structure.
711 * @param pGVCpu The ring-0 virtual CPU structure.
712 *
713 * @todo r=bird: All the blocking/waiting and EMT management should move out of
714 * the VM module, probably to VMM. Then this would be more weird wrt
715 * parameters and statistics.
716 */
717static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
718{
719 /*
720 * Do spin stat historization.
721 */
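 /* Every 256 halts the success/to-ring-3 counters are re-seeded, biased
    towards whichever outcome dominated the last window, so the spin/block
    heuristics below adapt to recent behaviour rather than the whole VM
    lifetime. */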
722 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
723 { /* likely */ }
724 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
725 {
726 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
727 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
728 }
729 else
730 {
731 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
732 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
733 }
734
735 /*
736 * Flags that make us go to ring-3.
737 */
738 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
739 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
740 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
741 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
742 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
743 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
744 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
745 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
746
747 /*
748 * Check preconditions.
749 */
750 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
751 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
752 if ( pGVCpu->vmm.s.fMayHaltInRing0
753 && !TRPMHasTrap(pGVCpu)
754 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
755 || uMWait > 1))
756 {
757 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
758 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
759 {
760 /*
761 * Interrupts pending already?
762 */
763 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
764 APICUpdatePendingInterrupts(pGVCpu);
765
766 /*
767 * Flags that wake up from the halted state.
768 */
769 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
770 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
771
772 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
773 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
774 ASMNopPause();
775
776 /*
777 * Check out how long till the next timer event.
778 */
779 uint64_t u64Delta;
780 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
781
782 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
783 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
784 {
785 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
786 APICUpdatePendingInterrupts(pGVCpu);
787
788 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
789 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
790
791 /*
792 * Wait if there is enough time to the next timer event.
793 */
794 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
795 {
796 /* If there are a few other CPU cores around, we will procrastinate a
797 little before going to sleep, hoping for some device raising an
798 interrupt or similar. Though, the best thing here would be to
799 dynamically adjust the spin count according to its usefulness or
800 something... */
801 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
802 && RTMpGetOnlineCount() >= 4)
803 {
804 /** @todo Figure out how we can skip this if it hasn't helped recently...
805 * @bugref{9172#c12} */
806 uint32_t cSpinLoops = 42;
807 while (cSpinLoops-- > 0)
808 {
809 ASMNopPause();
810 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
811 APICUpdatePendingInterrupts(pGVCpu);
812 ASMNopPause();
813 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
814 {
815 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
816 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
817 return VINF_EM_HALT;
818 }
819 ASMNopPause();
820 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
821 {
822 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
823 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
824 return VINF_EM_HALT;
825 }
826 ASMNopPause();
827 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
828 {
829 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
830 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
831 }
832 ASMNopPause();
833 }
834 }
835
836 /*
837 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
838 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
839 * After changing the state we must recheck the force flags of course.
840 */
841 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
842 {
843 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
844 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
845 {
846 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
847 APICUpdatePendingInterrupts(pGVCpu);
848
849 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
850 {
851 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
852 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
853 }
854
855 /* Okay, block! */
856 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
857 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
858 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
859 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
860 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
861
862 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
863 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
864 if ( rc == VINF_SUCCESS
865 || rc == VERR_INTERRUPTED)
866 {
867 /* Keep some stats like ring-3 does. */
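 /* (Within +/-50us of the requested GIP wakeup time counts as on time;
    larger deviations land in the overslept/insomnia buckets below.) */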
868 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
869 if (cNsOverslept > 50000)
870 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
871 else if (cNsOverslept < -50000)
872 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
873 else
874 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
875
876 /*
877 * Recheck whether we can resume execution or have to go to ring-3.
878 */
879 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
880 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
881 {
882 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
883 APICUpdatePendingInterrupts(pGVCpu);
884 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
885 {
886 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
887 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
888 }
889 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
890 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
891 }
892 else
893 {
894 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
895 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
896 }
897 }
898 else
899 {
900 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
901 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
902 }
903 }
904 else
905 {
906 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
907 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
908 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
909 }
910 }
911 else
912 {
913 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
914 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
915 }
916 }
917 else
918 {
919 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
920 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
921 }
922 }
923 else
924 {
925 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
926 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
927 }
928 }
929 else
930 {
931 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
932 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
933 }
934 }
935 else
936 {
937 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
938 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
939 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
940 }
941
942 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
943 return VINF_EM_HALT;
944}
945
946
947/**
948 * VMM ring-0 thread-context callback.
949 *
950 * This does common HM state updating and calls the HM-specific thread-context
951 * callback.
952 *
953 * This is used together with RTThreadCtxHookCreate() on platforms which
954 * support it, and directly from VMMR0EmtPrepareForBlocking() and
955 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
956 *
957 * @param enmEvent The thread-context event.
958 * @param pvUser Opaque pointer to the VMCPU.
959 *
960 * @thread EMT(pvUser)
961 */
962static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
963{
964 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
965
966 switch (enmEvent)
967 {
968 case RTTHREADCTXEVENT_IN:
969 {
970 /*
971 * Linux may call us with preemption enabled (really!) but technically we
972 * cannot get preempted here, otherwise we end up in an infinite recursion
973 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
974 * ad infinitum). Let's just disable preemption for now...
975 */
976 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
977 * preemption after doing the callout (one or two functions up the
978 * call chain). */
979 /** @todo r=ramshankar: See @bugref{5313#c30}. */
980 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
981 RTThreadPreemptDisable(&ParanoidPreemptState);
982
983 /* We need to update the VCPU <-> host CPU mapping. */
984 RTCPUID idHostCpu;
985 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
986 pVCpu->iHostCpuSet = iHostCpuSet;
987 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
988
989 /* In the very unlikely event that the GIP delta for the CPU we're being
990 rescheduled onto needs calculating, try to force a return to ring-3.
991 We unfortunately cannot do the measurements right here. */
992 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
993 { /* likely */ }
994 else
995 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
996
997 /* Invoke the HM-specific thread-context callback. */
998 HMR0ThreadCtxCallback(enmEvent, pvUser);
999
1000 /* Restore preemption. */
1001 RTThreadPreemptRestore(&ParanoidPreemptState);
1002 break;
1003 }
1004
1005 case RTTHREADCTXEVENT_OUT:
1006 {
1007 /* Invoke the HM-specific thread-context callback. */
1008 HMR0ThreadCtxCallback(enmEvent, pvUser);
1009
1010 /*
1011 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1012 * have the same host CPU associated with them.
1013 */
1014 pVCpu->iHostCpuSet = UINT32_MAX;
1015 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1016 break;
1017 }
1018
1019 default:
1020 /* Invoke the HM-specific thread-context callback. */
1021 HMR0ThreadCtxCallback(enmEvent, pvUser);
1022 break;
1023 }
1024}
1025
1026
1027/**
1028 * Creates thread switching hook for the current EMT thread.
1029 *
1030 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1031 * platform does not implement switcher hooks, no hooks will be created and the
1032 * member set to NIL_RTTHREADCTXHOOK.
1033 *
1034 * @returns VBox status code.
1035 * @param pVCpu The cross context virtual CPU structure.
1036 * @thread EMT(pVCpu)
1037 */
1038VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1039{
1040 VMCPU_ASSERT_EMT(pVCpu);
1041 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1042
1043#if 1 /* To disable this stuff change to zero. */
1044 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1045 if (RT_SUCCESS(rc))
1046 {
1047 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1048 return rc;
1049 }
1050#else
1051 RT_NOREF(vmmR0ThreadCtxCallback);
1052 int rc = VERR_NOT_SUPPORTED;
1053#endif
1054
1055 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1056 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1057 if (rc == VERR_NOT_SUPPORTED)
1058 return VINF_SUCCESS;
1059
1060 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1061 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1062}
1063
1064
1065/**
1066 * Destroys the thread switching hook for the specified VCPU.
1067 *
1068 * @param pVCpu The cross context virtual CPU structure.
1069 * @remarks Can be called from any thread.
1070 */
1071VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1072{
1073 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1074 AssertRC(rc);
1075 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1076}
1077
1078
1079/**
1080 * Disables the thread switching hook for this VCPU (if we got one).
1081 *
1082 * @param pVCpu The cross context virtual CPU structure.
1083 * @thread EMT(pVCpu)
1084 *
1085 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1086 * this call. This means you have to be careful with what you do!
1087 */
1088VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1089{
1090 /*
1091 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1092 * @bugref{7726#c19} explains the need for this trick:
1093 *
1094 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1095 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1096 * longjmp & normal return to ring-3, which opens a window where we may be
1097 * rescheduled without changing GVMCPUID::idHostCpu, causing confusion if
1098 * the CPU starts executing a different EMT. Both functions first disable
1099 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1100 * an opening for getting preempted.
1101 */
1102 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1103 * all the time. */
1104
1105 /*
1106 * Disable the context hook, if we got one.
1107 */
1108 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1109 {
1110 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1111 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1112 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1113 AssertRC(rc);
1114 }
1115}
1116
1117
1118/**
1119 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1120 *
1121 * @returns true if registered, false otherwise.
1122 * @param pVCpu The cross context virtual CPU structure.
1123 */
1124DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1125{
1126 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1127}
1128
1129
1130/**
1131 * Whether thread-context hooks are registered for this VCPU.
1132 *
1133 * @returns true if registered, false otherwise.
1134 * @param pVCpu The cross context virtual CPU structure.
1135 */
1136VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1137{
1138 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1139}
1140
1141
1142/**
1143 * Returns the ring-0 release logger instance.
1144 *
1145 * @returns Pointer to release logger, NULL if not configured.
1146 * @param pVCpu The cross context virtual CPU structure of the caller.
1147 * @thread EMT(pVCpu)
1148 */
1149VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1150{
1151 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1152}
1153
1154
1155#ifdef VBOX_WITH_STATISTICS
1156/**
1157 * Record return code statistics
1158 * @param pVM The cross context VM structure.
1159 * @param pVCpu The cross context virtual CPU structure.
1160 * @param rc The status code.
1161 */
1162static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1163{
1164 /*
1165 * Collect statistics.
1166 */
1167 switch (rc)
1168 {
1169 case VINF_SUCCESS:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1171 break;
1172 case VINF_EM_RAW_INTERRUPT:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1174 break;
1175 case VINF_EM_RAW_INTERRUPT_HYPER:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1177 break;
1178 case VINF_EM_RAW_GUEST_TRAP:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1180 break;
1181 case VINF_EM_RAW_RING_SWITCH:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1183 break;
1184 case VINF_EM_RAW_RING_SWITCH_INT:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1186 break;
1187 case VINF_EM_RAW_STALE_SELECTOR:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1189 break;
1190 case VINF_EM_RAW_IRET_TRAP:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1192 break;
1193 case VINF_IOM_R3_IOPORT_READ:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1195 break;
1196 case VINF_IOM_R3_IOPORT_WRITE:
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1198 break;
1199 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1201 break;
1202 case VINF_IOM_R3_MMIO_READ:
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1204 break;
1205 case VINF_IOM_R3_MMIO_WRITE:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1207 break;
1208 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1210 break;
1211 case VINF_IOM_R3_MMIO_READ_WRITE:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1213 break;
1214 case VINF_PATM_HC_MMIO_PATCH_READ:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1216 break;
1217 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1219 break;
1220 case VINF_CPUM_R3_MSR_READ:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1222 break;
1223 case VINF_CPUM_R3_MSR_WRITE:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1225 break;
1226 case VINF_EM_RAW_EMULATE_INSTR:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1228 break;
1229 case VINF_PATCH_EMULATE_INSTR:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1231 break;
1232 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1233 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1234 break;
1235 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1236 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1237 break;
1238 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1240 break;
1241 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1243 break;
1244 case VINF_CSAM_PENDING_ACTION:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1246 break;
1247 case VINF_PGM_SYNC_CR3:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1249 break;
1250 case VINF_PATM_PATCH_INT3:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1252 break;
1253 case VINF_PATM_PATCH_TRAP_PF:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1255 break;
1256 case VINF_PATM_PATCH_TRAP_GP:
1257 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1258 break;
1259 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1261 break;
1262 case VINF_EM_RESCHEDULE_REM:
1263 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1264 break;
1265 case VINF_EM_RAW_TO_R3:
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1267 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1269 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1271 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1273 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1275 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1277 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1279 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1280 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1281 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1283 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1284 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1285 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1286 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1287 else
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1289 break;
1290
1291 case VINF_EM_RAW_TIMER_PENDING:
1292 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1293 break;
1294 case VINF_EM_RAW_INTERRUPT_PENDING:
1295 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1296 break;
1297 case VINF_PATM_DUPLICATE_FUNCTION:
1298 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1299 break;
1300 case VINF_PGM_POOL_FLUSH_PENDING:
1301 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1302 break;
1303 case VINF_EM_PENDING_REQUEST:
1304 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1305 break;
1306 case VINF_EM_HM_PATCH_TPR_INSTR:
1307 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1308 break;
1309 default:
1310 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1311 break;
1312 }
1313}
1314#endif /* VBOX_WITH_STATISTICS */
1315
1316
1317/**
1318 * The Ring 0 entry point, called by the fast-ioctl path.
1319 *
1320 * @param pGVM The global (ring-0) VM structure.
1321 * @param pVMIgnored The cross context VM structure. The return code is
1322 * stored in pVM->vmm.s.iLastGZRc.
1323 * @param idCpu The Virtual CPU ID of the calling EMT.
1324 * @param enmOperation Which operation to execute.
1325 * @remarks Assume called with interrupts _enabled_.
1326 */
1327VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1328{
1329 RT_NOREF(pVMIgnored);
1330
1331 /*
1332 * Validation.
1333 */
1334 if ( idCpu < pGVM->cCpus
1335 && pGVM->cCpus == pGVM->cCpusUnsafe)
1336 { /*likely*/ }
1337 else
1338 {
1339 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1340 return;
1341 }
1342
1343 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1344 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1345 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1346 && pGVCpu->hNativeThreadR0 == hNativeThread))
1347 { /* likely */ }
1348 else
1349 {
1350 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1351 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1352 return;
1353 }
1354
1355 /*
1356 * Perform requested operation.
1357 */
1358 switch (enmOperation)
1359 {
1360 /*
1361 * Run guest code using the available hardware acceleration technology.
1362 */
1363 case VMMR0_DO_HM_RUN:
1364 {
1365 for (;;) /* hlt loop */
1366 {
1367 /*
1368 * Disable ring-3 calls & blocking till we've successfully entered HM.
1369 * Otherwise we sometimes end up blocking at the final Log4 statement
1370 * in VMXR0Enter, while still in a somewhat in-between state.
1371 */
1372 VMMRZCallRing3Disable(pGVCpu);
1373
1374 /*
1375 * Disable preemption.
1376 */
1377 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1378 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1379 RTThreadPreemptDisable(&PreemptState);
1380 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1381
1382 /*
1383 * Get the host CPU identifiers, make sure they are valid and that
1384 * we've got a TSC delta for the CPU.
1385 */
1386 RTCPUID idHostCpu;
1387 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1388 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1389 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1390 {
1391 pGVCpu->iHostCpuSet = iHostCpuSet;
1392 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1393
1394 /*
1395 * Update the periodic preemption timer if it's active.
1396 */
1397 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1398 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1399
1400#ifdef VMM_R0_TOUCH_FPU
1401 /*
1402 * Make sure we've got the FPU state loaded so we don't need to clear
1403 * CR0.TS and get out of sync with the host kernel when loading the guest
1404 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1405 */
1406 CPUMR0TouchHostFpu();
1407#endif
1408 int rc;
1409 bool fPreemptRestored = false;
1410 if (!HMR0SuspendPending())
1411 {
1412 /*
1413 * Enable the context switching hook.
1414 */
1415 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1416 {
1417 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1418 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1419 }
1420
1421 /*
1422 * Enter HM context.
1423 */
1424 rc = HMR0Enter(pGVCpu);
1425 if (RT_SUCCESS(rc))
1426 {
1427 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1428
1429 /*
1430 * When preemption hooks are in place, enable preemption now that
1431 * we're in HM context.
1432 */
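 /* (This should be safe: once enabled, the thread-context hook takes care
    of saving/restoring the hardware virtualization state around any
    preemption from here on.) */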
1433 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1434 {
1435 fPreemptRestored = true;
1436 pGVCpu->vmmr0.s.pPreemptState = NULL;
1437 RTThreadPreemptRestore(&PreemptState);
1438 }
1439 VMMRZCallRing3Enable(pGVCpu);
1440
1441 /*
1442 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1443 */
1444 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, HMR0RunGuestCode, pGVM, pGVCpu);
1445
1446 /*
1447 * Assert sanity on the way out. Using manual assertion code here as normal
1448 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1449 */
1450 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1451 && RT_SUCCESS_NP(rc)
1452 && rc != VERR_VMM_RING0_ASSERTION ))
1453 {
1454 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1455 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1456 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1457 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1458 }
1459#if 0
1460 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1461 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1462 {
1463 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1464 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1465 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1466 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1467 }
1468#endif
1469
1470 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1471 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1472 }
1473 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1474
1475 /*
1476 * Invalidate the host CPU identifiers before we disable the context
1477 * hook / restore preemption.
1478 */
1479 pGVCpu->iHostCpuSet = UINT32_MAX;
1480 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1481
1482 /*
1483 * Disable context hooks. Due to unresolved cleanup issues, we
1484 * cannot leave the hooks enabled when we return to ring-3.
1485 *
1486 * Note! At the moment HM may also have disabled the hook
1487 * when we get here, but the IPRT API handles that.
1488 */
1489 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1490 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1491 }
1492 /*
1493 * The system is about to go into suspend mode; go back to ring 3.
1494 */
1495 else
1496 {
1497 pGVCpu->iHostCpuSet = UINT32_MAX;
1498 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1499 rc = VINF_EM_RAW_INTERRUPT;
1500 }
1501
1502 /** @todo When HM stops messing with the context hook state, we'll disable
1503 * preemption again before the RTThreadCtxHookDisable call. */
1504 if (!fPreemptRestored)
1505 {
1506 pGVCpu->vmmr0.s.pPreemptState = NULL;
1507 RTThreadPreemptRestore(&PreemptState);
1508 }
1509
1510 pGVCpu->vmm.s.iLastGZRc = rc;
1511
1512 /* Fire dtrace probe and collect statistics. */
1513 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1514#ifdef VBOX_WITH_STATISTICS
1515 vmmR0RecordRC(pGVM, pGVCpu, rc);
1516#endif
1517 VMMRZCallRing3Enable(pGVCpu);
1518
1519 /*
1520 * If this is a halt.
1521 */
1522 if (rc != VINF_EM_HALT)
1523 { /* we're not in a hurry for a HLT, so prefer this path */ }
1524 else
1525 {
1526 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1527 if (rc == VINF_SUCCESS)
1528 {
1529 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1530 continue;
1531 }
1532 pGVCpu->vmm.s.cR0HaltsToRing3++;
1533 }
1534 }
1535 /*
1536 * Invalid CPU set index or TSC delta in need of measuring.
1537 */
1538 else
1539 {
1540 pGVCpu->vmmr0.s.pPreemptState = NULL;
1541 pGVCpu->iHostCpuSet = UINT32_MAX;
1542 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1543 RTThreadPreemptRestore(&PreemptState);
1544
1545 VMMRZCallRing3Enable(pGVCpu);
1546
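 /* With preemption restored we can afford to measure the TSC delta for the
    offending CPU here and then return VINF_EM_RAW_TO_R3, so the run loop is
    retried once ring-3 re-enters us with a usable delta. */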
1547 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1548 {
1549 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1550 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1551 0 /*default cTries*/);
1552 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1553 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1554 else
1555 pGVCpu->vmm.s.iLastGZRc = rc;
1556 }
1557 else
1558 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1559 }
1560 break;
1561 } /* halt loop. */
1562 break;
1563 }
1564
1565#ifdef VBOX_WITH_NEM_R0
1566# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1567 case VMMR0_DO_NEM_RUN:
1568 {
1569 /*
1570 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1571 */
1572# ifdef VBOXSTRICTRC_STRICT_ENABLED
1573 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1574# else
1575 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu);
1576# endif
1577 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1578
1579 pGVCpu->vmm.s.iLastGZRc = rc;
1580
1581 /*
1582 * Fire dtrace probe and collect statistics.
1583 */
1584 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1585# ifdef VBOX_WITH_STATISTICS
1586 vmmR0RecordRC(pGVM, pGVCpu, rc);
1587# endif
1588 break;
1589 }
1590# endif
1591#endif
1592
1593 /*
1594 * For profiling.
1595 */
1596 case VMMR0_DO_NOP:
1597 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1598 break;
1599
1600 /*
1601 * Shouldn't happen.
1602 */
1603 default:
1604 AssertMsgFailed(("%#x\n", enmOperation));
1605 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1606 break;
1607 }
1608}
1609
1610
1611/**
1612 * Validates a session or VM session argument.
1613 *
1614 * @returns true / false accordingly.
1615 * @param pGVM The global (ring-0) VM structure.
1616 * @param pClaimedSession The session claim to validate.
1617 * @param pSession The session argument.
1618 */
1619DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1620{
1621 /* This must be set! */
1622 if (!pSession)
1623 return false;
1624
1625 /* Only one out of the two. */
1626 if (pGVM && pClaimedSession)
1627 return false;
1628 if (pGVM)
1629 pClaimedSession = pGVM->pSession;
1630 return pClaimedSession == pSession;
1631}
1632
1633
1634/**
1635 * VMMR0EntryEx worker function, either called directly or whenever possible
1636 * called thru a longjmp so we can exit safely on failure.
1637 *
1638 * @returns VBox status code.
1639 * @param pGVM The global (ring-0) VM structure.
1640 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1641 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1642 * @param enmOperation Which operation to execute.
1643 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1644 * The support driver validates this if it's present.
1645 * @param u64Arg Some simple constant argument.
1646 * @param pSession The session of the caller.
1647 *
1648 * @remarks Assume called with interrupts _enabled_.
1649 */
1650DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1651 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1652{
1653 /*
1654 * Validate pGVM and idCpu for consistency and validity.
1655 */
1656 if (pGVM != NULL)
1657 {
1658 if (RT_LIKELY(((uintptr_t)pGVM & HOST_PAGE_OFFSET_MASK) == 0))
1659 { /* likely */ }
1660 else
1661 {
1662 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1663 return VERR_INVALID_POINTER;
1664 }
1665
1666 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1667 { /* likely */ }
1668 else
1669 {
1670 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1671 return VERR_INVALID_PARAMETER;
1672 }
1673
1674 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1675 && pGVM->enmVMState <= VMSTATE_TERMINATED
1676 && pGVM->pSession == pSession
1677 && pGVM->pSelf == pGVM))
1678 { /* likely */ }
1679 else
1680 {
1681 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1682 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1683 return VERR_INVALID_POINTER;
1684 }
1685 }
1686 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1687 { /* likely */ }
1688 else
1689 {
1690 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1691 return VERR_INVALID_PARAMETER;
1692 }
1693
1694 /*
1695 * Process the request.
1696 */
1697 int rc;
1698 switch (enmOperation)
1699 {
1700 /*
1701 * GVM requests
1702 */
1703 case VMMR0_DO_GVMM_CREATE_VM:
1704 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1705 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1706 else
1707 rc = VERR_INVALID_PARAMETER;
1708 break;
1709
1710 case VMMR0_DO_GVMM_DESTROY_VM:
1711 if (pReqHdr == NULL && u64Arg == 0)
1712 rc = GVMMR0DestroyVM(pGVM);
1713 else
1714 rc = VERR_INVALID_PARAMETER;
1715 break;
1716
1717 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1718 if (pGVM != NULL)
1719 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1720 else
1721 rc = VERR_INVALID_PARAMETER;
1722 break;
1723
1724 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1725 if (pGVM != NULL)
1726 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1727 else
1728 rc = VERR_INVALID_PARAMETER;
1729 break;
1730
1731 case VMMR0_DO_GVMM_REGISTER_WORKER_THREAD:
1732 if (pGVM != NULL && pReqHdr && pReqHdr->cbReq == sizeof(GVMMREGISTERWORKERTHREADREQ))
1733 rc = GVMMR0RegisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg,
1734 ((PGVMMREGISTERWORKERTHREADREQ)(pReqHdr))->hNativeThreadR3);
1735 else
1736 rc = VERR_INVALID_PARAMETER;
1737 break;
1738
1739 case VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD:
1740 if (pGVM != NULL)
1741 rc = GVMMR0DeregisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg);
1742 else
1743 rc = VERR_INVALID_PARAMETER;
1744 break;
1745
1746 case VMMR0_DO_GVMM_SCHED_HALT:
1747 if (pReqHdr)
1748 return VERR_INVALID_PARAMETER;
1749 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1750 break;
1751
1752 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1753 if (pReqHdr || u64Arg)
1754 return VERR_INVALID_PARAMETER;
1755 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1756 break;
1757
1758 case VMMR0_DO_GVMM_SCHED_POKE:
1759 if (pReqHdr || u64Arg)
1760 return VERR_INVALID_PARAMETER;
1761 rc = GVMMR0SchedPoke(pGVM, idCpu);
1762 break;
1763
1764 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1765 if (u64Arg)
1766 return VERR_INVALID_PARAMETER;
1767 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1768 break;
1769
1770 case VMMR0_DO_GVMM_SCHED_POLL:
1771 if (pReqHdr || u64Arg > 1)
1772 return VERR_INVALID_PARAMETER;
1773 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1774 break;
1775
1776 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1777 if (u64Arg)
1778 return VERR_INVALID_PARAMETER;
1779 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1780 break;
1781
1782 case VMMR0_DO_GVMM_RESET_STATISTICS:
1783 if (u64Arg)
1784 return VERR_INVALID_PARAMETER;
1785 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1786 break;
1787
1788 /*
1789 * Initialize the R0 part of a VM instance.
1790 */
1791 case VMMR0_DO_VMMR0_INIT:
1792 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1793 break;
1794
1795 /*
1796 * Does EMT specific ring-0 init.
1797 */
1798         * Does EMT-specific ring-0 init.
1799 if (idCpu == NIL_VMCPUID)
1800 return VERR_INVALID_CPU_ID;
1801 rc = vmmR0InitVMEmt(pGVM, idCpu);
1802 break;
1803
1804 /*
1805 * Terminate the R0 part of a VM instance.
1806 */
1807 case VMMR0_DO_VMMR0_TERM:
1808 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1809 break;
1810
1811 /*
1812 * Update release or debug logger instances.
1813 */
1814 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1815 if (idCpu == NIL_VMCPUID)
1816 return VERR_INVALID_CPU_ID;
1817 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1818 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1819 else
1820 return VERR_INVALID_PARAMETER;
1821 break;
1822
1823 /*
1824 * Log flusher thread.
1825 */
1826 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1827 if (idCpu != NIL_VMCPUID)
1828 return VERR_INVALID_CPU_ID;
1829 if (pReqHdr == NULL && pGVM != NULL)
1830 rc = vmmR0LogFlusher(pGVM);
1831 else
1832 return VERR_INVALID_PARAMETER;
1833 break;
1834
1835 /*
1836 * Wait for the flush to finish with all the buffers for the given logger.
1837 */
1838 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1839 if (idCpu == NIL_VMCPUID)
1840 return VERR_INVALID_CPU_ID;
1841 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1842 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1843 else
1844 return VERR_INVALID_PARAMETER;
1845 break;
1846
1847 /*
1848         * Attempt to enable HM mode and check the current setting.
1849 */
1850 case VMMR0_DO_HM_ENABLE:
1851 rc = HMR0EnableAllCpus(pGVM);
1852 break;
1853
1854 /*
1855         * Set up the hardware-accelerated session.
1856 */
1857 case VMMR0_DO_HM_SETUP_VM:
1858 rc = HMR0SetupVM(pGVM);
1859 break;
1860
1861 /*
1862 * PGM wrappers.
1863 */
1864 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1865 if (idCpu == NIL_VMCPUID)
1866 return VERR_INVALID_CPU_ID;
1867 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1868 break;
1869
1870 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1871 if (idCpu == NIL_VMCPUID)
1872 return VERR_INVALID_CPU_ID;
1873 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1874 break;
1875
1876 case VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE:
1877 if (idCpu == NIL_VMCPUID)
1878 return VERR_INVALID_CPU_ID;
1879 rc = PGMR0PhysAllocateLargePage(pGVM, idCpu, u64Arg);
1880 break;
1881
1882 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1883 if (idCpu != 0)
1884 return VERR_INVALID_CPU_ID;
1885 rc = PGMR0PhysSetupIoMmu(pGVM);
1886 break;
1887
1888 case VMMR0_DO_PGM_POOL_GROW:
1889 if (idCpu == NIL_VMCPUID)
1890 return VERR_INVALID_CPU_ID;
1891 rc = PGMR0PoolGrow(pGVM, idCpu);
1892 break;
1893
1894 /*
1895 * GMM wrappers.
1896 */
1897 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1898 if (u64Arg)
1899 return VERR_INVALID_PARAMETER;
1900 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1901 break;
1902
1903 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1904 if (u64Arg)
1905 return VERR_INVALID_PARAMETER;
1906 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1907 break;
1908
1909 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1910 if (u64Arg)
1911 return VERR_INVALID_PARAMETER;
1912 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1913 break;
1914
1915 case VMMR0_DO_GMM_FREE_PAGES:
1916 if (u64Arg)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1919 break;
1920
1921 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1922 if (u64Arg)
1923 return VERR_INVALID_PARAMETER;
1924 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1925 break;
1926
1927 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1928 if (u64Arg)
1929 return VERR_INVALID_PARAMETER;
1930 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1931 break;
1932
1933 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1934 if (idCpu == NIL_VMCPUID)
1935 return VERR_INVALID_CPU_ID;
1936 if (u64Arg)
1937 return VERR_INVALID_PARAMETER;
1938 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1939 break;
1940
1941 case VMMR0_DO_GMM_BALLOONED_PAGES:
1942 if (u64Arg)
1943 return VERR_INVALID_PARAMETER;
1944 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1945 break;
1946
1947 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1948 if (u64Arg)
1949 return VERR_INVALID_PARAMETER;
1950 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1951 break;
1952
1953 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1954 if (idCpu == NIL_VMCPUID)
1955 return VERR_INVALID_CPU_ID;
1956 if (u64Arg)
1957 return VERR_INVALID_PARAMETER;
1958 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1959 break;
1960
1961 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1962 if (idCpu == NIL_VMCPUID)
1963 return VERR_INVALID_CPU_ID;
1964 if (u64Arg)
1965 return VERR_INVALID_PARAMETER;
1966 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1967 break;
1968
1969 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1970 if (idCpu == NIL_VMCPUID)
1971 return VERR_INVALID_CPU_ID;
1972 if ( u64Arg
1973 || pReqHdr)
1974 return VERR_INVALID_PARAMETER;
1975 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1976 break;
1977
1978#ifdef VBOX_WITH_PAGE_SHARING
1979 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1980 {
1981 if (idCpu == NIL_VMCPUID)
1982 return VERR_INVALID_CPU_ID;
1983 if ( u64Arg
1984 || pReqHdr)
1985 return VERR_INVALID_PARAMETER;
1986 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1987 break;
1988 }
1989#endif
1990
1991#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1992 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1993 if (u64Arg)
1994 return VERR_INVALID_PARAMETER;
1995 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1996 break;
1997#endif
1998
1999 case VMMR0_DO_GMM_QUERY_STATISTICS:
2000 if (u64Arg)
2001 return VERR_INVALID_PARAMETER;
2002 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2003 break;
2004
2005 case VMMR0_DO_GMM_RESET_STATISTICS:
2006 if (u64Arg)
2007 return VERR_INVALID_PARAMETER;
2008 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2009 break;
2010
2011 /*
2012 * A quick GCFGM mock-up.
2013 */
2014 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2015 case VMMR0_DO_GCFGM_SET_VALUE:
2016 case VMMR0_DO_GCFGM_QUERY_VALUE:
2017 {
2018 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2019 return VERR_INVALID_PARAMETER;
2020 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2021 if (pReq->Hdr.cbReq != sizeof(*pReq))
2022 return VERR_INVALID_PARAMETER;
2023 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2024 {
2025 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2026 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2027 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2028 }
2029 else
2030 {
2031 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2032 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2033 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2034 }
2035 break;
2036 }
2037
2038 /*
2039 * PDM Wrappers.
2040 */
2041 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2042 {
2043 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2044 return VERR_INVALID_PARAMETER;
2045 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2046 break;
2047 }
2048
2049 case VMMR0_DO_PDM_DEVICE_CREATE:
2050 {
2051 if (!pReqHdr || u64Arg || idCpu != 0)
2052 return VERR_INVALID_PARAMETER;
2053 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2054 break;
2055 }
2056
2057 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2058 {
2059 if (!pReqHdr || u64Arg)
2060 return VERR_INVALID_PARAMETER;
2061 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2062 break;
2063 }
2064
2065        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2066 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2067 {
2068 if (!pReqHdr || u64Arg || idCpu != 0)
2069 return VERR_INVALID_PARAMETER;
2070 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2071 break;
2072 }
2073
2074 case VMMR0_DO_PDM_QUEUE_CREATE:
2075 {
2076 if (!pReqHdr || u64Arg || idCpu != 0)
2077 return VERR_INVALID_PARAMETER;
2078 rc = PDMR0QueueCreateReqHandler(pGVM, (PPDMQUEUECREATEREQ)pReqHdr);
2079 break;
2080 }
2081
2082 /*
2083 * Requests to the internal networking service.
2084 */
2085 case VMMR0_DO_INTNET_OPEN:
2086 {
2087 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2088 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2089 return VERR_INVALID_PARAMETER;
2090 rc = IntNetR0OpenReq(pSession, pReq);
2091 break;
2092 }
2093
2094 case VMMR0_DO_INTNET_IF_CLOSE:
2095 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2096 return VERR_INVALID_PARAMETER;
2097 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2098 break;
2099
2100
2101 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2102 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2103 return VERR_INVALID_PARAMETER;
2104 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2105 break;
2106
2107 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2108 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2109 return VERR_INVALID_PARAMETER;
2110 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2111 break;
2112
2113 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2114 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2115 return VERR_INVALID_PARAMETER;
2116 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2117 break;
2118
2119 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2120 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2121 return VERR_INVALID_PARAMETER;
2122 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2123 break;
2124
2125 case VMMR0_DO_INTNET_IF_SEND:
2126 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2127 return VERR_INVALID_PARAMETER;
2128 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2129 break;
2130
2131 case VMMR0_DO_INTNET_IF_WAIT:
2132 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2133 return VERR_INVALID_PARAMETER;
2134 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2135 break;
2136
2137 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2138 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2139 return VERR_INVALID_PARAMETER;
2140 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2141 break;
2142
2143#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2144 /*
2145 * Requests to host PCI driver service.
2146 */
2147 case VMMR0_DO_PCIRAW_REQ:
2148 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2149 return VERR_INVALID_PARAMETER;
2150 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2151 break;
2152#endif
2153
2154 /*
2155 * NEM requests.
2156 */
2157#ifdef VBOX_WITH_NEM_R0
2158# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2159 case VMMR0_DO_NEM_INIT_VM:
2160 if (u64Arg || pReqHdr || idCpu != 0)
2161 return VERR_INVALID_PARAMETER;
2162 rc = NEMR0InitVM(pGVM);
2163 break;
2164
2165 case VMMR0_DO_NEM_INIT_VM_PART_2:
2166 if (u64Arg || pReqHdr || idCpu != 0)
2167 return VERR_INVALID_PARAMETER;
2168 rc = NEMR0InitVMPart2(pGVM);
2169 break;
2170
2171 case VMMR0_DO_NEM_MAP_PAGES:
2172 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2173 return VERR_INVALID_PARAMETER;
2174 rc = NEMR0MapPages(pGVM, idCpu);
2175 break;
2176
2177 case VMMR0_DO_NEM_UNMAP_PAGES:
2178 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2179 return VERR_INVALID_PARAMETER;
2180 rc = NEMR0UnmapPages(pGVM, idCpu);
2181 break;
2182
2183 case VMMR0_DO_NEM_EXPORT_STATE:
2184 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2185 return VERR_INVALID_PARAMETER;
2186 rc = NEMR0ExportState(pGVM, idCpu);
2187 break;
2188
2189 case VMMR0_DO_NEM_IMPORT_STATE:
2190 if (pReqHdr || idCpu == NIL_VMCPUID)
2191 return VERR_INVALID_PARAMETER;
2192 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2193 break;
2194
2195 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2196 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2197 return VERR_INVALID_PARAMETER;
2198 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2199 break;
2200
2201 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2202 if (pReqHdr || idCpu == NIL_VMCPUID)
2203 return VERR_INVALID_PARAMETER;
2204 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2205 break;
2206
2207 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2208 if (u64Arg || pReqHdr)
2209 return VERR_INVALID_PARAMETER;
2210 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2211 break;
2212
2213# if 1 && defined(DEBUG_bird)
2214 case VMMR0_DO_NEM_EXPERIMENT:
2215 if (pReqHdr)
2216 return VERR_INVALID_PARAMETER;
2217 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2218 break;
2219# endif
2220# endif
2221#endif
2222
2223 /*
2224 * IOM requests.
2225 */
2226 case VMMR0_DO_IOM_GROW_IO_PORTS:
2227 {
2228 if (pReqHdr || idCpu != 0)
2229 return VERR_INVALID_PARAMETER;
2230 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2231 break;
2232 }
2233
2234 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2235 {
2236 if (pReqHdr || idCpu != 0)
2237 return VERR_INVALID_PARAMETER;
2238 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2239 break;
2240 }
2241
2242 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2243 {
2244 if (pReqHdr || idCpu != 0)
2245 return VERR_INVALID_PARAMETER;
2246 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2247 break;
2248 }
2249
2250 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2251 {
2252 if (pReqHdr || idCpu != 0)
2253 return VERR_INVALID_PARAMETER;
2254 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2255 break;
2256 }
2257
2258 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2259 {
2260 if (pReqHdr || idCpu != 0)
2261 return VERR_INVALID_PARAMETER;
2262 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2263 if (RT_SUCCESS(rc))
2264 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2265 break;
2266 }
2267
2268 /*
2269 * DBGF requests.
2270 */
2271#ifdef VBOX_WITH_DBGF_TRACING
2272 case VMMR0_DO_DBGF_TRACER_CREATE:
2273 {
2274 if (!pReqHdr || u64Arg || idCpu != 0)
2275 return VERR_INVALID_PARAMETER;
2276 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2277 break;
2278 }
2279
2280 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2281 {
2282 if (!pReqHdr || u64Arg)
2283 return VERR_INVALID_PARAMETER;
2284# if 0 /** @todo */
2285 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2286# else
2287 rc = VERR_NOT_IMPLEMENTED;
2288# endif
2289 break;
2290 }
2291#endif
2292
2293 case VMMR0_DO_DBGF_BP_INIT:
2294 {
2295 if (!pReqHdr || u64Arg || idCpu != 0)
2296 return VERR_INVALID_PARAMETER;
2297 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2298 break;
2299 }
2300
2301 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2302 {
2303 if (!pReqHdr || u64Arg || idCpu != 0)
2304 return VERR_INVALID_PARAMETER;
2305 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2306 break;
2307 }
2308
2309 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2310 {
2311 if (!pReqHdr || u64Arg || idCpu != 0)
2312 return VERR_INVALID_PARAMETER;
2313 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2314 break;
2315 }
2316
2317 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2318 {
2319 if (!pReqHdr || u64Arg || idCpu != 0)
2320 return VERR_INVALID_PARAMETER;
2321 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2322 break;
2323 }
2324
2325 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2326 {
2327 if (!pReqHdr || u64Arg || idCpu != 0)
2328 return VERR_INVALID_PARAMETER;
2329 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2330 break;
2331 }
2332
2333
2334 /*
2335 * TM requests.
2336 */
2337 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2338 {
2339 if (pReqHdr || idCpu == NIL_VMCPUID)
2340 return VERR_INVALID_PARAMETER;
2341 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2342 break;
2343 }
2344
2345 /*
2346 * For profiling.
2347 */
2348 case VMMR0_DO_NOP:
2349 case VMMR0_DO_SLOW_NOP:
2350 return VINF_SUCCESS;
2351
2352 /*
2353 * For testing Ring-0 APIs invoked in this environment.
2354 */
2355 case VMMR0_DO_TESTS:
2356 /** @todo make new test */
2357 return VINF_SUCCESS;
2358
2359 default:
2360 /*
2361             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2362             * than -1, which the interrupt gate glue code might return.
2363 */
2364 Log(("operation %#x is not supported\n", enmOperation));
2365 return VERR_NOT_SUPPORTED;
2366 }
2367 return rc;
2368}
2369
2370
2371/**
2372 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2373 *
2374 * @returns VBox status code.
2375 * @param   pvArgs  The argument package (pointer to the calling GVMCPU structure).
2376 */
2377static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2378{
2379 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2380 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2381 pGVCpu->vmmr0.s.idCpu,
2382 pGVCpu->vmmr0.s.enmOperation,
2383 pGVCpu->vmmr0.s.pReq,
2384 pGVCpu->vmmr0.s.u64Arg,
2385 pGVCpu->vmmr0.s.pSession);
2386}
2387
2388
2389/**
2390 * The Ring 0 entry point, called by the support library (SUP).
2391 *
2392 * @returns VBox status code.
2393 * @param pGVM The global (ring-0) VM structure.
2394 * @param pVM The cross context VM structure.
2395 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2396 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2397 * @param enmOperation Which operation to execute.
2398 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2399 * @param u64Arg Some simple constant argument.
2400 * @param pSession The session of the caller.
2401 * @remarks Assume called with interrupts _enabled_.
2402 */
2403VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2404 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2405{
2406 /*
2407 * Requests that should only happen on the EMT thread will be
2408 * wrapped in a setjmp so we can assert without causing too much trouble.
2409 */
2410 if ( pVM != NULL
2411 && pGVM != NULL
2412 && pVM == pGVM /** @todo drop pVM or pGVM */
2413 && idCpu < pGVM->cCpus
2414 && pGVM->pSession == pSession
2415 && pGVM->pSelf == pGVM
2416 && enmOperation != VMMR0_DO_GVMM_DESTROY_VM
2417 && enmOperation != VMMR0_DO_GVMM_REGISTER_VMCPU
2418 && enmOperation != VMMR0_DO_GVMM_SCHED_WAKE_UP /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2419 && enmOperation != VMMR0_DO_GVMM_SCHED_POKE /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2420 )
2421 {
2422 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2423 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2424 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2425 && pGVCpu->hNativeThreadR0 == hNativeThread))
2426 {
2427 pGVCpu->vmmr0.s.pGVM = pGVM;
2428 pGVCpu->vmmr0.s.idCpu = idCpu;
2429 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2430 pGVCpu->vmmr0.s.pReq = pReq;
2431 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2432 pGVCpu->vmmr0.s.pSession = pSession;
2433 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmmr0.s.AssertJmpBuf, vmmR0EntryExWrapper, pGVCpu,
2434 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2435 }
2436 return VERR_VM_THREAD_NOT_EMT;
2437 }
2438 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2439}
2440
2441
2442/*********************************************************************************************************************************
2443* EMT Blocking *
2444*********************************************************************************************************************************/
2445
2446/**
2447 * Checks whether we've armed the ring-0 long jump machinery.
2448 *
2449 * @returns @c true / @c false
2450 * @param pVCpu The cross context virtual CPU structure.
2451 * @thread EMT
2452 * @sa VMMIsLongJumpArmed
2453 */
2454VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2455{
2456#ifdef RT_ARCH_X86
2457 return pVCpu->vmmr0.s.AssertJmpBuf.eip != 0;
2458#else
2459 return pVCpu->vmmr0.s.AssertJmpBuf.rip != 0;
2460#endif
2461}
2462
2463
2464/**
2465 * Locking helper that deals with HM context and checks if the thread can block.
2466 *
2467 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2468 * VERR_VMM_CANNOT_BLOCK if not able to block.
2469 * @param pVCpu The cross context virtual CPU structure of the calling
2470 * thread.
2471 * @param rcBusy What to return in case of a blocking problem. Will IPE
2472 * if VINF_SUCCESS and we cannot block.
2473 * @param pszCaller The caller (for logging problems).
2474 * @param pvLock The lock address (for logging problems).
2475 * @param pCtx Where to return context info for the resume call.
2476 * @thread EMT(pVCpu)
2477 */
2478VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2479 PVMMR0EMTBLOCKCTX pCtx)
2480{
2481 const char *pszMsg;
2482
2483 /*
2484 * Check that we are allowed to block.
2485 */
2486 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2487 {
2488 /*
2489         * Are we in HM context and w/o a context hook? If so, work the context hook.
2490 */
2491 if (pVCpu->idHostCpu != NIL_RTCPUID)
2492 {
2493 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2494
2495 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2496 {
2497 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2498 if (pVCpu->vmmr0.s.pPreemptState)
2499 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2500
2501 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2502 pCtx->fWasInHmContext = true;
2503 return VINF_SUCCESS;
2504 }
2505 }
2506
2507 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2508 {
2509 /*
2510 * Not in HM context or we've got hooks, so just check that preemption
2511 * is enabled.
2512 */
2513 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2514 {
2515 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2516 pCtx->fWasInHmContext = false;
2517 return VINF_SUCCESS;
2518 }
2519 pszMsg = "Preemption is disabled!";
2520 }
2521 else
2522 pszMsg = "Preemption state w/o HM state!";
2523 }
2524 else
2525 pszMsg = "Ring-3 calls are disabled!";
2526
2527 static uint32_t volatile s_cWarnings = 0;
2528 if (++s_cWarnings < 50)
2529        SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%Rrc\n", pszMsg, pvLock, pszCaller, rcBusy);
2530 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2531 pCtx->fWasInHmContext = false;
2532 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2533}
2534
2535
2536/**
2537 * Counterpart to VMMR0EmtPrepareToBlock.
2538 *
2539 * @param pVCpu The cross context virtual CPU structure of the calling
2540 * thread.
2541 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2542 * @thread EMT(pVCpu)
2543 */
2544VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2545{
2546 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2547 if (pCtx->fWasInHmContext)
2548 {
2549 if (pVCpu->vmmr0.s.pPreemptState)
2550 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2551
2552 pCtx->fWasInHmContext = false;
2553 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2554 }
2555 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2556}
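
/*
 * Usage sketch (illustrative only, not lifted from this file): a ring-0
 * path that may have to block on an IPRT event brackets the wait with the
 * two helpers above.  The lock pointer and event handle below are
 * hypothetical.
 *
 * @code
 *     VMMR0EMTBLOCKCTX Ctx;
 *     int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, __FUNCTION__, pvSomeLock, &Ctx);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = RTSemEventWait(hSomeEvent, RT_MS_5SEC);
 *         VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *     }
 *     // On failure, rc is VERR_SEM_BUSY (the rcBusy passed in) or VERR_VMM_CANNOT_BLOCK.
 * @endcode
 */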
2557
2558
2559/**
2560 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock.
2561 *
2562 * @returns VBox status code.
2563 * @retval VERR_THREAD_IS_TERMINATING
2564 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2565 * @a cMsTimeout or to maximum wait values.
2566 *
2567 * @param pGVCpu The ring-0 virtual CPU structure.
2568 * @param fFlags VMMR0EMTWAIT_F_XXX.
2569 * @param hEvent The event to wait on.
2570 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2571 */
2572VMMR0_INT_DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2573{
2574 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2575
2576 /*
2577 * Note! Similar code is found in the PDM critical sections too.
2578 */
2579 uint64_t const nsStart = RTTimeNanoTS();
2580 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2581 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2582 uint32_t cMsMaxOne = RT_MS_5SEC;
2583 bool fNonInterruptible = false;
2584 for (;;)
2585 {
2586 /* Wait. */
2587 int rcWait = !fNonInterruptible
2588 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2589 : RTSemEventWait(hEvent, cMsMaxOne);
2590 if (RT_SUCCESS(rcWait))
2591 return rcWait;
2592
2593 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2594 {
2595 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2596
2597 /*
2598 * Check the thread termination status.
2599 */
2600 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2601 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2602 ("rcTerm=%Rrc\n", rcTerm));
2603 if ( rcTerm == VERR_NOT_SUPPORTED
2604 && !fNonInterruptible
2605 && cNsMaxTotal > RT_NS_1MIN)
2606 cNsMaxTotal = RT_NS_1MIN;
2607
2608 /* We return immediately if it looks like the thread is terminating. */
2609 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2610 return VERR_THREAD_IS_TERMINATING;
2611
2612 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2613 specified, otherwise we'll just return it. */
2614 if (rcWait == VERR_INTERRUPTED)
2615 {
2616 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2617 return VERR_INTERRUPTED;
2618 if (!fNonInterruptible)
2619 {
2620 /* First time: Adjust down the wait parameters and make sure we get at least
2621 one non-interruptible wait before timing out. */
2622 fNonInterruptible = true;
2623 cMsMaxOne = 32;
2624 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2625 if (cNsLeft > RT_NS_10SEC)
2626 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2627 continue;
2628 }
2629 }
2630
2631 /* Check for timeout. */
2632 if (cNsElapsed > cNsMaxTotal)
2633 return VERR_TIMEOUT;
2634 }
2635 else
2636 return rcWait;
2637 }
2638 /* not reached */
2639}
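
/*
 * Illustrative call (the event handle is hypothetical): an EMT that wants
 * to ride out signals rather than return VERR_INTERRUPTED might use
 *
 * @code
 *     int rc = VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
 *                                     hSomeEvent, RT_MS_5SEC);
 * @endcode
 *
 * This is the pattern vmmR0LoggerFlushInner uses further down, there with
 * RT_INDEFINITE_WAIT and the per-logger flush event.
 */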
2640
2641
2642/**
2643 * Helper for signalling an SUPSEMEVENT.
2644 *
2645 * This may temporarily leave the HM context if the host requires that for
2646 * signalling SUPSEMEVENT objects.
2647 *
2648 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2649 * @param pGVM The ring-0 VM structure.
2650 * @param pGVCpu The ring-0 virtual CPU structure.
2651 * @param hEvent The event to signal.
2652 */
2653VMMR0_INT_DECL(int) VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent)
2654{
2655 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2656 if (RTSemEventIsSignalSafe())
2657 return SUPSemEventSignal(pGVM->pSession, hEvent);
2658
2659 VMMR0EMTBLOCKCTX Ctx;
2660 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2661 if (RT_SUCCESS(rc))
2662 {
2663 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2664 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2665 }
2666 return rc;
2667}
2668
2669
2670/**
2671 * Helper for signalling an SUPSEMEVENT, variant supporting non-EMTs.
2672 *
2673 * This may temporarily leave the HM context if the host requires that for
2674 * signalling SUPSEMEVENT objects.
2675 *
2676 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2677 * @param pGVM The ring-0 VM structure.
2678 * @param hEvent The event to signal.
2679 */
2680VMMR0_INT_DECL(int) VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent)
2681{
2682 if (!RTSemEventIsSignalSafe())
2683 {
2684 PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
2685 if (pGVCpu)
2686 {
2687 VMMR0EMTBLOCKCTX Ctx;
2688 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2689 if (RT_SUCCESS(rc))
2690 {
2691 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2692 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2693 }
2694 return rc;
2695 }
2696 }
2697 return SUPSemEventSignal(pGVM->pSession, hEvent);
2698}
2699
2700
2701/*********************************************************************************************************************************
2702* Logging. *
2703*********************************************************************************************************************************/
2704
2705/**
2706 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2707 *
2708 * @returns VBox status code.
2709 * @param pGVM The global (ring-0) VM structure.
2710 * @param idCpu The ID of the calling EMT.
2711 * @param pReq The request data.
2712 * @param idxLogger Which logger set to update.
2713 * @thread EMT(idCpu)
2714 */
2715static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2716{
2717 /*
2718 * Check sanity. First we require EMT to be calling us.
2719 */
2720 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2721 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2722
2723 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2724 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2725 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2726
2727 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2728
2729 /*
2730 * Adjust flags.
2731 */
2732 /* Always buffered: */
2733 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2734    /* These don't make sense at present: */
2735 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2736 /* We've traditionally skipped the group restrictions. */
2737 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2738
2739 /*
2740 * Do the updating.
2741 */
2742 int rc = VINF_SUCCESS;
2743 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2744 {
2745 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2746 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2747 if (pLogger)
2748 {
2749 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2750 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2751 }
2752 }
2753
2754 return rc;
2755}
2756
2757
2758/**
2759 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2760 *
2761 * The job info is copied into VMM::LogFlusherItem.
2762 *
2763 * @returns VBox status code.
2764 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2765 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2766 * @param pGVM The global (ring-0) VM structure.
2767 * @thread The log flusher thread (first caller automatically becomes the log
2768 * flusher).
2769 */
2770static int vmmR0LogFlusher(PGVM pGVM)
2771{
2772 /*
2773 * Check that this really is the flusher thread.
2774 */
2775 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2776 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2777 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2778 { /* likely */ }
2779 else
2780 {
2781 /* The first caller becomes the flusher thread. */
2782 bool fOk;
2783 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2784 if (!fOk)
2785 return VERR_NOT_OWNER;
2786 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2787 }
2788
2789 /*
2790 * Acknowledge flush, waking up waiting EMT.
2791 */
2792 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2793
2794 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2795 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2796 if ( idxTail != idxHead
2797 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2798 {
2799 /* Pop the head off the ring buffer. */
2800 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2801 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2802 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2803
2804 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2805 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2806
2807 /* Validate content. */
2808 if ( idCpu < pGVM->cCpus
2809 && idxLogger < VMMLOGGER_IDX_MAX
2810 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2811 {
2812 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2813 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2814 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2815
2816 /*
2817 * Accounting.
2818 */
2819 uint32_t cFlushing = pR0Log->cFlushing - 1;
2820 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2821 { /*likely*/ }
2822 else
2823 cFlushing = 0;
2824 pR0Log->cFlushing = cFlushing;
2825 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2826
2827 /*
2828 * Wake up the EMT if it's waiting.
2829 */
2830 if (!pR0Log->fEmtWaiting)
2831 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2832 else
2833 {
2834 pR0Log->fEmtWaiting = false;
2835 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2836
2837 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2838 if (RT_FAILURE(rc))
2839 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2840 idxHead, idCpu, idxLogger, idxBuffer, rc));
2841 }
2842 }
2843 else
2844 {
2845 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2846 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2847 }
2848
2849 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2850 }
2851
2852 /*
2853 * The wait loop.
2854 */
2855 int rc;
2856 for (;;)
2857 {
2858 /*
2859 * Work pending?
2860 */
2861 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2862 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2863 if (idxTail != idxHead)
2864 {
2865 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2866 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2867
2868 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2869 return VINF_SUCCESS;
2870 }
2871
2872 /*
2873         * Nothing to do, so check for termination and go to sleep.
2874 */
2875 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2876 { /* likely */ }
2877 else
2878 {
2879 rc = VERR_OBJECT_DESTROYED;
2880 break;
2881 }
2882
2883 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2884 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2885
2886 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2887
2888 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2889 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2890
2891 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2892 { /* likely */ }
2893 else if (rc == VERR_INTERRUPTED)
2894 {
2895 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2896 return rc;
2897 }
2898 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2899 break;
2900 else
2901 {
2902 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2903 break;
2904 }
2905 }
2906
2907 /*
2908 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2909 */
2910 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2911 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2912
2913 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2914 return rc;
2915}
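
/*
 * Ring buffer note (added comment, not original source): with
 * N = RT_ELEMENTS(LogFlusher.aRing) the ring is empty when
 * idxRingHead == idxRingTail and is treated as full when
 * (idxRingTail + 1) % N == idxRingHead, i.e. one slot always stays unused.
 * EMTs queue jobs at the tail in vmmR0LoggerFlushInner; this function marks
 * the head entry as being processed, copies it to VMM::LogFlusherItem for
 * ring-3, and pops/acknowledges it on the next call.
 */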
2916
2917
2918/**
2919 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2920 * buffers for logger @a idxLogger.
2921 *
2922 * @returns VBox status code.
2923 * @param pGVM The global (ring-0) VM structure.
2924 * @param idCpu The ID of the calling EMT.
2925 * @param idxLogger Which logger to wait on.
2926 * @thread EMT(idCpu)
2927 */
2928static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2929{
2930 /*
2931 * Check sanity. First we require EMT to be calling us.
2932 */
2933 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2934 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2935 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2936 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2937 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2938
2939 /*
2940 * Do the waiting.
2941 */
2942 int rc = VINF_SUCCESS;
2943 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2944 uint32_t cFlushing = pR0Log->cFlushing;
2945 while (cFlushing > 0)
2946 {
2947 pR0Log->fEmtWaiting = true;
2948 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2949
2950 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
2951
2952 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2953 pR0Log->fEmtWaiting = false;
2954 if (RT_SUCCESS(rc))
2955 {
2956            /* Read the new count and make sure it decreased before looping. That
2957               way we can guarantee that we wait no more than 5 min per buffer. */
2958 uint32_t const cPrevFlushing = cFlushing;
2959 cFlushing = pR0Log->cFlushing;
2960 if (cFlushing < cPrevFlushing)
2961 continue;
2962 rc = VERR_INTERNAL_ERROR_3;
2963 }
2964 break;
2965 }
2966 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2967 return rc;
2968}
2969
2970
2971/**
2972 * Inner worker for vmmR0LoggerFlushCommon.
2973 */
2974static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
2975{
2976 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2977 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2978
2979 /*
2980 * Figure out what we need to do and whether we can.
2981 */
2982 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
2983#if VMMLOGGER_BUFFER_COUNT >= 2
2984 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
2985 {
2986 if (RTSemEventIsSignalSafe())
2987 enmAction = kJustSignal;
2988 else if (VMMRZCallRing3IsEnabled(pGVCpu))
2989 enmAction = kPrepAndSignal;
2990 else
2991 {
2992 /** @todo This is a bit simplistic. We could introduce a FF to signal the
2993 * thread or similar. */
2994 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2995# if defined(RT_OS_LINUX)
2996 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
2997# endif
2998 pShared->cbDropped += cbToFlush;
2999 return true;
3000 }
3001 }
3002 else
3003#endif
3004 if (VMMRZCallRing3IsEnabled(pGVCpu))
3005 enmAction = kPrepSignalAndWait;
3006 else
3007 {
3008 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3009# if defined(RT_OS_LINUX)
3010 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
3011# endif
3012 pShared->cbDropped += cbToFlush;
3013 return true;
3014 }
3015
3016 /*
3017 * Prepare for blocking if necessary.
3018 */
3019 VMMR0EMTBLOCKCTX Ctx;
3020 if (enmAction != kJustSignal)
3021 {
3022 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
3023 if (RT_SUCCESS(rc))
3024 { /* likely */ }
3025 else
3026 {
3027 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3028 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3029 return false;
3030 }
3031 }
3032
3033 /*
3034 * Queue the flush job.
3035 */
3036 bool fFlushedBuffer;
3037 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3038 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3039 {
3040 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3041 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3042 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3043 if (idxNewTail != idxHead)
3044 {
3045 /* Queue it. */
3046 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3047 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3048 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3049 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3050 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3051
3052 /* Update the number of buffers currently being flushed. */
3053 uint32_t cFlushing = pR0Log->cFlushing;
3054 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3055 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3056
3057 /* We must wait if all buffers are currently being flushed. */
3058 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3059 pR0Log->fEmtWaiting = fEmtWaiting;
3060
3061 /* Stats. */
3062 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3063 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3064
3065 /* Signal the worker thread. */
3066 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3067 {
3068 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3069 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3070 }
3071 else
3072 {
3073 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3074 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3075 }
3076
3077 /*
3078 * Wait for a buffer to finish flushing.
3079 *
3080             * Note! Lazy bird is ignoring the status code here. The result is
3081             *       that we might end up with an extra event signalling and the
3082             *       next time we need to wait we won't, ending up with some log
3083             *       corruption. However, it's too much hassle right now for
3084             *       a scenario which would most likely end the process rather
3085             *       than cause log corruption.
3086 */
3087 if (fEmtWaiting)
3088 {
3089 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3090 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3091 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3092 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3093 }
3094
3095 /*
3096 * We always switch buffer if we have more than one.
3097 */
3098#if VMMLOGGER_BUFFER_COUNT == 1
3099 fFlushedBuffer = true;
3100#else
3101 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3102 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3103 fFlushedBuffer = false;
3104#endif
3105 }
3106 else
3107 {
3108 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3109 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3110 fFlushedBuffer = true;
3111 }
3112 }
3113 else
3114 {
3115 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3116 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3117 fFlushedBuffer = true;
3118 }
3119
3120 /*
3121 * Restore the HM context.
3122 */
3123 if (enmAction != kJustSignal)
3124 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3125
3126 return fFlushedBuffer;
3127}
3128
3129
3130/**
3131 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3132 */
3133static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3134{
3135 /*
3136 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3137     * (This code is a bit paranoid.)
3138 */
3139 if (RT_VALID_PTR(pLogger))
3140 {
3141 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3142 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3143 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3144 {
3145 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3146 if ( RT_VALID_PTR(pGVCpu)
3147 && ((uintptr_t)pGVCpu & HOST_PAGE_OFFSET_MASK) == 0)
3148 {
3149 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3150 PGVM const pGVM = pGVCpu->pGVM;
3151 if ( hNativeSelf == pGVCpu->hEMT
3152 && RT_VALID_PTR(pGVM))
3153 {
3154 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3155 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3156 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3157 {
3158 /*
3159 * Make sure we don't recurse forever here should something in the
3160 * following code trigger logging or an assertion. Do the rest in
3161                         * an inner worker to avoid hitting the right margin too hard.
3162 */
3163 if (!pR0Log->fFlushing)
3164 {
3165 pR0Log->fFlushing = true;
3166 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3167 pR0Log->fFlushing = false;
3168 return fFlushed;
3169 }
3170
3171 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3172 }
3173 else
3174 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3175 }
3176 else
3177 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3178 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3179 }
3180 else
3181 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3182 }
3183 else
3184 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3185 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3186 }
3187 else
3188 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3189 return true;
3190}
3191
3192
3193/**
3194 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3195 */
3196static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3197{
3198 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3199}
3200
3201
3202/**
3203 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3204 */
3205static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3206{
3207#ifdef LOG_ENABLED
3208 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3209#else
3210 RT_NOREF(pLogger, pBufDesc);
3211 return true;
3212#endif
3213}
3214
3215
3216/*
3217 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3218 */
3219DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3220{
3221#ifdef LOG_ENABLED
3222 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3223 if (pGVCpu)
3224 {
3225 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3226 if (RT_VALID_PTR(pLogger))
3227 {
3228 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3229 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3230 {
3231 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3232 {
3233 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3234 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3235 return NULL;
3236 }
3237
3238 /*
3239 * When we're flushing we _must_ return NULL here to suppress any
3240 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3241 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3242 * which will reset the buffer content before we even get to queue
3243 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3244 * is enabled.)
3245 */
3246 return NULL;
3247 }
3248 }
3249 }
3250#endif
3251 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3252}
3253
3254
3255/*
3256 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3257 */
3258DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3259{
3260 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3261 if (pGVCpu)
3262 {
3263 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3264 if (RT_VALID_PTR(pLogger))
3265 {
3266 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3267 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3268 {
3269 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3270 {
3271 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3272 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3273 return NULL;
3274 }
3275 }
3276 }
3277 }
3278 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3279}
3280
3281
3282/**
3283 * Helper for vmmR0InitLoggerSet
3284 */
3285static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3286 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3287{
3288 /*
3289 * Create and configure the logger.
3290 */
3291 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3292 {
3293 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3294 pR0Log->aBufDescs[i].uReserved = 0;
3295 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3296 pR0Log->aBufDescs[i].offBuf = 0;
3297 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3298 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3299
3300 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3301 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3302 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3303 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3304 pShared->aBufs[i].AuxDesc.offBuf = 0;
3305 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3306 }
3307 pShared->cbBuf = cbBuf;
3308
3309 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3310 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3311 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3312 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3313 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3314 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3315 if (RT_SUCCESS(rc))
3316 {
3317 PRTLOGGER pLogger = pR0Log->pLogger;
3318 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3319 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3320 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3321
3322 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3323 if (RT_SUCCESS(rc))
3324 {
3325 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3326
3327 /*
3328 * Create the event sem the EMT waits on while flushing is happening.
3329 */
3330 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3331 if (RT_SUCCESS(rc))
3332 return VINF_SUCCESS;
3333 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3334 }
3335 RTLogDestroy(pLogger);
3336 }
3337 pR0Log->pLogger = NULL;
3338 return rc;
3339}
3340
3341
3342/**
3343 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3344 */
3345static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3346{
3347 RTLogDestroy(pR0Log->pLogger);
3348 pR0Log->pLogger = NULL;
3349
3350 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3351 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3352
3353 RTSemEventDestroy(pR0Log->hEventFlushWait);
3354 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3355}
3356
3357
3358/**
3359 * Initializes one type of loggers for each EMT.
3360 */
3361static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3362{
3363 /* Allocate buffers first. */
3364 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3365 if (RT_SUCCESS(rc))
3366 {
3367 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3368 if (RT_SUCCESS(rc))
3369 {
3370 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3371 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3372
3373 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3374 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3375
3376 /* Initialize the per-CPU loggers. */
3377 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3378 {
3379 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3380 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3381 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3382 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3383 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3384 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3385 if (RT_FAILURE(rc))
3386 {
3387 vmmR0TermLoggerOne(pR0Log, pShared);
3388 while (i-- > 0)
3389 {
3390 pGVCpu = &pGVM->aCpus[i];
3391 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3392 }
3393 break;
3394 }
3395 }
3396 if (RT_SUCCESS(rc))
3397 return VINF_SUCCESS;
3398
3399 /* Bail out. */
3400 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3401 *phMapObj = NIL_RTR0MEMOBJ;
3402 }
3403 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3404 *phMemObj = NIL_RTR0MEMOBJ;
3405 }
3406 return rc;
3407}
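
/*
 * Buffer layout note (added comment, not original source): the single
 * allocation above holds cbBuf * VMMLOGGER_BUFFER_COUNT * cCpus bytes, and
 * the buffer for CPU i, slot j starts at
 *     pchBuf + (i * VMMLOGGER_BUFFER_COUNT + j) * cbBuf
 * with the read-only ring-3 mapping (pchBufR3) using the same offsets.
 */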
3408
3409
3410/**
3411 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3412 *
3413 * @returns VBox status code.
3414 * @param pGVM The global (ring-0) VM structure.
3415 */
3416static int vmmR0InitLoggers(PGVM pGVM)
3417{
3418 /*
3419 * Invalidate the ring buffer (not really necessary).
3420 */
3421 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3422 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3423
3424 /*
3425 * Create the spinlock and flusher event semaphore.
3426 */
3427 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3428 if (RT_SUCCESS(rc))
3429 {
3430 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3431 if (RT_SUCCESS(rc))
3432 {
3433 /*
3434 * Create the ring-0 release loggers.
3435 */
3436 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3437 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3438#ifdef LOG_ENABLED
3439 if (RT_SUCCESS(rc))
3440 {
3441 /*
3442 * Create debug loggers.
3443 */
3444 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3445 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3446 }
3447#endif
3448 }
3449 }
3450 return rc;
3451}
3452
3453
3454/**
3455 * Counterpart to vmmR0InitLoggers that cleans up all the logging related stuff.
3456 *
3457 * @param pGVM The global (ring-0) VM structure.
3458 */
3459static void vmmR0CleanupLoggers(PGVM pGVM)
3460{
3461 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3462 {
3463 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3464 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3465 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3466 }
3467
3468 /*
3469 * Free logger buffer memory.
3470 */
3471 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3472 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3473 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3474 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3475
3476 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3477 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3478 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3479 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3480
3481 /*
3482 * Free log flusher related stuff.
3483 */
3484 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3485 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3486 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3487 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3488}
3489
3490
3491/*********************************************************************************************************************************
3492* Assertions *
3493*********************************************************************************************************************************/
3494
3495/**
3496 * Installs a notification callback for ring-0 assertions.
3497 *
3498 * @param pVCpu The cross context virtual CPU structure.
3499 * @param pfnCallback Pointer to the callback.
3500 * @param pvUser The user argument.
3501 *
3502 * @return VBox status code.
3503 */
3504VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
3505{
3506 AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
3507 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
3508
3509 if (!pVCpu->vmmr0.s.pfnAssertCallback)
3510 {
3511 pVCpu->vmmr0.s.pfnAssertCallback = pfnCallback;
3512 pVCpu->vmmr0.s.pvAssertCallbackUser = pvUser;
3513 return VINF_SUCCESS;
3514 }
3515 return VERR_ALREADY_EXISTS;
3516}
3517
3518
3519/**
3520 * Removes the ring-0 callback.
3521 *
3522 * @param pVCpu The cross context virtual CPU structure.
3523 */
3524VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
3525{
3526 pVCpu->vmmr0.s.pfnAssertCallback = NULL;
3527 pVCpu->vmmr0.s.pvAssertCallbackUser = NULL;
3528}
3529
3530
3531/**
3532 * Checks whether there is a ring-0 callback notification active.
3533 *
3534 * @param pVCpu The cross context virtual CPU structure.
3535 * @returns true if the notification is active, false otherwise.
3536 */
3537VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
3538{
3539 return pVCpu->vmmr0.s.pfnAssertCallback != NULL;
3540}
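
/*
 * Illustrative sketch (not part of the original source): how a ring-0 caller
 * could pair VMMR0AssertionSetNotification and VMMR0AssertionRemoveNotification
 * around work that may trip a ring-0 assertion.  The callback parameter types
 * are inferred from the invocation in RTAssertShouldPanic() below and from the
 * RTR0PTR user argument of the setter; the function names are hypothetical and
 * the block is excluded from compilation on purpose.
 */
#if 0 /* example only */
static DECLCALLBACK(void) vmmR0ExampleAssertNotify(PVMCPUCC pVCpu, RTR0PTR pvUser)
{
    NOREF(pVCpu);
    /* Last-chance logging before the longjmp back to ring-3 unwinds this EMT. */
    SUPR0Printf("vmmR0ExampleAssertNotify: state=%p\n", (void *)pvUser);
}

static int vmmR0ExampleGuardedWork(PVMCPUCC pVCpu, RTR0PTR pvState)
{
    int rc = VMMR0AssertionSetNotification(pVCpu, vmmR0ExampleAssertNotify, pvState);
    if (RT_SUCCESS(rc))
    {
        /* ... work that may assert ... */
        VMMR0AssertionRemoveNotification(pVCpu);
    }
    return rc;
}
#endif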
3541
3542
3543/*
3544 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3545 *
3546 * @returns true if the breakpoint should be hit, false if it should be ignored.
3547 */
3548DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3549{
3550#if 0
3551 return true;
3552#else
3553 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3554 if (pVM)
3555 {
3556 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3557
3558 if (pVCpu)
3559 {
3560# ifdef RT_ARCH_X86
3561 if (pVCpu->vmmr0.s.AssertJmpBuf.eip)
3562# else
3563 if (pVCpu->vmmr0.s.AssertJmpBuf.rip)
3564# endif
3565 {
3566 if (pVCpu->vmmr0.s.pfnAssertCallback)
3567 pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser);
3568 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION);
3569 return RT_FAILURE_NP(rc);
3570 }
3571 }
3572 }
3573# ifdef RT_OS_LINUX
3574 return true;
3575# else
3576 return false;
3577# endif
3578#endif
3579}
3580
3581
3582/*
3583 * Override this so we can push it up to ring-3.
3584 */
3585DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3586{
3587 /*
3588 * To host kernel log/whatever.
3589 */
3590 SUPR0Printf("!!R0-Assertion Failed!!\n"
3591 "Expression: %s\n"
3592 "Location : %s(%d) %s\n",
3593 pszExpr, pszFile, uLine, pszFunction);
3594
3595 /*
3596 * To the log.
3597 */
3598 LogAlways(("\n!!R0-Assertion Failed!!\n"
3599 "Expression: %s\n"
3600 "Location : %s(%d) %s\n",
3601 pszExpr, pszFile, uLine, pszFunction));
3602
3603 /*
3604 * To the global VMM buffer.
3605 */
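    /* (The %.*s precision below truncates the expression to roughly 3/4 of the
        buffer so the location line that follows still fits.) */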
3606 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3607 if (pVM)
3608 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3609 "\n!!R0-Assertion Failed!!\n"
3610 "Expression: %.*s\n"
3611 "Location : %s(%d) %s\n",
3612 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3613 pszFile, uLine, pszFunction);
3614
3615 /*
3616 * Continue the normal way.
3617 */
3618 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3619}
3620
3621
3622/**
3623 * Callback for RTLogFormatV which writes to the ring-3 log port.
3624 * See PFNLOGOUTPUT() for details.
3625 */
3626static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3627{
3628 for (size_t i = 0; i < cbChars; i++)
3629 {
3630 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3631 }
3632
3633 NOREF(pv);
3634 return cbChars;
3635}
3636
3637
3638/*
3639 * Override this so we can push it up to ring-3.
3640 */
3641DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3642{
3643 va_list vaCopy;
3644
3645 /*
3646 * Push the message to the loggers.
3647 */
3648 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3649 if (pLog)
3650 {
3651 va_copy(vaCopy, va);
3652 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3653 va_end(vaCopy);
3654 }
3655 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3656 if (pLog)
3657 {
3658 va_copy(vaCopy, va);
3659 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3660 va_end(vaCopy);
3661 }
3662
3663 /*
3664 * Push it to the global VMM buffer.
3665 */
3666 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3667 if (pVM)
3668 {
3669 va_copy(vaCopy, va);
3670 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3671 va_end(vaCopy);
3672 }
3673
3674 /*
3675 * Continue the normal way.
3676 */
3677 RTAssertMsg2V(pszFormat, va);
3678}
3679