VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp @ 92392

Last change on this file was r92392, checked in by vboxsync, 3 years ago:

VMM: Removed the callring-3 API and some of the associated stuff. bugref:10093

1/* $Id: VMMR0.cpp 92392 2021-11-12 10:39:56Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/semaphore.h>
62#include <iprt/spinlock.h>
63#include <iprt/stdarg.h>
64#include <iprt/string.h>
65#include <iprt/thread.h>
66#include <iprt/timer.h>
67#include <iprt/time.h>
68
69#include "dtrace/VBoxVMM.h"
70
71
72#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
73# pragma intrinsic(_AddressOfReturnAddress)
74#endif
75
76#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
77# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84RT_C_DECLS_BEGIN
85#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
86extern uint64_t __udivdi3(uint64_t, uint64_t);
87extern uint64_t __umoddi3(uint64_t, uint64_t);
88#endif
89RT_C_DECLS_END
90static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
91static int vmmR0LogFlusher(PGVM pGVM);
92static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
93static int vmmR0InitLoggers(PGVM pGVM);
94static void vmmR0CleanupLoggers(PGVM pGVM);
95
96
97/*********************************************************************************************************************************
98* Global Variables *
99*********************************************************************************************************************************/
100/** Drag in necessary library bits.
101 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
102struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
103{
104 { (PFNRT)RTCrc32 },
105 { (PFNRT)RTOnce },
106#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
107 { (PFNRT)__udivdi3 },
108 { (PFNRT)__umoddi3 },
109#endif
110 { NULL }
111};
112
113#ifdef RT_OS_SOLARIS
114/* Dependency information for the native solaris loader. */
115extern "C" { char _depends_on[] = "vboxdrv"; }
116#endif
117
118
119/**
120 * Initialize the module.
121 * This is called when we're first loaded.
122 *
123 * @returns 0 on success.
124 * @returns VBox status on failure.
125 * @param hMod Image handle for use in APIs.
126 */
127DECLEXPORT(int) ModuleInit(void *hMod)
128{
129#ifdef VBOX_WITH_DTRACE_R0
130 /*
131 * The first thing to do is register the static tracepoints.
132 * (Deregistration is automatic.)
133 */
134 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
135 if (RT_FAILURE(rc2))
136 return rc2;
137#endif
138 LogFlow(("ModuleInit:\n"));
139
140#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
141 /*
142 * Display the CMOS debug code.
143 */
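    /* (Port 0x72 selects the register index in the extended CMOS bank, here 0x03, and port 0x73 returns its value.) */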
144 ASMOutU8(0x72, 0x03);
145 uint8_t bDebugCode = ASMInU8(0x73);
146 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
147 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
148#endif
149
150 /*
151 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
152 */
153 int rc = vmmInitFormatTypes();
154 if (RT_SUCCESS(rc))
155 {
156 rc = GVMMR0Init();
157 if (RT_SUCCESS(rc))
158 {
159 rc = GMMR0Init();
160 if (RT_SUCCESS(rc))
161 {
162 rc = HMR0Init();
163 if (RT_SUCCESS(rc))
164 {
165 PDMR0Init(hMod);
166
167 rc = PGMRegisterStringFormatTypes();
168 if (RT_SUCCESS(rc))
169 {
170 rc = IntNetR0Init();
171 if (RT_SUCCESS(rc))
172 {
173#ifdef VBOX_WITH_PCI_PASSTHROUGH
174 rc = PciRawR0Init();
175#endif
176 if (RT_SUCCESS(rc))
177 {
178 rc = CPUMR0ModuleInit();
179 if (RT_SUCCESS(rc))
180 {
181#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
182 rc = vmmR0TripleFaultHackInit();
183 if (RT_SUCCESS(rc))
184#endif
185 {
186#ifdef VBOX_WITH_NEM_R0
187 rc = NEMR0Init();
188 if (RT_SUCCESS(rc))
189#endif
190 {
191 LogFlow(("ModuleInit: returns success\n"));
192 return VINF_SUCCESS;
193 }
194 }
195
196 /*
197 * Bail out.
198 */
199#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
200 vmmR0TripleFaultHackTerm();
201#endif
202 }
203 else
204 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
205#ifdef VBOX_WITH_PCI_PASSTHROUGH
206 PciRawR0Term();
207#endif
208 }
209 else
210 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
211 IntNetR0Term();
212 }
213 else
214 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
215 PGMDeregisterStringFormatTypes();
216 }
217 else
218 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
219 HMR0Term();
220 }
221 else
222 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
223 GMMR0Term();
224 }
225 else
226 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
227 GVMMR0Term();
228 }
229 else
230 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
231 vmmTermFormatTypes();
232 }
233 else
234 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
235
236 LogFlow(("ModuleInit: failed %Rrc\n", rc));
237 return rc;
238}
239
240
241/**
242 * Terminate the module.
243 * This is called when we're finally unloaded.
244 *
245 * @param hMod Image handle for use in APIs.
246 */
247DECLEXPORT(void) ModuleTerm(void *hMod)
248{
249 NOREF(hMod);
250 LogFlow(("ModuleTerm:\n"));
251
252 /*
253 * Terminate the CPUM module (Local APIC cleanup).
254 */
255 CPUMR0ModuleTerm();
256
257 /*
258 * Terminate the internal network service.
259 */
260 IntNetR0Term();
261
262 /*
263 * PGM (Darwin), HM and PciRaw global cleanup.
264 */
265#ifdef VBOX_WITH_PCI_PASSTHROUGH
266 PciRawR0Term();
267#endif
268 PGMDeregisterStringFormatTypes();
269 HMR0Term();
270#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
271 vmmR0TripleFaultHackTerm();
272#endif
273#ifdef VBOX_WITH_NEM_R0
274 NEMR0Term();
275#endif
276
277 /*
278 * Destroy the GMM and GVMM instances.
279 */
280 GMMR0Term();
281 GVMMR0Term();
282
283 vmmTermFormatTypes();
284
285 LogFlow(("ModuleTerm: returns\n"));
286}
287
288
289/**
290 * Initializes VMM specific members when the GVM structure is created,
291 * allocating loggers and stuff.
292 *
293 * The loggers are allocated here so that we can update their settings before
294 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
295 *
296 * @returns VBox status code.
297 * @param pGVM The global (ring-0) VM structure.
298 */
299VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
300{
301 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
302
303 /*
304 * Initialize all members first.
305 */
306 pGVM->vmmr0.s.fCalledInitVm = false;
307 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
308 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
309 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
310 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
311 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
312 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
313 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
314 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
315 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
316 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
317
318 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
319 {
320 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
321 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
322 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
323 pGVCpu->vmmr0.s.pPreemptState = NULL;
324 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
325 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
326 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
327 }
328
329 /*
330 * Create the loggers.
331 */
332 return vmmR0InitLoggers(pGVM);
333}
334
335
336/**
337 * Initiates the R0 driver for a particular VM instance.
338 *
339 * @returns VBox status code.
340 *
341 * @param pGVM The global (ring-0) VM structure.
342 * @param uSvnRev The SVN revision of the ring-3 part.
343 * @param uBuildType Build type indicator.
344 * @thread EMT(0)
345 */
346static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
347{
348 /*
349 * Match the SVN revisions and build type.
350 */
351 if (uSvnRev != VMMGetSvnRev())
352 {
353 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
354 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
355 return VERR_VMM_R0_VERSION_MISMATCH;
356 }
357 if (uBuildType != vmmGetBuildType())
358 {
359 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
360 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
361 return VERR_VMM_R0_VERSION_MISMATCH;
362 }
363
364 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
365 if (RT_FAILURE(rc))
366 return rc;
367
368 /* Don't allow this to be called more than once. */
369 if (!pGVM->vmmr0.s.fCalledInitVm)
370 pGVM->vmmr0.s.fCalledInitVm = true;
371 else
372 return VERR_ALREADY_INITIALIZED;
373
374#ifdef LOG_ENABLED
375
376 /*
377 * Register the EMT R0 logger instance for VCPU 0.
378 */
379 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
380 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
381 {
382# if 0 /* testing of the logger. */
383 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
384 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
385 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
386 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
387
388 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
389 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
390 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
391 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
392
393 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
394 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
395 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
396 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
397
398 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
399 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
400 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
401 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
402 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
403 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
404
405 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
406 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
407
408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
409 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
410 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
411# endif
412# ifdef VBOX_WITH_R0_LOGGING
413 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
414 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
415 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
416# endif
417 }
418#endif /* LOG_ENABLED */
419
420 /*
421 * Check if the host supports high resolution timers or not.
422 */
423 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
424 && !RTTimerCanDoHighResolution())
425 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
426
427 /*
428 * Initialize the per VM data for GVMM and GMM.
429 */
430 rc = GVMMR0InitVM(pGVM);
431 if (RT_SUCCESS(rc))
432 {
433 /*
434 * Init HM, CPUM and PGM (Darwin only).
435 */
436 rc = HMR0InitVM(pGVM);
437 if (RT_SUCCESS(rc))
438 {
439 rc = CPUMR0InitVM(pGVM);
440 if (RT_SUCCESS(rc))
441 {
442 rc = PGMR0InitVM(pGVM);
443 if (RT_SUCCESS(rc))
444 {
445 rc = EMR0InitVM(pGVM);
446 if (RT_SUCCESS(rc))
447 {
448#ifdef VBOX_WITH_PCI_PASSTHROUGH
449 rc = PciRawR0InitVM(pGVM);
450#endif
451 if (RT_SUCCESS(rc))
452 {
453 rc = GIMR0InitVM(pGVM);
454 if (RT_SUCCESS(rc))
455 {
456 GVMMR0DoneInitVM(pGVM);
457
458 /*
459 * Collect a bit of info for the VM release log.
460 */
461 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
462 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
463 return rc;
464
465 /* bail out */
466 //GIMR0TermVM(pGVM);
467 }
468#ifdef VBOX_WITH_PCI_PASSTHROUGH
469 PciRawR0TermVM(pGVM);
470#endif
471 }
472 }
473 }
474 }
475 HMR0TermVM(pGVM);
476 }
477 }
478
479 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
480 return rc;
481}
482
483
484/**
485 * Does EMT specific VM initialization.
486 *
487 * @returns VBox status code.
488 * @param pGVM The ring-0 VM structure.
489 * @param idCpu The EMT that's calling.
490 */
491static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
492{
493 /* Paranoia (caller checked these already). */
494 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
495 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
496
497#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
498 /*
499 * Registration of ring 0 loggers.
500 */
501 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
502 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
503 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
504 {
505 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
506 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
507 }
508#endif
509
510 return VINF_SUCCESS;
511}
512
513
514
515/**
516 * Terminates the R0 bits for a particular VM instance.
517 *
518 * This is normally called by ring-3 as part of the VM termination process, but
519 * may alternatively be called during the support driver session cleanup when
520 * the VM object is destroyed (see GVMM).
521 *
522 * @returns VBox status code.
523 *
524 * @param pGVM The global (ring-0) VM structure.
525 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
526 * thread.
527 * @thread EMT(0) or session clean up thread.
528 */
529VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
530{
531 /*
532 * Check EMT(0) claim if we're called from userland.
533 */
534 if (idCpu != NIL_VMCPUID)
535 {
536 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
537 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
538 if (RT_FAILURE(rc))
539 return rc;
540 }
541
542#ifdef VBOX_WITH_PCI_PASSTHROUGH
543 PciRawR0TermVM(pGVM);
544#endif
545
546 /*
547 * Tell GVMM what we're up to and check that we only do this once.
548 */
549 if (GVMMR0DoingTermVM(pGVM))
550 {
551 GIMR0TermVM(pGVM);
552
553 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
554 * here to make sure we don't leak any shared pages if we crash... */
555 HMR0TermVM(pGVM);
556 }
557
558 /*
559 * Deregister the logger for this EMT.
560 */
561 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
562
563 /*
564 * Start log flusher thread termination.
565 */
566 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
567 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
568 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
569
570 return VINF_SUCCESS;
571}
572
573
574/**
575 * This is called at the end of gvmmR0CleanupVM().
576 *
577 * @param pGVM The global (ring-0) VM structure.
578 */
579VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
580{
581 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
582 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
583 {
584 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
585
586 /** @todo Can we busy wait here for all thread-context hooks to be
587 * deregistered before releasing (destroying) it? Only until we find a
588 * solution for not deregistering hooks every time we're leaving HMR0
589 * context. */
590 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
591 }
592
593 vmmR0CleanupLoggers(pGVM);
594}
595
596
597/**
598 * An interrupt or unhalt force flag is set, deal with it.
599 *
600 * @returns VINF_SUCCESS (or VINF_EM_HALT).
601 * @param pVCpu The cross context virtual CPU structure.
602 * @param uMWait Result from EMMonitorWaitIsActive().
603 * @param enmInterruptibility Guest CPU interruptibility level.
604 */
605static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
606{
607 Assert(!TRPMHasTrap(pVCpu));
608 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
609 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
610
611 /*
612 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
613 */
614 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
615 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
616 {
617 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
618 {
619 uint8_t u8Interrupt = 0;
620 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
621 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
622 if (RT_SUCCESS(rc))
623 {
624 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
625
626 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
627 AssertRCSuccess(rc);
628 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
629 return rc;
630 }
631 }
632 }
633 /*
634 * SMI is not implemented yet, at least not here.
635 */
636 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
637 {
638 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
639 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
640 return VINF_EM_HALT;
641 }
642 /*
643 * NMI.
644 */
645 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
646 {
647 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
648 {
649 /** @todo later. */
650 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
651 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
652 return VINF_EM_HALT;
653 }
654 }
655 /*
656 * Nested-guest virtual interrupt.
657 */
658 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
659 {
660 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
661 {
662 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
663 * here before injecting the virtual interrupt. See emR3ForcedActions
664 * for details. */
665 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
666 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
667 return VINF_EM_HALT;
668 }
669 }
670
671 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
672 {
673 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
674 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
675 return VINF_SUCCESS;
676 }
677 if (uMWait > 1)
678 {
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
680 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
681 return VINF_SUCCESS;
682 }
683
684 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
685 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
686 return VINF_EM_HALT;
687}
688
689
690/**
691 * This does one round of vmR3HaltGlobal1Halt().
692 *
693 * The rationale here is that we'll reduce latency in interrupt situations if we
694 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
695 * MWAIT), but do one round of blocking here instead and hope the interrupt is
696 * raised in the meanwhile.
697 *
698 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
699 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
700 * ring-0 call (unless we're too close to a timer event). When the interrupt
701 * wakes us up, we'll return from ring-0 and EM will by instinct do a
702 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
703 * back to VMMR0EntryFast().
704 *
705 * @returns VINF_SUCCESS or VINF_EM_HALT.
706 * @param pGVM The ring-0 VM structure.
707 * @param pGVCpu The ring-0 virtual CPU structure.
708 *
709 * @todo r=bird: All the blocking/waiting and EMT management should move out of
710 * the VM module, probably to VMM. Then this would be more weird wrt
711 * parameters and statistics.
712 */
713static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
714{
715 /*
716 * Do spin stat historization.
717 */
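    /* (Every 256 halts the success/to-ring-3 counters below are rebalanced so that
       only a small recent bias carries over into the spin/block heuristics further down.) */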
718 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
719 { /* likely */ }
720 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
721 {
722 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
723 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
724 }
725 else
726 {
727 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
728 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
729 }
730
731 /*
732 * Flags that make us go to ring-3.
733 */
734 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
735 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
736 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
737 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
738 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
739 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
740 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
741 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
742
743 /*
744 * Check preconditions.
745 */
746 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
747 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
748 if ( pGVCpu->vmm.s.fMayHaltInRing0
749 && !TRPMHasTrap(pGVCpu)
750 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
751 || uMWait > 1))
752 {
753 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
754 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
755 {
756 /*
757 * Interrupts pending already?
758 */
759 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
760 APICUpdatePendingInterrupts(pGVCpu);
761
762 /*
763 * Flags that wake us up from the halted state.
764 */
765 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
766 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
767
768 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
769 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
770 ASMNopPause();
771
772 /*
773 * Check out how long till the next timer event.
774 */
775 uint64_t u64Delta;
776 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
777
778 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
779 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
780 {
781 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
782 APICUpdatePendingInterrupts(pGVCpu);
783
784 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
785 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
786
787 /*
788 * Wait if there is enough time until the next timer event.
789 */
790 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
791 {
792 /* If there are a few other CPU cores around, we will procrastinate a
793 little before going to sleep, hoping for some device raising an
794 interrupt or similar. Though, the best thing here would be to
795 dynamically adjust the spin count according to its usefulness or
796 something... */
797 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
798 && RTMpGetOnlineCount() >= 4)
799 {
800 /** @todo Figure out how we can skip this if it hasn't helped recently...
801 * @bugref{9172#c12} */
802 uint32_t cSpinLoops = 42;
803 while (cSpinLoops-- > 0)
804 {
805 ASMNopPause();
806 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
807 APICUpdatePendingInterrupts(pGVCpu);
808 ASMNopPause();
809 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
810 {
811 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
812 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
813 return VINF_EM_HALT;
814 }
815 ASMNopPause();
816 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
817 {
818 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
819 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
820 return VINF_EM_HALT;
821 }
822 ASMNopPause();
823 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
824 {
825 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
826 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
827 }
828 ASMNopPause();
829 }
830 }
831
832 /*
833 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
834 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
835 * After changing the state we must recheck the force flags of course.
836 */
837 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
838 {
839 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
840 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
841 {
842 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
843 APICUpdatePendingInterrupts(pGVCpu);
844
845 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
846 {
847 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
848 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
849 }
850
851 /* Okay, block! */
852 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
853 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
854 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
855 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
856 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
857
858 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
859 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
860 if ( rc == VINF_SUCCESS
861 || rc == VERR_INTERRUPTED)
862 {
863 /* Keep some stats like ring-3 does. */
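                            /* (A wake-up within +/-50000 ns, i.e. 50 us, of the GIP deadline counts as on time below.) */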
864 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
865 if (cNsOverslept > 50000)
866 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
867 else if (cNsOverslept < -50000)
868 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
869 else
870 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
871
872 /*
873 * Recheck whether we can resume execution or have to go to ring-3.
874 */
875 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
876 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
877 {
878 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
879 APICUpdatePendingInterrupts(pGVCpu);
880 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
881 {
882 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
883 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
884 }
885 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
886 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
887 }
888 else
889 {
890 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
891 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
892 }
893 }
894 else
895 {
896 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
897 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
898 }
899 }
900 else
901 {
902 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
903 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
904 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
905 }
906 }
907 else
908 {
909 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
910 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
911 }
912 }
913 else
914 {
915 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
916 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
917 }
918 }
919 else
920 {
921 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
922 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
923 }
924 }
925 else
926 {
927 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
928 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
929 }
930 }
931 else
932 {
933 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
934 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
935 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
936 }
937
938 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
939 return VINF_EM_HALT;
940}
941
942
943/**
944 * VMM ring-0 thread-context callback.
945 *
946 * This does common HM state updating and calls the HM-specific thread-context
947 * callback.
948 *
949 * This is used together with RTThreadCtxHookCreate() on platforms which
950 * support it, and directly from VMMR0EmtPrepareForBlocking() and
951 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
952 *
953 * @param enmEvent The thread-context event.
954 * @param pvUser Opaque pointer to the VMCPU.
955 *
956 * @thread EMT(pvUser)
957 */
958static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
959{
960 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
961
962 switch (enmEvent)
963 {
964 case RTTHREADCTXEVENT_IN:
965 {
966 /*
967 * Linux may call us with preemption enabled (really!) but technically we
968 * cannot get preempted here, otherwise we end up in an infinite recursion
969 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
970 * ad infinitum). Let's just disable preemption for now...
971 */
972 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
973 * preemption after doing the callout (one or two functions up the
974 * call chain). */
975 /** @todo r=ramshankar: See @bugref{5313#c30}. */
976 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
977 RTThreadPreemptDisable(&ParanoidPreemptState);
978
979 /* We need to update the VCPU <-> host CPU mapping. */
980 RTCPUID idHostCpu;
981 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
982 pVCpu->iHostCpuSet = iHostCpuSet;
983 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
984
985 /* In the very unlikely event that the GIP delta for the CPU we're
986 rescheduled onto needs calculating, try to force a return to ring-3.
987 We unfortunately cannot do the measurements right here. */
988 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
989 { /* likely */ }
990 else
991 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
992
993 /* Invoke the HM-specific thread-context callback. */
994 HMR0ThreadCtxCallback(enmEvent, pvUser);
995
996 /* Restore preemption. */
997 RTThreadPreemptRestore(&ParanoidPreemptState);
998 break;
999 }
1000
1001 case RTTHREADCTXEVENT_OUT:
1002 {
1003 /* Invoke the HM-specific thread-context callback. */
1004 HMR0ThreadCtxCallback(enmEvent, pvUser);
1005
1006 /*
1007 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1008 * have the same host CPU associated with it.
1009 */
1010 pVCpu->iHostCpuSet = UINT32_MAX;
1011 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1012 break;
1013 }
1014
1015 default:
1016 /* Invoke the HM-specific thread-context callback. */
1017 HMR0ThreadCtxCallback(enmEvent, pvUser);
1018 break;
1019 }
1020}
1021
1022
1023/**
1024 * Creates thread switching hook for the current EMT thread.
1025 *
1026 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1027 * platform does not implement switcher hooks, no hooks will be created and the
1028 * member set to NIL_RTTHREADCTXHOOK.
1029 *
1030 * @returns VBox status code.
1031 * @param pVCpu The cross context virtual CPU structure.
1032 * @thread EMT(pVCpu)
1033 */
1034VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1035{
1036 VMCPU_ASSERT_EMT(pVCpu);
1037 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1038
1039#if 1 /* To disable this stuff change to zero. */
1040 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1041 if (RT_SUCCESS(rc))
1042 {
1043 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1044 return rc;
1045 }
1046#else
1047 RT_NOREF(vmmR0ThreadCtxCallback);
1048 int rc = VERR_NOT_SUPPORTED;
1049#endif
1050
1051 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1052 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1053 if (rc == VERR_NOT_SUPPORTED)
1054 return VINF_SUCCESS;
1055
1056 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1057 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1058}
1059
1060
1061/**
1062 * Destroys the thread switching hook for the specified VCPU.
1063 *
1064 * @param pVCpu The cross context virtual CPU structure.
1065 * @remarks Can be called from any thread.
1066 */
1067VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1068{
1069 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1070 AssertRC(rc);
1071 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1072}
1073
1074
1075/**
1076 * Disables the thread switching hook for this VCPU (if we got one).
1077 *
1078 * @param pVCpu The cross context virtual CPU structure.
1079 * @thread EMT(pVCpu)
1080 *
1081 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1082 * this call. This means you have to be careful with what you do!
1083 */
1084VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1085{
1086 /*
1087 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1088 * @bugref{7726#c19} explains the need for this trick:
1089 *
1090 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1091 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1092 * longjmp & normal return to ring-3, which opens a window where we may be
1093 * rescheduled without changing GVMCPU::idHostCpu, causing confusion if
1094 * the CPU starts executing a different EMT. Both functions first disable
1095 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1096 * an opening for getting preempted.
1097 */
1098 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1099 * all the time. */
1100
1101 /*
1102 * Disable the context hook, if we got one.
1103 */
1104 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1105 {
1106 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1107 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1108 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1109 AssertRC(rc);
1110 }
1111}
1112
1113
1114/**
1115 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1116 *
1117 * @returns true if registered, false otherwise.
1118 * @param pVCpu The cross context virtual CPU structure.
1119 */
1120DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1121{
1122 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1123}
1124
1125
1126/**
1127 * Whether thread-context hooks are registered for this VCPU.
1128 *
1129 * @returns true if registered, false otherwise.
1130 * @param pVCpu The cross context virtual CPU structure.
1131 */
1132VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1133{
1134 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1135}
1136
1137
1138/**
1139 * Returns the ring-0 release logger instance.
1140 *
1141 * @returns Pointer to release logger, NULL if not configured.
1142 * @param pVCpu The cross context virtual CPU structure of the caller.
1143 * @thread EMT(pVCpu)
1144 */
1145VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1146{
1147 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1148}
1149
1150
1151#ifdef VBOX_WITH_STATISTICS
1152/**
1153 * Record return code statistics.
1154 * @param pVM The cross context VM structure.
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param rc The status code.
1157 */
1158static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1159{
1160 /*
1161 * Collect statistics.
1162 */
1163 switch (rc)
1164 {
1165 case VINF_SUCCESS:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1167 break;
1168 case VINF_EM_RAW_INTERRUPT:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1170 break;
1171 case VINF_EM_RAW_INTERRUPT_HYPER:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1173 break;
1174 case VINF_EM_RAW_GUEST_TRAP:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1176 break;
1177 case VINF_EM_RAW_RING_SWITCH:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1179 break;
1180 case VINF_EM_RAW_RING_SWITCH_INT:
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1182 break;
1183 case VINF_EM_RAW_STALE_SELECTOR:
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1185 break;
1186 case VINF_EM_RAW_IRET_TRAP:
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1188 break;
1189 case VINF_IOM_R3_IOPORT_READ:
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1191 break;
1192 case VINF_IOM_R3_IOPORT_WRITE:
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1194 break;
1195 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1197 break;
1198 case VINF_IOM_R3_MMIO_READ:
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1200 break;
1201 case VINF_IOM_R3_MMIO_WRITE:
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1203 break;
1204 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1206 break;
1207 case VINF_IOM_R3_MMIO_READ_WRITE:
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1209 break;
1210 case VINF_PATM_HC_MMIO_PATCH_READ:
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1212 break;
1213 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1215 break;
1216 case VINF_CPUM_R3_MSR_READ:
1217 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1218 break;
1219 case VINF_CPUM_R3_MSR_WRITE:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1221 break;
1222 case VINF_EM_RAW_EMULATE_INSTR:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1224 break;
1225 case VINF_PATCH_EMULATE_INSTR:
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1227 break;
1228 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1230 break;
1231 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1233 break;
1234 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1236 break;
1237 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1239 break;
1240 case VINF_CSAM_PENDING_ACTION:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1242 break;
1243 case VINF_PGM_SYNC_CR3:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1245 break;
1246 case VINF_PATM_PATCH_INT3:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1248 break;
1249 case VINF_PATM_PATCH_TRAP_PF:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1251 break;
1252 case VINF_PATM_PATCH_TRAP_GP:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1254 break;
1255 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1257 break;
1258 case VINF_EM_RESCHEDULE_REM:
1259 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1260 break;
1261 case VINF_EM_RAW_TO_R3:
1262 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1263 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1265 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1267 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1269 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1271 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1273 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1275 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1277 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1279 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1280 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1281 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1283 else
1284 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1285 break;
1286
1287 case VINF_EM_RAW_TIMER_PENDING:
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1289 break;
1290 case VINF_EM_RAW_INTERRUPT_PENDING:
1291 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1292 break;
1293 case VINF_PATM_DUPLICATE_FUNCTION:
1294 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1295 break;
1296 case VINF_PGM_CHANGE_MODE:
1297 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1298 break;
1299 case VINF_PGM_POOL_FLUSH_PENDING:
1300 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1301 break;
1302 case VINF_EM_PENDING_REQUEST:
1303 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1304 break;
1305 case VINF_EM_HM_PATCH_TPR_INSTR:
1306 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1307 break;
1308 default:
1309 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1310 break;
1311 }
1312}
1313#endif /* VBOX_WITH_STATISTICS */
1314
1315
1316/**
1317 * The Ring 0 entry point, called by the fast-ioctl path.
1318 *
1319 * @param pGVM The global (ring-0) VM structure.
1320 * @param pVMIgnored The cross context VM structure. The return code is
1321 * stored in pVM->vmm.s.iLastGZRc.
1322 * @param idCpu The Virtual CPU ID of the calling EMT.
1323 * @param enmOperation Which operation to execute.
1324 * @remarks Assume called with interrupts _enabled_.
1325 */
1326VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1327{
1328 RT_NOREF(pVMIgnored);
1329
1330 /*
1331 * Validation.
1332 */
1333 if ( idCpu < pGVM->cCpus
1334 && pGVM->cCpus == pGVM->cCpusUnsafe)
1335 { /*likely*/ }
1336 else
1337 {
1338 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1339 return;
1340 }
1341
1342 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1343 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1344 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1345 && pGVCpu->hNativeThreadR0 == hNativeThread))
1346 { /* likely */ }
1347 else
1348 {
1349 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1350 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1351 return;
1352 }
1353
1354 /*
1355 * Perform requested operation.
1356 */
1357 switch (enmOperation)
1358 {
1359 /*
1360 * Run guest code using the available hardware acceleration technology.
1361 */
1362 case VMMR0_DO_HM_RUN:
1363 {
1364 for (;;) /* hlt loop */
1365 {
1366 /*
1367 * Disable ring-3 calls & blocking till we've successfully entered HM.
1368 * Otherwise we sometimes end up blocking at the final Log4 statement
1369 * in VMXR0Enter, while still in a somewhat in-between state.
1370 */
1371 VMMRZCallRing3Disable(pGVCpu);
1372
1373 /*
1374 * Disable preemption.
1375 */
1376 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1377 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1378 RTThreadPreemptDisable(&PreemptState);
1379 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1380
1381 /*
1382 * Get the host CPU identifiers, make sure they are valid and that
1383 * we've got a TSC delta for the CPU.
1384 */
1385 RTCPUID idHostCpu;
1386 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1387 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1388 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1389 {
1390 pGVCpu->iHostCpuSet = iHostCpuSet;
1391 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1392
1393 /*
1394 * Update the periodic preemption timer if it's active.
1395 */
1396 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1397 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1398
1399#ifdef VMM_R0_TOUCH_FPU
1400 /*
1401 * Make sure we've got the FPU state loaded so we don't need to clear
1402 * CR0.TS and get out of sync with the host kernel when loading the guest
1403 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1404 */
1405 CPUMR0TouchHostFpu();
1406#endif
1407 int rc;
1408 bool fPreemptRestored = false;
1409 if (!HMR0SuspendPending())
1410 {
1411 /*
1412 * Enable the context switching hook.
1413 */
1414 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1415 {
1416 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1417 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1418 }
1419
1420 /*
1421 * Enter HM context.
1422 */
1423 rc = HMR0Enter(pGVCpu);
1424 if (RT_SUCCESS(rc))
1425 {
1426 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1427
1428 /*
1429 * When preemption hooks are in place, enable preemption now that
1430 * we're in HM context.
1431 */
1432 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1433 {
1434 fPreemptRestored = true;
1435 pGVCpu->vmmr0.s.pPreemptState = NULL;
1436 RTThreadPreemptRestore(&PreemptState);
1437 }
1438 VMMRZCallRing3Enable(pGVCpu);
1439
1440 /*
1441 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1442 */
1443 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1444
1445 /*
1446 * Assert sanity on the way out. Using manual assertion code here as normal
1447 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1448 */
1449 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1450 && RT_SUCCESS_NP(rc)
1451 && rc != VERR_VMM_RING0_ASSERTION ))
1452 {
1453 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1454 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1455 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1456 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1457 }
1458#if 0
1459 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1460 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1461 {
1462 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1463 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1464 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1465 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1466 }
1467#endif
1468
1469 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1470 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1471 }
1472 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1473
1474 /*
1475 * Invalidate the host CPU identifiers before we disable the context
1476 * hook / restore preemption.
1477 */
1478 pGVCpu->iHostCpuSet = UINT32_MAX;
1479 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1480
1481 /*
1482 * Disable context hooks. Due to unresolved cleanup issues, we
1483 * cannot leave the hooks enabled when we return to ring-3.
1484 *
1485 * Note! At the moment HM may also have disabled the hook
1486 * when we get here, but the IPRT API handles that.
1487 */
1488 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1489 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1490 }
1491 /*
1492 * The system is about to go into suspend mode; go back to ring 3.
1493 */
1494 else
1495 {
1496 pGVCpu->iHostCpuSet = UINT32_MAX;
1497 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1498 rc = VINF_EM_RAW_INTERRUPT;
1499 }
1500
1501 /** @todo When HM stops messing with the context hook state, we'll disable
1502 * preemption again before the RTThreadCtxHookDisable call. */
1503 if (!fPreemptRestored)
1504 {
1505 pGVCpu->vmmr0.s.pPreemptState = NULL;
1506 RTThreadPreemptRestore(&PreemptState);
1507 }
1508
1509 pGVCpu->vmm.s.iLastGZRc = rc;
1510
1511 /* Fire dtrace probe and collect statistics. */
1512 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1513#ifdef VBOX_WITH_STATISTICS
1514 vmmR0RecordRC(pGVM, pGVCpu, rc);
1515#endif
1516 VMMRZCallRing3Enable(pGVCpu);
1517
1518 /*
1519 * If this is a halt.
1520 */
1521 if (rc != VINF_EM_HALT)
1522 { /* we're not in a hurry for a HLT, so prefer this path */ }
1523 else
1524 {
1525 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1526 if (rc == VINF_SUCCESS)
1527 {
1528 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1529 continue;
1530 }
1531 pGVCpu->vmm.s.cR0HaltsToRing3++;
1532 }
1533 }
1534 /*
1535 * Invalid CPU set index or TSC delta in need of measuring.
1536 */
1537 else
1538 {
1539 pGVCpu->vmmr0.s.pPreemptState = NULL;
1540 pGVCpu->iHostCpuSet = UINT32_MAX;
1541 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1542 RTThreadPreemptRestore(&PreemptState);
1543
1544 VMMRZCallRing3Enable(pGVCpu);
1545
1546 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1547 {
1548 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1549 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1550 0 /*default cTries*/);
1551 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1552 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1553 else
1554 pGVCpu->vmm.s.iLastGZRc = rc;
1555 }
1556 else
1557 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1558 }
1559 break;
1560 } /* halt loop. */
1561 break;
1562 }
1563
1564#ifdef VBOX_WITH_NEM_R0
1565# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1566 case VMMR0_DO_NEM_RUN:
1567 {
1568 /*
1569 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1570 */
1571# ifdef VBOXSTRICTRC_STRICT_ENABLED
1572 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1573# else
1574 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1575# endif
1576 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1577
1578 pGVCpu->vmm.s.iLastGZRc = rc;
1579
1580 /*
1581 * Fire dtrace probe and collect statistics.
1582 */
1583 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1584# ifdef VBOX_WITH_STATISTICS
1585 vmmR0RecordRC(pGVM, pGVCpu, rc);
1586# endif
1587 break;
1588 }
1589# endif
1590#endif
1591
1592 /*
1593 * For profiling.
1594 */
1595 case VMMR0_DO_NOP:
1596 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1597 break;
1598
1599 /*
1600 * Shouldn't happen.
1601 */
1602 default:
1603 AssertMsgFailed(("%#x\n", enmOperation));
1604 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1605 break;
1606 }
1607}
1608
1609
1610/**
1611 * Validates a session or VM session argument.
1612 *
1613 * @returns true / false accordingly.
1614 * @param pGVM The global (ring-0) VM structure.
1615 * @param pClaimedSession The session claim to validate.
1616 * @param pSession The session argument.
1617 */
1618DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1619{
1620 /* This must be set! */
1621 if (!pSession)
1622 return false;
1623
1624 /* Only one out of the two. */
1625 if (pGVM && pClaimedSession)
1626 return false;
1627 if (pGVM)
1628 pClaimedSession = pGVM->pSession;
1629 return pClaimedSession == pSession;
1630}
1631
1632
1633/**
1634 * VMMR0EntryEx worker function, either called directly or whenever possible
1635 * called thru a longjmp so we can exit safely on failure.
1636 *
1637 * @returns VBox status code.
1638 * @param pGVM The global (ring-0) VM structure.
1639 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1641 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1641 * @param enmOperation Which operation to execute.
1642 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1643 * The support driver validates this if it's present.
1644 * @param u64Arg Some simple constant argument.
1645 * @param pSession The session of the caller.
1646 *
1647 * @remarks Assume called with interrupts _enabled_.
1648 */
1649DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1650 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1651{
1652 /*
1653 * Validate pGVM and idCpu for consistency and validity.
1654 */
1655 if (pGVM != NULL)
1656 {
1657 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1658 { /* likely */ }
1659 else
1660 {
1661 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1662 return VERR_INVALID_POINTER;
1663 }
1664
1665 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1666 { /* likely */ }
1667 else
1668 {
1669 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1670 return VERR_INVALID_PARAMETER;
1671 }
1672
1673 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1674 && pGVM->enmVMState <= VMSTATE_TERMINATED
1675 && pGVM->pSession == pSession
1676 && pGVM->pSelf == pGVM))
1677 { /* likely */ }
1678 else
1679 {
1680 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1681 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1682 return VERR_INVALID_POINTER;
1683 }
1684 }
1685 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1686 { /* likely */ }
1687 else
1688 {
1689 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1690 return VERR_INVALID_PARAMETER;
1691 }
1692
1693 /*
1694 * Process the request.
1695 */
1696 int rc;
1697 switch (enmOperation)
1698 {
1699 /*
1700 * GVM requests
1701 */
1702 case VMMR0_DO_GVMM_CREATE_VM:
1703 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1704 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1705 else
1706 rc = VERR_INVALID_PARAMETER;
1707 break;
1708
1709 case VMMR0_DO_GVMM_DESTROY_VM:
1710 if (pReqHdr == NULL && u64Arg == 0)
1711 rc = GVMMR0DestroyVM(pGVM);
1712 else
1713 rc = VERR_INVALID_PARAMETER;
1714 break;
1715
1716 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1717 if (pGVM != NULL)
1718 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1719 else
1720 rc = VERR_INVALID_PARAMETER;
1721 break;
1722
1723 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1724 if (pGVM != NULL)
1725 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1726 else
1727 rc = VERR_INVALID_PARAMETER;
1728 break;
1729
1730 case VMMR0_DO_GVMM_REGISTER_WORKER_THREAD:
1731 if (pGVM != NULL && pReqHdr && pReqHdr->cbReq == sizeof(GVMMREGISTERWORKERTHREADREQ))
1732 rc = GVMMR0RegisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg,
1733 ((PGVMMREGISTERWORKERTHREADREQ)(pReqHdr))->hNativeThreadR3);
1734 else
1735 rc = VERR_INVALID_PARAMETER;
1736 break;
1737
1738 case VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD:
1739 if (pGVM != NULL)
1740 rc = GVMMR0DeregisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg);
1741 else
1742 rc = VERR_INVALID_PARAMETER;
1743 break;
1744
1745 case VMMR0_DO_GVMM_SCHED_HALT:
1746 if (pReqHdr)
1747 return VERR_INVALID_PARAMETER;
1748 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1749 break;
1750
1751 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1752 if (pReqHdr || u64Arg)
1753 return VERR_INVALID_PARAMETER;
1754 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1755 break;
1756
1757 case VMMR0_DO_GVMM_SCHED_POKE:
1758 if (pReqHdr || u64Arg)
1759 return VERR_INVALID_PARAMETER;
1760 rc = GVMMR0SchedPoke(pGVM, idCpu);
1761 break;
1762
1763 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1764 if (u64Arg)
1765 return VERR_INVALID_PARAMETER;
1766 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1767 break;
1768
1769 case VMMR0_DO_GVMM_SCHED_POLL:
1770 if (pReqHdr || u64Arg > 1)
1771 return VERR_INVALID_PARAMETER;
1772 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1773 break;
1774
1775 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1776 if (u64Arg)
1777 return VERR_INVALID_PARAMETER;
1778 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1779 break;
1780
1781 case VMMR0_DO_GVMM_RESET_STATISTICS:
1782 if (u64Arg)
1783 return VERR_INVALID_PARAMETER;
1784 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1785 break;
1786
1787 /*
1788 * Initialize the R0 part of a VM instance.
1789 */
1790 case VMMR0_DO_VMMR0_INIT:
1791 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1792 break;
1793
1794 /*
1795 * Does EMT specific ring-0 init.
1796 */
1797 case VMMR0_DO_VMMR0_INIT_EMT:
1798 rc = vmmR0InitVMEmt(pGVM, idCpu);
1799 break;
1800
1801 /*
1802 * Terminate the R0 part of a VM instance.
1803 */
1804 case VMMR0_DO_VMMR0_TERM:
1805 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1806 break;
1807
1808 /*
1809 * Update release or debug logger instances.
1810 */
1811 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1812 if (idCpu == NIL_VMCPUID)
1813 return VERR_INVALID_CPU_ID;
1814 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1815 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1816 else
1817 return VERR_INVALID_PARAMETER;
1818 break;
1819
1820 /*
1821 * Log flusher thread.
1822 */
1823 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1824 if (idCpu != NIL_VMCPUID)
1825 return VERR_INVALID_CPU_ID;
1826 if (pReqHdr == NULL)
1827 rc = vmmR0LogFlusher(pGVM);
1828 else
1829 return VERR_INVALID_PARAMETER;
1830 break;
1831
1832 /*
1833 * Wait for the flush to finish with all the buffers for the given logger.
1834 */
1835 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1836 if (idCpu == NIL_VMCPUID)
1837 return VERR_INVALID_CPU_ID;
1838 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1839 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1840 else
1841 return VERR_INVALID_PARAMETER;
1842 break;
1843
1844 /*
1845 * Attempt to enable hm mode and check the current setting.
1846 */
1847 case VMMR0_DO_HM_ENABLE:
1848 rc = HMR0EnableAllCpus(pGVM);
1849 break;
1850
1851 /*
1852 * Setup the hardware accelerated session.
1853 */
1854 case VMMR0_DO_HM_SETUP_VM:
1855 rc = HMR0SetupVM(pGVM);
1856 break;
1857
1858 /*
1859 * PGM wrappers.
1860 */
1861 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1862 if (idCpu == NIL_VMCPUID)
1863 return VERR_INVALID_CPU_ID;
1864 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1865 break;
1866
1867 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1868 if (idCpu == NIL_VMCPUID)
1869 return VERR_INVALID_CPU_ID;
1870 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1871 break;
1872
1873 case VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE:
1874 if (idCpu == NIL_VMCPUID)
1875 return VERR_INVALID_CPU_ID;
1876 rc = PGMR0PhysAllocateLargePage(pGVM, idCpu, u64Arg);
1877 break;
1878
1879 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1880 if (idCpu != 0)
1881 return VERR_INVALID_CPU_ID;
1882 rc = PGMR0PhysSetupIoMmu(pGVM);
1883 break;
1884
1885 case VMMR0_DO_PGM_POOL_GROW:
1886 if (idCpu == NIL_VMCPUID)
1887 return VERR_INVALID_CPU_ID;
1888 rc = PGMR0PoolGrow(pGVM, idCpu);
1889 break;
1890
1891 /*
1892 * GMM wrappers.
1893 */
1894 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1895 if (u64Arg)
1896 return VERR_INVALID_PARAMETER;
1897 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1898 break;
1899
1900 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1901 if (u64Arg)
1902 return VERR_INVALID_PARAMETER;
1903 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1904 break;
1905
1906 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1907 if (u64Arg)
1908 return VERR_INVALID_PARAMETER;
1909 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1910 break;
1911
1912 case VMMR0_DO_GMM_FREE_PAGES:
1913 if (u64Arg)
1914 return VERR_INVALID_PARAMETER;
1915 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1916 break;
1917
1918 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1919 if (u64Arg)
1920 return VERR_INVALID_PARAMETER;
1921 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1922 break;
1923
1924 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1925 if (u64Arg)
1926 return VERR_INVALID_PARAMETER;
1927 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1928 break;
1929
1930 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1931 if (idCpu == NIL_VMCPUID)
1932 return VERR_INVALID_CPU_ID;
1933 if (u64Arg)
1934 return VERR_INVALID_PARAMETER;
1935 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1936 break;
1937
1938 case VMMR0_DO_GMM_BALLOONED_PAGES:
1939 if (u64Arg)
1940 return VERR_INVALID_PARAMETER;
1941 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1942 break;
1943
1944 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1945 if (u64Arg)
1946 return VERR_INVALID_PARAMETER;
1947 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1948 break;
1949
1950 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1951 if (idCpu == NIL_VMCPUID)
1952 return VERR_INVALID_CPU_ID;
1953 if (u64Arg)
1954 return VERR_INVALID_PARAMETER;
1955 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1956 break;
1957
1958 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1959 if (idCpu == NIL_VMCPUID)
1960 return VERR_INVALID_CPU_ID;
1961 if (u64Arg)
1962 return VERR_INVALID_PARAMETER;
1963 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1964 break;
1965
1966 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1967 if (idCpu == NIL_VMCPUID)
1968 return VERR_INVALID_CPU_ID;
1969 if ( u64Arg
1970 || pReqHdr)
1971 return VERR_INVALID_PARAMETER;
1972 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1973 break;
1974
1975#ifdef VBOX_WITH_PAGE_SHARING
1976 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1977 {
1978 if (idCpu == NIL_VMCPUID)
1979 return VERR_INVALID_CPU_ID;
1980 if ( u64Arg
1981 || pReqHdr)
1982 return VERR_INVALID_PARAMETER;
1983 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1984 break;
1985 }
1986#endif
1987
1988#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1989 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1990 if (u64Arg)
1991 return VERR_INVALID_PARAMETER;
1992 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1993 break;
1994#endif
1995
1996 case VMMR0_DO_GMM_QUERY_STATISTICS:
1997 if (u64Arg)
1998 return VERR_INVALID_PARAMETER;
1999 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2000 break;
2001
2002 case VMMR0_DO_GMM_RESET_STATISTICS:
2003 if (u64Arg)
2004 return VERR_INVALID_PARAMETER;
2005 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2006 break;
2007
2008 /*
2009 * A quick GCFGM mock-up.
2010 */
2011 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2012 case VMMR0_DO_GCFGM_SET_VALUE:
2013 case VMMR0_DO_GCFGM_QUERY_VALUE:
2014 {
2015 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2016 return VERR_INVALID_PARAMETER;
2017 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2018 if (pReq->Hdr.cbReq != sizeof(*pReq))
2019 return VERR_INVALID_PARAMETER;
2020 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2021 {
2022 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2023 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2024 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2025 }
2026 else
2027 {
2028 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2029 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2030 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2031 }
2032 break;
2033 }
2034
2035 /*
2036 * PDM Wrappers.
2037 */
2038 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2039 {
2040 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2041 return VERR_INVALID_PARAMETER;
2042 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2043 break;
2044 }
2045
2046 case VMMR0_DO_PDM_DEVICE_CREATE:
2047 {
2048 if (!pReqHdr || u64Arg || idCpu != 0)
2049 return VERR_INVALID_PARAMETER;
2050 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2051 break;
2052 }
2053
2054 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2055 {
2056 if (!pReqHdr || u64Arg)
2057 return VERR_INVALID_PARAMETER;
2058 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2059 break;
2060 }
2061
2062 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2063 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2064 {
2065 if (!pReqHdr || u64Arg || idCpu != 0)
2066 return VERR_INVALID_PARAMETER;
2067 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2068 break;
2069 }
2070
2071 /*
2072 * Requests to the internal networking service.
2073 */
2074 case VMMR0_DO_INTNET_OPEN:
2075 {
2076 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2077 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2078 return VERR_INVALID_PARAMETER;
2079 rc = IntNetR0OpenReq(pSession, pReq);
2080 break;
2081 }
2082
2083 case VMMR0_DO_INTNET_IF_CLOSE:
2084 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2085 return VERR_INVALID_PARAMETER;
2086 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2087 break;
2088
2089
2090 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2091 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2092 return VERR_INVALID_PARAMETER;
2093 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2094 break;
2095
2096 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2097 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2098 return VERR_INVALID_PARAMETER;
2099 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2100 break;
2101
2102 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2103 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2104 return VERR_INVALID_PARAMETER;
2105 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2106 break;
2107
2108 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2109 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2110 return VERR_INVALID_PARAMETER;
2111 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2112 break;
2113
2114 case VMMR0_DO_INTNET_IF_SEND:
2115 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2116 return VERR_INVALID_PARAMETER;
2117 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2118 break;
2119
2120 case VMMR0_DO_INTNET_IF_WAIT:
2121 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2122 return VERR_INVALID_PARAMETER;
2123 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2124 break;
2125
2126 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2127 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2128 return VERR_INVALID_PARAMETER;
2129 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2130 break;
2131
2132#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2133 /*
2134 * Requests to host PCI driver service.
2135 */
2136 case VMMR0_DO_PCIRAW_REQ:
2137 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2138 return VERR_INVALID_PARAMETER;
2139 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2140 break;
2141#endif
2142
2143 /*
2144 * NEM requests.
2145 */
2146#ifdef VBOX_WITH_NEM_R0
2147# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2148 case VMMR0_DO_NEM_INIT_VM:
2149 if (u64Arg || pReqHdr || idCpu != 0)
2150 return VERR_INVALID_PARAMETER;
2151 rc = NEMR0InitVM(pGVM);
2152 break;
2153
2154 case VMMR0_DO_NEM_INIT_VM_PART_2:
2155 if (u64Arg || pReqHdr || idCpu != 0)
2156 return VERR_INVALID_PARAMETER;
2157 rc = NEMR0InitVMPart2(pGVM);
2158 break;
2159
2160 case VMMR0_DO_NEM_MAP_PAGES:
2161 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2162 return VERR_INVALID_PARAMETER;
2163 rc = NEMR0MapPages(pGVM, idCpu);
2164 break;
2165
2166 case VMMR0_DO_NEM_UNMAP_PAGES:
2167 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2168 return VERR_INVALID_PARAMETER;
2169 rc = NEMR0UnmapPages(pGVM, idCpu);
2170 break;
2171
2172 case VMMR0_DO_NEM_EXPORT_STATE:
2173 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2174 return VERR_INVALID_PARAMETER;
2175 rc = NEMR0ExportState(pGVM, idCpu);
2176 break;
2177
2178 case VMMR0_DO_NEM_IMPORT_STATE:
2179 if (pReqHdr || idCpu == NIL_VMCPUID)
2180 return VERR_INVALID_PARAMETER;
2181 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2182 break;
2183
2184 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2185 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2186 return VERR_INVALID_PARAMETER;
2187 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2188 break;
2189
2190 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2191 if (pReqHdr || idCpu == NIL_VMCPUID)
2192 return VERR_INVALID_PARAMETER;
2193 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2194 break;
2195
2196 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2197 if (u64Arg || pReqHdr)
2198 return VERR_INVALID_PARAMETER;
2199 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2200 break;
2201
2202# if 1 && defined(DEBUG_bird)
2203 case VMMR0_DO_NEM_EXPERIMENT:
2204 if (pReqHdr)
2205 return VERR_INVALID_PARAMETER;
2206 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2207 break;
2208# endif
2209# endif
2210#endif
2211
2212 /*
2213 * IOM requests.
2214 */
2215 case VMMR0_DO_IOM_GROW_IO_PORTS:
2216 {
2217 if (pReqHdr || idCpu != 0)
2218 return VERR_INVALID_PARAMETER;
2219 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2220 break;
2221 }
2222
2223 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2224 {
2225 if (pReqHdr || idCpu != 0)
2226 return VERR_INVALID_PARAMETER;
2227 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2228 break;
2229 }
2230
2231 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2232 {
2233 if (pReqHdr || idCpu != 0)
2234 return VERR_INVALID_PARAMETER;
2235 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2236 break;
2237 }
2238
2239 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2240 {
2241 if (pReqHdr || idCpu != 0)
2242 return VERR_INVALID_PARAMETER;
2243 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2244 break;
2245 }
2246
2247 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2248 {
2249 if (pReqHdr || idCpu != 0)
2250 return VERR_INVALID_PARAMETER;
2251 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2252 if (RT_SUCCESS(rc))
2253 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2254 break;
2255 }
2256
2257 /*
2258 * DBGF requests.
2259 */
2260#ifdef VBOX_WITH_DBGF_TRACING
2261 case VMMR0_DO_DBGF_TRACER_CREATE:
2262 {
2263 if (!pReqHdr || u64Arg || idCpu != 0)
2264 return VERR_INVALID_PARAMETER;
2265 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2266 break;
2267 }
2268
2269 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2270 {
2271 if (!pReqHdr || u64Arg)
2272 return VERR_INVALID_PARAMETER;
2273# if 0 /** @todo */
2274 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2275# else
2276 rc = VERR_NOT_IMPLEMENTED;
2277# endif
2278 break;
2279 }
2280#endif
2281
2282 case VMMR0_DO_DBGF_BP_INIT:
2283 {
2284 if (!pReqHdr || u64Arg || idCpu != 0)
2285 return VERR_INVALID_PARAMETER;
2286 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2287 break;
2288 }
2289
2290 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2291 {
2292 if (!pReqHdr || u64Arg || idCpu != 0)
2293 return VERR_INVALID_PARAMETER;
2294 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2295 break;
2296 }
2297
2298 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2299 {
2300 if (!pReqHdr || u64Arg || idCpu != 0)
2301 return VERR_INVALID_PARAMETER;
2302 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2303 break;
2304 }
2305
2306 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2307 {
2308 if (!pReqHdr || u64Arg || idCpu != 0)
2309 return VERR_INVALID_PARAMETER;
2310 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2311 break;
2312 }
2313
2314 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2315 {
2316 if (!pReqHdr || u64Arg || idCpu != 0)
2317 return VERR_INVALID_PARAMETER;
2318 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2319 break;
2320 }
2321
2322
2323 /*
2324 * TM requests.
2325 */
2326 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2327 {
2328 if (pReqHdr || idCpu == NIL_VMCPUID)
2329 return VERR_INVALID_PARAMETER;
2330 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2331 break;
2332 }
2333
2334 /*
2335 * For profiling.
2336 */
2337 case VMMR0_DO_NOP:
2338 case VMMR0_DO_SLOW_NOP:
2339 return VINF_SUCCESS;
2340
2341 /*
2342 * For testing Ring-0 APIs invoked in this environment.
2343 */
2344 case VMMR0_DO_TESTS:
2345 /** @todo make new test */
2346 return VINF_SUCCESS;
2347
2348 default:
2349 /*
2350 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2351 * than -1, which the interrupt gate glue code might return.
2352 */
2353 Log(("operation %#x is not supported\n", enmOperation));
2354 return VERR_NOT_SUPPORTED;
2355 }
2356 return rc;
2357}
2358
2359#ifndef VMM_R0_SWITCH_STACK /* Not safe unless we disable preemption first. */
2360/**
2361 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2362 *
2363 * @returns VBox status code.
2364 * @param pvArgs The argument package
2365 */
2366static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2367{
2368 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2369 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2370 pGVCpu->vmmr0.s.idCpu,
2371 pGVCpu->vmmr0.s.enmOperation,
2372 pGVCpu->vmmr0.s.pReq,
2373 pGVCpu->vmmr0.s.u64Arg,
2374 pGVCpu->vmmr0.s.pSession);
2375}
2376#endif
2377
2378
2379/**
2380 * The Ring 0 entry point, called by the support library (SUP).
2381 *
2382 * @returns VBox status code.
2383 * @param pGVM The global (ring-0) VM structure.
2384 * @param pVM The cross context VM structure.
2385 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2386 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2387 * @param enmOperation Which operation to execute.
2388 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2389 * @param u64Arg Some simple constant argument.
2390 * @param pSession The session of the caller.
2391 * @remarks Assume called with interrupts _enabled_.
2392 */
2393VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2394 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2395{
2396#ifndef VMM_R0_SWITCH_STACK /* Not safe unless we disable preemption first. */
2397 /*
2398 * Requests that should only happen on the EMT thread will be
2399 * wrapped in a setjmp so we can assert without causing trouble.
2400 */
2401 if ( pVM != NULL
2402 && pGVM != NULL
2403 && pVM == pGVM /** @todo drop pVM or pGVM */
2404 && idCpu < pGVM->cCpus
2405 && pGVM->pSession == pSession
2406 && pGVM->pSelf == pVM)
2407 {
2408 switch (enmOperation)
2409 {
2410 /* These might/will be called before VMMR3Init. */
2411 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2412 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2413 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2414 case VMMR0_DO_GMM_FREE_PAGES:
2415 case VMMR0_DO_GMM_BALLOONED_PAGES:
2416 /* On the mac we might not have a valid jmp buf, so check these as well. */
2417 case VMMR0_DO_VMMR0_INIT:
2418 case VMMR0_DO_VMMR0_TERM:
2419
2420 case VMMR0_DO_PDM_DEVICE_CREATE:
2421 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2422 case VMMR0_DO_IOM_GROW_IO_PORTS:
2423 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2424 case VMMR0_DO_DBGF_BP_INIT:
2425 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2426 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2427 {
2428 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2429 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2430 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2431 && pGVCpu->hNativeThreadR0 == hNativeThread))
2432 {
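                        /* No jump buffer has been set up yet (see the Mac remark above);
                           fall back to the direct worker call at the bottom of the function. */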
2433 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2434 break;
2435
2436 pGVCpu->vmmr0.s.pGVM = pGVM;
2437 pGVCpu->vmmr0.s.idCpu = idCpu;
2438 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2439 pGVCpu->vmmr0.s.pReq = pReq;
2440 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2441 pGVCpu->vmmr0.s.pSession = pSession;
2442 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2443 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2444 }
2445 return VERR_VM_THREAD_NOT_EMT;
2446 }
2447
2448 default:
2449 case VMMR0_DO_PGM_POOL_GROW:
2450 break;
2451 }
2452 }
2453#else
2454 RT_NOREF(pVM);
2455#endif
2456 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2457}
2458
2459
2460/*********************************************************************************************************************************
2461* EMT Blocking *
2462*********************************************************************************************************************************/
2463
2464/**
2465 * Checks whether we've armed the ring-0 long jump machinery.
2466 *
2467 * @returns @c true / @c false
2468 * @param pVCpu The cross context virtual CPU structure.
2469 * @thread EMT
2470 * @sa VMMIsLongJumpArmed
2471 */
2472VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2473{
2474#ifdef RT_ARCH_X86
2475 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2476 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2477#else
2478 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2479 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2480#endif
2481}
2482
2483
2484/**
2485 * Checks whether we've done a ring-3 long jump.
2486 *
2487 * @returns @c true / @c false
2488 * @param pVCpu The cross context virtual CPU structure.
2489 * @thread EMT
2490 */
2491VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2492{
2493 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2494}
2495
2496
2497/**
2498 * Locking helper that deals with HM context and checks if the thread can block.
2499 *
2500 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2501 * VERR_VMM_CANNOT_BLOCK if not able to block.
2502 * @param pVCpu The cross context virtual CPU structure of the calling
2503 * thread.
2504 * @param rcBusy What to return in case of a blocking problem. Will IPE
2505 * if VINF_SUCCESS and we cannot block.
2506 * @param pszCaller The caller (for logging problems).
2507 * @param pvLock The lock address (for logging problems).
2508 * @param pCtx Where to return context info for the resume call.
2509 * @thread EMT(pVCpu)
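 *
 * @note   Illustrative call pattern only (a sketch mirroring VMMR0EmtSignalSupEvent
 *         further down; hEvent and cMsTimeout are placeholder names):
 * @code
 *      VMMR0EMTBLOCKCTX Ctx;
 *      int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = RTSemEventWait(hEvent, cMsTimeout);
 *          VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *      }
 * @endcode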
2510 */
2511VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2512 PVMMR0EMTBLOCKCTX pCtx)
2513{
2514 const char *pszMsg;
2515
2516 /*
2517 * Check that we are allowed to block.
2518 */
2519 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2520 {
2521 /*
2522 * Are we in HM context and w/o a context hook? If so, work the context hook.
2523 */
2524 if (pVCpu->idHostCpu != NIL_RTCPUID)
2525 {
2526 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2527
2528 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2529 {
2530 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2531 if (pVCpu->vmmr0.s.pPreemptState)
2532 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2533
2534 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2535 pCtx->fWasInHmContext = true;
2536 return VINF_SUCCESS;
2537 }
2538 }
2539
2540 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2541 {
2542 /*
2543 * Not in HM context or we've got hooks, so just check that preemption
2544 * is enabled.
2545 */
2546 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2547 {
2548 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2549 pCtx->fWasInHmContext = false;
2550 return VINF_SUCCESS;
2551 }
2552 pszMsg = "Preemption is disabled!";
2553 }
2554 else
2555 pszMsg = "Preemption state w/o HM state!";
2556 }
2557 else
2558 pszMsg = "Ring-3 calls are disabled!";
2559
2560 static uint32_t volatile s_cWarnings = 0;
2561 if (++s_cWarnings < 50)
2562 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2563 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2564 pCtx->fWasInHmContext = false;
2565 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2566}
2567
2568
2569/**
2570 * Counterpart to VMMR0EmtPrepareToBlock.
2571 *
2572 * @param pVCpu The cross context virtual CPU structure of the calling
2573 * thread.
2574 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2575 * @thread EMT(pVCpu)
2576 */
2577VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2578{
2579 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2580 if (pCtx->fWasInHmContext)
2581 {
2582 if (pVCpu->vmmr0.s.pPreemptState)
2583 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2584
2585 pCtx->fWasInHmContext = false;
2586 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2587 }
2588 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2589}
2590
2591
2592/**
2593 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock.
2594 *
2595 * @returns VBox status code.
2596 * @retval VERR_THREAD_IS_TERMINATING if the calling thread is terminating.
2597 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2598 * @a cMsTimeout or to maximum wait values.
2599 *
2600 * @param pGVCpu The ring-0 virtual CPU structure.
2601 * @param fFlags VMMR0EMTWAIT_F_XXX.
2602 * @param hEvent The event to wait on.
2603 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2604 */
2605VMMR0_INT_DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2606{
2607 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2608
2609 /*
2610 * Note! Similar code is found in the PDM critical sections too.
2611 */
2612 uint64_t const nsStart = RTTimeNanoTS();
2613 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2614 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2615 uint32_t cMsMaxOne = RT_MS_5SEC;
2616 bool fNonInterruptible = false;
2617 for (;;)
2618 {
2619 /* Wait. */
2620 int rcWait = !fNonInterruptible
2621 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2622 : RTSemEventWait(hEvent, cMsMaxOne);
2623 if (RT_SUCCESS(rcWait))
2624 return rcWait;
2625
2626 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2627 {
2628 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2629
2630 /*
2631 * Check the thread termination status.
2632 */
2633 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2634 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2635 ("rcTerm=%Rrc\n", rcTerm));
2636 if ( rcTerm == VERR_NOT_SUPPORTED
2637 && !fNonInterruptible
2638 && cNsMaxTotal > RT_NS_1MIN)
2639 cNsMaxTotal = RT_NS_1MIN;
2640
2641 /* We return immediately if it looks like the thread is terminating. */
2642 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2643 return VERR_THREAD_IS_TERMINATING;
2644
2645 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2646 specified, otherwise we'll just return it. */
2647 if (rcWait == VERR_INTERRUPTED)
2648 {
2649 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2650 return VERR_INTERRUPTED;
2651 if (!fNonInterruptible)
2652 {
2653 /* First time: Adjust down the wait parameters and make sure we get at least
2654 one non-interruptible wait before timing out. */
2655 fNonInterruptible = true;
2656 cMsMaxOne = 32;
2657 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2658 if (cNsLeft > RT_NS_10SEC)
2659 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2660 continue;
2661 }
2662 }
2663
2664 /* Check for timeout. */
2665 if (cNsElapsed > cNsMaxTotal)
2666 return VERR_TIMEOUT;
2667 }
2668 else
2669 return rcWait;
2670 }
2671 /* not reached */
2672}
2673
2674
2675/**
2676 * Helper for signalling an SUPSEMEVENT.
2677 *
2678 * This may temporarily leave the HM context if the host requires that for
2679 * signalling SUPSEMEVENT objects.
2680 *
2681 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2682 * @param pGVM The ring-0 VM structure.
2683 * @param pGVCpu The ring-0 virtual CPU structure.
2684 * @param hEvent The event to signal.
2685 */
2686VMMR0_INT_DECL(int) VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent)
2687{
2688 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2689 if (RTSemEventIsSignalSafe())
2690 return SUPSemEventSignal(pGVM->pSession, hEvent);
2691
2692 VMMR0EMTBLOCKCTX Ctx;
2693 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2694 if (RT_SUCCESS(rc))
2695 {
2696 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2697 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2698 }
2699 return rc;
2700}
2701
2702
2703/**
2704 * Helper for signalling an SUPSEMEVENT, variant supporting non-EMTs.
2705 *
2706 * This may temporarily leave the HM context if the host requires that for
2707 * signalling SUPSEMEVENT objects.
2708 *
2709 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2710 * @param pGVM The ring-0 VM structure.
2711 * @param hEvent The event to signal.
2712 */
2713VMMR0_INT_DECL(int) VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent)
2714{
2715 if (!RTSemEventIsSignalSafe())
2716 {
2717 PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
2718 if (pGVCpu)
2719 {
2720 VMMR0EMTBLOCKCTX Ctx;
2721 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2722 if (RT_SUCCESS(rc))
2723 {
2724 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2725 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2726 }
2727 return rc;
2728 }
2729 }
2730 return SUPSemEventSignal(pGVM->pSession, hEvent);
2731}
2732
2733
2734/*********************************************************************************************************************************
2735* Logging. *
2736*********************************************************************************************************************************/
2737
2738/**
2739 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2740 *
2741 * @returns VBox status code.
2742 * @param pGVM The global (ring-0) VM structure.
2743 * @param idCpu The ID of the calling EMT.
2744 * @param pReq The request data.
2745 * @param idxLogger Which logger set to update.
2746 * @thread EMT(idCpu)
2747 */
2748static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2749{
2750 /*
2751 * Check sanity. First we require EMT to be calling us.
2752 */
2753 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2754 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2755
2756 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2757 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2758 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2759
2760 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2761
2762 /*
2763 * Adjust flags.
2764 */
2765 /* Always buffered: */
2766 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2767 /* These don't make sense at present: */
2768 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2769 /* We've traditionally skipped the group restrictions. */
2770 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2771
2772 /*
2773 * Do the updating.
2774 */
2775 int rc = VINF_SUCCESS;
2776 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2777 {
2778 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2779 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2780 if (pLogger)
2781 {
2782 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2783 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2784 }
2785 }
2786
2787 return rc;
2788}
2789
2790
2791/**
2792 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2793 *
2794 * The job info is copied into VMM::LogFlusherItem.
2795 *
2796 * @returns VBox status code.
2797 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2798 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2799 * @param pGVM The global (ring-0) VM structure.
2800 * @thread The log flusher thread (first caller automatically becomes the log
2801 * flusher).
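 *
 * @note   Summary of the code below: the ring-3 log flusher thread calls this
 *         in a loop; each call first acknowledges the job returned by the
 *         previous call (waking the EMT that owns that buffer) and then blocks
 *         until another buffer needs flushing.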
2802 */
2803static int vmmR0LogFlusher(PGVM pGVM)
2804{
2805 /*
2806 * Check that this really is the flusher thread.
2807 */
2808 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2809 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2810 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2811 { /* likely */ }
2812 else
2813 {
2814 /* The first caller becomes the flusher thread. */
2815 bool fOk;
2816 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2817 if (!fOk)
2818 return VERR_NOT_OWNER;
2819 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2820 }
2821
2822 /*
2823 * Acknowledge flush, waking up waiting EMT.
2824 */
2825 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2826
2827 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2828 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2829 if ( idxTail != idxHead
2830 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2831 {
2832 /* Pop the head off the ring buffer. */
2833 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2834 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2835 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2836
2837 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2838 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2839
2840 /* Validate content. */
2841 if ( idCpu < pGVM->cCpus
2842 && idxLogger < VMMLOGGER_IDX_MAX
2843 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2844 {
2845 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2846 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2847 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2848
2849 /*
2850 * Accounting.
2851 */
2852 uint32_t cFlushing = pR0Log->cFlushing - 1;
2853 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2854 { /*likely*/ }
2855 else
2856 cFlushing = 0;
2857 pR0Log->cFlushing = cFlushing;
2858 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2859
2860 /*
2861 * Wake up the EMT if it's waiting.
2862 */
2863 if (!pR0Log->fEmtWaiting)
2864 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2865 else
2866 {
2867 pR0Log->fEmtWaiting = false;
2868 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2869
2870 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2871 if (RT_FAILURE(rc))
2872 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2873 idxHead, idCpu, idxLogger, idxBuffer, rc));
2874 }
2875 }
2876 else
2877 {
2878 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2879 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2880 }
2881
2882 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2883 }
2884
2885 /*
2886 * The wait loop.
2887 */
2888 int rc;
2889 for (;;)
2890 {
2891 /*
2892 * Work pending?
2893 */
2894 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2895 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2896 if (idxTail != idxHead)
2897 {
2898 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2899 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2900
2901 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2902 return VINF_SUCCESS;
2903 }
2904
2905 /*
2906 * Nothing to do, so check for termination and go to sleep.
2907 */
2908 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2909 { /* likely */ }
2910 else
2911 {
2912 rc = VERR_OBJECT_DESTROYED;
2913 break;
2914 }
2915
2916 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2917 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2918
2919 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2920
2921 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2922 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2923
2924 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2925 { /* likely */ }
2926 else if (rc == VERR_INTERRUPTED)
2927 {
2928 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2929 return rc;
2930 }
2931 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2932 break;
2933 else
2934 {
2935 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2936 break;
2937 }
2938 }
2939
2940 /*
2941 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2942 */
2943 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2944 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2945
2946 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2947 return rc;
2948}
2949
2950
2951/**
2952 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2953 * buffers for logger @a idxLogger.
2954 *
2955 * @returns VBox status code.
2956 * @param pGVM The global (ring-0) VM structure.
2957 * @param idCpu The ID of the calling EMT.
2958 * @param idxLogger Which logger to wait on.
2959 * @thread EMT(idCpu)
2960 */
2961static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2962{
2963 /*
2964 * Check sanity. First we require EMT to be calling us.
2965 */
2966 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2967 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2968 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2969 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2970 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2971
2972 /*
2973 * Do the waiting.
2974 */
2975 int rc = VINF_SUCCESS;
2976 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2977 uint32_t cFlushing = pR0Log->cFlushing;
2978 while (cFlushing > 0)
2979 {
2980 pR0Log->fEmtWaiting = true;
2981 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2982
2983 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
2984
2985 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2986 pR0Log->fEmtWaiting = false;
2987 if (RT_SUCCESS(rc))
2988 {
2989 /* Read the new count and make sure it decreased before looping.  That
2990 way we can guarantee that we will wait no more than 5 min per buffer. */
2991 uint32_t const cPrevFlushing = cFlushing;
2992 cFlushing = pR0Log->cFlushing;
2993 if (cFlushing < cPrevFlushing)
2994 continue;
2995 rc = VERR_INTERNAL_ERROR_3;
2996 }
2997 break;
2998 }
2999 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3000 return rc;
3001}
3002
3003
3004/**
3005 * Inner worker for vmmR0LoggerFlushCommon.
3006 */
3007#ifndef VMM_R0_SWITCH_STACK
3008static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
3009#else
3010DECLASM(bool) vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush);
3011DECLASM(bool) StkBack_vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
3012#endif
3013{
3014 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3015 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3016
3017 /*
3018 * Figure out what we need to do and whether we can.
3019 */
3020 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
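    /* kJustSignal:        a spare buffer remains and signalling is safe in this context - just queue and signal.
     * kPrepAndSignal:     a spare buffer remains, but we must leave the HM context / re-enable preemption before signalling.
     * kPrepSignalAndWait: all the other buffers are already being flushed, so we must also wait for one to drain. */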
3021#if VMMLOGGER_BUFFER_COUNT >= 2
3022 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
3023 {
3024 if (RTSemEventIsSignalSafe())
3025 enmAction = kJustSignal;
3026 else if (VMMRZCallRing3IsEnabled(pGVCpu))
3027 enmAction = kPrepAndSignal;
3028 else
3029 {
3030 /** @todo This is a bit simplistic. We could introduce a FF to signal the
3031 * thread or similar. */
3032 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3033# if defined(RT_OS_LINUX)
3034 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
3035# endif
3036 pShared->cbDropped += cbToFlush;
3037 return true;
3038 }
3039 }
3040 else
3041#endif
3042 if (VMMRZCallRing3IsEnabled(pGVCpu))
3043 enmAction = kPrepSignalAndWait;
3044 else
3045 {
3046 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3047# if defined(RT_OS_LINUX)
3048 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
3049# endif
3050 pShared->cbDropped += cbToFlush;
3051 return true;
3052 }
3053
3054 /*
3055 * Prepare for blocking if necessary.
3056 */
3057 VMMR0EMTBLOCKCTX Ctx;
3058 if (enmAction != kJustSignal)
3059 {
3060 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
3061 if (RT_SUCCESS(rc))
3062 { /* likely */ }
3063 else
3064 {
3065 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3066 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3067 return false;
3068 }
3069 }
3070
3071 /*
3072 * Queue the flush job.
3073 */
3074 bool fFlushedBuffer;
3075 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3076 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3077 {
3078 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3079 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3080 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3081 if (idxNewTail != idxHead)
3082 {
3083 /* Queue it. */
3084 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3085 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3086 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3087 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3088 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3089
3090 /* Update the number of buffers currently being flushed. */
3091 uint32_t cFlushing = pR0Log->cFlushing;
3092 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3093 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3094
3095 /* We must wait if all buffers are currently being flushed. */
3096 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3097 pR0Log->fEmtWaiting = fEmtWaiting;
3098
3099 /* Stats. */
3100 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3101 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3102
3103 /* Signal the worker thread. */
3104 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3105 {
3106 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3107 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3108 }
3109 else
3110 {
3111 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3112 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3113 }
3114
3115 /*
3116 * Wait for a buffer to finish flushing.
3117 *
3118 * Note! Lazy bird is ignoring the status code here.  The result is
3119 * that we might end up with an extra event signalled, so the
3120 * next time we need to wait we won't actually wait and may end up
3121 * with some log corruption.  However, it's too much hassle right now for
3122 * a scenario which would most likely end the process rather
3123 * than causing log corruption.
3124 */
3125 if (fEmtWaiting)
3126 {
3127 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3128 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3129 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3130 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3131 }
3132
3133 /*
3134 * We always switch buffer if we have more than one.
3135 */
3136#if VMMLOGGER_BUFFER_COUNT == 1
3137 fFlushedBuffer = true;
3138#else
3139 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3140 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3141 fFlushedBuffer = false;
3142#endif
3143 }
3144 else
3145 {
3146 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3147 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3148 fFlushedBuffer = true;
3149 }
3150 }
3151 else
3152 {
3153 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3154 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3155 fFlushedBuffer = true;
3156 }
3157
3158 /*
3159 * Restore the HM context.
3160 */
3161 if (enmAction != kJustSignal)
3162 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3163
3164 return fFlushedBuffer;
3165}
3166
3167
3168/**
3169 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3170 */
3171static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3172{
3173 /*
3174 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3175 * (This code is a bit paranoid.)
3176 */
3177 if (RT_VALID_PTR(pLogger))
3178 {
3179 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3180 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3181 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3182 {
3183 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3184 if ( RT_VALID_PTR(pGVCpu)
3185 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
3186 {
3187 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3188 PGVM const pGVM = pGVCpu->pGVM;
3189 if ( hNativeSelf == pGVCpu->hEMT
3190 && RT_VALID_PTR(pGVM))
3191 {
3192 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3193 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3194 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3195 {
3196 /*
3197 * Make sure we don't recurse forever here should something in the
3198 * following code trigger logging or an assertion. Do the rest in
3199 * an inner worker to avoid hitting the right margin too hard.
3200 */
3201 if (!pR0Log->fFlushing)
3202 {
3203 pR0Log->fFlushing = true;
3204 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3205 pR0Log->fFlushing = false;
3206 return fFlushed;
3207 }
3208
3209 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3210 }
3211 else
3212 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3213 }
3214 else
3215 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3216 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3217 }
3218 else
3219 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3220 }
3221 else
3222 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3223 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3224 }
3225 else
3226 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3227 return true;
3228}
3229
3230
3231/**
3232 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3233 */
3234static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3235{
3236 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3237}
3238
3239
3240/**
3241 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3242 */
3243static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3244{
3245#ifdef LOG_ENABLED
3246 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3247#else
3248 RT_NOREF(pLogger, pBufDesc);
3249 return true;
3250#endif
3251}
3252
3253
3254/*
3255 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3256 */
3257DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3258{
3259#ifdef LOG_ENABLED
3260 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3261 if (pGVCpu)
3262 {
3263 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3264 if (RT_VALID_PTR(pLogger))
3265 {
3266 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3267 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3268 {
3269 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3270 {
3271 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3272 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3273 return NULL;
3274 }
3275
3276 /*
3277 * When we're flushing we _must_ return NULL here to suppress any
3278 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3279 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3280 * which will reset the buffer content before we even get to queue
3281 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3282 * is enabled.)
3283 */
3284 return NULL;
3285 }
3286 }
3287 }
3288#endif
3289 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3290}
3291
3292
3293/*
3294 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3295 */
3296DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3297{
3298 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3299 if (pGVCpu)
3300 {
3301 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3302 if (RT_VALID_PTR(pLogger))
3303 {
3304 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3305 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3306 {
3307 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3308 {
3309 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3310 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3311 return NULL;
3312 }
3313 }
3314 }
3315 }
3316 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3317}
3318
3319
3320/**
3321 * Helper for vmmR0InitLoggerSet.
3322 */
3323static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3324 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3325{
3326 /*
3327 * Create and configure the logger.
3328 */
3329 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3330 {
3331 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3332 pR0Log->aBufDescs[i].uReserved = 0;
3333 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3334 pR0Log->aBufDescs[i].offBuf = 0;
3335 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3336 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3337
3338 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3339 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3340 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3341 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3342 pShared->aBufs[i].AuxDesc.offBuf = 0;
3343 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3344 }
3345 pShared->cbBuf = cbBuf;
3346
3347 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
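    /* Note: RTLOGDEST_DUMMY - no direct output; the buffers are handed to ring-3 via the flush callback installed below. */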
3348 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3349 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3350 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3351 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3352 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3353 if (RT_SUCCESS(rc))
3354 {
3355 PRTLOGGER pLogger = pR0Log->pLogger;
3356 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3357 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3358 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3359
3360 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3361 if (RT_SUCCESS(rc))
3362 {
3363 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3364
3365 /*
3366 * Create the event sem the EMT waits on while flushing is happening.
3367 */
3368 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3369 if (RT_SUCCESS(rc))
3370 return VINF_SUCCESS;
3371 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3372 }
3373 RTLogDestroy(pLogger);
3374 }
3375 pR0Log->pLogger = NULL;
3376 return rc;
3377}
3378
3379
3380/**
3381 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3382 */
3383static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3384{
3385 RTLogDestroy(pR0Log->pLogger);
3386 pR0Log->pLogger = NULL;
3387
3388 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3389 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3390
3391 RTSemEventDestroy(pR0Log->hEventFlushWait);
3392 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3393}
3394
3395
3396/**
3397 * Initializes one type of logger (release or debug) for each EMT.
3398 */
3399static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3400{
3401 /* Allocate buffers first. */
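    /* Layout: one contiguous allocation holding cCpus blocks of VMMLOGGER_BUFFER_COUNT buffers of cbBuf bytes each,
       which is also mapped read-only into the ring-3 process. */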
3402 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3403 if (RT_SUCCESS(rc))
3404 {
3405 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3406 if (RT_SUCCESS(rc))
3407 {
3408 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3409 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3410
3411 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3412 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3413
3414 /* Initialize the per-CPU loggers. */
3415 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3416 {
3417 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3418 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3419 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3420 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3421 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3422 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3423 if (RT_FAILURE(rc))
3424 {
3425 vmmR0TermLoggerOne(pR0Log, pShared);
3426 while (i-- > 0)
3427 {
3428 pGVCpu = &pGVM->aCpus[i];
3429 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3430 }
3431 break;
3432 }
3433 }
3434 if (RT_SUCCESS(rc))
3435 return VINF_SUCCESS;
3436
3437 /* Bail out. */
3438 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3439 *phMapObj = NIL_RTR0MEMOBJ;
3440 }
3441 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3442 *phMemObj = NIL_RTR0MEMOBJ;
3443 }
3444 return rc;
3445}
3446
3447
3448/**
3449 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3450 *
3451 * @returns VBox status code.
3452 * @param pGVM The global (ring-0) VM structure.
3453 */
3454static int vmmR0InitLoggers(PGVM pGVM)
3455{
3456 /*
3457 * Invalidate the ring buffer (not really necessary).
3458 */
3459 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3460 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3461
3462 /*
3463 * Create the spinlock and flusher event semaphore.
3464 */
3465 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3466 if (RT_SUCCESS(rc))
3467 {
3468 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3469 if (RT_SUCCESS(rc))
3470 {
3471 /*
3472 * Create the ring-0 release loggers.
3473 */
3474 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3475 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3476#ifdef LOG_ENABLED
3477 if (RT_SUCCESS(rc))
3478 {
3479 /*
3480 * Create debug loggers.
3481 */
3482 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3483 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3484 }
3485#endif
3486 }
3487 }
3488 return rc;
3489}
3490
3491
3492/**
3493 * Worker for VMMR0CleanupVM that cleans up all the logging related stuff.
3494 *
3495 * @param pGVM The global (ring-0) VM structure.
3496 */
3497static void vmmR0CleanupLoggers(PGVM pGVM)
3498{
3499 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3500 {
3501 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3502 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3503 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3504 }
3505
3506 /*
3507 * Free logger buffer memory.
3508 */
3509 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3510 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3511 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3512 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3513
3514 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3515 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3516 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3517 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3518
3519 /*
3520 * Free log flusher related stuff.
3521 */
3522 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3523 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3524 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3525 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3526}
3527
3528
3529/*********************************************************************************************************************************
3530* Assertions *
3531*********************************************************************************************************************************/
3532
3533/**
3534 * Installs a notification callback for ring-0 assertions.
3535 *
3536 * @param pVCpu The cross context virtual CPU structure.
3537 * @param pfnCallback Pointer to the callback.
3538 * @param pvUser The user argument.
3539 *
3540 * @return VBox status code.
3541 */
3542VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
3543{
3544 AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
3545 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
3546
3547 if (!pVCpu->vmm.s.pfnRing0AssertCallback)
3548 {
3549 pVCpu->vmm.s.pfnRing0AssertCallback = pfnCallback;
3550 pVCpu->vmm.s.pvRing0AssertCallbackUser = pvUser;
3551 return VINF_SUCCESS;
3552 }
3553 return VERR_ALREADY_EXISTS;
3554}
3555
3556
3557/**
3558 * Removes the ring-0 callback.
3559 *
3560 * @param pVCpu The cross context virtual CPU structure.
3561 */
3562VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
3563{
3564 pVCpu->vmm.s.pfnRing0AssertCallback = NULL;
3565 pVCpu->vmm.s.pvRing0AssertCallbackUser = NULL;
3566}
3567
3568
3569/**
3570 * Checks whether there is a ring-0 callback notification active.
3571 *
3572 * @param pVCpu The cross context virtual CPU structure.
3573 * @returns true if the notification is active, false otherwise.
3574 */
3575VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
3576{
3577 return pVCpu->vmm.s.pfnRing0AssertCallback != NULL;
3578}
3579
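/*
 * Usage sketch (editorial note, not part of the original source): a ring-0
 * component that must not leave state half-torn-down when an assertion
 * longjmps back to ring-3 could bracket the risky section with the
 * notification API above.  The callback name, its parameter types and the
 * helpers it calls are hypothetical; only the VMMR0Assertion* calls and the
 * (pVCpu, pvUser) invocation visible in RTAssertShouldPanic below are taken
 * from this file.
 *
 *      static DECLCALLBACK(void) myR0AssertCleanup(PVMCPUCC pVCpu, RTR0PTR pvUser)
 *      {
 *          PMYSTATE pState = (PMYSTATE)pvUser;    // hypothetical caller state
 *          myStateRestoreHost(pState);            // undo whatever must not be left behind
 *          RT_NOREF(pVCpu);
 *      }
 *
 *      rc = VMMR0AssertionSetNotification(pVCpu, myR0AssertCleanup, (RTR0PTR)pState);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = doTheRiskyRing0Work(pState);      // hypothetical
 *          VMMR0AssertionRemoveNotification(pVCpu);
 *      }
 *
 * If an assertion fires while the callback is installed, RTAssertShouldPanic
 * below invokes it just before longjmping to ring-3 with
 * VERR_VMM_RING0_ASSERTION.
 */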
3580
3581/*
3582 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3583 *
3584 * @returns true if the breakpoint should be hit, false if it should be ignored.
3585 */
3586DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3587{
3588#if 0
3589 return true;
3590#else
3591 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3592 if (pVM)
3593 {
3594 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3595
3596 if (pVCpu)
3597 {
3598# ifdef RT_ARCH_X86
3599 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
3600 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3601# else
3602 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
3603 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
3604# endif
3605 {
3606 if (pVCpu->vmm.s.pfnRing0AssertCallback)
3607 pVCpu->vmm.s.pfnRing0AssertCallback(pVCpu, pVCpu->vmm.s.pvRing0AssertCallbackUser);
3608 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VERR_VMM_RING0_ASSERTION);
3609 return RT_FAILURE_NP(rc);
3610 }
3611 }
3612 }
3613# ifdef RT_OS_LINUX
3614 return true;
3615# else
3616 return false;
3617# endif
3618#endif
3619}
3620
3621
3622/*
3623 * Override this so we can push it up to ring-3.
3624 */
3625DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3626{
3627 /*
3628 * To host kernel log/whatever.
3629 */
3630 SUPR0Printf("!!R0-Assertion Failed!!\n"
3631 "Expression: %s\n"
3632 "Location : %s(%d) %s\n",
3633 pszExpr, pszFile, uLine, pszFunction);
3634
3635 /*
3636 * To the log.
3637 */
3638 LogAlways(("\n!!R0-Assertion Failed!!\n"
3639 "Expression: %s\n"
3640 "Location : %s(%d) %s\n",
3641 pszExpr, pszFile, uLine, pszFunction));
3642
3643 /*
3644 * To the global VMM buffer.
3645 */
3646 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3647 if (pVM)
3648 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3649 "\n!!R0-Assertion Failed!!\n"
3650 "Expression: %.*s\n"
3651 "Location : %s(%d) %s\n",
3652 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3653 pszFile, uLine, pszFunction);
3654
3655 /*
3656 * Continue the normal way.
3657 */
3658 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3659}
3660
3661
3662/**
3663 * Callback for RTLogFormatV which writes to the ring-3 log port.
3664 * See PFNLOGOUTPUT() for details.
3665 */
3666static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3667{
3668 for (size_t i = 0; i < cbChars; i++)
3669 {
3670 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3671 }
3672
3673 NOREF(pv);
3674 return cbChars;
3675}
3676
3677
3678/*
3679 * Override this so we can push it up to ring-3.
3680 */
3681DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3682{
3683 va_list vaCopy;
3684
3685 /*
3686 * Push the message to the loggers.
3687 */
3688 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3689 if (pLog)
3690 {
3691 va_copy(vaCopy, va);
3692 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3693 va_end(vaCopy);
3694 }
3695 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3696 if (pLog)
3697 {
3698 va_copy(vaCopy, va);
3699 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3700 va_end(vaCopy);
3701 }
3702
3703 /*
3704 * Push it to the global VMM buffer.
3705 */
3706 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3707 if (pVM)
3708 {
3709 va_copy(vaCopy, va);
3710 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3711 va_end(vaCopy);
3712 }
3713
3714 /*
3715 * Continue the normal way.
3716 */
3717 RTAssertMsg2V(pszFormat, va);
3718}
3719