VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@92489

Last change on this file since 92489 was 92411, checked in by vboxsync, 3 years ago

VMM: Two more VMMR0 calls that must not validate idCpu as a valid EMT. bugref:10093 bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 133.7 KB
1/* $Id: VMMR0.cpp 92411 2021-11-13 14:48:51Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mem.h>
58#include <iprt/memobj.h>
59#include <iprt/mp.h>
60#include <iprt/once.h>
61#include <iprt/semaphore.h>
62#include <iprt/spinlock.h>
63#include <iprt/stdarg.h>
64#include <iprt/string.h>
65#include <iprt/thread.h>
66#include <iprt/timer.h>
67#include <iprt/time.h>
68
69#include "dtrace/VBoxVMM.h"
70
71
72#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
73# pragma intrinsic(_AddressOfReturnAddress)
74#endif
75
76#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
77# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84RT_C_DECLS_BEGIN
85#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
86extern uint64_t __udivdi3(uint64_t, uint64_t);
87extern uint64_t __umoddi3(uint64_t, uint64_t);
88#endif
89RT_C_DECLS_END
90static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
91static int vmmR0LogFlusher(PGVM pGVM);
92static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
93static int vmmR0InitLoggers(PGVM pGVM);
94static void vmmR0CleanupLoggers(PGVM pGVM);
95
96
97/*********************************************************************************************************************************
98* Global Variables *
99*********************************************************************************************************************************/
100/** Drag in necessary library bits.
101 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
102struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
103{
104 { (PFNRT)RTCrc32 },
105 { (PFNRT)RTOnce },
106#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
107 { (PFNRT)__udivdi3 },
108 { (PFNRT)__umoddi3 },
109#endif
110 { NULL }
111};
112
113#ifdef RT_OS_SOLARIS
114/* Dependency information for the native solaris loader. */
115extern "C" { char _depends_on[] = "vboxdrv"; }
116#endif
117
118
119/**
120 * Initialize the module.
121 * This is called when we're first loaded.
122 *
123 * @returns 0 on success.
124 * @returns VBox status on failure.
125 * @param hMod Image handle for use in APIs.
126 */
127DECLEXPORT(int) ModuleInit(void *hMod)
128{
129#ifdef VBOX_WITH_DTRACE_R0
130 /*
131 * The first thing to do is register the static tracepoints.
132 * (Deregistration is automatic.)
133 */
134 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
135 if (RT_FAILURE(rc2))
136 return rc2;
137#endif
138 LogFlow(("ModuleInit:\n"));
139
140#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
141 /*
142 * Display the CMOS debug code.
143 */
144 ASMOutU8(0x72, 0x03);
145 uint8_t bDebugCode = ASMInU8(0x73);
146 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
147 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
148#endif
149
150 /*
151 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
152 */
153 int rc = vmmInitFormatTypes();
154 if (RT_SUCCESS(rc))
155 {
156 rc = GVMMR0Init();
157 if (RT_SUCCESS(rc))
158 {
159 rc = GMMR0Init();
160 if (RT_SUCCESS(rc))
161 {
162 rc = HMR0Init();
163 if (RT_SUCCESS(rc))
164 {
165 PDMR0Init(hMod);
166
167 rc = PGMRegisterStringFormatTypes();
168 if (RT_SUCCESS(rc))
169 {
170 rc = IntNetR0Init();
171 if (RT_SUCCESS(rc))
172 {
173#ifdef VBOX_WITH_PCI_PASSTHROUGH
174 rc = PciRawR0Init();
175#endif
176 if (RT_SUCCESS(rc))
177 {
178 rc = CPUMR0ModuleInit();
179 if (RT_SUCCESS(rc))
180 {
181#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
182 rc = vmmR0TripleFaultHackInit();
183 if (RT_SUCCESS(rc))
184#endif
185 {
186#ifdef VBOX_WITH_NEM_R0
187 rc = NEMR0Init();
188 if (RT_SUCCESS(rc))
189#endif
190 {
191 LogFlow(("ModuleInit: returns success\n"));
192 return VINF_SUCCESS;
193 }
194 }
195
196 /*
197 * Bail out.
198 */
199#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
200 vmmR0TripleFaultHackTerm();
201#endif
202 }
203 else
204 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
205#ifdef VBOX_WITH_PCI_PASSTHROUGH
206 PciRawR0Term();
207#endif
208 }
209 else
210 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
211 IntNetR0Term();
212 }
213 else
214 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
215 PGMDeregisterStringFormatTypes();
216 }
217 else
218 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
219 HMR0Term();
220 }
221 else
222 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
223 GMMR0Term();
224 }
225 else
226 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
227 GVMMR0Term();
228 }
229 else
230 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
231 vmmTermFormatTypes();
232 }
233 else
234 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
235
236 LogFlow(("ModuleInit: failed %Rrc\n", rc));
237 return rc;
238}
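/*
 * A rough sketch of the initialization order used above (orientation only,
 * simplified; bracketed items depend on build options).  ModuleInit() brings
 * the subsystems up in this order and unwinds them in reverse on failure,
 * while ModuleTerm() below tears them down again:
 *
 *   vmmInitFormatTypes() -> GVMMR0Init() -> GMMR0Init() -> HMR0Init()
 *     -> PDMR0Init() -> PGMRegisterStringFormatTypes() -> IntNetR0Init()
 *     -> [PciRawR0Init()] -> CPUMR0ModuleInit() -> [NEMR0Init()]
 */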
239
240
241/**
242 * Terminate the module.
243 * This is called when we're finally unloaded.
244 *
245 * @param hMod Image handle for use in APIs.
246 */
247DECLEXPORT(void) ModuleTerm(void *hMod)
248{
249 NOREF(hMod);
250 LogFlow(("ModuleTerm:\n"));
251
252 /*
253 * Terminate the CPUM module (Local APIC cleanup).
254 */
255 CPUMR0ModuleTerm();
256
257 /*
258 * Terminate the internal network service.
259 */
260 IntNetR0Term();
261
262 /*
263 * PGM (Darwin), HM and PciRaw global cleanup.
264 */
265#ifdef VBOX_WITH_PCI_PASSTHROUGH
266 PciRawR0Term();
267#endif
268 PGMDeregisterStringFormatTypes();
269 HMR0Term();
270#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
271 vmmR0TripleFaultHackTerm();
272#endif
273#ifdef VBOX_WITH_NEM_R0
274 NEMR0Term();
275#endif
276
277 /*
278 * Destroy the GMM and GVMM instances.
279 */
280 GMMR0Term();
281 GVMMR0Term();
282
283 vmmTermFormatTypes();
284
285 LogFlow(("ModuleTerm: returns\n"));
286}
287
288
289/**
290 * Initializes VMM specific members when the GVM structure is created,
291 * allocating loggers and stuff.
292 *
293 * The loggers are allocated here so that we can update their settings before
294 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
295 *
296 * @returns VBox status code.
297 * @param pGVM The global (ring-0) VM structure.
298 */
299VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
300{
301 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
302
303 /*
304 * Initialize all members first.
305 */
306 pGVM->vmmr0.s.fCalledInitVm = false;
307 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
308 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
309 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
310 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
311 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
312 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
313 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
314 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
315 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
316 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
317
318 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
319 {
320 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
321 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
322 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
323 pGVCpu->vmmr0.s.pPreemptState = NULL;
324 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
325 pGVCpu->vmmr0.s.AssertJmpBuf.pMirrorBuf = &pGVCpu->vmm.s.AssertJmpBuf;
326 pGVCpu->vmmr0.s.AssertJmpBuf.pvStackBuf = &pGVCpu->vmm.s.abAssertStack[0];
327 pGVCpu->vmmr0.s.AssertJmpBuf.cbStackBuf = sizeof(pGVCpu->vmm.s.abAssertStack);
328
329 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
330 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
331 }
332
333 /*
334 * Create the loggers.
335 */
336 return vmmR0InitLoggers(pGVM);
337}
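/*
 * Sketch of the ring-3 ordering this relies on (assumed typical flow,
 * simplified): the loggers are created here at GVM creation time, ring-3
 * then pushes its logger settings via VMMR0_DO_VMMR0_UPDATE_LOGGERS, and
 * only afterwards issues VMMR0_DO_VMMR0_INIT, so vmmR0InitVM() already runs
 * with the correct ring-0 log configuration.
 */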
338
339
340/**
341 * Initiates the R0 driver for a particular VM instance.
342 *
343 * @returns VBox status code.
344 *
345 * @param pGVM The global (ring-0) VM structure.
346 * @param uSvnRev The SVN revision of the ring-3 part.
347 * @param uBuildType Build type indicator.
348 * @thread EMT(0)
349 */
350static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
351{
352 /*
353 * Match the SVN revisions and build type.
354 */
355 if (uSvnRev != VMMGetSvnRev())
356 {
357 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
358 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
359 return VERR_VMM_R0_VERSION_MISMATCH;
360 }
361 if (uBuildType != vmmGetBuildType())
362 {
363 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
364 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
365 return VERR_VMM_R0_VERSION_MISMATCH;
366 }
367
368 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
369 if (RT_FAILURE(rc))
370 return rc;
371
372 /* Don't allow this to be called more than once. */
373 if (!pGVM->vmmr0.s.fCalledInitVm)
374 pGVM->vmmr0.s.fCalledInitVm = true;
375 else
376 return VERR_ALREADY_INITIALIZED;
377
378#ifdef LOG_ENABLED
379
380 /*
381 * Register the EMT R0 logger instance for VCPU 0.
382 */
383 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
384 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
385 {
386# if 0 /* testing of the logger. */
387 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
388 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
389 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
390 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
391
392 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
393 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
394 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
395 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
396
397 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
398 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
399 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
400 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
401
402 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
403 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
404 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
405 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
406 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
407 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
408
409 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
410 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
411
412 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
413 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
414 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
415# endif
416# ifdef VBOX_WITH_R0_LOGGING
417 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
418 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
419 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
420# endif
421 }
422#endif /* LOG_ENABLED */
423
424 /*
425 * Check if the host supports high resolution timers or not.
426 */
427 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
428 && !RTTimerCanDoHighResolution())
429 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
430
431 /*
432 * Initialize the per VM data for GVMM and GMM.
433 */
434 rc = GVMMR0InitVM(pGVM);
435 if (RT_SUCCESS(rc))
436 {
437 /*
438 * Init HM, CPUM and PGM (Darwin only).
439 */
440 rc = HMR0InitVM(pGVM);
441 if (RT_SUCCESS(rc))
442 {
443 rc = CPUMR0InitVM(pGVM);
444 if (RT_SUCCESS(rc))
445 {
446 rc = PGMR0InitVM(pGVM);
447 if (RT_SUCCESS(rc))
448 {
449 rc = EMR0InitVM(pGVM);
450 if (RT_SUCCESS(rc))
451 {
452#ifdef VBOX_WITH_PCI_PASSTHROUGH
453 rc = PciRawR0InitVM(pGVM);
454#endif
455 if (RT_SUCCESS(rc))
456 {
457 rc = GIMR0InitVM(pGVM);
458 if (RT_SUCCESS(rc))
459 {
460 GVMMR0DoneInitVM(pGVM);
461
462 /*
463 * Collect a bit of info for the VM release log.
464 */
465 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
466 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
467 return rc;
468
469 /* bail out*/
470 //GIMR0TermVM(pGVM);
471 }
472#ifdef VBOX_WITH_PCI_PASSTHROUGH
473 PciRawR0TermVM(pGVM);
474#endif
475 }
476 }
477 }
478 }
479 HMR0TermVM(pGVM);
480 }
481 }
482
483 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
484 return rc;
485}
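/*
 * How this is reached (sketch, based on the request dispatcher later in this
 * file): ring-3 issues VMMR0_DO_VMMR0_INIT with the SVN revision in the low
 * dword and the build type in the high dword of u64Arg, i.e. roughly:
 *
 *   rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
 */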
486
487
488/**
489 * Does EMT specific VM initialization.
490 *
491 * @returns VBox status code.
492 * @param pGVM The ring-0 VM structure.
493 * @param idCpu The EMT that's calling.
494 */
495static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
496{
497 /* Paranoia (caller checked these already). */
498 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
499 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
500
501#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
502 /*
503 * Registration of ring 0 loggers.
504 */
505 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
506 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
507 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
508 {
509 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
510 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
511 }
512#endif
513
514 return VINF_SUCCESS;
515}
516
517
518
519/**
520 * Terminates the R0 bits for a particular VM instance.
521 *
522 * This is normally called by ring-3 as part of the VM termination process, but
523 * may alternatively be called during the support driver session cleanup when
524 * the VM object is destroyed (see GVMM).
525 *
526 * @returns VBox status code.
527 *
528 * @param pGVM The global (ring-0) VM structure.
529 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
530 * thread.
531 * @thread EMT(0) or session clean up thread.
532 */
533VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
534{
535 /*
536 * Check EMT(0) claim if we're called from userland.
537 */
538 if (idCpu != NIL_VMCPUID)
539 {
540 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
541 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
542 if (RT_FAILURE(rc))
543 return rc;
544 }
545
546#ifdef VBOX_WITH_PCI_PASSTHROUGH
547 PciRawR0TermVM(pGVM);
548#endif
549
550 /*
551 * Tell GVMM what we're up to and check that we only do this once.
552 */
553 if (GVMMR0DoingTermVM(pGVM))
554 {
555 GIMR0TermVM(pGVM);
556
557 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
558 * here to make sure we don't leak any shared pages if we crash... */
559 HMR0TermVM(pGVM);
560 }
561
562 /*
563 * Deregister the logger for this EMT.
564 */
565 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
566
567 /*
568 * Start log flusher thread termination.
569 */
570 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
571 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
572 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
573
574 return VINF_SUCCESS;
575}
576
577
578/**
579 * This is called at the end of gvmmR0CleanupVM().
580 *
581 * @param pGVM The global (ring-0) VM structure.
582 */
583VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
584{
585 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
586 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
587 {
588 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
589
590 /** @todo Can we busy wait here for all thread-context hooks to be
591 * deregistered before releasing (destroying) it? Only until we find a
592 * solution for not deregistering hooks every time we're leaving HMR0
593 * context. */
594 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
595 }
596
597 vmmR0CleanupLoggers(pGVM);
598}
599
600
601/**
602 * An interrupt or unhalt force flag is set, deal with it.
603 *
604 * @returns VINF_SUCCESS (or VINF_EM_HALT).
605 * @param pVCpu The cross context virtual CPU structure.
606 * @param uMWait Result from EMMonitorWaitIsActive().
607 * @param enmInterruptibility Guest CPU interruptibility level.
608 */
609static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
610{
611 Assert(!TRPMHasTrap(pVCpu));
612 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
613 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
614
615 /*
616 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
617 */
618 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
619 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
620 {
621 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
622 {
623 uint8_t u8Interrupt = 0;
624 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
625 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
626 if (RT_SUCCESS(rc))
627 {
628 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
629
630 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
631 AssertRCSuccess(rc);
632 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
633 return rc;
634 }
635 }
636 }
637 /*
638 * SMI is not implemented yet, at least not here.
639 */
640 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
641 {
642 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
643 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
644 return VINF_EM_HALT;
645 }
646 /*
647 * NMI.
648 */
649 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
650 {
651 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
652 {
653 /** @todo later. */
654 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
655 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
656 return VINF_EM_HALT;
657 }
658 }
659 /*
660 * Nested-guest virtual interrupt.
661 */
662 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
663 {
664 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
665 {
666 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
667 * here before injecting the virtual interrupt. See emR3ForcedActions
668 * for details. */
669 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
670 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
671 return VINF_EM_HALT;
672 }
673 }
674
675 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
676 {
677 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
678 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
679 return VINF_SUCCESS;
680 }
681 if (uMWait > 1)
682 {
683 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
684 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
685 return VINF_SUCCESS;
686 }
687
688 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
689 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
690 return VINF_EM_HALT;
691}
692
693
694/**
695 * This does one round of vmR3HaltGlobal1Halt().
696 *
697 * The rationale here is that we'll reduce latency in interrupt situations if we
698 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
699 * MWAIT), but do one round of blocking here instead and hope the interrupt is
700 * raised in the meanwhile.
701 *
702 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
703 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
704 * ring-0 call (unless we're too close to a timer event). When the interrupt
705 * wakes us up, we'll return from ring-0 and EM will by instinct do a
706 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
707 * back to VMMR0EntryFast().
708 *
709 * @returns VINF_SUCCESS or VINF_EM_HALT.
710 * @param pGVM The ring-0 VM structure.
711 * @param pGVCpu The ring-0 virtual CPU structure.
712 *
713 * @todo r=bird: All the blocking/waiting and EMT management should move out of
714 * the VM module, probably to VMM. Then this would be more weird wrt
715 * parameters and statistics.
716 */
717static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
718{
719 /*
720 * Do spin stat historization.
721 */
722 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
723 { /* likely */ }
724 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
725 {
726 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
727 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
728 }
729 else
730 {
731 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
732 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
733 }
734
735 /*
736 * Flags that makes us go to ring-3.
737 */
738 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
739 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
740 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
741 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
742 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
743 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
744 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
745 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
746
747 /*
748 * Check preconditions.
749 */
750 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
751 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
752 if ( pGVCpu->vmm.s.fMayHaltInRing0
753 && !TRPMHasTrap(pGVCpu)
754 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
755 || uMWait > 1))
756 {
757 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
758 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
759 {
760 /*
761 * Interrupts pending already?
762 */
763 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
764 APICUpdatePendingInterrupts(pGVCpu);
765
766 /*
767 * Flags that wake up from the halted state.
768 */
769 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
770 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
771
772 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
773 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
774 ASMNopPause();
775
776 /*
777 * Check out how long till the next timer event.
778 */
779 uint64_t u64Delta;
780 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
781
782 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
783 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
784 {
785 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
786 APICUpdatePendingInterrupts(pGVCpu);
787
788 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
789 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
790
791 /*
792 * Wait if there is enough time to the next timer event.
793 */
794 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
795 {
796 /* If there are few other CPU cores around, we will procrastinate a
797 little before going to sleep, hoping for some device raising an
798 interrupt or similar. Though, the best thing here would be to
799 dynamically adjust the spin count according to its usefulness or
800 something... */
801 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
802 && RTMpGetOnlineCount() >= 4)
803 {
804 /** @todo Figure out how we can skip this if it hasn't helped recently...
805 * @bugref{9172#c12} */
806 uint32_t cSpinLoops = 42;
807 while (cSpinLoops-- > 0)
808 {
809 ASMNopPause();
810 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
811 APICUpdatePendingInterrupts(pGVCpu);
812 ASMNopPause();
813 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
814 {
815 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
816 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
817 return VINF_EM_HALT;
818 }
819 ASMNopPause();
820 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
821 {
822 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
823 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
824 return VINF_EM_HALT;
825 }
826 ASMNopPause();
827 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
828 {
829 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
830 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
831 }
832 ASMNopPause();
833 }
834 }
835
836 /*
837 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
838 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
839 * After changing the state we must recheck the force flags of course.
840 */
841 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
842 {
843 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
844 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
845 {
846 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
847 APICUpdatePendingInterrupts(pGVCpu);
848
849 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
850 {
851 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
852 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
853 }
854
855 /* Okay, block! */
856 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
857 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
858 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
859 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
860 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
861
862 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
863 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
864 if ( rc == VINF_SUCCESS
865 || rc == VERR_INTERRUPTED)
866 {
867 /* Keep some stats like ring-3 does. */
868 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
869 if (cNsOverslept > 50000)
870 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
871 else if (cNsOverslept < -50000)
872 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
873 else
874 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
875
876 /*
877 * Recheck whether we can resume execution or have to go to ring-3.
878 */
879 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
880 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
881 {
882 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
883 APICUpdatePendingInterrupts(pGVCpu);
884 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
885 {
886 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
887 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
888 }
889 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
890 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
891 }
892 else
893 {
894 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
895 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
896 }
897 }
898 else
899 {
900 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
901 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
902 }
903 }
904 else
905 {
906 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
907 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
908 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
909 }
910 }
911 else
912 {
913 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
914 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
915 }
916 }
917 else
918 {
919 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
920 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
921 }
922 }
923 else
924 {
925 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
926 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
927 }
928 }
929 else
930 {
931 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
932 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
933 }
934 }
935 else
936 {
937 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
938 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
939 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
940 }
941
942 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
943 return VINF_EM_HALT;
944}
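/*
 * Simplified outline of the decision flow in vmmR0DoHalt() above (for
 * orientation only; the real code rechecks the force flags at each step):
 *
 *   if (!fMayHaltInRing0 || trap pending || interruptibility too strict)
 *       return VINF_EM_HALT;                    // handle the halt in ring-3
 *   if (VM / VMCPU force flags pending)
 *       return VINF_EM_HALT;
 *   if (interrupt-type force flag pending)
 *       return vmmR0DoHaltInterrupt(...);       // resume guest execution
 *   if (time to next timer event < cNsSpinBlockThreshold)
 *       return VINF_EM_HALT;                    // too close, let ring-3 do it
 *   spin briefly (when enough CPUs are online), then block in
 *   GVMMR0SchedHalt() and recheck; either resume or fall back to ring-3.
 */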
945
946
947/**
948 * VMM ring-0 thread-context callback.
949 *
950 * This does common HM state updating and calls the HM-specific thread-context
951 * callback.
952 *
953 * This is used together with RTThreadCtxHookCreate() on platforms which
954 * support it, and directly from VMMR0EmtPrepareForBlocking() and
955 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
956 *
957 * @param enmEvent The thread-context event.
958 * @param pvUser Opaque pointer to the VMCPU.
959 *
960 * @thread EMT(pvUser)
961 */
962static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
963{
964 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
965
966 switch (enmEvent)
967 {
968 case RTTHREADCTXEVENT_IN:
969 {
970 /*
971 * Linux may call us with preemption enabled (really!) but technically we
972 * cannot get preempted here, otherwise we end up in an infinite recursion
973 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
974 * ad infinitum). Let's just disable preemption for now...
975 */
976 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
977 * preemption after doing the callout (one or two functions up the
978 * call chain). */
979 /** @todo r=ramshankar: See @bugref{5313#c30}. */
980 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
981 RTThreadPreemptDisable(&ParanoidPreemptState);
982
983 /* We need to update the VCPU <-> host CPU mapping. */
984 RTCPUID idHostCpu;
985 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
986 pVCpu->iHostCpuSet = iHostCpuSet;
987 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
988
989 /* In the very unlikely event that the GIP delta for the CPU we're
990 rescheduled on needs calculating, try to force a return to ring-3.
991 We unfortunately cannot do the measurements right here. */
992 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
993 { /* likely */ }
994 else
995 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
996
997 /* Invoke the HM-specific thread-context callback. */
998 HMR0ThreadCtxCallback(enmEvent, pvUser);
999
1000 /* Restore preemption. */
1001 RTThreadPreemptRestore(&ParanoidPreemptState);
1002 break;
1003 }
1004
1005 case RTTHREADCTXEVENT_OUT:
1006 {
1007 /* Invoke the HM-specific thread-context callback. */
1008 HMR0ThreadCtxCallback(enmEvent, pvUser);
1009
1010 /*
1011 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1012 * have the same host CPU associated with it.
1013 */
1014 pVCpu->iHostCpuSet = UINT32_MAX;
1015 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1016 break;
1017 }
1018
1019 default:
1020 /* Invoke the HM-specific thread-context callback. */
1021 HMR0ThreadCtxCallback(enmEvent, pvUser);
1022 break;
1023 }
1024}
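/*
 * Hook life cycle as used in this file (sketch): the hook is created per EMT
 * by VMMR0ThreadCtxHookCreateForEmt(), enabled in VMMR0EntryFast() right
 * before HMR0Enter(), disabled again on the way back to ring-3 (or by HM via
 * VMMR0ThreadCtxHookDisable()), and finally destroyed from VMMR0CleanupVM()
 * through VMMR0ThreadCtxHookDestroyForEmt().
 */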
1025
1026
1027/**
1028 * Creates thread switching hook for the current EMT thread.
1029 *
1030 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1031 * platform does not implement switcher hooks, no hooks will be created and the
1032 * member set to NIL_RTTHREADCTXHOOK.
1033 *
1034 * @returns VBox status code.
1035 * @param pVCpu The cross context virtual CPU structure.
1036 * @thread EMT(pVCpu)
1037 */
1038VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1039{
1040 VMCPU_ASSERT_EMT(pVCpu);
1041 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1042
1043#if 1 /* To disable this stuff change to zero. */
1044 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1045 if (RT_SUCCESS(rc))
1046 {
1047 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1048 return rc;
1049 }
1050#else
1051 RT_NOREF(vmmR0ThreadCtxCallback);
1052 int rc = VERR_NOT_SUPPORTED;
1053#endif
1054
1055 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1056 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1057 if (rc == VERR_NOT_SUPPORTED)
1058 return VINF_SUCCESS;
1059
1060 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1061 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1062}
1063
1064
1065/**
1066 * Destroys the thread switching hook for the specified VCPU.
1067 *
1068 * @param pVCpu The cross context virtual CPU structure.
1069 * @remarks Can be called from any thread.
1070 */
1071VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1072{
1073 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1074 AssertRC(rc);
1075 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1076}
1077
1078
1079/**
1080 * Disables the thread switching hook for this VCPU (if we got one).
1081 *
1082 * @param pVCpu The cross context virtual CPU structure.
1083 * @thread EMT(pVCpu)
1084 *
1085 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1086 * this call. This means you have to be careful with what you do!
1087 */
1088VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1089{
1090 /*
1091 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1092 * @bugref{7726#c19} explains the need for this trick:
1093 *
1094 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1095 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1096 * longjmp & normal return to ring-3, which opens a window where we may be
1097 * rescheduled without changing GVMCPU::idHostCpu and cause confusion if
1098 * the CPU starts executing a different EMT. Both functions first disable
1099 * preemption and then call HMR0LeaveCpu which invalidates idHostCpu, leaving
1100 * an opening for getting preempted.
1101 */
1102 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1103 * all the time. */
1104
1105 /*
1106 * Disable the context hook, if we got one.
1107 */
1108 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1109 {
1110 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1111 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1112 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1113 AssertRC(rc);
1114 }
1115}
1116
1117
1118/**
1119 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1120 *
1121 * @returns true if registered, false otherwise.
1122 * @param pVCpu The cross context virtual CPU structure.
1123 */
1124DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1125{
1126 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1127}
1128
1129
1130/**
1131 * Whether thread-context hooks are registered for this VCPU.
1132 *
1133 * @returns true if registered, false otherwise.
1134 * @param pVCpu The cross context virtual CPU structure.
1135 */
1136VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1137{
1138 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1139}
1140
1141
1142/**
1143 * Returns the ring-0 release logger instance.
1144 *
1145 * @returns Pointer to release logger, NULL if not configured.
1146 * @param pVCpu The cross context virtual CPU structure of the caller.
1147 * @thread EMT(pVCpu)
1148 */
1149VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1150{
1151 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1152}
1153
1154
1155#ifdef VBOX_WITH_STATISTICS
1156/**
1157 * Record return code statistics
1158 * @param pVM The cross context VM structure.
1159 * @param pVCpu The cross context virtual CPU structure.
1160 * @param rc The status code.
1161 */
1162static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1163{
1164 /*
1165 * Collect statistics.
1166 */
1167 switch (rc)
1168 {
1169 case VINF_SUCCESS:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1171 break;
1172 case VINF_EM_RAW_INTERRUPT:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1174 break;
1175 case VINF_EM_RAW_INTERRUPT_HYPER:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1177 break;
1178 case VINF_EM_RAW_GUEST_TRAP:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1180 break;
1181 case VINF_EM_RAW_RING_SWITCH:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1183 break;
1184 case VINF_EM_RAW_RING_SWITCH_INT:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1186 break;
1187 case VINF_EM_RAW_STALE_SELECTOR:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1189 break;
1190 case VINF_EM_RAW_IRET_TRAP:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1192 break;
1193 case VINF_IOM_R3_IOPORT_READ:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1195 break;
1196 case VINF_IOM_R3_IOPORT_WRITE:
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1198 break;
1199 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1201 break;
1202 case VINF_IOM_R3_MMIO_READ:
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1204 break;
1205 case VINF_IOM_R3_MMIO_WRITE:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1207 break;
1208 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1210 break;
1211 case VINF_IOM_R3_MMIO_READ_WRITE:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1213 break;
1214 case VINF_PATM_HC_MMIO_PATCH_READ:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1216 break;
1217 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1219 break;
1220 case VINF_CPUM_R3_MSR_READ:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1222 break;
1223 case VINF_CPUM_R3_MSR_WRITE:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1225 break;
1226 case VINF_EM_RAW_EMULATE_INSTR:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1228 break;
1229 case VINF_PATCH_EMULATE_INSTR:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1231 break;
1232 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1233 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1234 break;
1235 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1236 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1237 break;
1238 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1240 break;
1241 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1243 break;
1244 case VINF_CSAM_PENDING_ACTION:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1246 break;
1247 case VINF_PGM_SYNC_CR3:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1249 break;
1250 case VINF_PATM_PATCH_INT3:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1252 break;
1253 case VINF_PATM_PATCH_TRAP_PF:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1255 break;
1256 case VINF_PATM_PATCH_TRAP_GP:
1257 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1258 break;
1259 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1261 break;
1262 case VINF_EM_RESCHEDULE_REM:
1263 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1264 break;
1265 case VINF_EM_RAW_TO_R3:
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1267 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1269 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1271 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1273 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1275 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1277 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1279 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1280 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1281 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1283 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1284 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1285 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1286 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1287 else
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1289 break;
1290
1291 case VINF_EM_RAW_TIMER_PENDING:
1292 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1293 break;
1294 case VINF_EM_RAW_INTERRUPT_PENDING:
1295 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1296 break;
1297 case VINF_PATM_DUPLICATE_FUNCTION:
1298 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1299 break;
1300 case VINF_PGM_CHANGE_MODE:
1301 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1302 break;
1303 case VINF_PGM_POOL_FLUSH_PENDING:
1304 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1305 break;
1306 case VINF_EM_PENDING_REQUEST:
1307 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1308 break;
1309 case VINF_EM_HM_PATCH_TPR_INSTR:
1310 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1311 break;
1312 default:
1313 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1314 break;
1315 }
1316}
1317#endif /* VBOX_WITH_STATISTICS */
1318
1319
1320/**
1321 * The Ring 0 entry point, called by the fast-ioctl path.
1322 *
1323 * @param pGVM The global (ring-0) VM structure.
1324 * @param pVMIgnored The cross context VM structure. The return code is
1325 * stored in pVM->vmm.s.iLastGZRc.
1326 * @param idCpu The Virtual CPU ID of the calling EMT.
1327 * @param enmOperation Which operation to execute.
1328 * @remarks Assume called with interrupts _enabled_.
1329 */
1330VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1331{
1332 RT_NOREF(pVMIgnored);
1333
1334 /*
1335 * Validation.
1336 */
1337 if ( idCpu < pGVM->cCpus
1338 && pGVM->cCpus == pGVM->cCpusUnsafe)
1339 { /*likely*/ }
1340 else
1341 {
1342 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1343 return;
1344 }
1345
1346 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1347 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1348 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1349 && pGVCpu->hNativeThreadR0 == hNativeThread))
1350 { /* likely */ }
1351 else
1352 {
1353 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1354 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1355 return;
1356 }
1357
1358 /*
1359 * Perform requested operation.
1360 */
1361 switch (enmOperation)
1362 {
1363 /*
1364 * Run guest code using the available hardware acceleration technology.
1365 */
1366 case VMMR0_DO_HM_RUN:
1367 {
1368 for (;;) /* hlt loop */
1369 {
1370 /*
1371 * Disable ring-3 calls & blocking till we've successfully entered HM.
1372 * Otherwise we sometimes end up blocking at the final Log4 statement
1373 * in VMXR0Enter, while still in a somewhat in-between state.
1374 */
1375 VMMRZCallRing3Disable(pGVCpu);
1376
1377 /*
1378 * Disable preemption.
1379 */
1380 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1381 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1382 RTThreadPreemptDisable(&PreemptState);
1383 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1384
1385 /*
1386 * Get the host CPU identifiers, make sure they are valid and that
1387 * we've got a TSC delta for the CPU.
1388 */
1389 RTCPUID idHostCpu;
1390 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1391 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1392 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1393 {
1394 pGVCpu->iHostCpuSet = iHostCpuSet;
1395 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1396
1397 /*
1398 * Update the periodic preemption timer if it's active.
1399 */
1400 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1401 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1402
1403#ifdef VMM_R0_TOUCH_FPU
1404 /*
1405 * Make sure we've got the FPU state loaded so we don't need to clear
1406 * CR0.TS and get out of sync with the host kernel when loading the guest
1407 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1408 */
1409 CPUMR0TouchHostFpu();
1410#endif
1411 int rc;
1412 bool fPreemptRestored = false;
1413 if (!HMR0SuspendPending())
1414 {
1415 /*
1416 * Enable the context switching hook.
1417 */
1418 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1419 {
1420 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1421 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1422 }
1423
1424 /*
1425 * Enter HM context.
1426 */
1427 rc = HMR0Enter(pGVCpu);
1428 if (RT_SUCCESS(rc))
1429 {
1430 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1431
1432 /*
1433 * When preemption hooks are in place, enable preemption now that
1434 * we're in HM context.
1435 */
1436 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1437 {
1438 fPreemptRestored = true;
1439 pGVCpu->vmmr0.s.pPreemptState = NULL;
1440 RTThreadPreemptRestore(&PreemptState);
1441 }
1442 VMMRZCallRing3Enable(pGVCpu);
1443
1444 /*
1445 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1446 */
1447 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, HMR0RunGuestCode, pGVM, pGVCpu);
1448
1449 /*
1450 * Assert sanity on the way out. Using manual assertions code here as normal
1451 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1452 */
1453 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1454 && RT_SUCCESS_NP(rc)
1455 && rc != VERR_VMM_RING0_ASSERTION ))
1456 {
1457 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1458 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1459 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1460 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1461 }
1462#if 0
1463 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1464 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1465 {
1466 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1467 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1468 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1469 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1470 }
1471#endif
1472
1473 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1474 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1475 }
1476 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1477
1478 /*
1479 * Invalidate the host CPU identifiers before we disable the context
1480 * hook / restore preemption.
1481 */
1482 pGVCpu->iHostCpuSet = UINT32_MAX;
1483 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1484
1485 /*
1486 * Disable context hooks. Due to unresolved cleanup issues, we
1487 * cannot leave the hooks enabled when we return to ring-3.
1488 *
1489 * Note! At the moment HM may also have disabled the hook
1490 * when we get here, but the IPRT API handles that.
1491 */
1492 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1493 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1494 }
1495 /*
1496 * The system is about to go into suspend mode; go back to ring 3.
1497 */
1498 else
1499 {
1500 pGVCpu->iHostCpuSet = UINT32_MAX;
1501 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1502 rc = VINF_EM_RAW_INTERRUPT;
1503 }
1504
1505 /** @todo When HM stops messing with the context hook state, we'll disable
1506 * preemption again before the RTThreadCtxHookDisable call. */
1507 if (!fPreemptRestored)
1508 {
1509 pGVCpu->vmmr0.s.pPreemptState = NULL;
1510 RTThreadPreemptRestore(&PreemptState);
1511 }
1512
1513 pGVCpu->vmm.s.iLastGZRc = rc;
1514
1515 /* Fire dtrace probe and collect statistics. */
1516 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1517#ifdef VBOX_WITH_STATISTICS
1518 vmmR0RecordRC(pGVM, pGVCpu, rc);
1519#endif
1520 VMMRZCallRing3Enable(pGVCpu);
1521
1522 /*
1523 * If this is a halt.
1524 */
1525 if (rc != VINF_EM_HALT)
1526 { /* we're not in a hurry for a HLT, so prefer this path */ }
1527 else
1528 {
1529 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1530 if (rc == VINF_SUCCESS)
1531 {
1532 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1533 continue;
1534 }
1535 pGVCpu->vmm.s.cR0HaltsToRing3++;
1536 }
1537 }
1538 /*
1539 * Invalid CPU set index or TSC delta in need of measuring.
1540 */
1541 else
1542 {
1543 pGVCpu->vmmr0.s.pPreemptState = NULL;
1544 pGVCpu->iHostCpuSet = UINT32_MAX;
1545 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1546 RTThreadPreemptRestore(&PreemptState);
1547
1548 VMMRZCallRing3Enable(pGVCpu);
1549
1550 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1551 {
1552 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1553 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1554 0 /*default cTries*/);
1555 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1556 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1557 else
1558 pGVCpu->vmm.s.iLastGZRc = rc;
1559 }
1560 else
1561 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1562 }
1563 break;
1564 } /* halt loop. */
1565 break;
1566 }
1567
1568#ifdef VBOX_WITH_NEM_R0
1569# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1570 case VMMR0_DO_NEM_RUN:
1571 {
1572 /*
1573 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1574 */
1575# ifdef VBOXSTRICTRC_STRICT_ENABLED
1576 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1577# else
1578 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu);
1579# endif
1580 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1581
1582 pGVCpu->vmm.s.iLastGZRc = rc;
1583
1584 /*
1585 * Fire dtrace probe and collect statistics.
1586 */
1587 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1588# ifdef VBOX_WITH_STATISTICS
1589 vmmR0RecordRC(pGVM, pGVCpu, rc);
1590# endif
1591 break;
1592 }
1593# endif
1594#endif
1595
1596 /*
1597 * For profiling.
1598 */
1599 case VMMR0_DO_NOP:
1600 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1601 break;
1602
1603 /*
1604 * Shouldn't happen.
1605 */
1606 default:
1607 AssertMsgFailed(("%#x\n", enmOperation));
1608 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1609 break;
1610 }
1611}
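/*
 * Summary of the VMMR0_DO_HM_RUN fast path above (simplified sketch):
 *
 *   VMMRZCallRing3Disable();
 *   RTThreadPreemptDisable();
 *   validate the host CPU set index and TSC delta;
 *   RTThreadCtxHookEnable();            // if a hook was created
 *   HMR0Enter();
 *   vmmR0CallRing3SetJmp(..., HMR0RunGuestCode, ...);
 *   restore preemption, disable the hook, record iLastGZRc;
 *   on VINF_EM_HALT try vmmR0DoHalt() and loop instead of returning.
 */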
1612
1613
1614/**
1615 * Validates a session or VM session argument.
1616 *
1617 * @returns true / false accordingly.
1618 * @param pGVM The global (ring-0) VM structure.
1619 * @param pClaimedSession The session claim to validate.
1620 * @param pSession The session argument.
1621 */
1622DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1623{
1624 /* This must be set! */
1625 if (!pSession)
1626 return false;
1627
1628 /* Only one out of the two. */
1629 if (pGVM && pClaimedSession)
1630 return false;
1631 if (pGVM)
1632 pClaimedSession = pGVM->pSession;
1633 return pClaimedSession == pSession;
1634}
1635
1636
1637/**
1638 * VMMR0EntryEx worker function, either called directly or whenever possible
1639 * called thru a longjmp so we can exit safely on failure.
1640 *
1641 * @returns VBox status code.
1642 * @param pGVM The global (ring-0) VM structure.
1643 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1644 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1645 * @param enmOperation Which operation to execute.
1646 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1647 * The support driver validates this if it's present.
1648 * @param u64Arg Some simple constant argument.
1649 * @param pSession The session of the caller.
1650 *
1651 * @remarks Assume called with interrupts _enabled_.
1652 */
1653DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1654 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1655{
1656 /*
1657 * Validate pGVM and idCpu for consistency and validity.
1658 */
1659 if (pGVM != NULL)
1660 {
1661 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1662 { /* likely */ }
1663 else
1664 {
1665 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1666 return VERR_INVALID_POINTER;
1667 }
1668
1669 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1670 { /* likely */ }
1671 else
1672 {
1673 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1674 return VERR_INVALID_PARAMETER;
1675 }
1676
1677 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1678 && pGVM->enmVMState <= VMSTATE_TERMINATED
1679 && pGVM->pSession == pSession
1680 && pGVM->pSelf == pGVM))
1681 { /* likely */ }
1682 else
1683 {
1684 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1685 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1686 return VERR_INVALID_POINTER;
1687 }
1688 }
1689 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1690 { /* likely */ }
1691 else
1692 {
1693 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1694 return VERR_INVALID_PARAMETER;
1695 }
1696
1697 /*
1698 * Process the request.
1699 */
1700 int rc;
1701 switch (enmOperation)
1702 {
1703 /*
1704 * GVM requests
1705 */
1706 case VMMR0_DO_GVMM_CREATE_VM:
1707 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1708 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1709 else
1710 rc = VERR_INVALID_PARAMETER;
1711 break;
1712
1713 case VMMR0_DO_GVMM_DESTROY_VM:
1714 if (pReqHdr == NULL && u64Arg == 0)
1715 rc = GVMMR0DestroyVM(pGVM);
1716 else
1717 rc = VERR_INVALID_PARAMETER;
1718 break;
1719
1720 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1721 if (pGVM != NULL)
1722 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1723 else
1724 rc = VERR_INVALID_PARAMETER;
1725 break;
1726
1727 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1728 if (pGVM != NULL)
1729 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1730 else
1731 rc = VERR_INVALID_PARAMETER;
1732 break;
1733
1734 case VMMR0_DO_GVMM_REGISTER_WORKER_THREAD:
1735 if (pGVM != NULL && pReqHdr && pReqHdr->cbReq == sizeof(GVMMREGISTERWORKERTHREADREQ))
1736 rc = GVMMR0RegisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg,
1737 ((PGVMMREGISTERWORKERTHREADREQ)(pReqHdr))->hNativeThreadR3);
1738 else
1739 rc = VERR_INVALID_PARAMETER;
1740 break;
1741
1742 case VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD:
1743 if (pGVM != NULL)
1744 rc = GVMMR0DeregisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg);
1745 else
1746 rc = VERR_INVALID_PARAMETER;
1747 break;
1748
1749 case VMMR0_DO_GVMM_SCHED_HALT:
1750 if (pReqHdr)
1751 return VERR_INVALID_PARAMETER;
1752 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1753 break;
1754
1755 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1756 if (pReqHdr || u64Arg)
1757 return VERR_INVALID_PARAMETER;
1758 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1759 break;
1760
1761 case VMMR0_DO_GVMM_SCHED_POKE:
1762 if (pReqHdr || u64Arg)
1763 return VERR_INVALID_PARAMETER;
1764 rc = GVMMR0SchedPoke(pGVM, idCpu);
1765 break;
1766
1767 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1768 if (u64Arg)
1769 return VERR_INVALID_PARAMETER;
1770 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1771 break;
1772
1773 case VMMR0_DO_GVMM_SCHED_POLL:
1774 if (pReqHdr || u64Arg > 1)
1775 return VERR_INVALID_PARAMETER;
1776 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1777 break;
1778
1779 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1780 if (u64Arg)
1781 return VERR_INVALID_PARAMETER;
1782 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1783 break;
1784
1785 case VMMR0_DO_GVMM_RESET_STATISTICS:
1786 if (u64Arg)
1787 return VERR_INVALID_PARAMETER;
1788 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1789 break;
1790
1791 /*
1792 * Initialize the R0 part of a VM instance.
1793 */
1794 case VMMR0_DO_VMMR0_INIT:
1795 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1796 break;
1797
1798 /*
1799 * Does EMT specific ring-0 init.
1800 */
1801 case VMMR0_DO_VMMR0_INIT_EMT:
1802 rc = vmmR0InitVMEmt(pGVM, idCpu);
1803 break;
1804
1805 /*
1806 * Terminate the R0 part of a VM instance.
1807 */
1808 case VMMR0_DO_VMMR0_TERM:
1809 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1810 break;
1811
1812 /*
1813 * Update release or debug logger instances.
1814 */
1815 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1816 if (idCpu == NIL_VMCPUID)
1817 return VERR_INVALID_CPU_ID;
1818 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1819 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1820 else
1821 return VERR_INVALID_PARAMETER;
1822 break;
1823
1824 /*
1825 * Log flusher thread.
1826 */
1827 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1828 if (idCpu != NIL_VMCPUID)
1829 return VERR_INVALID_CPU_ID;
1830 if (pReqHdr == NULL)
1831 rc = vmmR0LogFlusher(pGVM);
1832 else
1833 return VERR_INVALID_PARAMETER;
1834 break;
1835
1836 /*
1837 * Wait for the flush to finish with all the buffers for the given logger.
1838 */
1839 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1840 if (idCpu == NIL_VMCPUID)
1841 return VERR_INVALID_CPU_ID;
1842 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1843 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1844 else
1845 return VERR_INVALID_PARAMETER;
1846 break;
1847
1848 /*
1849 * Attempt to enable HM mode and check the current setting.
1850 */
1851 case VMMR0_DO_HM_ENABLE:
1852 rc = HMR0EnableAllCpus(pGVM);
1853 break;
1854
1855 /*
1856 * Set up the hardware-accelerated session.
1857 */
1858 case VMMR0_DO_HM_SETUP_VM:
1859 rc = HMR0SetupVM(pGVM);
1860 break;
1861
1862 /*
1863 * PGM wrappers.
1864 */
1865 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1866 if (idCpu == NIL_VMCPUID)
1867 return VERR_INVALID_CPU_ID;
1868 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1869 break;
1870
1871 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1872 if (idCpu == NIL_VMCPUID)
1873 return VERR_INVALID_CPU_ID;
1874 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1875 break;
1876
1877 case VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE:
1878 if (idCpu == NIL_VMCPUID)
1879 return VERR_INVALID_CPU_ID;
1880 rc = PGMR0PhysAllocateLargePage(pGVM, idCpu, u64Arg);
1881 break;
1882
1883 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1884 if (idCpu != 0)
1885 return VERR_INVALID_CPU_ID;
1886 rc = PGMR0PhysSetupIoMmu(pGVM);
1887 break;
1888
1889 case VMMR0_DO_PGM_POOL_GROW:
1890 if (idCpu == NIL_VMCPUID)
1891 return VERR_INVALID_CPU_ID;
1892 rc = PGMR0PoolGrow(pGVM, idCpu);
1893 break;
1894
1895 /*
1896 * GMM wrappers.
1897 */
1898 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1899 if (u64Arg)
1900 return VERR_INVALID_PARAMETER;
1901 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1902 break;
1903
1904 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1905 if (u64Arg)
1906 return VERR_INVALID_PARAMETER;
1907 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1908 break;
1909
1910 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1911 if (u64Arg)
1912 return VERR_INVALID_PARAMETER;
1913 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1914 break;
1915
1916 case VMMR0_DO_GMM_FREE_PAGES:
1917 if (u64Arg)
1918 return VERR_INVALID_PARAMETER;
1919 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1920 break;
1921
1922 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1923 if (u64Arg)
1924 return VERR_INVALID_PARAMETER;
1925 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1926 break;
1927
1928 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1929 if (u64Arg)
1930 return VERR_INVALID_PARAMETER;
1931 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1932 break;
1933
1934 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1935 if (idCpu == NIL_VMCPUID)
1936 return VERR_INVALID_CPU_ID;
1937 if (u64Arg)
1938 return VERR_INVALID_PARAMETER;
1939 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1940 break;
1941
1942 case VMMR0_DO_GMM_BALLOONED_PAGES:
1943 if (u64Arg)
1944 return VERR_INVALID_PARAMETER;
1945 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1946 break;
1947
1948 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1949 if (u64Arg)
1950 return VERR_INVALID_PARAMETER;
1951 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1952 break;
1953
1954 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1955 if (idCpu == NIL_VMCPUID)
1956 return VERR_INVALID_CPU_ID;
1957 if (u64Arg)
1958 return VERR_INVALID_PARAMETER;
1959 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1960 break;
1961
1962 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1963 if (idCpu == NIL_VMCPUID)
1964 return VERR_INVALID_CPU_ID;
1965 if (u64Arg)
1966 return VERR_INVALID_PARAMETER;
1967 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1968 break;
1969
1970 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1971 if (idCpu == NIL_VMCPUID)
1972 return VERR_INVALID_CPU_ID;
1973 if ( u64Arg
1974 || pReqHdr)
1975 return VERR_INVALID_PARAMETER;
1976 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1977 break;
1978
1979#ifdef VBOX_WITH_PAGE_SHARING
1980 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1981 {
1982 if (idCpu == NIL_VMCPUID)
1983 return VERR_INVALID_CPU_ID;
1984 if ( u64Arg
1985 || pReqHdr)
1986 return VERR_INVALID_PARAMETER;
1987 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1988 break;
1989 }
1990#endif
1991
1992#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1993 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1994 if (u64Arg)
1995 return VERR_INVALID_PARAMETER;
1996 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1997 break;
1998#endif
1999
2000 case VMMR0_DO_GMM_QUERY_STATISTICS:
2001 if (u64Arg)
2002 return VERR_INVALID_PARAMETER;
2003 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2004 break;
2005
2006 case VMMR0_DO_GMM_RESET_STATISTICS:
2007 if (u64Arg)
2008 return VERR_INVALID_PARAMETER;
2009 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2010 break;
2011
2012 /*
2013 * A quick GCFGM mock-up.
2014 */
2015 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2016 case VMMR0_DO_GCFGM_SET_VALUE:
2017 case VMMR0_DO_GCFGM_QUERY_VALUE:
2018 {
2019 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2020 return VERR_INVALID_PARAMETER;
2021 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2022 if (pReq->Hdr.cbReq != sizeof(*pReq))
2023 return VERR_INVALID_PARAMETER;
2024 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2025 {
2026 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2027 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2028 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2029 }
2030 else
2031 {
2032 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2033 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2034 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2035 }
2036 break;
2037 }
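
    /*
     * Illustrative only (not part of the dispatcher): a ring-3 caller could fill
     * a GCFGMVALUEREQ roughly like below before submitting it through the
     * VMMR0EntryEx path with pGVM=NULL, idCpu=NIL_VMCPUID and u64Arg=0.  The
     * value name is a made-up placeholder and the support driver validates the
     * request header; this is a sketch, not code taken from the ring-3 sources.
     *
     * @code
     *    GCFGMVALUEREQ Req;
     *    RT_ZERO(Req);
     *    Req.Hdr.cbReq = sizeof(Req);
     *    Req.pSession  = pSession;
     *    RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/SomeValue");
     *    // ... submit as pReqHdr for VMMR0_DO_GCFGM_QUERY_VALUE, then read Req.u64Value.
     * @endcode
     */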
2038
2039 /*
2040 * PDM Wrappers.
2041 */
2042 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2043 {
2044 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2045 return VERR_INVALID_PARAMETER;
2046 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2047 break;
2048 }
2049
2050 case VMMR0_DO_PDM_DEVICE_CREATE:
2051 {
2052 if (!pReqHdr || u64Arg || idCpu != 0)
2053 return VERR_INVALID_PARAMETER;
2054 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2055 break;
2056 }
2057
2058 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2059 {
2060 if (!pReqHdr || u64Arg)
2061 return VERR_INVALID_PARAMETER;
2062 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2063 break;
2064 }
2065
2066 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2067 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2068 {
2069 if (!pReqHdr || u64Arg || idCpu != 0)
2070 return VERR_INVALID_PARAMETER;
2071 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2072 break;
2073 }
2074
2075 /*
2076 * Requests to the internal networking service.
2077 */
2078 case VMMR0_DO_INTNET_OPEN:
2079 {
2080 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2081 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2082 return VERR_INVALID_PARAMETER;
2083 rc = IntNetR0OpenReq(pSession, pReq);
2084 break;
2085 }
2086
2087 case VMMR0_DO_INTNET_IF_CLOSE:
2088 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2089 return VERR_INVALID_PARAMETER;
2090 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2091 break;
2092
2093
2094 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2095 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2096 return VERR_INVALID_PARAMETER;
2097 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2098 break;
2099
2100 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2101 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2102 return VERR_INVALID_PARAMETER;
2103 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2104 break;
2105
2106 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2107 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2108 return VERR_INVALID_PARAMETER;
2109 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2110 break;
2111
2112 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2113 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2114 return VERR_INVALID_PARAMETER;
2115 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2116 break;
2117
2118 case VMMR0_DO_INTNET_IF_SEND:
2119 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2120 return VERR_INVALID_PARAMETER;
2121 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2122 break;
2123
2124 case VMMR0_DO_INTNET_IF_WAIT:
2125 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2126 return VERR_INVALID_PARAMETER;
2127 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2128 break;
2129
2130 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2131 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2132 return VERR_INVALID_PARAMETER;
2133 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2134 break;
2135
2136#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2137 /*
2138 * Requests to host PCI driver service.
2139 */
2140 case VMMR0_DO_PCIRAW_REQ:
2141 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2142 return VERR_INVALID_PARAMETER;
2143 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2144 break;
2145#endif
2146
2147 /*
2148 * NEM requests.
2149 */
2150#ifdef VBOX_WITH_NEM_R0
2151# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2152 case VMMR0_DO_NEM_INIT_VM:
2153 if (u64Arg || pReqHdr || idCpu != 0)
2154 return VERR_INVALID_PARAMETER;
2155 rc = NEMR0InitVM(pGVM);
2156 break;
2157
2158 case VMMR0_DO_NEM_INIT_VM_PART_2:
2159 if (u64Arg || pReqHdr || idCpu != 0)
2160 return VERR_INVALID_PARAMETER;
2161 rc = NEMR0InitVMPart2(pGVM);
2162 break;
2163
2164 case VMMR0_DO_NEM_MAP_PAGES:
2165 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2166 return VERR_INVALID_PARAMETER;
2167 rc = NEMR0MapPages(pGVM, idCpu);
2168 break;
2169
2170 case VMMR0_DO_NEM_UNMAP_PAGES:
2171 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2172 return VERR_INVALID_PARAMETER;
2173 rc = NEMR0UnmapPages(pGVM, idCpu);
2174 break;
2175
2176 case VMMR0_DO_NEM_EXPORT_STATE:
2177 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2178 return VERR_INVALID_PARAMETER;
2179 rc = NEMR0ExportState(pGVM, idCpu);
2180 break;
2181
2182 case VMMR0_DO_NEM_IMPORT_STATE:
2183 if (pReqHdr || idCpu == NIL_VMCPUID)
2184 return VERR_INVALID_PARAMETER;
2185 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2186 break;
2187
2188 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2189 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2190 return VERR_INVALID_PARAMETER;
2191 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2192 break;
2193
2194 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2195 if (pReqHdr || idCpu == NIL_VMCPUID)
2196 return VERR_INVALID_PARAMETER;
2197 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2198 break;
2199
2200 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2201 if (u64Arg || pReqHdr)
2202 return VERR_INVALID_PARAMETER;
2203 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2204 break;
2205
2206# if 1 && defined(DEBUG_bird)
2207 case VMMR0_DO_NEM_EXPERIMENT:
2208 if (pReqHdr)
2209 return VERR_INVALID_PARAMETER;
2210 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2211 break;
2212# endif
2213# endif
2214#endif
2215
2216 /*
2217 * IOM requests.
2218 */
2219 case VMMR0_DO_IOM_GROW_IO_PORTS:
2220 {
2221 if (pReqHdr || idCpu != 0)
2222 return VERR_INVALID_PARAMETER;
2223 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2224 break;
2225 }
2226
2227 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2228 {
2229 if (pReqHdr || idCpu != 0)
2230 return VERR_INVALID_PARAMETER;
2231 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2232 break;
2233 }
2234
2235 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2236 {
2237 if (pReqHdr || idCpu != 0)
2238 return VERR_INVALID_PARAMETER;
2239 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2240 break;
2241 }
2242
2243 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2244 {
2245 if (pReqHdr || idCpu != 0)
2246 return VERR_INVALID_PARAMETER;
2247 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2248 break;
2249 }
2250
2251 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2252 {
2253 if (pReqHdr || idCpu != 0)
2254 return VERR_INVALID_PARAMETER;
2255 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2256 if (RT_SUCCESS(rc))
2257 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2258 break;
2259 }
2260
2261 /*
2262 * DBGF requests.
2263 */
2264#ifdef VBOX_WITH_DBGF_TRACING
2265 case VMMR0_DO_DBGF_TRACER_CREATE:
2266 {
2267 if (!pReqHdr || u64Arg || idCpu != 0)
2268 return VERR_INVALID_PARAMETER;
2269 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2270 break;
2271 }
2272
2273 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2274 {
2275 if (!pReqHdr || u64Arg)
2276 return VERR_INVALID_PARAMETER;
2277# if 0 /** @todo */
2278 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2279# else
2280 rc = VERR_NOT_IMPLEMENTED;
2281# endif
2282 break;
2283 }
2284#endif
2285
2286 case VMMR0_DO_DBGF_BP_INIT:
2287 {
2288 if (!pReqHdr || u64Arg || idCpu != 0)
2289 return VERR_INVALID_PARAMETER;
2290 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2291 break;
2292 }
2293
2294 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2295 {
2296 if (!pReqHdr || u64Arg || idCpu != 0)
2297 return VERR_INVALID_PARAMETER;
2298 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2299 break;
2300 }
2301
2302 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2303 {
2304 if (!pReqHdr || u64Arg || idCpu != 0)
2305 return VERR_INVALID_PARAMETER;
2306 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2307 break;
2308 }
2309
2310 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2311 {
2312 if (!pReqHdr || u64Arg || idCpu != 0)
2313 return VERR_INVALID_PARAMETER;
2314 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2315 break;
2316 }
2317
2318 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2319 {
2320 if (!pReqHdr || u64Arg || idCpu != 0)
2321 return VERR_INVALID_PARAMETER;
2322 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2323 break;
2324 }
2325
2326
2327 /*
2328 * TM requests.
2329 */
2330 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2331 {
2332 if (pReqHdr || idCpu == NIL_VMCPUID)
2333 return VERR_INVALID_PARAMETER;
2334 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2335 break;
2336 }
2337
2338 /*
2339 * For profiling.
2340 */
2341 case VMMR0_DO_NOP:
2342 case VMMR0_DO_SLOW_NOP:
2343 return VINF_SUCCESS;
2344
2345 /*
2346 * For testing Ring-0 APIs invoked in this environment.
2347 */
2348 case VMMR0_DO_TESTS:
2349 /** @todo make new test */
2350 return VINF_SUCCESS;
2351
2352 default:
2353 /*
2354 * We're returning VERR_NOT_SUPPORT here so we've got something else
2355 * than -1 which the interrupt gate glue code might return.
2356 */
2357 Log(("operation %#x is not supported\n", enmOperation));
2358 return VERR_NOT_SUPPORTED;
2359 }
2360 return rc;
2361}
2362
2363
2364/**
2365 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2366 *
2367 * @returns VBox status code.
2368 * @param pvArgs The argument package
2369 */
2370static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2371{
2372 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2373 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2374 pGVCpu->vmmr0.s.idCpu,
2375 pGVCpu->vmmr0.s.enmOperation,
2376 pGVCpu->vmmr0.s.pReq,
2377 pGVCpu->vmmr0.s.u64Arg,
2378 pGVCpu->vmmr0.s.pSession);
2379}
2380
2381
2382/**
2383 * The Ring 0 entry point, called by the support library (SUP).
2384 *
2385 * @returns VBox status code.
2386 * @param pGVM The global (ring-0) VM structure.
2387 * @param pVM The cross context VM structure.
2388 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2389 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2390 * @param enmOperation Which operation to execute.
2391 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2392 * @param u64Arg Some simple constant argument.
2393 * @param pSession The session of the caller.
2394 * @remarks Assume called with interrupts _enabled_.
2395 */
2396VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2397 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2398{
2399 /*
2400 * Requests that should only happen on the EMT thread will be
2401 * wrapped in a setjmp so we can assert without causing too much trouble.
2402 */
2403 if ( pVM != NULL
2404 && pGVM != NULL
2405 && pVM == pGVM /** @todo drop pVM or pGVM */
2406 && idCpu < pGVM->cCpus
2407 && pGVM->pSession == pSession
2408 && pGVM->pSelf == pGVM
2409 && enmOperation != VMMR0_DO_GVMM_DESTROY_VM
2410 && enmOperation != VMMR0_DO_GVMM_REGISTER_VMCPU
2411 && enmOperation != VMMR0_DO_GVMM_SCHED_WAKE_UP /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2412 && enmOperation != VMMR0_DO_GVMM_SCHED_POKE /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2413 )
2414 {
2415 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2416 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2417 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2418 && pGVCpu->hNativeThreadR0 == hNativeThread))
2419 {
2420 pGVCpu->vmmr0.s.pGVM = pGVM;
2421 pGVCpu->vmmr0.s.idCpu = idCpu;
2422 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2423 pGVCpu->vmmr0.s.pReq = pReq;
2424 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2425 pGVCpu->vmmr0.s.pSession = pSession;
2426 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmmr0.s.AssertJmpBuf, vmmR0EntryExWrapper, pGVCpu,
2427 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2428 }
2429 return VERR_VM_THREAD_NOT_EMT;
2430 }
2431 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2432}
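
/*
 * Rough sketch of how a ring-3 caller typically reaches this entry point: the
 * request is marshalled by the support driver, which ends up calling
 * VMMR0EntryEx.  The SUPR3CallVMMR0Ex signature shown here is an assumption
 * (it is not defined in this file), and pVMR0 stands for the ring-0 VM
 * address obtained when the VM was created.
 *
 * @code
 *    // u64Arg = 0 and pReqHdr = NULL for this operation:
 *    int rc = SUPR3CallVMMR0Ex(pVMR0, idCpu, VMMR0_DO_VMMR0_INIT_EMT, 0, NULL);
 * @endcode
 */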
2433
2434
2435/*********************************************************************************************************************************
2436* EMT Blocking *
2437*********************************************************************************************************************************/
2438
2439/**
2440 * Checks whether we've armed the ring-0 long jump machinery.
2441 *
2442 * @returns @c true / @c false
2443 * @param pVCpu The cross context virtual CPU structure.
2444 * @thread EMT
2445 * @sa VMMIsLongJumpArmed
2446 */
2447VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2448{
2449#ifdef RT_ARCH_X86
2450 return pVCpu->vmmr0.s.AssertJmpBuf.eip != 0;
2451#else
2452 return pVCpu->vmmr0.s.AssertJmpBuf.rip != 0;
2453#endif
2454}
2455
2456
2457/**
2458 * Locking helper that deals with HM context and checks if the thread can block.
2459 *
2460 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2461 * VERR_VMM_CANNOT_BLOCK if not able to block.
2462 * @param pVCpu The cross context virtual CPU structure of the calling
2463 * thread.
2464 * @param rcBusy What to return in case of a blocking problem. Will IPE
2465 * if VINF_SUCCESS and we cannot block.
2466 * @param pszCaller The caller (for logging problems).
2467 * @param pvLock The lock address (for logging problems).
2468 * @param pCtx Where to return context info for the resume call.
2469 * @thread EMT(pVCpu)
2470 */
2471VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2472 PVMMR0EMTBLOCKCTX pCtx)
2473{
2474 const char *pszMsg;
2475
2476 /*
2477 * Check that we are allowed to block.
2478 */
2479 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2480 {
2481 /*
2482 * Are we in HM context and w/o a context hook? If so, work the context hook.
2483 */
2484 if (pVCpu->idHostCpu != NIL_RTCPUID)
2485 {
2486 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2487
2488 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2489 {
2490 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2491 if (pVCpu->vmmr0.s.pPreemptState)
2492 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2493
2494 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2495 pCtx->fWasInHmContext = true;
2496 return VINF_SUCCESS;
2497 }
2498 }
2499
2500 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2501 {
2502 /*
2503 * Not in HM context or we've got hooks, so just check that preemption
2504 * is enabled.
2505 */
2506 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2507 {
2508 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2509 pCtx->fWasInHmContext = false;
2510 return VINF_SUCCESS;
2511 }
2512 pszMsg = "Preemption is disabled!";
2513 }
2514 else
2515 pszMsg = "Preemption state w/o HM state!";
2516 }
2517 else
2518 pszMsg = "Ring-3 calls are disabled!";
2519
2520 static uint32_t volatile s_cWarnings = 0;
2521 if (++s_cWarnings < 50)
2522 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2523 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2524 pCtx->fWasInHmContext = false;
2525 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2526}
2527
2528
2529/**
2530 * Counterpart to VMMR0EmtPrepareToBlock.
2531 *
2532 * @param pVCpu The cross context virtual CPU structure of the calling
2533 * thread.
2534 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2535 * @thread EMT(pVCpu)
2536 */
2537VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2538{
2539 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2540 if (pCtx->fWasInHmContext)
2541 {
2542 if (pVCpu->vmmr0.s.pPreemptState)
2543 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2544
2545 pCtx->fWasInHmContext = false;
2546 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2547 }
2548 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2549}
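
/*
 * Typical usage pattern for the pair above (it mirrors VMMR0EmtSignalSupEvent
 * further down); hEvent and cMillies are placeholders:
 *
 * @code
 *    VMMR0EMTBLOCKCTX Ctx;
 *    int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
 *    if (RT_SUCCESS(rc))
 *    {
 *        rc = RTSemEventWait(hEvent, cMillies);   // or any other blocking call
 *        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *    }
 * @endcode
 */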
2550
2551
2552/**
2553 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock.
2554 *
2555 * @returns VBox status code.
2556 * @retval VERR_THREAD_IS_TERMINATING
2557 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2558 * @a cMsTimeout or to maximum wait values.
2559 *
2560 * @param pGVCpu The ring-0 virtual CPU structure.
2561 * @param fFlags VMMR0EMTWAIT_F_XXX.
2562 * @param hEvent The event to wait on.
2563 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2564 */
2565VMMR0_INT_DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2566{
2567 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2568
2569 /*
2570 * Note! Similar code is found in the PDM critical sections too.
2571 */
2572 uint64_t const nsStart = RTTimeNanoTS();
2573 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2574 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2575 uint32_t cMsMaxOne = RT_MS_5SEC;
2576 bool fNonInterruptible = false;
2577 for (;;)
2578 {
2579 /* Wait. */
2580 int rcWait = !fNonInterruptible
2581 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2582 : RTSemEventWait(hEvent, cMsMaxOne);
2583 if (RT_SUCCESS(rcWait))
2584 return rcWait;
2585
2586 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2587 {
2588 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2589
2590 /*
2591 * Check the thread termination status.
2592 */
2593 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2594 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2595 ("rcTerm=%Rrc\n", rcTerm));
2596 if ( rcTerm == VERR_NOT_SUPPORTED
2597 && !fNonInterruptible
2598 && cNsMaxTotal > RT_NS_1MIN)
2599 cNsMaxTotal = RT_NS_1MIN;
2600
2601 /* We return immediately if it looks like the thread is terminating. */
2602 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2603 return VERR_THREAD_IS_TERMINATING;
2604
2605 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2606 specified, otherwise we'll just return it. */
2607 if (rcWait == VERR_INTERRUPTED)
2608 {
2609 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2610 return VERR_INTERRUPTED;
2611 if (!fNonInterruptible)
2612 {
2613 /* First time: Adjust down the wait parameters and make sure we get at least
2614 one non-interruptible wait before timing out. */
2615 fNonInterruptible = true;
2616 cMsMaxOne = 32;
2617 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2618 if (cNsLeft > RT_NS_10SEC)
2619 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2620 continue;
2621 }
2622 }
2623
2624 /* Check for timeout. */
2625 if (cNsElapsed > cNsMaxTotal)
2626 return VERR_TIMEOUT;
2627 }
2628 else
2629 return rcWait;
2630 }
2631 /* not reached */
2632}
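
/*
 * Example call (this is how vmmR0LoggerFlushInner further down uses it): wait
 * on the per-logger flush event, suppressing VERR_INTERRUPTED where possible:
 *
 * @code
 *    rc = VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
 *                                pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
 * @endcode
 */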
2633
2634
2635/**
2636 * Helper for signalling an SUPSEMEVENT.
2637 *
2638 * This may temporarily leave the HM context if the host requires that for
2639 * signalling SUPSEMEVENT objects.
2640 *
2641 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2642 * @param pGVM The ring-0 VM structure.
2643 * @param pGVCpu The ring-0 virtual CPU structure.
2644 * @param hEvent The event to signal.
2645 */
2646VMMR0_INT_DECL(int) VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent)
2647{
2648 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2649 if (RTSemEventIsSignalSafe())
2650 return SUPSemEventSignal(pGVM->pSession, hEvent);
2651
2652 VMMR0EMTBLOCKCTX Ctx;
2653 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2654 if (RT_SUCCESS(rc))
2655 {
2656 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2657 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2658 }
2659 return rc;
2660}
2661
2662
2663/**
2664 * Helper for signalling an SUPSEMEVENT, variant supporting non-EMTs.
2665 *
2666 * This may temporarily leave the HM context if the host requires that for
2667 * signalling SUPSEMEVENT objects.
2668 *
2669 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2670 * @param pGVM The ring-0 VM structure.
2671 * @param hEvent The event to signal.
2672 */
2673VMMR0_INT_DECL(int) VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent)
2674{
2675 if (!RTSemEventIsSignalSafe())
2676 {
2677 PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
2678 if (pGVCpu)
2679 {
2680 VMMR0EMTBLOCKCTX Ctx;
2681 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2682 if (RT_SUCCESS(rc))
2683 {
2684 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2685 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2686 }
2687 return rc;
2688 }
2689 }
2690 return SUPSemEventSignal(pGVM->pSession, hEvent);
2691}
2692
2693
2694/*********************************************************************************************************************************
2695* Logging. *
2696*********************************************************************************************************************************/
2697
2698/**
2699 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2700 *
2701 * @returns VBox status code.
2702 * @param pGVM The global (ring-0) VM structure.
2703 * @param idCpu The ID of the calling EMT.
2704 * @param pReq The request data.
2705 * @param idxLogger Which logger set to update.
2706 * @thread EMT(idCpu)
2707 */
2708static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2709{
2710 /*
2711 * Check sanity. First we require EMT to be calling us.
2712 */
2713 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2714 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2715
2716 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2717 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2718 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2719
2720 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2721
2722 /*
2723 * Adjust flags.
2724 */
2725 /* Always buffered: */
2726 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2727 /* These don't make sense at present: */
2728 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2729 /* We've traditionally skipped the group restrictions. */
2730 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2731
2732 /*
2733 * Do the updating.
2734 */
2735 int rc = VINF_SUCCESS;
2736 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2737 {
2738 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2739 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2740 if (pLogger)
2741 {
2742 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2743 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2744 }
2745 }
2746
2747 return rc;
2748}
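
/*
 * Sketch of how a ring-3 caller might assemble the request checked above (the
 * field names come from the validation code; the CRC input and the exact
 * submission path are assumptions):
 *
 * @code
 *    uint32_t const         cGroups = cMyGroups;        // hypothetical group count
 *    uint32_t const         cbReq   = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
 *    PVMMR0UPDATELOGGERSREQ pReq    = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZ(cbReq);
 *    pReq->Hdr.cbReq   = cbReq;
 *    pReq->fFlags      = fMyLoggerFlags;                // RTLOGFLAGS_XXX
 *    pReq->uGroupCrc32 = uMyGroupNameCrc;               // CRC32 over the group name table (assumed input)
 *    pReq->cGroups     = cGroups;
 *    // pReq->afGroups[0..cGroups-1] = per-group enable/level flags.
 *    // Submitted as pReqHdr for VMMR0_DO_VMMR0_UPDATE_LOGGERS with u64Arg = idxLogger.
 * @endcode
 */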
2749
2750
2751/**
2752 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2753 *
2754 * The job info is copied into VMM::LogFlusherItem.
2755 *
2756 * @returns VBox status code.
2757 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2758 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2759 * @param pGVM The global (ring-0) VM structure.
2760 * @thread The log flusher thread (first caller automatically becomes the log
2761 * flusher).
2762 */
2763static int vmmR0LogFlusher(PGVM pGVM)
2764{
2765 /*
2766 * Check that this really is the flusher thread.
2767 */
2768 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2769 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2770 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2771 { /* likely */ }
2772 else
2773 {
2774 /* The first caller becomes the flusher thread. */
2775 bool fOk;
2776 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2777 if (!fOk)
2778 return VERR_NOT_OWNER;
2779 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2780 }
2781
2782 /*
2783 * Acknowledge flush, waking up waiting EMT.
2784 */
2785 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2786
2787 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2788 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2789 if ( idxTail != idxHead
2790 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2791 {
2792 /* Pop the head off the ring buffer. */
2793 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2794 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2795 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2796
2797 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2798 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2799
2800 /* Validate content. */
2801 if ( idCpu < pGVM->cCpus
2802 && idxLogger < VMMLOGGER_IDX_MAX
2803 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2804 {
2805 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2806 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2807 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2808
2809 /*
2810 * Accounting.
2811 */
2812 uint32_t cFlushing = pR0Log->cFlushing - 1;
2813 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2814 { /*likely*/ }
2815 else
2816 cFlushing = 0;
2817 pR0Log->cFlushing = cFlushing;
2818 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2819
2820 /*
2821 * Wake up the EMT if it's waiting.
2822 */
2823 if (!pR0Log->fEmtWaiting)
2824 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2825 else
2826 {
2827 pR0Log->fEmtWaiting = false;
2828 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2829
2830 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2831 if (RT_FAILURE(rc))
2832 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2833 idxHead, idCpu, idxLogger, idxBuffer, rc));
2834 }
2835 }
2836 else
2837 {
2838 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2839 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2840 }
2841
2842 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2843 }
2844
2845 /*
2846 * The wait loop.
2847 */
2848 int rc;
2849 for (;;)
2850 {
2851 /*
2852 * Work pending?
2853 */
2854 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2855 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2856 if (idxTail != idxHead)
2857 {
2858 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2859 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2860
2861 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2862 return VINF_SUCCESS;
2863 }
2864
2865 /*
2866 * Nothing to do, so check for termination and go to sleep.
2867 */
2868 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2869 { /* likely */ }
2870 else
2871 {
2872 rc = VERR_OBJECT_DESTROYED;
2873 break;
2874 }
2875
2876 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2877 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2878
2879 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2880
2881 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2882 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2883
2884 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2885 { /* likely */ }
2886 else if (rc == VERR_INTERRUPTED)
2887 {
2888 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2889 return rc;
2890 }
2891 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2892 break;
2893 else
2894 {
2895 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2896 break;
2897 }
2898 }
2899
2900 /*
2901 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2902 */
2903 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2904 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2905
2906 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2907 return rc;
2908}
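
/*
 * Rough shape of the ring-3 flusher thread loop this worker pairs with (the
 * ring-3 side lives elsewhere; the SUPR3CallVMMR0Ex signature and call path
 * shown here are assumptions):
 *
 * @code
 *    for (;;)
 *    {
 *        int rc = SUPR3CallVMMR0Ex(pVMR0, NIL_VMCPUID, VMMR0_DO_VMMR0_LOG_FLUSHER, 0, NULL);
 *        if (RT_FAILURE(rc))
 *            break;      // e.g. VERR_OBJECT_DESTROYED on VM shutdown
 *        // VMM::LogFlusherItem now identifies idCpu/idxLogger/idxBuffer; write that
 *        // buffer out, then loop to ACK it and pick up the next job.
 *    }
 * @endcode
 */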
2909
2910
2911/**
2912 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2913 * buffers for logger @a idxLogger.
2914 *
2915 * @returns VBox status code.
2916 * @param pGVM The global (ring-0) VM structure.
2917 * @param idCpu The ID of the calling EMT.
2918 * @param idxLogger Which logger to wait on.
2919 * @thread EMT(idCpu)
2920 */
2921static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2922{
2923 /*
2924 * Check sanity. First we require EMT to be calling us.
2925 */
2926 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2927 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2928 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2929 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2930 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2931
2932 /*
2933 * Do the waiting.
2934 */
2935 int rc = VINF_SUCCESS;
2936 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2937 uint32_t cFlushing = pR0Log->cFlushing;
2938 while (cFlushing > 0)
2939 {
2940 pR0Log->fEmtWaiting = true;
2941 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2942
2943 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
2944
2945 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2946 pR0Log->fEmtWaiting = false;
2947 if (RT_SUCCESS(rc))
2948 {
2949 /* Read the new count and make sure it decreased before looping. That
2950 way we can guarantee that the total wait is bounded by 5 min times the number of buffers. */
2951 uint32_t const cPrevFlushing = cFlushing;
2952 cFlushing = pR0Log->cFlushing;
2953 if (cFlushing < cPrevFlushing)
2954 continue;
2955 rc = VERR_INTERNAL_ERROR_3;
2956 }
2957 break;
2958 }
2959 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2960 return rc;
2961}
2962
2963
2964/**
2965 * Inner worker for vmmR0LoggerFlushCommon.
2966 */
2967static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
2968{
2969 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2970 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2971
2972 /*
2973 * Figure out what we need to do and whether we can.
2974 */
2975 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
2976#if VMMLOGGER_BUFFER_COUNT >= 2
2977 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
2978 {
2979 if (RTSemEventIsSignalSafe())
2980 enmAction = kJustSignal;
2981 else if (VMMRZCallRing3IsEnabled(pGVCpu))
2982 enmAction = kPrepAndSignal;
2983 else
2984 {
2985 /** @todo This is a bit simplistic. We could introduce a FF to signal the
2986 * thread or similar. */
2987 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
2988# if defined(RT_OS_LINUX)
2989 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
2990# endif
2991 pShared->cbDropped += cbToFlush;
2992 return true;
2993 }
2994 }
2995 else
2996#endif
2997 if (VMMRZCallRing3IsEnabled(pGVCpu))
2998 enmAction = kPrepSignalAndWait;
2999 else
3000 {
3001 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3002# if defined(RT_OS_LINUX)
3003 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
3004# endif
3005 pShared->cbDropped += cbToFlush;
3006 return true;
3007 }
3008
3009 /*
3010 * Prepare for blocking if necessary.
3011 */
3012 VMMR0EMTBLOCKCTX Ctx;
3013 if (enmAction != kJustSignal)
3014 {
3015 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
3016 if (RT_SUCCESS(rc))
3017 { /* likely */ }
3018 else
3019 {
3020 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3021 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3022 return false;
3023 }
3024 }
3025
3026 /*
3027 * Queue the flush job.
3028 */
3029 bool fFlushedBuffer;
3030 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3031 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3032 {
3033 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3034 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3035 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3036 if (idxNewTail != idxHead)
3037 {
3038 /* Queue it. */
3039 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3040 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3041 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3042 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3043 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3044
3045 /* Update the number of buffers currently being flushed. */
3046 uint32_t cFlushing = pR0Log->cFlushing;
3047 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3048 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3049
3050 /* We must wait if all buffers are currently being flushed. */
3051 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3052 pR0Log->fEmtWaiting = fEmtWaiting;
3053
3054 /* Stats. */
3055 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3056 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3057
3058 /* Signal the worker thread. */
3059 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3060 {
3061 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3062 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3063 }
3064 else
3065 {
3066 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3067 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3068 }
3069
3070 /*
3071 * Wait for a buffer to finish flushing.
3072 *
3073 * Note! Lazy bird is ignoring the status code here. The result is
3074 * that we might end up with an extra event signalling, and the
3075 * next time we need to wait we won't and may end up with some log
3076 * corruption. However, it's too much hassle right now for
3077 * a scenario which would most likely end the process rather
3078 * than causing log corruption.
3079 */
3080 if (fEmtWaiting)
3081 {
3082 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3083 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3084 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3085 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3086 }
3087
3088 /*
3089 * We always switch buffer if we have more than one.
3090 */
3091#if VMMLOGGER_BUFFER_COUNT == 1
3092 fFlushedBuffer = true;
3093#else
3094 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3095 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3096 fFlushedBuffer = false;
3097#endif
3098 }
3099 else
3100 {
3101 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3102 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3103 fFlushedBuffer = true;
3104 }
3105 }
3106 else
3107 {
3108 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3109 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3110 fFlushedBuffer = true;
3111 }
3112
3113 /*
3114 * Restore the HM context.
3115 */
3116 if (enmAction != kJustSignal)
3117 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3118
3119 return fFlushedBuffer;
3120}
3121
3122
3123/**
3124 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3125 */
3126static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3127{
3128 /*
3129 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3130 * (This code is a bit paranoid.)
3131 */
3132 if (RT_VALID_PTR(pLogger))
3133 {
3134 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3135 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3136 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3137 {
3138 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3139 if ( RT_VALID_PTR(pGVCpu)
3140 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0)
3141 {
3142 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3143 PGVM const pGVM = pGVCpu->pGVM;
3144 if ( hNativeSelf == pGVCpu->hEMT
3145 && RT_VALID_PTR(pGVM))
3146 {
3147 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3148 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3149 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3150 {
3151 /*
3152 * Make sure we don't recurse forever here should something in the
3153 * following code trigger logging or an assertion. Do the rest in
3154 * an inner worker to avoid hitting the right margin too hard.
3155 */
3156 if (!pR0Log->fFlushing)
3157 {
3158 pR0Log->fFlushing = true;
3159 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3160 pR0Log->fFlushing = false;
3161 return fFlushed;
3162 }
3163
3164 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3165 }
3166 else
3167 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3168 }
3169 else
3170 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3171 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3172 }
3173 else
3174 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3175 }
3176 else
3177 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3178 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3179 }
3180 else
3181 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3182 return true;
3183}
3184
3185
3186/**
3187 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3188 */
3189static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3190{
3191 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3192}
3193
3194
3195/**
3196 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3197 */
3198static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3199{
3200#ifdef LOG_ENABLED
3201 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3202#else
3203 RT_NOREF(pLogger, pBufDesc);
3204 return true;
3205#endif
3206}
3207
3208
3209/*
3210 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3211 */
3212DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3213{
3214#ifdef LOG_ENABLED
3215 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3216 if (pGVCpu)
3217 {
3218 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3219 if (RT_VALID_PTR(pLogger))
3220 {
3221 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3222 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3223 {
3224 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3225 {
3226 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3227 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3228 return NULL;
3229 }
3230
3231 /*
3232 * When we're flushing we _must_ return NULL here to suppress any
3233 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3234 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3235 * which will reset the buffer content before we even get to queue
3236 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3237 * is enabled.)
3238 */
3239 return NULL;
3240 }
3241 }
3242 }
3243#endif
3244 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3245}
3246
3247
3248/*
3249 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3250 */
3251DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3252{
3253 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3254 if (pGVCpu)
3255 {
3256 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3257 if (RT_VALID_PTR(pLogger))
3258 {
3259 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3260 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3261 {
3262 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3263 {
3264 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3265 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3266 return NULL;
3267 }
3268 }
3269 }
3270 }
3271 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3272}
3273
3274
3275/**
3276 * Helper for vmmR0InitLoggerSet
3277 */
3278static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3279 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3280{
3281 /*
3282 * Create and configure the logger.
3283 */
3284 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3285 {
3286 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3287 pR0Log->aBufDescs[i].uReserved = 0;
3288 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3289 pR0Log->aBufDescs[i].offBuf = 0;
3290 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3291 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3292
3293 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3294 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3295 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3296 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3297 pShared->aBufs[i].AuxDesc.offBuf = 0;
3298 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3299 }
3300 pShared->cbBuf = cbBuf;
3301
3302 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3303 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3304 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3305 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3306 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3307 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3308 if (RT_SUCCESS(rc))
3309 {
3310 PRTLOGGER pLogger = pR0Log->pLogger;
3311 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3312 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3313 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3314
3315 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3316 if (RT_SUCCESS(rc))
3317 {
3318 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3319
3320 /*
3321 * Create the event sem the EMT waits on while flushing is happening.
3322 */
3323 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3324 if (RT_SUCCESS(rc))
3325 return VINF_SUCCESS;
3326 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3327 }
3328 RTLogDestroy(pLogger);
3329 }
3330 pR0Log->pLogger = NULL;
3331 return rc;
3332}
3333
3334
3335/**
3336 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3337 */
3338static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3339{
3340 RTLogDestroy(pR0Log->pLogger);
3341 pR0Log->pLogger = NULL;
3342
3343 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3344 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3345
3346 RTSemEventDestroy(pR0Log->hEventFlushWait);
3347 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3348}
3349
3350
3351/**
3352 * Initializes one type of loggers for each EMT.
3353 */
3354static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3355{
3356 /* Allocate buffers first. */
3357 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3358 if (RT_SUCCESS(rc))
3359 {
3360 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3361 if (RT_SUCCESS(rc))
3362 {
3363 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3364 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3365
3366 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3367 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3368
3369 /* Initialize the per-CPU loggers. */
3370 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3371 {
3372 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3373 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3374 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3375 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3376 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3377 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3378 if (RT_FAILURE(rc))
3379 {
3380 vmmR0TermLoggerOne(pR0Log, pShared);
3381 while (i-- > 0)
3382 {
3383 pGVCpu = &pGVM->aCpus[i];
3384 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3385 }
3386 break;
3387 }
3388 }
3389 if (RT_SUCCESS(rc))
3390 return VINF_SUCCESS;
3391
3392 /* Bail out. */
3393 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3394 *phMapObj = NIL_RTR0MEMOBJ;
3395 }
3396 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3397 *phMemObj = NIL_RTR0MEMOBJ;
3398 }
3399 return rc;
3400}
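
/*
 * Buffer layout produced by the function above, for example with cCpus=2,
 * cbBuf=_4K and VMMLOGGER_BUFFER_COUNT=2 (example values only):
 *
 *      pchBuf + 0x0000: EMT0, buffer 0
 *      pchBuf + 0x1000: EMT0, buffer 1
 *      pchBuf + 0x2000: EMT1, buffer 0
 *      pchBuf + 0x3000: EMT1, buffer 1
 *
 * I.e. the per-CPU base is pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT, and
 * vmmR0InitLoggerOne adds another j * cbBuf for buffer descriptor j.
 */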
3401
3402
3403/**
3404 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3405 *
3406 * @returns VBox status code.
3407 * @param pGVM The global (ring-0) VM structure.
3408 */
3409static int vmmR0InitLoggers(PGVM pGVM)
3410{
3411 /*
3412 * Invalidate the ring buffer (not really necessary).
3413 */
3414 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3415 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3416
3417 /*
3418 * Create the spinlock and flusher event semaphore.
3419 */
3420 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3421 if (RT_SUCCESS(rc))
3422 {
3423 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3424 if (RT_SUCCESS(rc))
3425 {
3426 /*
3427 * Create the ring-0 release loggers.
3428 */
3429 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3430 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3431#ifdef LOG_ENABLED
3432 if (RT_SUCCESS(rc))
3433 {
3434 /*
3435 * Create debug loggers.
3436 */
3437 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3438 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3439 }
3440#endif
3441 }
3442 }
3443 return rc;
3444}
3445
3446
3447/**
3448 * Counterpart to vmmR0InitLoggers that cleans up all the logging related stuff.
3449 *
3450 * @param pGVM The global (ring-0) VM structure.
3451 */
3452static void vmmR0CleanupLoggers(PGVM pGVM)
3453{
3454 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3455 {
3456 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3457 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3458 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3459 }
3460
3461 /*
3462 * Free logger buffer memory.
3463 */
3464 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3465 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3466 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3467 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3468
3469 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3470 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3471 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3472 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3473
3474 /*
3475 * Free log flusher related stuff.
3476 */
3477 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3478 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3479 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3480 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3481}
3482
3483
3484/*********************************************************************************************************************************
3485* Assertions *
3486*********************************************************************************************************************************/
3487
3488/**
3489 * Installs a notification callback for ring-0 assertions.
3490 *
3491 * @param pVCpu The cross context virtual CPU structure.
3492 * @param pfnCallback Pointer to the callback.
3493 * @param pvUser The user argument.
3494 *
3495 * @return VBox status code.
3496 */
3497VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
3498{
3499 AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
3500 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
3501
3502 if (!pVCpu->vmmr0.s.pfnAssertCallback)
3503 {
3504 pVCpu->vmmr0.s.pfnAssertCallback = pfnCallback;
3505 pVCpu->vmmr0.s.pvAssertCallbackUser = pvUser;
3506 return VINF_SUCCESS;
3507 }
3508 return VERR_ALREADY_EXISTS;
3509}
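/*
 * Rough usage sketch (the callback name and its exact signature, defined by
 * PFNVMMR0ASSERTIONNOTIFICATION, are assumed here for illustration only):
 *
 *     static DECLCALLBACK(void) myAssertNotification(PVMCPUCC pVCpu, void *pvUser)
 *     {
 *         SUPR0Printf("ring-0 assertion on vCPU %u\n", pVCpu->idCpu);
 *         RT_NOREF(pvUser);
 *     }
 *     ...
 *     if (RT_SUCCESS(VMMR0AssertionSetNotification(pVCpu, myAssertNotification, NIL_RTR0PTR)))
 *     {
 *         ... work that may trigger a ring-0 assertion ...
 *         VMMR0AssertionRemoveNotification(pVCpu);
 *     }
 *
 * Only one callback can be registered per vCPU at a time (VERR_ALREADY_EXISTS otherwise).
 */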
3510
3511
3512/**
3513 * Removes the ring-0 callback.
3514 *
3515 * @param pVCpu The cross context virtual CPU structure.
3516 */
3517VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
3518{
3519 pVCpu->vmmr0.s.pfnAssertCallback = NULL;
3520 pVCpu->vmmr0.s.pvAssertCallbackUser = NULL;
3521}
3522
3523
3524/**
3525 * Checks whether there is a ring-0 callback notification active.
3526 *
3527 * @param pVCpu The cross context virtual CPU structure.
3528 * @returns true if the notification is active, false otherwise.
3529 */
3530VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
3531{
3532 return pVCpu->vmmr0.s.pfnAssertCallback != NULL;
3533}
3534
3535
3536/*
3537 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3538 *
3539 * @returns true if the breakpoint should be hit, false if it should be ignored.
3540 */
3541DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3542{
3543#if 0
3544 return true;
3545#else
3546 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3547 if (pVM)
3548 {
3549 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3550
3551 if (pVCpu)
3552 {
3553# ifdef RT_ARCH_X86
3554 if (pVCpu->vmmr0.s.AssertJmpBuf.eip)
3555# else
3556 if (pVCpu->vmmr0.s.AssertJmpBuf.rip)
3557# endif
3558 {
3559 if (pVCpu->vmmr0.s.pfnAssertCallback)
3560 pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser);
3561 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION);
3562 return RT_FAILURE_NP(rc);
3563 }
3564 }
3565 }
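    /* No EMT with an armed longjmp buffer was found: hit the breakpoint on Linux, ignore it elsewhere. */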
3566# ifdef RT_OS_LINUX
3567 return true;
3568# else
3569 return false;
3570# endif
3571#endif
3572}
3573
3574
3575/*
3576 * Override this so we can push it up to ring-3.
3577 */
3578DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3579{
3580 /*
3581 * To host kernel log/whatever.
3582 */
3583 SUPR0Printf("!!R0-Assertion Failed!!\n"
3584 "Expression: %s\n"
3585 "Location : %s(%d) %s\n",
3586 pszExpr, pszFile, uLine, pszFunction);
3587
3588 /*
3589 * To the log.
3590 */
3591 LogAlways(("\n!!R0-Assertion Failed!!\n"
3592 "Expression: %s\n"
3593 "Location : %s(%d) %s\n",
3594 pszExpr, pszFile, uLine, pszFunction));
3595
3596 /*
3597 * To the global VMM buffer.
3598 */
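    /* The expression is clipped to roughly 3/4 of the buffer (%.*s below) so the location line still fits. */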
3599 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3600 if (pVM)
3601 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3602 "\n!!R0-Assertion Failed!!\n"
3603 "Expression: %.*s\n"
3604 "Location : %s(%d) %s\n",
3605 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3606 pszFile, uLine, pszFunction);
3607
3608 /*
3609 * Continue the normal way.
3610 */
3611 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3612}
3613
3614
3615/**
3616 * Callback for RTLogFormatV which writes to the ring-3 log port.
3617 * See PFNLOGOUTPUT() for details.
3618 */
3619static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3620{
3621 for (size_t i = 0; i < cbChars; i++)
3622 {
3623 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3624 }
3625
3626 NOREF(pv);
3627 return cbChars;
3628}
3629
3630
3631/*
3632 * Override this so we can push it up to ring-3.
3633 */
3634DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3635{
3636 va_list vaCopy;
3637
3638 /*
3639 * Push the message to the loggers.
3640 */
3641 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3642 if (pLog)
3643 {
3644 va_copy(vaCopy, va);
3645 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3646 va_end(vaCopy);
3647 }
3648 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3649 if (pLog)
3650 {
3651 va_copy(vaCopy, va);
3652 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3653 va_end(vaCopy);
3654 }
3655
3656 /*
3657 * Push it to the global VMM buffer.
3658 */
3659 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3660 if (pVM)
3661 {
3662 va_copy(vaCopy, va);
3663 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3664 va_end(vaCopy);
3665 }
3666
3667 /*
3668 * Continue the normal way.
3669 */
3670 RTAssertMsg2V(pszFormat, va);
3671}
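/*
 * Both szRing0AssertMsg1 and szRing0AssertMsg2 live in the VM structure that is shared
 * with ring-3, so the assertion text can be picked up and reported there once the
 * VERR_VMM_RING0_ASSERTION longjmp (see RTAssertShouldPanic above) lands back in ring-3.
 */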
3672