VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@84909

Last change on this file since 84909 was 84458, checked in by vboxsync, 5 years ago

VMM/DBGF: First commit of new tracing facility, bugref:9210

The new DBGF tracing facility allows efficient capturing of events to a compact binary
trace log for later analysis. It is primarily intended for recording device/guest
interactions for now but can be extended easily for other types of events later on.
It supports capturing events happening in both R0 and R3 by using a shared ring buffer
to post events to. The events are processed by a dedicated I/O thread which writes
new events into the binary trace log file.

This is only the core VMM/DBGF part providing the API, the integration with PDM
comes in a separate commit.

Disabled by default for now because it is still work in progress,
enable with VBOX_WITH_DBGF_TRACING.
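
To make the shared-ring-buffer handoff above concrete, here is a small, self-contained sketch. It is not the actual DBGF tracing API (this commit only introduces the core facility, with PDM integration following separately); it is a generic single-producer/single-consumer ring in plain C11 with hypothetical names, showing the shape of the mechanism: event sources post fixed-size records without blocking, and a dedicated I/O thread drains completed records to the binary trace log. The real facility additionally shares the buffer between R0 and R3; this sketch keeps everything in one address space for simplicity.

/* Hypothetical sketch only -- NOT the actual DBGF tracing API. Assumes one
 * producer and one consumer (the I/O thread) per ring. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EVT_RING_SIZE 256                         /* must be a power of two */

typedef struct EVTRECORD
{
    uint64_t u64Ts;                               /* event timestamp */
    uint32_t idEvt;                               /* event identifier */
    uint32_t cbPayload;                           /* valid payload bytes */
    uint8_t  abPayload[48];                       /* inline payload */
} EVTRECORD;

typedef struct EVTRING
{
    EVTRECORD   aRecords[EVT_RING_SIZE];
    atomic_uint offWrite;                         /* producer cursor */
    atomic_uint offRead;                          /* I/O thread cursor */
} EVTRING;

/* Producer side: post an event, dropping it if the ring is full so the
 * emitting code never blocks. */
static int evtRingPost(EVTRING *pRing, uint64_t u64Ts, uint32_t idEvt,
                       const void *pvPayload, uint32_t cbPayload)
{
    unsigned const offWrite = atomic_load_explicit(&pRing->offWrite, memory_order_relaxed);
    unsigned const offRead  = atomic_load_explicit(&pRing->offRead,  memory_order_acquire);
    if (offWrite - offRead >= EVT_RING_SIZE)
        return -1;                                /* full: caller counts an overflow */

    EVTRECORD *pRec = &pRing->aRecords[offWrite & (EVT_RING_SIZE - 1)];
    pRec->u64Ts     = u64Ts;
    pRec->idEvt     = idEvt;
    pRec->cbPayload = cbPayload <= sizeof(pRec->abPayload) ? cbPayload : (uint32_t)sizeof(pRec->abPayload);
    if (pRec->cbPayload)
        memcpy(pRec->abPayload, pvPayload, pRec->cbPayload);

    /* Publish the record contents before advancing the write cursor. */
    atomic_store_explicit(&pRing->offWrite, offWrite + 1, memory_order_release);
    return 0;
}

/* Consumer side, run on the dedicated I/O thread: append all completed
 * records to the binary trace log. */
static void evtRingDrain(EVTRING *pRing, FILE *pTraceFile)
{
    unsigned offRead = atomic_load_explicit(&pRing->offRead, memory_order_relaxed);
    while (offRead != atomic_load_explicit(&pRing->offWrite, memory_order_acquire))
    {
        fwrite(&pRing->aRecords[offRead & (EVT_RING_SIZE - 1)], sizeof(EVTRECORD), 1, pTraceFile);
        offRead++;
    }
    atomic_store_explicit(&pRing->offRead, offRead, memory_order_release);
}

Dropping on overflow rather than blocking keeps the event-posting hot paths wait-free, which is the natural choice for a facility like this (an overflow counter would typically be recorded in the log); the actual implementation may of course differ in these details.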

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 97.3 KB
1/* $Id: VMMR0.cpp 84458 2020-05-22 12:51:49Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
132
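/* For orientation, a hedged usage sketch of the SMAP check macros defined
 * above. The function name is made up for illustration only; ModuleInit and
 * vmmR0InitVM further down are real in-file users of this pattern. */
#if 0
static int vmmR0HypotheticalWorker(PGVM pGVM)
{
    VMM_CHECK_SMAP_SETUP();                                   /* caches SUPR0GetKernelFeatures() once */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);  /* log + bail if EFLAGS.AC is clear */

    /* ... work that must not run with EFLAGS.AC cleared on SMAP hosts ... */

    VMM_CHECK_SMAP_CHECK2(pGVM, return VERR_VMM_SMAP_BUT_AC_CLEAR); /* also records it in the VM's assertion buffer */
    return VINF_SUCCESS;
}
#endif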
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150PFNRT g_VMMR0Deps[] =
151{
152 (PFNRT)RTCrc32,
153 (PFNRT)RTOnce,
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 (PFNRT)__udivdi3,
156 (PFNRT)__umoddi3,
157#endif
158 NULL
159};
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initializes the R0 driver for a particular VM instance.
362 *
363 * @returns VBox status code.
364 *
365 * @param pGVM The global (ring-0) VM structure.
366 * @param uSvnRev The SVN revision of the ring-3 part.
367 * @param uBuildType Build type indicator.
368 * @thread EMT(0)
369 */
370static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
371{
372 VMM_CHECK_SMAP_SETUP();
373 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
374
375 /*
376 * Match the SVN revisions and build type.
377 */
378 if (uSvnRev != VMMGetSvnRev())
379 {
380 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
381 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
382 return VERR_VMM_R0_VERSION_MISMATCH;
383 }
384 if (uBuildType != vmmGetBuildType())
385 {
386 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
387 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390
391 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
392 if (RT_FAILURE(rc))
393 return rc;
394
395#ifdef LOG_ENABLED
396 /*
397 * Register the EMT R0 logger instance for VCPU 0.
398 */
399 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
400
401 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
402 if (pR0Logger)
403 {
404# if 0 /* testing of the logger. */
405 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
406 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
407 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
408 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
409
410 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
411 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
412 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
414
415 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
416 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
417 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
418 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
419
420 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
421 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
422 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
423 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
424 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
425 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
426
427 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
428 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
429
430 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
431 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
432 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
433# endif
434 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
435 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
436 pR0Logger->fRegistered = true;
437 }
438#endif /* LOG_ENABLED */
439    SUPR0Printf("VMMR0InitVM: eflags=%x fKernelFeatures=%#x (SUPKERNELFEATURES_SMAP=%d)\n",
440                ASMGetFlags(), fKernelFeatures, RT_BOOL(fKernelFeatures & SUPKERNELFEATURES_SMAP));
441
442 /*
443 * Check if the host supports high resolution timers or not.
444 */
445 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
446 && !RTTimerCanDoHighResolution())
447 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
448
449 /*
450 * Initialize the per VM data for GVMM and GMM.
451 */
452 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
453 rc = GVMMR0InitVM(pGVM);
454 if (RT_SUCCESS(rc))
455 {
456 /*
457 * Init HM, CPUM and PGM (Darwin only).
458 */
459 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
460 rc = HMR0InitVM(pGVM);
461 if (RT_SUCCESS(rc))
462 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
463 if (RT_SUCCESS(rc))
464 {
465 rc = CPUMR0InitVM(pGVM);
466 if (RT_SUCCESS(rc))
467 {
468 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
469 rc = PGMR0InitVM(pGVM);
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
473 rc = EMR0InitVM(pGVM);
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
477#ifdef VBOX_WITH_PCI_PASSTHROUGH
478 rc = PciRawR0InitVM(pGVM);
479#endif
480 if (RT_SUCCESS(rc))
481 {
482 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
483 rc = GIMR0InitVM(pGVM);
484 if (RT_SUCCESS(rc))
485 {
486 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
487 if (RT_SUCCESS(rc))
488 {
489 GVMMR0DoneInitVM(pGVM);
490
491 /*
492 * Collect a bit of info for the VM release log.
493 */
494 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
495 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
496
497 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
498 return rc;
499 }
500
501 /* bail out */
502 GIMR0TermVM(pGVM);
503 }
504#ifdef VBOX_WITH_PCI_PASSTHROUGH
505 PciRawR0TermVM(pGVM);
506#endif
507 }
508 }
509 }
510 }
511 HMR0TermVM(pGVM);
512 }
513 }
514
515 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
516 return rc;
517}
518
519
520/**
521 * Does EMT specific VM initialization.
522 *
523 * @returns VBox status code.
524 * @param pGVM The ring-0 VM structure.
525 * @param idCpu The EMT that's calling.
526 */
527static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
528{
529 /* Paranoia (caller checked these already). */
530 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
531 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
532
533#ifdef LOG_ENABLED
534 /*
535 * Registration of ring 0 loggers.
536 */
537 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
538 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
539 if ( pR0Logger
540 && !pR0Logger->fRegistered)
541 {
542 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
543 pR0Logger->fRegistered = true;
544 }
545#endif
546
547 return VINF_SUCCESS;
548}
549
550
551
552/**
553 * Terminates the R0 bits for a particular VM instance.
554 *
555 * This is normally called by ring-3 as part of the VM termination process, but
556 * may alternatively be called during the support driver session cleanup when
557 * the VM object is destroyed (see GVMM).
558 *
559 * @returns VBox status code.
560 *
561 * @param pGVM The global (ring-0) VM structure.
562 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
563 * thread.
564 * @thread EMT(0) or session clean up thread.
565 */
566VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
567{
568 /*
569 * Check EMT(0) claim if we're called from userland.
570 */
571 if (idCpu != NIL_VMCPUID)
572 {
573 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
574 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
575 if (RT_FAILURE(rc))
576 return rc;
577 }
578
579#ifdef VBOX_WITH_PCI_PASSTHROUGH
580 PciRawR0TermVM(pGVM);
581#endif
582
583 /*
584 * Tell GVMM what we're up to and check that we only do this once.
585 */
586 if (GVMMR0DoingTermVM(pGVM))
587 {
588 GIMR0TermVM(pGVM);
589
590 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
591 * here to make sure we don't leak any shared pages if we crash... */
592#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
593 PGMR0DynMapTermVM(pGVM);
594#endif
595 HMR0TermVM(pGVM);
596 }
597
598 /*
599 * Deregister the logger.
600 */
601 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
602 return VINF_SUCCESS;
603}
604
605
606/**
607 * An interrupt or unhalt force flag is set, deal with it.
608 *
609 * @returns VINF_SUCCESS (or VINF_EM_HALT).
610 * @param pVCpu The cross context virtual CPU structure.
611 * @param uMWait Result from EMMonitorWaitIsActive().
612 * @param enmInterruptibility Guest CPU interruptibility level.
613 */
614static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
615{
616 Assert(!TRPMHasTrap(pVCpu));
617 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
618 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
619
620 /*
621 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
622 */
623 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
624 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
625 {
626 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
627 {
628 uint8_t u8Interrupt = 0;
629 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
630 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
631 if (RT_SUCCESS(rc))
632 {
633 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
634
635 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
636 AssertRCSuccess(rc);
637 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
638 return rc;
639 }
640 }
641 }
642 /*
643 * SMI is not implemented yet, at least not here.
644 */
645 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
646 {
647 return VINF_EM_HALT;
648 }
649 /*
650 * NMI.
651 */
652 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
653 {
654 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
655 {
656 /** @todo later. */
657 return VINF_EM_HALT;
658 }
659 }
660 /*
661 * Nested-guest virtual interrupt.
662 */
663 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
664 {
665 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
666 {
667 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
668 * here before injecting the virtual interrupt. See emR3ForcedActions
669 * for details. */
670 return VINF_EM_HALT;
671 }
672 }
673
674 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
675 {
676 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
677 return VINF_SUCCESS;
678 }
679 if (uMWait > 1)
680 {
681 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
682 return VINF_SUCCESS;
683 }
684
685 return VINF_EM_HALT;
686}
687
688
689/**
690 * This does one round of vmR3HaltGlobal1Halt().
691 *
692 * The rationale here is that we'll reduce latency in interrupt situations if we
693 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
694 * MWAIT), but do one round of blocking here instead and hope the interrupt is
695 * raised in the meanwhile.
696 *
697 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
698 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
699 * ring-0 call (unless we're too close to a timer event). When the interrupt
700 * wakes us up, we'll return from ring-0 and EM will by instinct do a
701 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
702 * back to VMMR0EntryFast().
703 *
704 * @returns VINF_SUCCESS or VINF_EM_HALT.
705 * @param pGVM The ring-0 VM structure.
706 * @param pGVCpu The ring-0 virtual CPU structure.
707 *
708 * @todo r=bird: All the blocking/waiting and EMT management should move out of
709 * the VM module, probably to VMM. Then this would be more weird wrt
710 * parameters and statistics.
711 */
712static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
713{
714 /*
715 * Do spin stat historization.
716 */
717 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
718 { /* likely */ }
719 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
720 {
721 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
722 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
723 }
724 else
725 {
726 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
727 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
728 }
729
730 /*
731 * Flags that make us go to ring-3.
732 */
733 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
734 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
735 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
736 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
737 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
738 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
739 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
740 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
741
742 /*
743 * Check preconditions.
744 */
745 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
746 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
747 if ( pGVCpu->vmm.s.fMayHaltInRing0
748 && !TRPMHasTrap(pGVCpu)
749 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
750 || uMWait > 1))
751 {
752 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
753 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
754 {
755 /*
756 * Interrupts pending already?
757 */
758 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
759 APICUpdatePendingInterrupts(pGVCpu);
760
761 /*
762 * Flags that wake up from the halted state.
763 */
764 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
765 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
766
767 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
768 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
769 ASMNopPause();
770
771 /*
772 * Check out how long till the next timer event.
773 */
774 uint64_t u64Delta;
775 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
776
777 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
778 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
779 {
780 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
781 APICUpdatePendingInterrupts(pGVCpu);
782
783 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
784 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
785
786 /*
787 * Wait if there is enough time to the next timer event.
788 */
789 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
790 {
791 /* If there are few other CPU cores around, we will procrastinate a
792 little before going to sleep, hoping for some device raising an
793 interrupt or similar. Though, the best thing here would be to
794 dynamically adjust the spin count according to its usefulness or
795 something... */
796 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
797 && RTMpGetOnlineCount() >= 4)
798 {
799 /** @todo Figure out how we can skip this if it hasn't helped recently...
800 * @bugref{9172#c12} */
801 uint32_t cSpinLoops = 42;
802 while (cSpinLoops-- > 0)
803 {
804 ASMNopPause();
805 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
806 APICUpdatePendingInterrupts(pGVCpu);
807 ASMNopPause();
808 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
809 {
810 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
811 return VINF_EM_HALT;
812 }
813 ASMNopPause();
814 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
815 {
816 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
817 return VINF_EM_HALT;
818 }
819 ASMNopPause();
820 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
821 {
822 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
823 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
824 }
825 ASMNopPause();
826 }
827 }
828
829 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
830 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
831 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
832 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
833 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
834 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
835 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
836 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
837 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
838 if ( rc == VINF_SUCCESS
839 || rc == VERR_INTERRUPTED)
840
841 {
842 /* Keep some stats like ring-3 does. */
843 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
844 if (cNsOverslept > 50000)
845 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
846 else if (cNsOverslept < -50000)
847 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
848 else
849 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
850
851 /*
852 * Recheck whether we can resume execution or have to go to ring-3.
853 */
854 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
855 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
856 {
857 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
858 APICUpdatePendingInterrupts(pGVCpu);
859 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
860 {
861 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
862 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
863 }
864 }
865 }
866 }
867 }
868 }
869 }
870 return VINF_EM_HALT;
871}
872
873
874/**
875 * VMM ring-0 thread-context callback.
876 *
877 * This does common HM state updating and calls the HM-specific thread-context
878 * callback.
879 *
880 * @param enmEvent The thread-context event.
881 * @param pvUser Opaque pointer to the VMCPU.
882 *
883 * @thread EMT(pvUser)
884 */
885static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
886{
887 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
888
889 switch (enmEvent)
890 {
891 case RTTHREADCTXEVENT_IN:
892 {
893 /*
894 * Linux may call us with preemption enabled (really!) but technically we
895 * cannot get preempted here, otherwise we end up in an infinite recursion
896 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
897 * ad infinitum). Let's just disable preemption for now...
898 */
899 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
900 * preemption after doing the callout (one or two functions up the
901 * call chain). */
902 /** @todo r=ramshankar: See @bugref{5313#c30}. */
903 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
904 RTThreadPreemptDisable(&ParanoidPreemptState);
905
906 /* We need to update the VCPU <-> host CPU mapping. */
907 RTCPUID idHostCpu;
908 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
909 pVCpu->iHostCpuSet = iHostCpuSet;
910 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
911
912 /* In the very unlikely event that the GIP delta for the CPU we're
913 rescheduled on needs calculating, try to force a return to ring-3.
914 We unfortunately cannot do the measurements right here. */
915 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
916 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
917
918 /* Invoke the HM-specific thread-context callback. */
919 HMR0ThreadCtxCallback(enmEvent, pvUser);
920
921 /* Restore preemption. */
922 RTThreadPreemptRestore(&ParanoidPreemptState);
923 break;
924 }
925
926 case RTTHREADCTXEVENT_OUT:
927 {
928 /* Invoke the HM-specific thread-context callback. */
929 HMR0ThreadCtxCallback(enmEvent, pvUser);
930
931 /*
932 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
933 * have the same host CPU associated with it.
934 */
935 pVCpu->iHostCpuSet = UINT32_MAX;
936 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
937 break;
938 }
939
940 default:
941 /* Invoke the HM-specific thread-context callback. */
942 HMR0ThreadCtxCallback(enmEvent, pvUser);
943 break;
944 }
945}
946
947
948/**
949 * Creates thread switching hook for the current EMT thread.
950 *
951 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
952 * platform does not implement switcher hooks, no hooks will be created and the
953 * member is set to NIL_RTTHREADCTXHOOK.
954 *
955 * @returns VBox status code.
956 * @param pVCpu The cross context virtual CPU structure.
957 * @thread EMT(pVCpu)
958 */
959VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
960{
961 VMCPU_ASSERT_EMT(pVCpu);
962 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
963
964#if 1 /* To disable this stuff change to zero. */
965 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
966 if (RT_SUCCESS(rc))
967 return rc;
968#else
969 RT_NOREF(vmmR0ThreadCtxCallback);
970 int rc = VERR_NOT_SUPPORTED;
971#endif
972
973 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
974 if (rc == VERR_NOT_SUPPORTED)
975 return VINF_SUCCESS;
976
977 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
978 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
979}
980
981
982/**
983 * Destroys the thread switching hook for the specified VCPU.
984 *
985 * @param pVCpu The cross context virtual CPU structure.
986 * @remarks Can be called from any thread.
987 */
988VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
989{
990 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
991 AssertRC(rc);
992 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
993}
994
995
996/**
997 * Disables the thread switching hook for this VCPU (if we got one).
998 *
999 * @param pVCpu The cross context virtual CPU structure.
1000 * @thread EMT(pVCpu)
1001 *
1002 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1003 * this call. This means you have to be careful with what you do!
1004 */
1005VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1006{
1007 /*
1008 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1009 * @bugref{7726#c19} explains the need for this trick:
1010 *
1011 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1012 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1013 * longjmp & normal return to ring-3, which opens a window where we may be
1014 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1015 * the CPU starts executing a different EMT. Both functions first disable
1016 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1017 * an opening for getting preempted.
1018 */
1019 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1020 * all the time. */
1021 /** @todo move this into the context hook disabling if(). */
1022 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1023
1024 /*
1025 * Disable the context hook, if we got one.
1026 */
1027 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1028 {
1029 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1030 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1031 AssertRC(rc);
1032 }
1033}
1034
1035
1036/**
1037 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1038 *
1039 * @returns true if registered, false otherwise.
1040 * @param pVCpu The cross context virtual CPU structure.
1041 */
1042DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1043{
1044 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1045}
1046
1047
1048/**
1049 * Whether thread-context hooks are registered for this VCPU.
1050 *
1051 * @returns true if registered, false otherwise.
1052 * @param pVCpu The cross context virtual CPU structure.
1053 */
1054VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1055{
1056 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1057}
1058
1059
1060#ifdef VBOX_WITH_STATISTICS
1061/**
1062 * Record return code statistics.
1063 * @param pVM The cross context VM structure.
1064 * @param pVCpu The cross context virtual CPU structure.
1065 * @param rc The status code.
1066 */
1067static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1068{
1069 /*
1070 * Collect statistics.
1071 */
1072 switch (rc)
1073 {
1074 case VINF_SUCCESS:
1075 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1076 break;
1077 case VINF_EM_RAW_INTERRUPT:
1078 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1079 break;
1080 case VINF_EM_RAW_INTERRUPT_HYPER:
1081 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1082 break;
1083 case VINF_EM_RAW_GUEST_TRAP:
1084 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1085 break;
1086 case VINF_EM_RAW_RING_SWITCH:
1087 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1088 break;
1089 case VINF_EM_RAW_RING_SWITCH_INT:
1090 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1091 break;
1092 case VINF_EM_RAW_STALE_SELECTOR:
1093 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1094 break;
1095 case VINF_EM_RAW_IRET_TRAP:
1096 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1097 break;
1098 case VINF_IOM_R3_IOPORT_READ:
1099 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1100 break;
1101 case VINF_IOM_R3_IOPORT_WRITE:
1102 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1103 break;
1104 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1105 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1106 break;
1107 case VINF_IOM_R3_MMIO_READ:
1108 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1109 break;
1110 case VINF_IOM_R3_MMIO_WRITE:
1111 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1112 break;
1113 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1114 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1115 break;
1116 case VINF_IOM_R3_MMIO_READ_WRITE:
1117 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1118 break;
1119 case VINF_PATM_HC_MMIO_PATCH_READ:
1120 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1121 break;
1122 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1123 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1124 break;
1125 case VINF_CPUM_R3_MSR_READ:
1126 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1127 break;
1128 case VINF_CPUM_R3_MSR_WRITE:
1129 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1130 break;
1131 case VINF_EM_RAW_EMULATE_INSTR:
1132 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1133 break;
1134 case VINF_PATCH_EMULATE_INSTR:
1135 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1136 break;
1137 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1138 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1139 break;
1140 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1141 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1142 break;
1143 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1144 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1145 break;
1146 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1147 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1148 break;
1149 case VINF_CSAM_PENDING_ACTION:
1150 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1151 break;
1152 case VINF_PGM_SYNC_CR3:
1153 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1154 break;
1155 case VINF_PATM_PATCH_INT3:
1156 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1157 break;
1158 case VINF_PATM_PATCH_TRAP_PF:
1159 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1160 break;
1161 case VINF_PATM_PATCH_TRAP_GP:
1162 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1163 break;
1164 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1165 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1166 break;
1167 case VINF_EM_RESCHEDULE_REM:
1168 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1169 break;
1170 case VINF_EM_RAW_TO_R3:
1171 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1172 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1174 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1176 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1177 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1178 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1180 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1182 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1183 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1184 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1186 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1188 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1190 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1192 else
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1194 break;
1195
1196 case VINF_EM_RAW_TIMER_PENDING:
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1198 break;
1199 case VINF_EM_RAW_INTERRUPT_PENDING:
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1201 break;
1202 case VINF_VMM_CALL_HOST:
1203 switch (pVCpu->vmm.s.enmCallRing3Operation)
1204 {
1205 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1207 break;
1208 case VMMCALLRING3_PDM_LOCK:
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1210 break;
1211 case VMMCALLRING3_PGM_POOL_GROW:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1213 break;
1214 case VMMCALLRING3_PGM_LOCK:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1216 break;
1217 case VMMCALLRING3_PGM_MAP_CHUNK:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1219 break;
1220 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1222 break;
1223 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1225 break;
1226 case VMMCALLRING3_VM_SET_ERROR:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1228 break;
1229 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1231 break;
1232 case VMMCALLRING3_VM_R0_ASSERTION:
1233 default:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1235 break;
1236 }
1237 break;
1238 case VINF_PATM_DUPLICATE_FUNCTION:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1240 break;
1241 case VINF_PGM_CHANGE_MODE:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1243 break;
1244 case VINF_PGM_POOL_FLUSH_PENDING:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1246 break;
1247 case VINF_EM_PENDING_REQUEST:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1249 break;
1250 case VINF_EM_HM_PATCH_TPR_INSTR:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1252 break;
1253 default:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1255 break;
1256 }
1257}
1258#endif /* VBOX_WITH_STATISTICS */
1259
1260
1261/**
1262 * The Ring 0 entry point, called by the fast-ioctl path.
1263 *
1264 * @param pGVM The global (ring-0) VM structure.
1265 * @param pVMIgnored The cross context VM structure. The return code is
1266 * stored in pVM->vmm.s.iLastGZRc.
1267 * @param idCpu The Virtual CPU ID of the calling EMT.
1268 * @param enmOperation Which operation to execute.
1269 * @remarks Assume called with interrupts _enabled_.
1270 */
1271VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1272{
1273 RT_NOREF(pVMIgnored);
1274
1275 /*
1276 * Validation.
1277 */
1278 if ( idCpu < pGVM->cCpus
1279 && pGVM->cCpus == pGVM->cCpusUnsafe)
1280 { /*likely*/ }
1281 else
1282 {
1283 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1284 return;
1285 }
1286
1287 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1288 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1289 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1290 && pGVCpu->hNativeThreadR0 == hNativeThread))
1291 { /* likely */ }
1292 else
1293 {
1294 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1295 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1296 return;
1297 }
1298
1299 /*
1300 * SMAP fun.
1301 */
1302 VMM_CHECK_SMAP_SETUP();
1303 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1304
1305 /*
1306 * Perform requested operation.
1307 */
1308 switch (enmOperation)
1309 {
1310 /*
1311 * Run guest code using the available hardware acceleration technology.
1312 */
1313 case VMMR0_DO_HM_RUN:
1314 {
1315 for (;;) /* hlt loop */
1316 {
1317 /*
1318 * Disable preemption.
1319 */
1320 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1321 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1322 RTThreadPreemptDisable(&PreemptState);
1323
1324 /*
1325 * Get the host CPU identifiers, make sure they are valid and that
1326 * we've got a TSC delta for the CPU.
1327 */
1328 RTCPUID idHostCpu;
1329 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1330 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1331 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1332 {
1333 pGVCpu->iHostCpuSet = iHostCpuSet;
1334 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1335
1336 /*
1337 * Update the periodic preemption timer if it's active.
1338 */
1339 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1340 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1341 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1342
1343#ifdef VMM_R0_TOUCH_FPU
1344 /*
1345 * Make sure we've got the FPU state loaded so we don't need to clear
1346 * CR0.TS and get out of sync with the host kernel when loading the guest
1347 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1348 */
1349 CPUMR0TouchHostFpu();
1350#endif
1351 int rc;
1352 bool fPreemptRestored = false;
1353 if (!HMR0SuspendPending())
1354 {
1355 /*
1356 * Enable the context switching hook.
1357 */
1358 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1359 {
1360 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1361 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1362 }
1363
1364 /*
1365 * Enter HM context.
1366 */
1367 rc = HMR0Enter(pGVCpu);
1368 if (RT_SUCCESS(rc))
1369 {
1370 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1371
1372 /*
1373 * When preemption hooks are in place, enable preemption now that
1374 * we're in HM context.
1375 */
1376 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1377 {
1378 fPreemptRestored = true;
1379 RTThreadPreemptRestore(&PreemptState);
1380 }
1381
1382 /*
1383 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1384 */
1385 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1386 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1387 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1388
1389 /*
1390 * Assert sanity on the way out. Using manual assertion code here as normal
1391 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1392 */
1393 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1394 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1395 {
1396 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1397 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1398 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1399 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1400 }
1401 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1402 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1403 {
1404 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1405 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1406 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1407 rc = VERR_INVALID_STATE;
1408 }
1409
1410 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1411 }
1412 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1413
1414 /*
1415 * Invalidate the host CPU identifiers before we disable the context
1416 * hook / restore preemption.
1417 */
1418 pGVCpu->iHostCpuSet = UINT32_MAX;
1419 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1420
1421 /*
1422 * Disable context hooks. Due to unresolved cleanup issues, we
1423 * cannot leave the hooks enabled when we return to ring-3.
1424 *
1425 * Note! At the moment HM may also have disabled the hook
1426 * when we get here, but the IPRT API handles that.
1427 */
1428 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1429 {
1430 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1431 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1432 }
1433 }
1434 /*
1435 * The system is about to go into suspend mode; go back to ring 3.
1436 */
1437 else
1438 {
1439 rc = VINF_EM_RAW_INTERRUPT;
1440 pGVCpu->iHostCpuSet = UINT32_MAX;
1441 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1442 }
1443
1444 /** @todo When HM stops messing with the context hook state, we'll disable
1445 * preemption again before the RTThreadCtxHookDisable call. */
1446 if (!fPreemptRestored)
1447 RTThreadPreemptRestore(&PreemptState);
1448
1449 pGVCpu->vmm.s.iLastGZRc = rc;
1450
1451 /* Fire dtrace probe and collect statistics. */
1452 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1453#ifdef VBOX_WITH_STATISTICS
1454 vmmR0RecordRC(pGVM, pGVCpu, rc);
1455#endif
1456#if 1
1457 /*
1458 * If this is a halt.
1459 */
1460 if (rc != VINF_EM_HALT)
1461 { /* we're not in a hurry for a HLT, so prefer this path */ }
1462 else
1463 {
1464 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1465 if (rc == VINF_SUCCESS)
1466 {
1467 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1468 continue;
1469 }
1470 pGVCpu->vmm.s.cR0HaltsToRing3++;
1471 }
1472#endif
1473 }
1474 /*
1475 * Invalid CPU set index or TSC delta in need of measuring.
1476 */
1477 else
1478 {
1479 pGVCpu->iHostCpuSet = UINT32_MAX;
1480 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1481 RTThreadPreemptRestore(&PreemptState);
1482 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1483 {
1484 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1485 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1486 0 /*default cTries*/);
1487 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1488 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1489 else
1490 pGVCpu->vmm.s.iLastGZRc = rc;
1491 }
1492 else
1493 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1494 }
1495 break;
1496
1497 } /* halt loop. */
1498 break;
1499 }
1500
1501#ifdef VBOX_WITH_NEM_R0
1502# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1503 case VMMR0_DO_NEM_RUN:
1504 {
1505 /*
1506 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1507 */
1508 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1509# ifdef VBOXSTRICTRC_STRICT_ENABLED
1510 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1511# else
1512 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1513# endif
1514 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1515 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1516
1517 pGVCpu->vmm.s.iLastGZRc = rc;
1518
1519 /*
1520 * Fire dtrace probe and collect statistics.
1521 */
1522 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1523# ifdef VBOX_WITH_STATISTICS
1524 vmmR0RecordRC(pGVM, pGVCpu, rc);
1525# endif
1526 break;
1527 }
1528# endif
1529#endif
1530
1531 /*
1532 * For profiling.
1533 */
1534 case VMMR0_DO_NOP:
1535 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1536 break;
1537
1538 /*
1539 * Shouldn't happen.
1540 */
1541 default:
1542 AssertMsgFailed(("%#x\n", enmOperation));
1543 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1544 break;
1545 }
1546 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1547}
1548
1549
1550/**
1551 * Validates a session or VM session argument.
1552 *
1553 * @returns true / false accordingly.
1554 * @param pGVM The global (ring-0) VM structure.
1555 * @param pClaimedSession The session claim to validate.
1556 * @param pSession The session argument.
1557 */
1558DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1559{
1560 /* This must be set! */
1561 if (!pSession)
1562 return false;
1563
1564 /* Only one out of the two. */
1565 if (pGVM && pClaimedSession)
1566 return false;
1567 if (pGVM)
1568 pClaimedSession = pGVM->pSession;
1569 return pClaimedSession == pSession;
1570}
1571
1572
1573/**
1574 * VMMR0EntryEx worker function, either called directly or whenever possible
1575 * called through a longjmp so we can exit safely on failure.
1576 *
1577 * @returns VBox status code.
1578 * @param pGVM The global (ring-0) VM structure.
1579 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1580 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1581 * @param enmOperation Which operation to execute.
1582 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1583 * The support driver validates this if it's present.
1584 * @param u64Arg Some simple constant argument.
1585 * @param pSession The session of the caller.
1586 *
1587 * @remarks Assume called with interrupts _enabled_.
1588 */
1589static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1590 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1591{
1592 /*
1593 * Validate pGVM and idCpu for consistency and validity.
1594 */
1595 if (pGVM != NULL)
1596 {
1597 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1598 { /* likely */ }
1599 else
1600 {
1601 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1602 return VERR_INVALID_POINTER;
1603 }
1604
1605 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1606 { /* likely */ }
1607 else
1608 {
1609 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1610 return VERR_INVALID_PARAMETER;
1611 }
1612
1613 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1614 && pGVM->enmVMState <= VMSTATE_TERMINATED
1615 && pGVM->pSession == pSession
1616 && pGVM->pSelf == pGVM))
1617 { /* likely */ }
1618 else
1619 {
1620 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1621 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1622 return VERR_INVALID_POINTER;
1623 }
1624 }
1625 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1626 { /* likely */ }
1627 else
1628 {
1629 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1630 return VERR_INVALID_PARAMETER;
1631 }
1632
1633 /*
1634 * SMAP fun.
1635 */
1636 VMM_CHECK_SMAP_SETUP();
1637 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1638
1639 /*
1640 * Process the request.
1641 */
1642 int rc;
1643 switch (enmOperation)
1644 {
1645 /*
1646 * GVM requests
1647 */
1648 case VMMR0_DO_GVMM_CREATE_VM:
1649 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1650 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1651 else
1652 rc = VERR_INVALID_PARAMETER;
1653 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1654 break;
1655
1656 case VMMR0_DO_GVMM_DESTROY_VM:
1657 if (pReqHdr == NULL && u64Arg == 0)
1658 rc = GVMMR0DestroyVM(pGVM);
1659 else
1660 rc = VERR_INVALID_PARAMETER;
1661 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1662 break;
1663
1664 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1665 if (pGVM != NULL)
1666 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1667 else
1668 rc = VERR_INVALID_PARAMETER;
1669 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1670 break;
1671
1672 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1673 if (pGVM != NULL)
1674 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1675 else
1676 rc = VERR_INVALID_PARAMETER;
1677 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1678 break;
1679
1680 case VMMR0_DO_GVMM_SCHED_HALT:
1681 if (pReqHdr)
1682 return VERR_INVALID_PARAMETER;
1683 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1684 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1685 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1686 break;
1687
1688 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1689 if (pReqHdr || u64Arg)
1690 return VERR_INVALID_PARAMETER;
1691 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1692 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1693 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1694 break;
1695
1696 case VMMR0_DO_GVMM_SCHED_POKE:
1697 if (pReqHdr || u64Arg)
1698 return VERR_INVALID_PARAMETER;
1699 rc = GVMMR0SchedPoke(pGVM, idCpu);
1700 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1701 break;
1702
1703 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1704 if (u64Arg)
1705 return VERR_INVALID_PARAMETER;
1706 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1707 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1708 break;
1709
1710 case VMMR0_DO_GVMM_SCHED_POLL:
1711 if (pReqHdr || u64Arg > 1)
1712 return VERR_INVALID_PARAMETER;
1713 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1714 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1715 break;
1716
1717 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1718 if (u64Arg)
1719 return VERR_INVALID_PARAMETER;
1720 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1721 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1722 break;
1723
1724 case VMMR0_DO_GVMM_RESET_STATISTICS:
1725 if (u64Arg)
1726 return VERR_INVALID_PARAMETER;
1727 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1728 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1729 break;
1730
1731 /*
1732 * Initialize the R0 part of a VM instance.
1733 */
1734 case VMMR0_DO_VMMR0_INIT:
1735 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1736 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1737 break;
1738
1739 /*
1740      * Does EMT-specific ring-0 init.
1741 */
1742 case VMMR0_DO_VMMR0_INIT_EMT:
1743 rc = vmmR0InitVMEmt(pGVM, idCpu);
1744 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1745 break;
1746
1747 /*
1748 * Terminate the R0 part of a VM instance.
1749 */
1750 case VMMR0_DO_VMMR0_TERM:
1751 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1752 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1753 break;
1754
1755 /*
1756      * Attempt to enable HM mode and check the current setting.
1757 */
1758 case VMMR0_DO_HM_ENABLE:
1759 rc = HMR0EnableAllCpus(pGVM);
1760 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1761 break;
1762
1763 /*
1764      * Set up the hardware-accelerated session.
1765 */
1766 case VMMR0_DO_HM_SETUP_VM:
1767 rc = HMR0SetupVM(pGVM);
1768 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1769 break;
1770
1771 /*
1772 * PGM wrappers.
1773 */
1774 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1775 if (idCpu == NIL_VMCPUID)
1776 return VERR_INVALID_CPU_ID;
1777 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1778 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1779 break;
1780
1781 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1782 if (idCpu == NIL_VMCPUID)
1783 return VERR_INVALID_CPU_ID;
1784 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1785 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1786 break;
1787
1788 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1789 if (idCpu == NIL_VMCPUID)
1790 return VERR_INVALID_CPU_ID;
1791 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1792 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1793 break;
1794
1795 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1796 if (idCpu != 0)
1797 return VERR_INVALID_CPU_ID;
1798 rc = PGMR0PhysSetupIoMmu(pGVM);
1799 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1800 break;
1801
1802 case VMMR0_DO_PGM_POOL_GROW:
1803 if (idCpu == NIL_VMCPUID)
1804 return VERR_INVALID_CPU_ID;
1805 rc = PGMR0PoolGrow(pGVM);
1806 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1807 break;
1808
1809 /*
1810 * GMM wrappers.
1811 */
1812 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1813 if (u64Arg)
1814 return VERR_INVALID_PARAMETER;
1815 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1816 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1817 break;
1818
1819 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1820 if (u64Arg)
1821 return VERR_INVALID_PARAMETER;
1822 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1823 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1824 break;
1825
1826 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1827 if (u64Arg)
1828 return VERR_INVALID_PARAMETER;
1829 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1830 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1831 break;
1832
1833 case VMMR0_DO_GMM_FREE_PAGES:
1834 if (u64Arg)
1835 return VERR_INVALID_PARAMETER;
1836 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1837 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1841 if (u64Arg)
1842 return VERR_INVALID_PARAMETER;
1843 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1844 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1845 break;
1846
1847 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1848 if (u64Arg)
1849 return VERR_INVALID_PARAMETER;
1850 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1851 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1852 break;
1853
1854 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1855 if (idCpu == NIL_VMCPUID)
1856 return VERR_INVALID_CPU_ID;
1857 if (u64Arg)
1858 return VERR_INVALID_PARAMETER;
1859 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1860 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1861 break;
1862
1863 case VMMR0_DO_GMM_BALLOONED_PAGES:
1864 if (u64Arg)
1865 return VERR_INVALID_PARAMETER;
1866 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1867 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1868 break;
1869
1870 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1871 if (u64Arg)
1872 return VERR_INVALID_PARAMETER;
1873 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1874 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1875 break;
1876
1877 case VMMR0_DO_GMM_SEED_CHUNK:
1878 if (pReqHdr)
1879 return VERR_INVALID_PARAMETER;
1880 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1881 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1882 break;
1883
1884 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1885 if (idCpu == NIL_VMCPUID)
1886 return VERR_INVALID_CPU_ID;
1887 if (u64Arg)
1888 return VERR_INVALID_PARAMETER;
1889 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1890 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1894 if (idCpu == NIL_VMCPUID)
1895 return VERR_INVALID_CPU_ID;
1896 if (u64Arg)
1897 return VERR_INVALID_PARAMETER;
1898 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1899 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1900 break;
1901
1902 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1903 if (idCpu == NIL_VMCPUID)
1904 return VERR_INVALID_CPU_ID;
1905 if ( u64Arg
1906 || pReqHdr)
1907 return VERR_INVALID_PARAMETER;
1908 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1909 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1910 break;
1911
1912#ifdef VBOX_WITH_PAGE_SHARING
1913 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1914 {
1915 if (idCpu == NIL_VMCPUID)
1916 return VERR_INVALID_CPU_ID;
1917 if ( u64Arg
1918 || pReqHdr)
1919 return VERR_INVALID_PARAMETER;
1920 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1921 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1922 break;
1923 }
1924#endif
1925
1926#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1927 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1928 if (u64Arg)
1929 return VERR_INVALID_PARAMETER;
1930 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1931 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1932 break;
1933#endif
1934
1935 case VMMR0_DO_GMM_QUERY_STATISTICS:
1936 if (u64Arg)
1937 return VERR_INVALID_PARAMETER;
1938 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1939 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1940 break;
1941
1942 case VMMR0_DO_GMM_RESET_STATISTICS:
1943 if (u64Arg)
1944 return VERR_INVALID_PARAMETER;
1945 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1946 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1947 break;
1948
1949 /*
1950 * A quick GCFGM mock-up.
1951 */
1952 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1953 case VMMR0_DO_GCFGM_SET_VALUE:
1954 case VMMR0_DO_GCFGM_QUERY_VALUE:
1955 {
1956 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1957 return VERR_INVALID_PARAMETER;
1958 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1959 if (pReq->Hdr.cbReq != sizeof(*pReq))
1960 return VERR_INVALID_PARAMETER;
1961 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1962 {
1963 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1964 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1965 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1966 }
1967 else
1968 {
1969 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1970 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1971 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1972 }
1973 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1974 break;
1975 }
1976
1977 /*
1978 * PDM Wrappers.
1979 */
1980 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1981 {
1982 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1983 return VERR_INVALID_PARAMETER;
1984 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1985 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1986 break;
1987 }
1988
1989 case VMMR0_DO_PDM_DEVICE_CREATE:
1990 {
1991 if (!pReqHdr || u64Arg || idCpu != 0)
1992 return VERR_INVALID_PARAMETER;
1993 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
1994 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1995 break;
1996 }
1997
1998 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
1999 {
2000 if (!pReqHdr || u64Arg)
2001 return VERR_INVALID_PARAMETER;
2002 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2003 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2004 break;
2005 }
2006
2007         /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2008 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2009 {
2010 if (!pReqHdr || u64Arg || idCpu != 0)
2011 return VERR_INVALID_PARAMETER;
2012 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2013 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2014 break;
2015 }
2016
2017 /*
2018 * Requests to the internal networking service.
2019 */
2020 case VMMR0_DO_INTNET_OPEN:
2021 {
2022 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2023 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2024 return VERR_INVALID_PARAMETER;
2025 rc = IntNetR0OpenReq(pSession, pReq);
2026 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2027 break;
2028 }
2029
2030 case VMMR0_DO_INTNET_IF_CLOSE:
2031 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2032 return VERR_INVALID_PARAMETER;
2033 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2034 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2035 break;
2036
2037
2038 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2039 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2040 return VERR_INVALID_PARAMETER;
2041 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2042 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2043 break;
2044
2045 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2046 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2047 return VERR_INVALID_PARAMETER;
2048 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2049 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2050 break;
2051
2052 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2053 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2054 return VERR_INVALID_PARAMETER;
2055 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2056 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2057 break;
2058
2059 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2060 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2061 return VERR_INVALID_PARAMETER;
2062 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2064 break;
2065
2066 case VMMR0_DO_INTNET_IF_SEND:
2067 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2068 return VERR_INVALID_PARAMETER;
2069 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2070 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2071 break;
2072
2073 case VMMR0_DO_INTNET_IF_WAIT:
2074 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2075 return VERR_INVALID_PARAMETER;
2076 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2077 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2078 break;
2079
2080 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2081 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2082 return VERR_INVALID_PARAMETER;
2083 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2084 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2085 break;
2086
2087#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2088 /*
2089      * Requests to the host PCI driver service.
2090 */
2091 case VMMR0_DO_PCIRAW_REQ:
2092 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2093 return VERR_INVALID_PARAMETER;
2094 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2095 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2096 break;
2097#endif
2098
2099 /*
2100 * NEM requests.
2101 */
2102#ifdef VBOX_WITH_NEM_R0
2103# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2104 case VMMR0_DO_NEM_INIT_VM:
2105 if (u64Arg || pReqHdr || idCpu != 0)
2106 return VERR_INVALID_PARAMETER;
2107 rc = NEMR0InitVM(pGVM);
2108 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2109 break;
2110
2111 case VMMR0_DO_NEM_INIT_VM_PART_2:
2112 if (u64Arg || pReqHdr || idCpu != 0)
2113 return VERR_INVALID_PARAMETER;
2114 rc = NEMR0InitVMPart2(pGVM);
2115 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2116 break;
2117
2118 case VMMR0_DO_NEM_MAP_PAGES:
2119 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2120 return VERR_INVALID_PARAMETER;
2121 rc = NEMR0MapPages(pGVM, idCpu);
2122 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2123 break;
2124
2125 case VMMR0_DO_NEM_UNMAP_PAGES:
2126 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2127 return VERR_INVALID_PARAMETER;
2128 rc = NEMR0UnmapPages(pGVM, idCpu);
2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2130 break;
2131
2132 case VMMR0_DO_NEM_EXPORT_STATE:
2133 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2134 return VERR_INVALID_PARAMETER;
2135 rc = NEMR0ExportState(pGVM, idCpu);
2136 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2137 break;
2138
2139 case VMMR0_DO_NEM_IMPORT_STATE:
2140 if (pReqHdr || idCpu == NIL_VMCPUID)
2141 return VERR_INVALID_PARAMETER;
2142 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2144 break;
2145
2146 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2147 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2148 return VERR_INVALID_PARAMETER;
2149 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2150 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2151 break;
2152
2153 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2154 if (pReqHdr || idCpu == NIL_VMCPUID)
2155 return VERR_INVALID_PARAMETER;
2156 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2157 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2158 break;
2159
2160 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2161 if (u64Arg || pReqHdr)
2162 return VERR_INVALID_PARAMETER;
2163 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2164 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2165 break;
2166
2167# if 1 && defined(DEBUG_bird)
2168 case VMMR0_DO_NEM_EXPERIMENT:
2169 if (pReqHdr)
2170 return VERR_INVALID_PARAMETER;
2171 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2172 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2173 break;
2174# endif
2175# endif
2176#endif
2177
2178 /*
2179 * IOM requests.
2180 */
2181 case VMMR0_DO_IOM_GROW_IO_PORTS:
2182 {
2183 if (pReqHdr || idCpu != 0)
2184 return VERR_INVALID_PARAMETER;
2185 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2186 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2187 break;
2188 }
2189
2190 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2191 {
2192 if (pReqHdr || idCpu != 0)
2193 return VERR_INVALID_PARAMETER;
2194 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2195 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2196 break;
2197 }
2198
2199 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2200 {
2201 if (pReqHdr || idCpu != 0)
2202 return VERR_INVALID_PARAMETER;
2203 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2204 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2205 break;
2206 }
2207
2208 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2209 {
2210 if (pReqHdr || idCpu != 0)
2211 return VERR_INVALID_PARAMETER;
2212 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2213 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2214 break;
2215 }
2216
2217 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2218 {
2219 if (pReqHdr || idCpu != 0)
2220 return VERR_INVALID_PARAMETER;
2221 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2222 if (RT_SUCCESS(rc))
2223 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2224 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2225 break;
2226 }
2227
2228#ifdef VBOX_WITH_DBGF_TRACING
2229 case VMMR0_DO_DBGF_TRACER_CREATE:
2230 {
2231 if (!pReqHdr || u64Arg || idCpu != 0)
2232 return VERR_INVALID_PARAMETER;
2233 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2234 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2235 break;
2236 }
2237
2238 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2239 {
2240 if (!pReqHdr || u64Arg)
2241 return VERR_INVALID_PARAMETER;
2242#if 0 /** @todo */
2243 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2244#else
2245 rc = VERR_NOT_IMPLEMENTED;
2246#endif
2247 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2248 break;
2249 }
2250#endif
2251
2252 /*
2253 * For profiling.
2254 */
2255 case VMMR0_DO_NOP:
2256 case VMMR0_DO_SLOW_NOP:
2257 return VINF_SUCCESS;
2258
2259 /*
2260 * For testing Ring-0 APIs invoked in this environment.
2261 */
2262 case VMMR0_DO_TESTS:
2263 /** @todo make new test */
2264 return VINF_SUCCESS;
2265
2266 default:
2267 /*
2268              * We're returning VERR_NOT_SUPPORTED here so we've got something
2269              * other than -1, which the interrupt gate glue code might return.
2270 */
2271 Log(("operation %#x is not supported\n", enmOperation));
2272 return VERR_NOT_SUPPORTED;
2273 }
2274 return rc;
2275}
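
/*
 * Every case in the dispatcher above follows the same guard pattern: reject
 * whatever the operation does not consume (an unused u64Arg, a missing or
 * unexpected request packet, a CPU id for VM-global operations) before
 * calling the handler, then run the SMAP check.  A minimal sketch of that
 * pattern for a made-up operation -- VMMR0_DO_EXAMPLE_OP, EXAMPLEREQ and
 * ExampleR0ReqHandler are hypothetical names, not part of the real API:
 */
#if 0 /* illustrative sketch only */
        case VMMR0_DO_EXAMPLE_OP:
            if (u64Arg || !pReqHdr || idCpu != NIL_VMCPUID) /* consumes a packet only */
                return VERR_INVALID_PARAMETER;
            rc = ExampleR0ReqHandler(pGVM, (PEXAMPLEREQ)pReqHdr);
            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
            break;
#endif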
2276
2277
2278/**
2279 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2280 */
2281typedef struct VMMR0ENTRYEXARGS
2282{
2283 PGVM pGVM;
2284 VMCPUID idCpu;
2285 VMMR0OPERATION enmOperation;
2286 PSUPVMMR0REQHDR pReq;
2287 uint64_t u64Arg;
2288 PSUPDRVSESSION pSession;
2289} VMMR0ENTRYEXARGS;
2290/** Pointer to a vmmR0EntryExWrapper argument package. */
2291typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2292
2293/**
2294 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2295 *
2296 * @returns VBox status code.
2297 * @param   pvArgs      The argument package.
2298 */
2299static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2300{
2301 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2302 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2303 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2304 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2305 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2306 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2307}
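
/*
 * The indirection above exists because the setjmp helper passes only a
 * single void * through to its callback, so all six arguments have to be
 * marshalled via a stack-allocated VMMR0ENTRYEXARGS -- see how VMMR0EntryEx
 * below packs Args before calling vmmR0CallRing3SetJmpEx.
 */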
2308
2309
2310/**
2311 * The Ring 0 entry point, called by the support library (SUP).
2312 *
2313 * @returns VBox status code.
2314 * @param pGVM The global (ring-0) VM structure.
2315 * @param pVM The cross context VM structure.
2316 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2317 *                      is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2318 * @param enmOperation Which operation to execute.
2319 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2320 * @param u64Arg Some simple constant argument.
2321 * @param pSession The session of the caller.
2322 * @remarks Assume called with interrupts _enabled_.
2323 */
2324VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2325 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2326{
2327 /*
2328 * Requests that should only happen on the EMT thread will be
2329 * wrapped in a setjmp so we can assert without causing trouble.
2330 */
2331 if ( pVM != NULL
2332 && pGVM != NULL
2333 && pVM == pGVM /** @todo drop pGVM */
2334 && idCpu < pGVM->cCpus
2335 && pGVM->pSession == pSession
2336 && pGVM->pSelf == pVM)
2337 {
2338 switch (enmOperation)
2339 {
2340 /* These might/will be called before VMMR3Init. */
2341 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2342 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2343 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2344 case VMMR0_DO_GMM_FREE_PAGES:
2345 case VMMR0_DO_GMM_BALLOONED_PAGES:
2346 /* On the mac we might not have a valid jmp buf, so check these as well. */
2347 case VMMR0_DO_VMMR0_INIT:
2348 case VMMR0_DO_VMMR0_TERM:
2349
2350 case VMMR0_DO_PDM_DEVICE_CREATE:
2351 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2352 case VMMR0_DO_IOM_GROW_IO_PORTS:
2353 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2354 {
2355 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2356 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2357 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2358 && pGVCpu->hNativeThreadR0 == hNativeThread))
2359 {
2360 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2361 break;
2362
2363 /** @todo validate this EMT claim... GVM knows. */
2364 VMMR0ENTRYEXARGS Args;
2365 Args.pGVM = pGVM;
2366 Args.idCpu = idCpu;
2367 Args.enmOperation = enmOperation;
2368 Args.pReq = pReq;
2369 Args.u64Arg = u64Arg;
2370 Args.pSession = pSession;
2371 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2372 }
2373 return VERR_VM_THREAD_NOT_EMT;
2374 }
2375
2376 default:
2377 case VMMR0_DO_PGM_POOL_GROW:
2378 break;
2379 }
2380 }
2381 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2382}
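
/*
 * Ring-3 reaches this entry point through the support library.  A hedged
 * usage sketch for an operation that takes neither a request packet nor
 * u64Arg; see SUPR3CallVMMR0Ex in VBox/sup.h for the authoritative
 * prototype, and hVMR0 stands in for whatever ring-0 VM handle the caller
 * holds:
 */
#if 0 /* illustrative ring-3 usage sketch */
    /* Wake up the EMT of virtual CPU 0. */
    int rc = SUPR3CallVMMR0Ex(hVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_SCHED_WAKE_UP,
                              0 /*u64Arg*/, NULL /*pReqHdr*/);
#endif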
2383
2384
2385/**
2386 * Checks whether we've armed the ring-0 long jump machinery.
2387 *
2388 * @returns @c true / @c false
2389 * @param pVCpu The cross context virtual CPU structure.
2390 * @thread EMT
2391 * @sa VMMIsLongJumpArmed
2392 */
2393VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2394{
2395#ifdef RT_ARCH_X86
2396 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2397 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2398#else
2399 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2400 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2401#endif
2402}
2403
2404
2405/**
2406 * Checks whether we've done a ring-3 long jump.
2407 *
2408 * @returns @c true / @c false
2409 * @param pVCpu The cross context virtual CPU structure.
2410 * @thread EMT
2411 */
2412VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2413{
2414 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2415}
2416
2417
2418/**
2419 * Internal R0 logger worker: Flush logger.
2420 *
2421 * @param pLogger The logger instance to flush.
2422 * @remark This function must be exported!
2423 */
2424VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2425{
2426#ifdef LOG_ENABLED
2427 /*
2428 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2429      * (This code is a bit paranoid.)
2430 */
2431 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2432 if ( !VALID_PTR(pR0Logger)
2433 || !VALID_PTR(pR0Logger + 1)
2434 || pLogger->u32Magic != RTLOGGER_MAGIC)
2435 {
2436# ifdef DEBUG
2437 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2438# endif
2439 return;
2440 }
2441 if (pR0Logger->fFlushingDisabled)
2442 return; /* quietly */
2443
2444 PVMCC pVM = pR0Logger->pVM;
2445 if ( !VALID_PTR(pVM)
2446 || pVM->pSelf != pVM)
2447 {
2448# ifdef DEBUG
2449 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2450# endif
2451 return;
2452 }
2453
2454 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2455 if (pVCpu)
2456 {
2457 /*
2458 * Check that the jump buffer is armed.
2459 */
2460# ifdef RT_ARCH_X86
2461 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2462 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2463# else
2464 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2465 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2466# endif
2467 {
2468# ifdef DEBUG
2469 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2470# endif
2471 return;
2472 }
2473 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2474 }
2475# ifdef DEBUG
2476 else
2477 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2478# endif
2479#else
2480 NOREF(pLogger);
2481#endif /* LOG_ENABLED */
2482}
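
/*
 * The pLogger-to-pR0Logger conversion above is the classic "container of"
 * idiom: step back from an embedded member to its enclosing structure by
 * subtracting the member's offset.  A generic sketch -- MYWRAPPER and
 * myWrapperFromLogger are illustrative names, not part of the codebase:
 */
#if 0 /* illustrative container-of sketch */
typedef struct MYWRAPPER
{
    bool     fSomeState;
    RTLOGGER Logger;    /* the embedded member handed out to clients */
} MYWRAPPER;

static MYWRAPPER *myWrapperFromLogger(PRTLOGGER pLogger)
{
    /* Step back from the embedded member to the start of the container. */
    return (MYWRAPPER *)((uintptr_t)pLogger - RT_UOFFSETOF(MYWRAPPER, Logger));
}
#endif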
2483
2484#ifdef LOG_ENABLED
2485
2486/**
2487 * Disables flushing of the ring-0 debug log.
2488 *
2489 * @param pVCpu The cross context virtual CPU structure.
2490 */
2491VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2492{
2493 if (pVCpu->vmm.s.pR0LoggerR0)
2494 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2495 if (pVCpu->vmm.s.pR0RelLoggerR0)
2496 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2497}
2498
2499
2500/**
2501 * Enables flushing of the ring-0 debug log.
2502 *
2503 * @param pVCpu The cross context virtual CPU structure.
2504 */
2505VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2506{
2507 if (pVCpu->vmm.s.pR0LoggerR0)
2508 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2509 if (pVCpu->vmm.s.pR0RelLoggerR0)
2510 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2511}
2512
2513
2514/**
2515 * Checks whether log flushing is disabled.
2516 *
2517 * @param pVCpu The cross context virtual CPU structure.
2518 */
2519VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2520{
2521 if (pVCpu->vmm.s.pR0LoggerR0)
2522 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2523 if (pVCpu->vmm.s.pR0RelLoggerR0)
2524 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2525 return true;
2526}
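
/*
 * The enable/disable pair above is meant to bracket code that must not take
 * a ring-3 round trip, since flushing jumps back to ring-3.  Typical use,
 * sketched for illustration:
 */
#if 0 /* illustrative bracketing sketch */
    VMMR0LogFlushDisable(pVCpu);
    /* ... logging in here cannot trigger a ring-3 flush ... */
    VMMR0LogFlushEnable(pVCpu);
#endif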
2527
2528#endif /* LOG_ENABLED */
2529
2530/**
2531 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2532 */
2533DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2534{
2535 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2536 if (pGVCpu)
2537 {
2538 PVMCPUCC pVCpu = pGVCpu;
2539 if (RT_VALID_PTR(pVCpu))
2540 {
2541 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2542 if (RT_VALID_PTR(pVmmLogger))
2543 {
2544 if ( pVmmLogger->fCreated
2545 && pVmmLogger->pVM == pGVCpu->pGVM)
2546 {
2547 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2548 return NULL;
2549 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2550 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2551 if ( iGroup != UINT16_MAX
2552 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2553 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2554 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2555 return NULL;
2556 return &pVmmLogger->Logger;
2557 }
2558 }
2559 }
2560 }
2561 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2562}
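
/*
 * fFlagsAndGroup packs the required group flags into the low 16 bits and
 * the group index into the high 16 bits; the group's flags must contain all
 * requested flags plus RTLOGGRPFLAGS_ENABLED, otherwise NULL is returned.
 * A worked example with illustrative values:
 */
#if 0 /* illustrative packing example */
    uint32_t fFlagsAndGroup = RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_2 /*lo16: flags*/,
                                          LOG_GROUP_VMM         /*hi16: group*/);
    /* Only returns the logger if afGroups[LOG_GROUP_VMM] has both
       RTLOGGRPFLAGS_LEVEL_2 and RTLOGGRPFLAGS_ENABLED set. */
    PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
#endif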
2563
2564
2565/**
2566 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2567 *
2568 * @returns true if the breakpoint should be hit, false if it should be ignored.
2569 */
2570DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2571{
2572#if 0
2573 return true;
2574#else
2575 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2576 if (pVM)
2577 {
2578 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2579
2580 if (pVCpu)
2581 {
2582#ifdef RT_ARCH_X86
2583 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2584 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2585#else
2586 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2587 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2588#endif
2589 {
2590 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2591 return RT_FAILURE_NP(rc);
2592 }
2593 }
2594 }
2595#ifdef RT_OS_LINUX
2596 return true;
2597#else
2598 return false;
2599#endif
2600#endif
2601}
2602
2603
2604/**
2605 * Override this so we can push it up to ring-3.
2606 *
2607 * @param pszExpr Expression. Can be NULL.
2608 * @param uLine Location line number.
2609 * @param pszFile Location file name.
2610 * @param pszFunction Location function name.
2611 */
2612DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2613{
2614 /*
2615 * To the log.
2616 */
2617 LogAlways(("\n!!R0-Assertion Failed!!\n"
2618 "Expression: %s\n"
2619 "Location : %s(%d) %s\n",
2620 pszExpr, pszFile, uLine, pszFunction));
2621
2622 /*
2623 * To the global VMM buffer.
2624 */
2625 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2626 if (pVM)
2627 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2628 "\n!!R0-Assertion Failed!!\n"
2629 "Expression: %.*s\n"
2630 "Location : %s(%d) %s\n",
2631 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2632 pszFile, uLine, pszFunction);
2633
2634 /*
2635 * Continue the normal way.
2636 */
2637 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2638}
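
/*
 * The "%.*s" above caps the expression text at three quarters of the buffer
 * so the fixed parts of the message always fit.  Minimal illustration of
 * the same bounded-precision trick (szBuf and pszLong are hypothetical):
 */
#if 0 /* illustrative %.*s truncation sketch */
    char szBuf[256];
    const char *pszLong = "some possibly very long expression text";
    /* Print at most 3/4 of the buffer's worth of pszLong, reserving the rest. */
    RTStrPrintf(szBuf, sizeof(szBuf), "Expression: %.*s\n",
                (int)(sizeof(szBuf) / 4 * 3), pszLong);
#endif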
2639
2640
2641/**
2642 * Callback for RTLogFormatV which writes to the ring-3 log port.
2643 * See PFNLOGOUTPUT() for details.
2644 */
2645static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2646{
2647 for (size_t i = 0; i < cbChars; i++)
2648 {
2649 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2650 }
2651
2652 NOREF(pv);
2653 return cbChars;
2654}
2655
2656
2657/**
2658 * Override this so we can push it up to ring-3.
2659 *
2660 * @param pszFormat The format string.
2661 * @param va Arguments.
2662 */
2663DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2664{
2665 va_list vaCopy;
2666
2667 /*
2668 * Push the message to the loggers.
2669 */
2670 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2671 if (pLog)
2672 {
2673 va_copy(vaCopy, va);
2674 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2675 va_end(vaCopy);
2676 }
2677 pLog = RTLogRelGetDefaultInstance();
2678 if (pLog)
2679 {
2680 va_copy(vaCopy, va);
2681 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2682 va_end(vaCopy);
2683 }
2684
2685 /*
2686 * Push it to the global VMM buffer.
2687 */
2688 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2689 if (pVM)
2690 {
2691 va_copy(vaCopy, va);
2692 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2693 va_end(vaCopy);
2694 }
2695
2696 /*
2697 * Continue the normal way.
2698 */
2699 RTAssertMsg2V(pszFormat, va);
2700}
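
/*
 * Each consumer above gets its own va_copy because a va_list may only be
 * traversed once.  A self-contained sketch of the idiom in standard C,
 * independent of IPRT (formatTwice is a hypothetical helper):
 */
#if 0 /* illustrative va_copy sketch */
#include <stdarg.h>
#include <stdio.h>

static void formatTwice(char *pszBuf1, char *pszBuf2, size_t cb,
                        const char *pszFormat, va_list va)
{
    va_list vaCopy;
    va_copy(vaCopy, va);                    /* clone before the first traversal */
    vsnprintf(pszBuf1, cb, pszFormat, vaCopy);
    va_end(vaCopy);
    vsnprintf(pszBuf2, cb, pszFormat, va);  /* original list consumed exactly once */
}
#endif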
2701