VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@80540

Last change on this file since 80540 was 80531, checked in by vboxsync, 5 years ago

VMM,Devices: Some PDM device model refactoring. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 95.9 KB
1/* $Id: VMMR0.cpp 80531 2019-09-01 23:03:34Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
87 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
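/* Note: the '|| 1' above currently keeps these SMAP checks enabled in all build types, not only strict builds. */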
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else if (a_pGVM) \
113 { \
114 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 else \
120 { \
121 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
122 a_BadExpr; \
123 } \
124 } \
125 } while (0)
126#else
127# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
128# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
129# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
130#endif
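/* Typical usage of the checks above (see ModuleInit() and vmmR0InitVM() below):
       VMM_CHECK_SMAP_SETUP();
       VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
   where the a_BadExpr argument is what gets executed when EFLAGS.AC turns out to be clear. */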
131
132
133/*********************************************************************************************************************************
134* Internal Functions *
135*********************************************************************************************************************************/
136RT_C_DECLS_BEGIN
137#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
138extern uint64_t __udivdi3(uint64_t, uint64_t);
139extern uint64_t __umoddi3(uint64_t, uint64_t);
140#endif
141RT_C_DECLS_END
142
143
144/*********************************************************************************************************************************
145* Global Variables *
146*********************************************************************************************************************************/
147/** Drag in necessary library bits.
148 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
149PFNRT g_VMMR0Deps[] =
150{
151 (PFNRT)RTCrc32,
152 (PFNRT)RTOnce,
153#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
154 (PFNRT)__udivdi3,
155 (PFNRT)__umoddi3,
156#endif
157 NULL
158};
159
160#ifdef RT_OS_SOLARIS
161/* Dependency information for the native solaris loader. */
162extern "C" { char _depends_on[] = "vboxdrv"; }
163#endif
164
165/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
166int g_rcRawModeUsability = VINF_SUCCESS;
167
168
169/**
170 * Initialize the module.
171 * This is called when we're first loaded.
172 *
173 * @returns 0 on success.
174 * @returns VBox status on failure.
175 * @param hMod Image handle for use in APIs.
176 */
177DECLEXPORT(int) ModuleInit(void *hMod)
178{
179 VMM_CHECK_SMAP_SETUP();
180 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
181
182#ifdef VBOX_WITH_DTRACE_R0
183 /*
184 * The first thing to do is register the static tracepoints.
185 * (Deregistration is automatic.)
186 */
187 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
188 if (RT_FAILURE(rc2))
189 return rc2;
190#endif
191 LogFlow(("ModuleInit:\n"));
192
193#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
194 /*
195 * Display the CMOS debug code.
196 */
197 ASMOutU8(0x72, 0x03);
198 uint8_t bDebugCode = ASMInU8(0x73);
199 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
200 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
201#endif
202
203 /*
204 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
205 */
206 int rc = vmmInitFormatTypes();
207 if (RT_SUCCESS(rc))
208 {
209 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
210 rc = GVMMR0Init();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214 rc = GMMR0Init();
215 if (RT_SUCCESS(rc))
216 {
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = HMR0Init();
219 if (RT_SUCCESS(rc))
220 {
221 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
222
223 PDMR0Init(hMod);
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225
226 rc = PGMRegisterStringFormatTypes();
227 if (RT_SUCCESS(rc))
228 {
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
231 rc = PGMR0DynMapInit();
232#endif
233 if (RT_SUCCESS(rc))
234 {
235 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
236 rc = IntNetR0Init();
237 if (RT_SUCCESS(rc))
238 {
239#ifdef VBOX_WITH_PCI_PASSTHROUGH
240 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
241 rc = PciRawR0Init();
242#endif
243 if (RT_SUCCESS(rc))
244 {
245 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
246 rc = CPUMR0ModuleInit();
247 if (RT_SUCCESS(rc))
248 {
249#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
250 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
251 rc = vmmR0TripleFaultHackInit();
252 if (RT_SUCCESS(rc))
253#endif
254 {
255 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
256 if (RT_SUCCESS(rc))
257 {
258 g_rcRawModeUsability = SUPR0GetRawModeUsability();
259 if (g_rcRawModeUsability != VINF_SUCCESS)
260 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
261 g_rcRawModeUsability);
262 LogFlow(("ModuleInit: returns success\n"));
263 return VINF_SUCCESS;
264 }
265 }
266
267 /*
268 * Bail out.
269 */
270#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
271 vmmR0TripleFaultHackTerm();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
276#ifdef VBOX_WITH_PCI_PASSTHROUGH
277 PciRawR0Term();
278#endif
279 }
280 else
281 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
282 IntNetR0Term();
283 }
284 else
285 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
286#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
287 PGMR0DynMapTerm();
288#endif
289 }
290 else
291 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
292 PGMDeregisterStringFormatTypes();
293 }
294 else
295 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
296 HMR0Term();
297 }
298 else
299 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
300 GMMR0Term();
301 }
302 else
303 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
304 GVMMR0Term();
305 }
306 else
307 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
308 vmmTermFormatTypes();
309 }
310 else
311 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
312
313 LogFlow(("ModuleInit: failed %Rrc\n", rc));
314 return rc;
315}
316
317
318/**
319 * Terminate the module.
320 * This is called when we're finally unloaded.
321 *
322 * @param hMod Image handle for use in APIs.
323 */
324DECLEXPORT(void) ModuleTerm(void *hMod)
325{
326 NOREF(hMod);
327 LogFlow(("ModuleTerm:\n"));
328
329 /*
330 * Terminate the CPUM module (Local APIC cleanup).
331 */
332 CPUMR0ModuleTerm();
333
334 /*
335 * Terminate the internal network service.
336 */
337 IntNetR0Term();
338
339 /*
340 * PGM (Darwin), HM and PciRaw global cleanup.
341 */
342#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
343 PGMR0DynMapTerm();
344#endif
345#ifdef VBOX_WITH_PCI_PASSTHROUGH
346 PciRawR0Term();
347#endif
348 PGMDeregisterStringFormatTypes();
349 HMR0Term();
350#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
351 vmmR0TripleFaultHackTerm();
352#endif
353
354 /*
355 * Destroy the GMM and GVMM instances.
356 */
357 GMMR0Term();
358 GVMMR0Term();
359
360 vmmTermFormatTypes();
361
362 LogFlow(("ModuleTerm: returns\n"));
363}
364
365
366/**
367 * Initiates the R0 driver for a particular VM instance.
368 *
369 * @returns VBox status code.
370 *
371 * @param pGVM The global (ring-0) VM structure.
372 * @param uSvnRev The SVN revision of the ring-3 part.
373 * @param uBuildType Build type indicator.
374 * @thread EMT(0)
375 */
376static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
377{
378 VMM_CHECK_SMAP_SETUP();
379 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
380
381 /*
382 * Match the SVN revisions and build type.
383 */
384 if (uSvnRev != VMMGetSvnRev())
385 {
386 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
387 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390 if (uBuildType != vmmGetBuildType())
391 {
392 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
393 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
394 return VERR_VMM_R0_VERSION_MISMATCH;
395 }
396
397 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
398 if (RT_FAILURE(rc))
399 return rc;
400
401#ifdef LOG_ENABLED
402 /*
403 * Register the EMT R0 logger instance for VCPU 0.
404 */
405 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
406
407 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
408 if (pR0Logger)
409 {
410# if 0 /* testing of the logger. */
411 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
412 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
413 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
414 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
415
416 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
417 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
418 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
419 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
420
421 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
422 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
423 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
424 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
425
426 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
427 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
428 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
429 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
430 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
431 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
432
433 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
434 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
435
436 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
437 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
438 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
439# endif
440 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
441 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
442 pR0Logger->fRegistered = true;
443 }
444#endif /* LOG_ENABLED */
445
446 /*
447 * Check if the host supports high resolution timers or not.
448 */
449 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
450 && !RTTimerCanDoHighResolution())
451 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
452
453 /*
454 * Initialize the per VM data for GVMM and GMM.
455 */
456 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
457 rc = GVMMR0InitVM(pGVM);
458 if (RT_SUCCESS(rc))
459 {
460 /*
461 * Init HM, CPUM and PGM (Darwin only).
462 */
463 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
464 rc = HMR0InitVM(pGVM);
465 if (RT_SUCCESS(rc))
466 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
467 if (RT_SUCCESS(rc))
468 {
469 rc = CPUMR0InitVM(pGVM);
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
473#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
474 rc = PGMR0DynMapInitVM(pGVM);
475#endif
476 if (RT_SUCCESS(rc))
477 {
478 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
479 rc = EMR0InitVM(pGVM);
480 if (RT_SUCCESS(rc))
481 {
482 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
483#ifdef VBOX_WITH_PCI_PASSTHROUGH
484 rc = PciRawR0InitVM(pGVM);
485#endif
486 if (RT_SUCCESS(rc))
487 {
488 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
489 rc = GIMR0InitVM(pGVM);
490 if (RT_SUCCESS(rc))
491 {
492 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
493 if (RT_SUCCESS(rc))
494 {
495 GVMMR0DoneInitVM(pGVM);
496
497 /*
498 * Collect a bit of info for the VM release log.
499 */
500 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
501 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
502
503 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
504 return rc;
505 }
506
507 /* bail out */
508 GIMR0TermVM(pGVM);
509 }
510#ifdef VBOX_WITH_PCI_PASSTHROUGH
511 PciRawR0TermVM(pGVM);
512#endif
513 }
514 }
515 }
516 }
517 HMR0TermVM(pGVM);
518 }
519 }
520
521 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
522 return rc;
523}
524
525
526/**
527 * Does EMT specific VM initialization.
528 *
529 * @returns VBox status code.
530 * @param pGVM The ring-0 VM structure.
531 * @param idCpu The EMT that's calling.
532 */
533static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
534{
535 /* Paranoia (caller checked these already). */
536 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
537 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
538
539#ifdef LOG_ENABLED
540 /*
541 * Registration of ring 0 loggers.
542 */
543 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
544 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
545 if ( pR0Logger
546 && !pR0Logger->fRegistered)
547 {
548 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
549 pR0Logger->fRegistered = true;
550 }
551#endif
552
553 return VINF_SUCCESS;
554}
555
556
557
558/**
559 * Terminates the R0 bits for a particular VM instance.
560 *
561 * This is normally called by ring-3 as part of the VM termination process, but
562 * may alternatively be called during the support driver session cleanup when
563 * the VM object is destroyed (see GVMM).
564 *
565 * @returns VBox status code.
566 *
567 * @param pGVM The global (ring-0) VM structure.
568 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
569 * thread.
570 * @thread EMT(0) or session clean up thread.
571 */
572VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
573{
574 /*
575 * Check EMT(0) claim if we're called from userland.
576 */
577 if (idCpu != NIL_VMCPUID)
578 {
579 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
580 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
581 if (RT_FAILURE(rc))
582 return rc;
583 }
584
585#ifdef VBOX_WITH_PCI_PASSTHROUGH
586 PciRawR0TermVM(pGVM);
587#endif
588
589 /*
590 * Tell GVMM what we're up to and check that we only do this once.
591 */
592 if (GVMMR0DoingTermVM(pGVM))
593 {
594 GIMR0TermVM(pGVM);
595
596 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
597 * here to make sure we don't leak any shared pages if we crash... */
598#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
599 PGMR0DynMapTermVM(pGVM);
600#endif
601 HMR0TermVM(pGVM);
602 }
603
604 /*
605 * Deregister the logger.
606 */
607 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
608 return VINF_SUCCESS;
609}
610
611
612/**
613 * An interrupt or unhalt force flag is set, deal with it.
614 *
615 * @returns VINF_SUCCESS (or VINF_EM_HALT).
616 * @param pVCpu The cross context virtual CPU structure.
617 * @param uMWait Result from EMMonitorWaitIsActive().
618 * @param enmInterruptibility Guest CPU interruptibility level.
619 */
620static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
621{
622 Assert(!TRPMHasTrap(pVCpu));
623 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
624 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
625
626 /*
627 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
628 */
629 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
630 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
631 {
632 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
633 {
634 uint8_t u8Interrupt = 0;
635 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
636 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
637 if (RT_SUCCESS(rc))
638 {
639 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
640
641 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
642 AssertRCSuccess(rc);
643 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
644 return rc;
645 }
646 }
647 }
648 /*
649 * SMI is not implemented yet, at least not here.
650 */
651 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
652 {
653 return VINF_EM_HALT;
654 }
655 /*
656 * NMI.
657 */
658 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
659 {
660 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
661 {
662 /** @todo later. */
663 return VINF_EM_HALT;
664 }
665 }
666 /*
667 * Nested-guest virtual interrupt.
668 */
669 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
670 {
671 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
672 {
673 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
674 * here before injecting the virtual interrupt. See emR3ForcedActions
675 * for details. */
676 return VINF_EM_HALT;
677 }
678 }
679
680 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
681 {
682 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
683 return VINF_SUCCESS;
684 }
685 if (uMWait > 1)
686 {
687 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
688 return VINF_SUCCESS;
689 }
690
691 return VINF_EM_HALT;
692}
693
694
695/**
696 * This does one round of vmR3HaltGlobal1Halt().
697 *
698 * The rationale here is that we'll reduce latency in interrupt situations if we
699 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
700 * MWAIT), but do one round of blocking here instead and hope the interrupt is
701 * raised in the meanwhile.
702 *
703 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
704 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
705 * ring-0 call (unless we're too close to a timer event). When the interrupt
706 * wakes us up, we'll return from ring-0 and EM will by instinct do a
707 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
708 * back to VMMR0EntryFast().
709 *
710 * @returns VINF_SUCCESS or VINF_EM_HALT.
711 * @param pGVM The ring-0 VM structure.
712 * @param pGVCpu The ring-0 virtual CPU structure.
713 *
714 * @todo r=bird: All the blocking/waiting and EMT management should move out of
715 * the VM module, probably to VMM. Then this would be more weird wrt
716 * parameters and statistics.
717 */
718static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
719{
720 /*
721 * Do spin stat historization.
722 */
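 /* Note: the rebalancing below only runs once every 256 halts, i.e. when the low
    byte of cR0Halts wraps to zero; the other 255 times we fall straight through. */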
723 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
724 { /* likely */ }
725 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
726 {
727 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
728 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
729 }
730 else
731 {
732 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
733 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
734 }
735
736 /*
737 * Flags that make us go to ring-3.
738 */
739 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
740 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
741 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
742 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
743 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
744 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
745 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
746 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
747
748 /*
749 * Check preconditions.
750 */
751 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
752 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
753 if ( pGVCpu->vmm.s.fMayHaltInRing0
754 && !TRPMHasTrap(pGVCpu)
755 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
756 || uMWait > 1))
757 {
758 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
759 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
760 {
761 /*
762 * Interrupts pending already?
763 */
764 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
765 APICUpdatePendingInterrupts(pGVCpu);
766
767 /*
768 * Flags that wake us up from the halted state.
769 */
770 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
771 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
772
773 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
774 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
775 ASMNopPause();
776
777 /*
778 * Check out how long till the next timer event.
779 */
780 uint64_t u64Delta;
781 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
782
783 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
784 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
785 {
786 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
787 APICUpdatePendingInterrupts(pGVCpu);
788
789 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
790 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
791
792 /*
793 * Wait if there is enough time to the next timer event.
794 */
795 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
796 {
797 /* If there are a few other CPU cores around, we will procrastinate a
798 little before going to sleep, hoping for some device raising an
799 interrupt or similar. Though, the best thing here would be to
800 dynamically adjust the spin count according to its usefulness or
801 something... */
802 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
803 && RTMpGetOnlineCount() >= 4)
804 {
805 /** @todo Figure out how we can skip this if it hasn't helped recently...
806 * @bugref{9172#c12} */
807 uint32_t cSpinLoops = 42;
808 while (cSpinLoops-- > 0)
809 {
810 ASMNopPause();
811 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
812 APICUpdatePendingInterrupts(pGVCpu);
813 ASMNopPause();
814 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
815 {
816 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
817 return VINF_EM_HALT;
818 }
819 ASMNopPause();
820 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
821 {
822 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
823 return VINF_EM_HALT;
824 }
825 ASMNopPause();
826 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
827 {
828 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
829 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
830 }
831 ASMNopPause();
832 }
833 }
834
835 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
836 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
837 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
838 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
839 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
840 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
841 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
842 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
843 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
844 if ( rc == VINF_SUCCESS
845 || rc == VERR_INTERRUPTED)
846
847 {
848 /* Keep some stats like ring-3 does. */
849 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
850 if (cNsOverslept > 50000)
851 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
852 else if (cNsOverslept < -50000)
853 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
854 else
855 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
856
857 /*
858 * Recheck whether we can resume execution or have to go to ring-3.
859 */
860 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
861 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
862 {
863 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
864 APICUpdatePendingInterrupts(pGVCpu);
865 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
866 {
867 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
868 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
869 }
870 }
871 }
872 }
873 }
874 }
875 }
876 return VINF_EM_HALT;
877}
878
879
880/**
881 * VMM ring-0 thread-context callback.
882 *
883 * This does common HM state updating and calls the HM-specific thread-context
884 * callback.
885 *
886 * @param enmEvent The thread-context event.
887 * @param pvUser Opaque pointer to the VMCPU.
888 *
889 * @thread EMT(pvUser)
890 */
891static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
892{
893 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
894
895 switch (enmEvent)
896 {
897 case RTTHREADCTXEVENT_IN:
898 {
899 /*
900 * Linux may call us with preemption enabled (really!) but technically we
901 * cannot get preempted here, otherwise we end up in an infinite recursion
902 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
903 * ad infinitum). Let's just disable preemption for now...
904 */
905 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
906 * preemption after doing the callout (one or two functions up the
907 * call chain). */
908 /** @todo r=ramshankar: See @bugref{5313#c30}. */
909 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
910 RTThreadPreemptDisable(&ParanoidPreemptState);
911
912 /* We need to update the VCPU <-> host CPU mapping. */
913 RTCPUID idHostCpu;
914 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
915 pVCpu->iHostCpuSet = iHostCpuSet;
916 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
917
918 /* In the very unlikely event that the GIP delta for the CPU we've been
919 rescheduled to needs calculating, try to force a return to ring-3.
920 We unfortunately cannot do the measurements right here. */
921 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
922 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
923
924 /* Invoke the HM-specific thread-context callback. */
925 HMR0ThreadCtxCallback(enmEvent, pvUser);
926
927 /* Restore preemption. */
928 RTThreadPreemptRestore(&ParanoidPreemptState);
929 break;
930 }
931
932 case RTTHREADCTXEVENT_OUT:
933 {
934 /* Invoke the HM-specific thread-context callback. */
935 HMR0ThreadCtxCallback(enmEvent, pvUser);
936
937 /*
938 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
939 * have the same host CPU associated with them.
940 */
941 pVCpu->iHostCpuSet = UINT32_MAX;
942 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
943 break;
944 }
945
946 default:
947 /* Invoke the HM-specific thread-context callback. */
948 HMR0ThreadCtxCallback(enmEvent, pvUser);
949 break;
950 }
951}
952
953
954/**
955 * Creates thread switching hook for the current EMT thread.
956 *
957 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
958 * platform does not implement switcher hooks, no hooks will be created and the
959 * member is set to NIL_RTTHREADCTXHOOK.
960 *
961 * @returns VBox status code.
962 * @param pVCpu The cross context virtual CPU structure.
963 * @thread EMT(pVCpu)
964 */
965VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
966{
967 VMCPU_ASSERT_EMT(pVCpu);
968 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
969
970#if 1 /* To disable this stuff change to zero. */
971 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
972 if (RT_SUCCESS(rc))
973 return rc;
974#else
975 RT_NOREF(vmmR0ThreadCtxCallback);
976 int rc = VERR_NOT_SUPPORTED;
977#endif
978
979 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
980 if (rc == VERR_NOT_SUPPORTED)
981 return VINF_SUCCESS;
982
983 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
984 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
985}
986
987
988/**
989 * Destroys the thread switching hook for the specified VCPU.
990 *
991 * @param pVCpu The cross context virtual CPU structure.
992 * @remarks Can be called from any thread.
993 */
994VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
995{
996 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
997 AssertRC(rc);
998 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
999}
1000
1001
1002/**
1003 * Disables the thread switching hook for this VCPU (if we got one).
1004 *
1005 * @param pVCpu The cross context virtual CPU structure.
1006 * @thread EMT(pVCpu)
1007 *
1008 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1009 * this call. This means you have to be careful with what you do!
1010 */
1011VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1012{
1013 /*
1014 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1015 * @bugref{7726#c19} explains the need for this trick:
1016 *
1017 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1018 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1019 * longjmp & normal return to ring-3, which opens a window where we may be
1020 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1021 * the CPU starts executing a different EMT. Both functions first disable
1022 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1023 * an opening for getting preempted.
1024 */
1025 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1026 * all the time. */
1027 /** @todo move this into the context hook disabling if(). */
1028 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1029
1030 /*
1031 * Disable the context hook, if we got one.
1032 */
1033 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1034 {
1035 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1036 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1037 AssertRC(rc);
1038 }
1039}
1040
1041
1042/**
1043 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1044 *
1045 * @returns true if registered, false otherwise.
1046 * @param pVCpu The cross context virtual CPU structure.
1047 */
1048DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1049{
1050 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1051}
1052
1053
1054/**
1055 * Whether thread-context hooks are registered for this VCPU.
1056 *
1057 * @returns true if registered, false otherwise.
1058 * @param pVCpu The cross context virtual CPU structure.
1059 */
1060VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1061{
1062 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1063}
1064
1065
1066#ifdef VBOX_WITH_STATISTICS
1067/**
1068 * Record return code statistics
1069 * @param pVM The cross context VM structure.
1070 * @param pVCpu The cross context virtual CPU structure.
1071 * @param rc The status code.
1072 */
1073static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1074{
1075 /*
1076 * Collect statistics.
1077 */
1078 switch (rc)
1079 {
1080 case VINF_SUCCESS:
1081 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1082 break;
1083 case VINF_EM_RAW_INTERRUPT:
1084 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1085 break;
1086 case VINF_EM_RAW_INTERRUPT_HYPER:
1087 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1088 break;
1089 case VINF_EM_RAW_GUEST_TRAP:
1090 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1091 break;
1092 case VINF_EM_RAW_RING_SWITCH:
1093 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1094 break;
1095 case VINF_EM_RAW_RING_SWITCH_INT:
1096 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1097 break;
1098 case VINF_EM_RAW_STALE_SELECTOR:
1099 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1100 break;
1101 case VINF_EM_RAW_IRET_TRAP:
1102 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1103 break;
1104 case VINF_IOM_R3_IOPORT_READ:
1105 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1106 break;
1107 case VINF_IOM_R3_IOPORT_WRITE:
1108 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1109 break;
1110 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1111 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1112 break;
1113 case VINF_IOM_R3_MMIO_READ:
1114 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1115 break;
1116 case VINF_IOM_R3_MMIO_WRITE:
1117 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1118 break;
1119 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1120 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1121 break;
1122 case VINF_IOM_R3_MMIO_READ_WRITE:
1123 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1124 break;
1125 case VINF_PATM_HC_MMIO_PATCH_READ:
1126 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1127 break;
1128 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1129 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1130 break;
1131 case VINF_CPUM_R3_MSR_READ:
1132 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1133 break;
1134 case VINF_CPUM_R3_MSR_WRITE:
1135 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1136 break;
1137 case VINF_EM_RAW_EMULATE_INSTR:
1138 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1139 break;
1140 case VINF_PATCH_EMULATE_INSTR:
1141 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1142 break;
1143 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1144 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1145 break;
1146 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1147 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1148 break;
1149 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1150 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1151 break;
1152 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1153 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1154 break;
1155 case VINF_CSAM_PENDING_ACTION:
1156 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1157 break;
1158 case VINF_PGM_SYNC_CR3:
1159 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1160 break;
1161 case VINF_PATM_PATCH_INT3:
1162 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1163 break;
1164 case VINF_PATM_PATCH_TRAP_PF:
1165 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1166 break;
1167 case VINF_PATM_PATCH_TRAP_GP:
1168 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1169 break;
1170 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1171 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1172 break;
1173 case VINF_EM_RESCHEDULE_REM:
1174 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1175 break;
1176 case VINF_EM_RAW_TO_R3:
1177 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1178 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1180 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1182 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1183 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1184 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1186 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1188 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1190 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1192 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1194 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1196 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1198 else
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1200 break;
1201
1202 case VINF_EM_RAW_TIMER_PENDING:
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1204 break;
1205 case VINF_EM_RAW_INTERRUPT_PENDING:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1207 break;
1208 case VINF_VMM_CALL_HOST:
1209 switch (pVCpu->vmm.s.enmCallRing3Operation)
1210 {
1211 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1213 break;
1214 case VMMCALLRING3_PDM_LOCK:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1216 break;
1217 case VMMCALLRING3_PGM_POOL_GROW:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1219 break;
1220 case VMMCALLRING3_PGM_LOCK:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1222 break;
1223 case VMMCALLRING3_PGM_MAP_CHUNK:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1225 break;
1226 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1228 break;
1229 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1231 break;
1232 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1233 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1234 break;
1235 case VMMCALLRING3_VM_SET_ERROR:
1236 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1237 break;
1238 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1240 break;
1241 case VMMCALLRING3_VM_R0_ASSERTION:
1242 default:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1244 break;
1245 }
1246 break;
1247 case VINF_PATM_DUPLICATE_FUNCTION:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1249 break;
1250 case VINF_PGM_CHANGE_MODE:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1252 break;
1253 case VINF_PGM_POOL_FLUSH_PENDING:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1255 break;
1256 case VINF_EM_PENDING_REQUEST:
1257 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1258 break;
1259 case VINF_EM_HM_PATCH_TPR_INSTR:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1261 break;
1262 default:
1263 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1264 break;
1265 }
1266}
1267#endif /* VBOX_WITH_STATISTICS */
1268
1269
1270/**
1271 * The Ring 0 entry point, called by the fast-ioctl path.
1272 *
1273 * @param pGVM The global (ring-0) VM structure.
1274 * @param pVMIgnored The cross context VM structure. The return code is
1275 * stored in pVM->vmm.s.iLastGZRc.
1276 * @param idCpu The Virtual CPU ID of the calling EMT.
1277 * @param enmOperation Which operation to execute.
1278 * @remarks Assume called with interrupts _enabled_.
1279 */
1280VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1281{
1282 RT_NOREF(pVMIgnored);
1283
1284 /*
1285 * Validation.
1286 */
1287 if ( idCpu < pGVM->cCpus
1288 && pGVM->cCpus == pGVM->cCpusUnsafe)
1289 { /*likely*/ }
1290 else
1291 {
1292 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1293 return;
1294 }
1295
1296 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1297 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1298 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1299 && pGVCpu->hNativeThreadR0 == hNativeThread))
1300 { /* likely */ }
1301 else
1302 {
1303 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1304 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1305 return;
1306 }
1307
1308 /*
1309 * SMAP fun.
1310 */
1311 VMM_CHECK_SMAP_SETUP();
1312 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1313
1314 /*
1315 * Perform requested operation.
1316 */
1317 switch (enmOperation)
1318 {
1319 /*
1320 * Run guest code using the available hardware acceleration technology.
1321 */
1322 case VMMR0_DO_HM_RUN:
1323 {
1324 for (;;) /* hlt loop */
1325 {
1326 /*
1327 * Disable preemption.
1328 */
1329 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1330 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1331 RTThreadPreemptDisable(&PreemptState);
1332
1333 /*
1334 * Get the host CPU identifiers, make sure they are valid and that
1335 * we've got a TSC delta for the CPU.
1336 */
1337 RTCPUID idHostCpu;
1338 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1339 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1340 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1341 {
1342 pGVCpu->iHostCpuSet = iHostCpuSet;
1343 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1344
1345 /*
1346 * Update the periodic preemption timer if it's active.
1347 */
1348 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1349 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1350 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1351
1352#ifdef VMM_R0_TOUCH_FPU
1353 /*
1354 * Make sure we've got the FPU state loaded so we don't need to clear
1355 * CR0.TS and get out of sync with the host kernel when loading the guest
1356 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1357 */
1358 CPUMR0TouchHostFpu();
1359#endif
1360 int rc;
1361 bool fPreemptRestored = false;
1362 if (!HMR0SuspendPending())
1363 {
1364 /*
1365 * Enable the context switching hook.
1366 */
1367 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1368 {
1369 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1370 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1371 }
1372
1373 /*
1374 * Enter HM context.
1375 */
1376 rc = HMR0Enter(pGVCpu);
1377 if (RT_SUCCESS(rc))
1378 {
1379 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1380
1381 /*
1382 * When preemption hooks are in place, enable preemption now that
1383 * we're in HM context.
1384 */
1385 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1386 {
1387 fPreemptRestored = true;
1388 RTThreadPreemptRestore(&PreemptState);
1389 }
1390
1391 /*
1392 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1393 */
1394 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1395 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1396 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1397
1398 /*
1399 * Assert sanity on the way out. Using manual assertion code here as normal
1400 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1401 */
1402 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1403 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1404 {
1405 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1406 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1407 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1408 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1409 }
1410 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1411 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1412 {
1413 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1414 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1415 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1416 rc = VERR_INVALID_STATE;
1417 }
1418
1419 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1420 }
1421 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1422
1423 /*
1424 * Invalidate the host CPU identifiers before we disable the context
1425 * hook / restore preemption.
1426 */
1427 pGVCpu->iHostCpuSet = UINT32_MAX;
1428 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1429
1430 /*
1431 * Disable context hooks. Due to unresolved cleanup issues, we
1432 * cannot leave the hooks enabled when we return to ring-3.
1433 *
1434 * Note! At the moment HM may also have disabled the hook
1435 * when we get here, but the IPRT API handles that.
1436 */
1437 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1438 {
1439 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1440 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1441 }
1442 }
1443 /*
1444 * The system is about to go into suspend mode; go back to ring 3.
1445 */
1446 else
1447 {
1448 rc = VINF_EM_RAW_INTERRUPT;
1449 pGVCpu->iHostCpuSet = UINT32_MAX;
1450 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1451 }
1452
1453 /** @todo When HM stops messing with the context hook state, we'll disable
1454 * preemption again before the RTThreadCtxHookDisable call. */
1455 if (!fPreemptRestored)
1456 RTThreadPreemptRestore(&PreemptState);
1457
1458 pGVCpu->vmm.s.iLastGZRc = rc;
1459
1460 /* Fire dtrace probe and collect statistics. */
1461 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1462#ifdef VBOX_WITH_STATISTICS
1463 vmmR0RecordRC(pGVM, pGVCpu, rc);
1464#endif
1465#if 1
1466 /*
1467 * If this is a halt, try to handle it here in ring-0 rather than returning to ring-3.
1468 */
1469 if (rc != VINF_EM_HALT)
1470 { /* we're not in a hurry for a HLT, so prefer this path */ }
1471 else
1472 {
1473 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1474 if (rc == VINF_SUCCESS)
1475 {
1476 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1477 continue;
1478 }
1479 pGVCpu->vmm.s.cR0HaltsToRing3++;
1480 }
1481#endif
1482 }
1483 /*
1484 * Invalid CPU set index or TSC delta in need of measuring.
1485 */
1486 else
1487 {
1488 pGVCpu->iHostCpuSet = UINT32_MAX;
1489 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1490 RTThreadPreemptRestore(&PreemptState);
1491 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1492 {
1493 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1494 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1495 0 /*default cTries*/);
1496 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1497 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1498 else
1499 pGVCpu->vmm.s.iLastGZRc = rc;
1500 }
1501 else
1502 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1503 }
1504 break;
1505
1506 } /* halt loop. */
1507 break;
1508 }
1509
1510#ifdef VBOX_WITH_NEM_R0
1511# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1512 case VMMR0_DO_NEM_RUN:
1513 {
1514 /*
1515 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1516 */
1517 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1518# ifdef VBOXSTRICTRC_STRICT_ENABLED
1519 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1520# else
1521 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1522# endif
1523 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1524 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1525
1526 pGVCpu->vmm.s.iLastGZRc = rc;
1527
1528 /*
1529 * Fire dtrace probe and collect statistics.
1530 */
1531 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1532# ifdef VBOX_WITH_STATISTICS
1533 vmmR0RecordRC(pGVM, pGVCpu, rc);
1534# endif
1535 break;
1536 }
1537# endif
1538#endif
1539
1540 /*
1541 * For profiling.
1542 */
1543 case VMMR0_DO_NOP:
1544 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1545 break;
1546
1547 /*
1548 * Shouldn't happen.
1549 */
1550 default:
1551 AssertMsgFailed(("%#x\n", enmOperation));
1552 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1553 break;
1554 }
1555 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1556}
1557
1558
1559/**
1560 * Validates a session or VM session argument.
1561 *
1562 * @returns true / false accordingly.
1563 * @param pGVM The global (ring-0) VM structure.
1564 * @param pClaimedSession The session claim to validate.
1565 * @param pSession The session argument.
1566 */
1567DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1568{
1569 /* This must be set! */
1570 if (!pSession)
1571 return false;
1572
1573 /* Only one out of the two. */
1574 if (pGVM && pClaimedSession)
1575 return false;
1576 if (pGVM)
1577 pClaimedSession = pGVM->pSession;
1578 return pClaimedSession == pSession;
1579}
1580
1581
1582/**
1583 * VMMR0EntryEx worker function, either called directly or whenever possible
1584 * called thru a longjmp so we can exit safely on failure.
1585 *
1586 * @returns VBox status code.
1587 * @param pGVM The global (ring-0) VM structure.
1588 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1589 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1590 * @param enmOperation Which operation to execute.
1591 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1592 * The support driver validates this if it's present.
1593 * @param u64Arg Some simple constant argument.
1594 * @param pSession The session of the caller.
1595 *
1596 * @remarks Assume called with interrupts _enabled_.
1597 */
1598static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1599 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1600{
1601 /*
1602 * Validate pGVM and idCpu for consistency and validity.
1603 */
1604 if (pGVM != NULL)
1605 {
1606 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1607 { /* likely */ }
1608 else
1609 {
1610 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1611 return VERR_INVALID_POINTER;
1612 }
1613
1614 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1615 { /* likely */ }
1616 else
1617 {
1618 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1619 return VERR_INVALID_PARAMETER;
1620 }
1621
1622 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1623 && pGVM->enmVMState <= VMSTATE_TERMINATED
1624 && pGVM->pSession == pSession
1625 && pGVM->pSelf == pGVM))
1626 { /* likely */ }
1627 else
1628 {
1629 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1630 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1631 return VERR_INVALID_POINTER;
1632 }
1633 }
1634 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1635 { /* likely */ }
1636 else
1637 {
1638 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1639 return VERR_INVALID_PARAMETER;
1640 }
1641
1642 /*
1643 * SMAP fun.
1644 */
1645 VMM_CHECK_SMAP_SETUP();
1646 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1647
1648 /*
1649 * Process the request.
1650 */
1651 int rc;
1652 switch (enmOperation)
1653 {
1654 /*
1655 * GVM requests
1656 */
1657 case VMMR0_DO_GVMM_CREATE_VM:
1658 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1659 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1660 else
1661 rc = VERR_INVALID_PARAMETER;
1662 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1663 break;
1664
1665 case VMMR0_DO_GVMM_DESTROY_VM:
1666 if (pReqHdr == NULL && u64Arg == 0)
1667 rc = GVMMR0DestroyVM(pGVM);
1668 else
1669 rc = VERR_INVALID_PARAMETER;
1670 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1671 break;
1672
1673 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1674 if (pGVM != NULL)
1675 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1676 else
1677 rc = VERR_INVALID_PARAMETER;
1678 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1679 break;
1680
1681 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1682 if (pGVM != NULL)
1683 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1684 else
1685 rc = VERR_INVALID_PARAMETER;
1686 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1687 break;
1688
1689 case VMMR0_DO_GVMM_SCHED_HALT:
1690 if (pReqHdr)
1691 return VERR_INVALID_PARAMETER;
1692 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1693 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1694 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1695 break;
1696
1697 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1698 if (pReqHdr || u64Arg)
1699 return VERR_INVALID_PARAMETER;
1700 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1701 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1702 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1703 break;
1704
1705 case VMMR0_DO_GVMM_SCHED_POKE:
1706 if (pReqHdr || u64Arg)
1707 return VERR_INVALID_PARAMETER;
1708 rc = GVMMR0SchedPoke(pGVM, idCpu);
1709 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1710 break;
1711
1712 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1713 if (u64Arg)
1714 return VERR_INVALID_PARAMETER;
1715 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1716 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1717 break;
1718
1719 case VMMR0_DO_GVMM_SCHED_POLL:
1720 if (pReqHdr || u64Arg > 1)
1721 return VERR_INVALID_PARAMETER;
1722 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1723 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1724 break;
1725
1726 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1727 if (u64Arg)
1728 return VERR_INVALID_PARAMETER;
1729 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1730 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1731 break;
1732
1733 case VMMR0_DO_GVMM_RESET_STATISTICS:
1734 if (u64Arg)
1735 return VERR_INVALID_PARAMETER;
1736 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1737 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1738 break;
1739
1740 /*
1741 * Initialize the R0 part of a VM instance.
1742 */
1743 case VMMR0_DO_VMMR0_INIT:
1744 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1745 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1746 break;
1747
1748 /*
1749 * Does EMT specific ring-0 init.
1750 */
1751 case VMMR0_DO_VMMR0_INIT_EMT:
1752 rc = vmmR0InitVMEmt(pGVM, idCpu);
1753 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1754 break;
1755
1756 /*
1757 * Terminate the R0 part of a VM instance.
1758 */
1759 case VMMR0_DO_VMMR0_TERM:
1760 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1761 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1762 break;
1763
1764 /*
1765 * Attempt to enable hm mode and check the current setting.
1766 */
1767 case VMMR0_DO_HM_ENABLE:
1768 rc = HMR0EnableAllCpus(pGVM);
1769 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1770 break;
1771
1772 /*
1773         * Set up the hardware-accelerated session.
1774 */
1775 case VMMR0_DO_HM_SETUP_VM:
1776 rc = HMR0SetupVM(pGVM);
1777 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1778 break;
1779
1780 /*
1781 * PGM wrappers.
1782 */
1783 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1784 if (idCpu == NIL_VMCPUID)
1785 return VERR_INVALID_CPU_ID;
1786 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1787 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1788 break;
1789
1790 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1791 if (idCpu == NIL_VMCPUID)
1792 return VERR_INVALID_CPU_ID;
1793 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1794 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1795 break;
1796
1797 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1798 if (idCpu == NIL_VMCPUID)
1799 return VERR_INVALID_CPU_ID;
1800 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1801 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1802 break;
1803
1804 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1805 if (idCpu != 0)
1806 return VERR_INVALID_CPU_ID;
1807 rc = PGMR0PhysSetupIoMmu(pGVM);
1808 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1809 break;
1810
1811 /*
1812 * GMM wrappers.
1813 */
1814 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1815 if (u64Arg)
1816 return VERR_INVALID_PARAMETER;
1817 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1818 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1819 break;
1820
1821 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1822 if (u64Arg)
1823 return VERR_INVALID_PARAMETER;
1824 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1825 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1826 break;
1827
1828 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1829 if (u64Arg)
1830 return VERR_INVALID_PARAMETER;
1831 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1832 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1833 break;
1834
1835 case VMMR0_DO_GMM_FREE_PAGES:
1836 if (u64Arg)
1837 return VERR_INVALID_PARAMETER;
1838 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1839 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1840 break;
1841
1842 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1843 if (u64Arg)
1844 return VERR_INVALID_PARAMETER;
1845 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1846 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1847 break;
1848
1849 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1850 if (u64Arg)
1851 return VERR_INVALID_PARAMETER;
1852 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1853 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1854 break;
1855
1856 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1857 if (idCpu == NIL_VMCPUID)
1858 return VERR_INVALID_CPU_ID;
1859 if (u64Arg)
1860 return VERR_INVALID_PARAMETER;
1861 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1862 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1863 break;
1864
1865 case VMMR0_DO_GMM_BALLOONED_PAGES:
1866 if (u64Arg)
1867 return VERR_INVALID_PARAMETER;
1868 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1869 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1870 break;
1871
1872 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1873 if (u64Arg)
1874 return VERR_INVALID_PARAMETER;
1875 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1876 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1877 break;
1878
1879 case VMMR0_DO_GMM_SEED_CHUNK:
1880 if (pReqHdr)
1881 return VERR_INVALID_PARAMETER;
1882 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1883 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1884 break;
1885
1886 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1887 if (idCpu == NIL_VMCPUID)
1888 return VERR_INVALID_CPU_ID;
1889 if (u64Arg)
1890 return VERR_INVALID_PARAMETER;
1891 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1892 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1893 break;
1894
1895 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1896 if (idCpu == NIL_VMCPUID)
1897 return VERR_INVALID_CPU_ID;
1898 if (u64Arg)
1899 return VERR_INVALID_PARAMETER;
1900 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1901 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1902 break;
1903
1904 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1905 if (idCpu == NIL_VMCPUID)
1906 return VERR_INVALID_CPU_ID;
1907 if ( u64Arg
1908 || pReqHdr)
1909 return VERR_INVALID_PARAMETER;
1910 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1911 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1912 break;
1913
1914#ifdef VBOX_WITH_PAGE_SHARING
1915 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1916 {
1917 if (idCpu == NIL_VMCPUID)
1918 return VERR_INVALID_CPU_ID;
1919 if ( u64Arg
1920 || pReqHdr)
1921 return VERR_INVALID_PARAMETER;
1922 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1923 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1924 break;
1925 }
1926#endif
1927
1928#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1929 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1930 if (u64Arg)
1931 return VERR_INVALID_PARAMETER;
1932 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1933 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1934 break;
1935#endif
1936
1937 case VMMR0_DO_GMM_QUERY_STATISTICS:
1938 if (u64Arg)
1939 return VERR_INVALID_PARAMETER;
1940 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1941 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1942 break;
1943
1944 case VMMR0_DO_GMM_RESET_STATISTICS:
1945 if (u64Arg)
1946 return VERR_INVALID_PARAMETER;
1947 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1948 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1949 break;
1950
1951 /*
1952 * A quick GCFGM mock-up.
1953 */
1954 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1955 case VMMR0_DO_GCFGM_SET_VALUE:
1956 case VMMR0_DO_GCFGM_QUERY_VALUE:
1957 {
1958 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1959 return VERR_INVALID_PARAMETER;
1960 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1961 if (pReq->Hdr.cbReq != sizeof(*pReq))
1962 return VERR_INVALID_PARAMETER;
1963 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1964 {
1965 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1966 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1967 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1968 }
1969 else
1970 {
1971 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1972 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1973 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1974 }
1975 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1976 break;
1977 }
1978
1979 /*
1980 * PDM Wrappers.
1981 */
1982 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1983 {
1984 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1985 return VERR_INVALID_PARAMETER;
1986 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1987 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1988 break;
1989 }
1990
1991 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1992 {
1993 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1994 return VERR_INVALID_PARAMETER;
1995 rc = PDMR0DeviceCallReqHandler(pGVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1996 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1997 break;
1998 }
1999
2000 case VMMR0_DO_PDM_DEVICE_CREATE:
2001 {
2002 if (!pReqHdr || u64Arg || idCpu != 0)
2003 return VERR_INVALID_PARAMETER;
2004 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2005 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2006 break;
2007 }
2008
2009 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2010 {
2011 if (!pReqHdr || u64Arg || idCpu != 0)
2012 return VERR_INVALID_PARAMETER;
2013 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr);
2014 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2015 break;
2016 }
2017
2018        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2019 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2020 {
2021 if (!pReqHdr || u64Arg || idCpu != 0)
2022 return VERR_INVALID_PARAMETER;
2023 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2024 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2025 break;
2026 }
2027
2028        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2029 case VMMR0_DO_PDM_DEVICE_COMPAT_REG_PCIDEV:
2030 {
2031 if (!pReqHdr || u64Arg || idCpu != 0)
2032 return VERR_INVALID_PARAMETER;
2033 rc = PDMR0DeviceCompatRegPciDevReqHandler(pGVM, (PPDMDEVICECOMPATREGPCIDEVREQ)pReqHdr);
2034 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2035 break;
2036 }
2037
2038 /*
2039 * Requests to the internal networking service.
2040 */
2041 case VMMR0_DO_INTNET_OPEN:
2042 {
2043 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2044 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2045 return VERR_INVALID_PARAMETER;
2046 rc = IntNetR0OpenReq(pSession, pReq);
2047 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2048 break;
2049 }
2050
2051 case VMMR0_DO_INTNET_IF_CLOSE:
2052 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2053 return VERR_INVALID_PARAMETER;
2054 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2055 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2056 break;
2057
2058
2059 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2060 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2061 return VERR_INVALID_PARAMETER;
2062 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2064 break;
2065
2066 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2067 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2068 return VERR_INVALID_PARAMETER;
2069 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2070 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2071 break;
2072
2073 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2074 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2075 return VERR_INVALID_PARAMETER;
2076 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2077 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2078 break;
2079
2080 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2081 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2082 return VERR_INVALID_PARAMETER;
2083 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2084 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2085 break;
2086
2087 case VMMR0_DO_INTNET_IF_SEND:
2088 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2089 return VERR_INVALID_PARAMETER;
2090 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2091 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2092 break;
2093
2094 case VMMR0_DO_INTNET_IF_WAIT:
2095 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2096 return VERR_INVALID_PARAMETER;
2097 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2098 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2099 break;
2100
2101 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2102 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2103 return VERR_INVALID_PARAMETER;
2104 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2105 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2106 break;
2107
2108#ifdef VBOX_WITH_PCI_PASSTHROUGH
2109 /*
2110     * Requests to the host PCI driver service.
2111 */
2112 case VMMR0_DO_PCIRAW_REQ:
2113 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2114 return VERR_INVALID_PARAMETER;
2115 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2116 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2117 break;
2118#endif
2119
2120 /*
2121 * NEM requests.
2122 */
2123#ifdef VBOX_WITH_NEM_R0
2124# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2125 case VMMR0_DO_NEM_INIT_VM:
2126 if (u64Arg || pReqHdr || idCpu != 0)
2127 return VERR_INVALID_PARAMETER;
2128 rc = NEMR0InitVM(pGVM);
2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2130 break;
2131
2132 case VMMR0_DO_NEM_INIT_VM_PART_2:
2133 if (u64Arg || pReqHdr || idCpu != 0)
2134 return VERR_INVALID_PARAMETER;
2135 rc = NEMR0InitVMPart2(pGVM);
2136 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2137 break;
2138
2139 case VMMR0_DO_NEM_MAP_PAGES:
2140 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2141 return VERR_INVALID_PARAMETER;
2142 rc = NEMR0MapPages(pGVM, idCpu);
2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2144 break;
2145
2146 case VMMR0_DO_NEM_UNMAP_PAGES:
2147 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2148 return VERR_INVALID_PARAMETER;
2149 rc = NEMR0UnmapPages(pGVM, idCpu);
2150 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2151 break;
2152
2153 case VMMR0_DO_NEM_EXPORT_STATE:
2154 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2155 return VERR_INVALID_PARAMETER;
2156 rc = NEMR0ExportState(pGVM, idCpu);
2157 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2158 break;
2159
2160 case VMMR0_DO_NEM_IMPORT_STATE:
2161 if (pReqHdr || idCpu == NIL_VMCPUID)
2162 return VERR_INVALID_PARAMETER;
2163 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2164 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2165 break;
2166
2167 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2168 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2169 return VERR_INVALID_PARAMETER;
2170 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2171 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2172 break;
2173
2174 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2175 if (pReqHdr || idCpu == NIL_VMCPUID)
2176 return VERR_INVALID_PARAMETER;
2177 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2178 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2179 break;
2180
2181 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2182 if (u64Arg || pReqHdr)
2183 return VERR_INVALID_PARAMETER;
2184 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2185 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2186 break;
2187
2188# if 1 && defined(DEBUG_bird)
2189 case VMMR0_DO_NEM_EXPERIMENT:
2190 if (pReqHdr)
2191 return VERR_INVALID_PARAMETER;
2192 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2193 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2194 break;
2195# endif
2196# endif
2197#endif
2198
2199 /*
2200 * For profiling.
2201 */
2202 case VMMR0_DO_NOP:
2203 case VMMR0_DO_SLOW_NOP:
2204 return VINF_SUCCESS;
2205
2206 /*
2207 * For testing Ring-0 APIs invoked in this environment.
2208 */
2209 case VMMR0_DO_TESTS:
2210 /** @todo make new test */
2211 return VINF_SUCCESS;
2212
2213 default:
2214 /*
2215                 * We're returning VERR_NOT_SUPPORTED here so that we've got something
2216                 * other than -1, which the interrupt gate glue code might return.
2217 */
2218 Log(("operation %#x is not supported\n", enmOperation));
2219 return VERR_NOT_SUPPORTED;
2220 }
2221 return rc;
2222}
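
/*
 * A minimal sketch of the shape a new request case inside the switch above
 * typically takes: reject argument combinations the handler does not accept,
 * dispatch, then re-check SMAP.  VMMR0_DO_EXAMPLE_REQ and ExampleR0ReqHandler
 * are hypothetical names used only for illustration.
 */
#if 0 /* illustrative sketch, not built */
        case VMMR0_DO_EXAMPLE_REQ:
            if (u64Arg || !pReqHdr || idCpu != NIL_VMCPUID)     /* refuse arguments this operation does not take */
                return VERR_INVALID_PARAMETER;
            rc = ExampleR0ReqHandler(pGVM, (PSUPVMMR0REQHDR)pReqHdr);
            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);            /* verify AC is still set when SMAP is enabled */
            break;
#endif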
2223
2224
2225/**
2226 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2227 */
2228typedef struct VMMR0ENTRYEXARGS
2229{
2230 PGVM pGVM;
2231 VMCPUID idCpu;
2232 VMMR0OPERATION enmOperation;
2233 PSUPVMMR0REQHDR pReq;
2234 uint64_t u64Arg;
2235 PSUPDRVSESSION pSession;
2236} VMMR0ENTRYEXARGS;
2237/** Pointer to a vmmR0EntryExWrapper argument package. */
2238typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2239
2240/**
2241 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2242 *
2243 * @returns VBox status code.
2244 * @param pvArgs The argument package
2245 */
2246static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2247{
2248 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2249 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2250 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2251 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2252 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2253 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2254}
2255
2256
2257/**
2258 * The Ring 0 entry point, called by the support library (SUP).
2259 *
2260 * @returns VBox status code.
2261 * @param pGVM The global (ring-0) VM structure.
2262 * @param pVM The cross context VM structure.
2263 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2264 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
2265 *                    is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2266 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2267 * @param u64Arg Some simple constant argument.
2268 * @param pSession The session of the caller.
2269 * @remarks Assume called with interrupts _enabled_.
2270 */
2271VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2272 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2273{
2274 /*
2275 * Requests that should only happen on the EMT thread will be
2276 * wrapped in a setjmp so we can assert without causing trouble.
2277 */
2278 if ( pVM != NULL
2279 && pGVM != NULL
2280 && pVM == pGVM /** @todo drop pGVM */
2281 && idCpu < pGVM->cCpus
2282 && pGVM->pSession == pSession
2283 && pGVM->pSelf == pVM)
2284 {
2285 switch (enmOperation)
2286 {
2287 /* These might/will be called before VMMR3Init. */
2288 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2289 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2290 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2291 case VMMR0_DO_GMM_FREE_PAGES:
2292 case VMMR0_DO_GMM_BALLOONED_PAGES:
2293 /* On the mac we might not have a valid jmp buf, so check these as well. */
2294 case VMMR0_DO_VMMR0_INIT:
2295 case VMMR0_DO_VMMR0_TERM:
2296
2297 case VMMR0_DO_PDM_DEVICE_CREATE:
2298 {
2299 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2300 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2301 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2302 && pGVCpu->hNativeThreadR0 == hNativeThread))
2303 {
2304 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2305 break;
2306
2307 /** @todo validate this EMT claim... GVM knows. */
2308 VMMR0ENTRYEXARGS Args;
2309 Args.pGVM = pGVM;
2310 Args.idCpu = idCpu;
2311 Args.enmOperation = enmOperation;
2312 Args.pReq = pReq;
2313 Args.u64Arg = u64Arg;
2314 Args.pSession = pSession;
2315 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2316 }
2317 return VERR_VM_THREAD_NOT_EMT;
2318 }
2319
2320 default:
2321 break;
2322 }
2323 }
2324 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2325}
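
/*
 * Ring-3 does not call VMMR0EntryEx directly; requests travel through the
 * support driver.  The sketch below shows roughly how a ring-3 caller packages
 * a SUPVMMR0REQHDR-based request, assuming the SUPR3CallVMMR0Ex wrapper with
 * the (pVMR0, idCpu, uOperation, u64Arg, pReqHdr) ordering and a pVMR0 member
 * on the ring-3 VM structure; both may differ between VirtualBox versions.
 */
#if 0 /* illustrative ring-3 sketch, not part of this ring-0 module */
    GMMMEMSTATSREQ Req;
    RT_ZERO(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    /* every request header carries the magic ...      */
    Req.Hdr.cbReq    = sizeof(Req);             /* ... and its full size for validation in ring-0. */
    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
                              0 /*u64Arg*/, &Req.Hdr);
#endif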
2326
2327
2328/**
2329 * Checks whether we've armed the ring-0 long jump machinery.
2330 *
2331 * @returns @c true / @c false
2332 * @param pVCpu The cross context virtual CPU structure.
2333 * @thread EMT
2334 * @sa VMMIsLongJumpArmed
2335 */
2336VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2337{
2338#ifdef RT_ARCH_X86
2339 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2340 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2341#else
2342 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2343 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2344#endif
2345}
2346
2347
2348/**
2349 * Checks whether we've done a ring-3 long jump.
2350 *
2351 * @returns @c true / @c false
2352 * @param pVCpu The cross context virtual CPU structure.
2353 * @thread EMT
2354 */
2355VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2356{
2357 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2358}
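
/*
 * A minimal sketch of the typical use of VMMR0IsLongJumpArmed: only take the
 * ring-3 detour when the jump buffer is armed, otherwise fall back to plain
 * ring-0 logging.  vmmR0ExampleComplain is a hypothetical helper, not part of
 * this file.
 */
#if 0 /* illustrative sketch, not built */
static void vmmR0ExampleComplain(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (VMMR0IsLongJumpArmed(pVCpu))
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);    /* safe to longjmp back to ring-3 */
    else
        SUPR0Printf("vmmR0ExampleComplain: no jump buffer armed, staying in ring-0\n");
}
#endif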
2359
2360
2361/**
2362 * Internal R0 logger worker: Flush logger.
2363 *
2364 * @param pLogger The logger instance to flush.
2365 * @remark This function must be exported!
2366 */
2367VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2368{
2369#ifdef LOG_ENABLED
2370 /*
2371 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2372  * (This code is deliberately a bit paranoid.)
2373 */
2374 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2375 if ( !VALID_PTR(pR0Logger)
2376 || !VALID_PTR(pR0Logger + 1)
2377 || pLogger->u32Magic != RTLOGGER_MAGIC)
2378 {
2379# ifdef DEBUG
2380 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2381# endif
2382 return;
2383 }
2384 if (pR0Logger->fFlushingDisabled)
2385 return; /* quietly */
2386
2387 PVMCC pVM = pR0Logger->pVM;
2388 if ( !VALID_PTR(pVM)
2389 || pVM->pSelf != pVM)
2390 {
2391# ifdef DEBUG
2392 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2393# endif
2394 return;
2395 }
2396
2397 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2398 if (pVCpu)
2399 {
2400 /*
2401 * Check that the jump buffer is armed.
2402 */
2403# ifdef RT_ARCH_X86
2404 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2405 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2406# else
2407 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2408 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2409# endif
2410 {
2411# ifdef DEBUG
2412 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2413# endif
2414 return;
2415 }
2416 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2417 }
2418# ifdef DEBUG
2419 else
2420 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2421# endif
2422#else
2423 NOREF(pLogger);
2424#endif /* LOG_ENABLED */
2425}
2426
2427#ifdef LOG_ENABLED
2428
2429/**
2430 * Disables flushing of the ring-0 debug log.
2431 *
2432 * @param pVCpu The cross context virtual CPU structure.
2433 */
2434VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2435{
2436 if (pVCpu->vmm.s.pR0LoggerR0)
2437 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2438 if (pVCpu->vmm.s.pR0RelLoggerR0)
2439 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2440}
2441
2442
2443/**
2444 * Enables flushing of the ring-0 debug log.
2445 *
2446 * @param pVCpu The cross context virtual CPU structure.
2447 */
2448VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2449{
2450 if (pVCpu->vmm.s.pR0LoggerR0)
2451 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2452 if (pVCpu->vmm.s.pR0RelLoggerR0)
2453 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2454}
2455
2456
2457/**
2458 * Checks if log flushing is disabled or not.
2459 *
2460 * @param pVCpu The cross context virtual CPU structure.
2461 */
2462VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2463{
2464 if (pVCpu->vmm.s.pR0LoggerR0)
2465 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2466 if (pVCpu->vmm.s.pR0RelLoggerR0)
2467 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2468 return true;
2469}
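
/*
 * VMMR0LogFlushDisable and VMMR0LogFlushEnable are intended to bracket code
 * where the ring-3 flush detour (see vmmR0LoggerFlush above) must not be
 * taken.  A minimal sketch; vmmR0ExampleNoFlushSection is a hypothetical
 * caller used only for illustration.
 */
# if 0 /* illustrative sketch, not built */
static void vmmR0ExampleNoFlushSection(PVMCPUCC pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);                /* flush requests now return quietly */
    Log(("work that must not longjmp to ring-3\n"));
    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    VMMR0LogFlushEnable(pVCpu);                 /* restore normal flushing behaviour */
}
# endif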
2470
2471#endif /* LOG_ENABLED */
2472
2473/**
2474 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2475 */
2476DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2477{
2478 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2479 if (pGVCpu)
2480 {
2481 PVMCPUCC pVCpu = pGVCpu;
2482 if (RT_VALID_PTR(pVCpu))
2483 {
2484 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2485 if (RT_VALID_PTR(pVmmLogger))
2486 {
2487 if ( pVmmLogger->fCreated
2488 && pVmmLogger->pVM == pGVCpu->pGVM)
2489 {
2490 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2491 return NULL;
2492 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2493 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2494 if ( iGroup != UINT16_MAX
2495 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2496 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2497 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2498 return NULL;
2499 return &pVmmLogger->Logger;
2500 }
2501 }
2502 }
2503 }
2504 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2505}
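
/*
 * The fFlagsAndGroup parameter packs the group flags into the low 16 bits and
 * the group index into the high 16 bits, which is what the RT_LO_U16/RT_HI_U16
 * decoding above relies on.  A minimal sketch of building such a value by hand
 * (the LogRel* macros normally construct it for the caller); it assumes the
 * standard IPRT RT_MAKE_U32 and RTLogLoggerEx helpers.
 */
#if 0 /* illustrative sketch, not built */
    uint32_t const fFlagsAndGroup = RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1 /*low: flags*/, LOG_GROUP /*high: group*/);
    PRTLOGGER     pLogger         = RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
    if (pLogger)
        RTLogLoggerEx(pLogger, RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP, "example release log line\n");
#endif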
2506
2507
2508/**
2509 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2510 *
2511 * @returns true if the breakpoint should be hit, false if it should be ignored.
2512 */
2513DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2514{
2515#if 0
2516 return true;
2517#else
2518 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2519 if (pVM)
2520 {
2521 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2522
2523 if (pVCpu)
2524 {
2525#ifdef RT_ARCH_X86
2526 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2527 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2528#else
2529 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2530 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2531#endif
2532 {
2533 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2534 return RT_FAILURE_NP(rc);
2535 }
2536 }
2537 }
2538#ifdef RT_OS_LINUX
2539 return true;
2540#else
2541 return false;
2542#endif
2543#endif
2544}
2545
2546
2547/**
2548 * Override this so we can push it up to ring-3.
2549 *
2550 * @param pszExpr Expression. Can be NULL.
2551 * @param uLine Location line number.
2552 * @param pszFile Location file name.
2553 * @param pszFunction Location function name.
2554 */
2555DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2556{
2557 /*
2558 * To the log.
2559 */
2560 LogAlways(("\n!!R0-Assertion Failed!!\n"
2561 "Expression: %s\n"
2562 "Location : %s(%d) %s\n",
2563 pszExpr, pszFile, uLine, pszFunction));
2564
2565 /*
2566 * To the global VMM buffer.
2567 */
2568 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2569 if (pVM)
2570 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2571 "\n!!R0-Assertion Failed!!\n"
2572 "Expression: %.*s\n"
2573 "Location : %s(%d) %s\n",
2574 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2575 pszFile, uLine, pszFunction);
2576
2577 /*
2578 * Continue the normal way.
2579 */
2580 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2581}
2582
2583
2584/**
2585 * Callback for RTLogFormatV which writes to the ring-3 log port.
2586 * See PFNLOGOUTPUT() for details.
2587 */
2588static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2589{
2590 for (size_t i = 0; i < cbChars; i++)
2591 {
2592 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2593 }
2594
2595 NOREF(pv);
2596 return cbChars;
2597}
2598
2599
2600/**
2601 * Override this so we can push it up to ring-3.
2602 *
2603 * @param pszFormat The format string.
2604 * @param va Arguments.
2605 */
2606DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2607{
2608 va_list vaCopy;
2609
2610 /*
2611 * Push the message to the loggers.
2612 */
2613 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2614 if (pLog)
2615 {
2616 va_copy(vaCopy, va);
2617 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2618 va_end(vaCopy);
2619 }
2620 pLog = RTLogRelGetDefaultInstance();
2621 if (pLog)
2622 {
2623 va_copy(vaCopy, va);
2624 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2625 va_end(vaCopy);
2626 }
2627
2628 /*
2629 * Push it to the global VMM buffer.
2630 */
2631 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2632 if (pVM)
2633 {
2634 va_copy(vaCopy, va);
2635 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2636 va_end(vaCopy);
2637 }
2638
2639 /*
2640 * Continue the normal way.
2641 */
2642 RTAssertMsg2V(pszFormat, va);
2643}
2644